// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

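/*
 * Usage sketch (illustrative values only, not taken from the original
 * source): each module_param_named() above exposes a load-time option
 * and a node under /sys/module/ipr/parameters/.  For example:
 *
 *	modprobe ipr max_speed=2 number_of_msix=8
 *
 * Parameters declared with S_IRUGO | S_IWUSR (fastfail, debug,
 * fast_reboot) may also be toggled at runtime:
 *
 *	echo 1 > /sys/module/ipr/parameters/debug
 */
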
/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"No ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

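/*
 * Note on the trace buffer above (reading of the code, not original
 * documentation): atomic_add_return() masked with IPR_TRACE_INDEX_MASK
 * hands each caller its own slot in what is effectively a lock-free
 * ring buffer - the index wraps once the mask is exceeded, so the
 * newest entries overwrite the oldest.  The wmb() orders the stores to
 * the trace entry ahead of whatever the caller writes next.
 */
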
/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	readl(ioa_cfg->regs.sense_interrupt_reg);
}

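/*
 * The trailing readl() of sense_interrupt_reg in
 * ipr_mask_and_clear_interrupts() is the usual MMIO posting flush:
 * reading back from the adapter forces the preceding mask/clear writes
 * to actually reach the hardware before the function returns.
 */
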
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

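/*
 * Worked example for the sis64 path in ipr_send_command() (assuming
 * struct ipr_ioadl64_desc is 16 bytes - flags, data length, and a
 * 64-bit address): a command with 8 or fewer ioadl descriptors fits
 * the default 256 byte IOARCB, so only bit 0x1 is set in the doorbell
 * address; with 9 descriptors, 9 * 16 = 144 > 128, so bit 0x4 is also
 * set and the adapter fetches a 512 byte IOARCB instead.
 */
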
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct timer_list *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 *	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct timer_list *),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}

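/*
 * ipr_get_hrrq_index() spreads work across the host response queues:
 * with a single HRRQ everything uses queue 0, otherwise the atomic
 * counter round-robins commands over queues 1..hrrq_num-1, leaving
 * queue 0 (presumably IPR_INIT_HRRQ, the queue ipr_get_free_ipr_cmnd()
 * draws from for internally generated commands) out of the rotation.
 */
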
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:	HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 *	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
					sizeof(cfgtew->u.cfgte64->dev_id)) &&
			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
					sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer
 * @len:	length of buffer provided
 *
 * Return value:
 *	pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && i < IPR_RES_PATH_BYTES; i++)
		p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

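/*
 * __ipr_format_res_path() renders the raw resource path bytes as
 * dash-separated hex, stopping at the first 0xff terminator.  A
 * hypothetical path of {0x00, 0x0A, 0x01, 0xff, ...} comes out as
 * "00-0A-01"; ipr_format_res_path() below prefixes the SCSI host
 * number, giving something like "2/00-0A-01".
 */
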
b3b3b407
BK
1309/**
1310 * ipr_format_res_path - Format the resource path for printing.
1311 * @ioa_cfg: ioa config struct
1312 * @res_path: resource path
a96099e2 1313 * @buffer: buffer
b3b3b407
BK
1314 * @len: length of buffer provided
1315 *
1316 * Return value:
1317 * pointer to buffer
1318 **/
1319static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1320 u8 *res_path, char *buffer, int len)
1321{
1322 char *p = buffer;
1323
1324 *p = '\0';
6f0cf424 1325 p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
7df47cdf 1326 __ipr_format_res_path(res_path, p, len - (p - buffer));
b3b3b407
BK
1327 return buffer;
1328}
1329
3e7ebdfa
WB
1330/**
1331 * ipr_update_res_entry - Update the resource entry.
1332 * @res: resource entry struct
1333 * @cfgtew: config table entry wrapper struct
1334 *
1335 * Return value:
1336 * none
1337 **/
1338static void ipr_update_res_entry(struct ipr_resource_entry *res,
1339 struct ipr_config_table_entry_wrapper *cfgtew)
1340{
1341 char buffer[IPR_MAX_RES_PATH_LENGTH];
1342 unsigned int proto;
1343 int new_path = 0;
1344
1345 if (res->ioa_cfg->sis64) {
359d96e7
BK
1346 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1347 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
75576bb9 1348 res->type = cfgtew->u.cfgte64->res_type;
3e7ebdfa
WB
1349
1350 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1351 sizeof(struct ipr_std_inq_data));
1352
1353 res->qmodel = IPR_QUEUEING_MODEL64(res);
1354 proto = cfgtew->u.cfgte64->proto;
1355 res->res_handle = cfgtew->u.cfgte64->res_handle;
1356 res->dev_id = cfgtew->u.cfgte64->dev_id;
1357
1358 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1359 sizeof(res->dev_lun.scsi_lun));
1360
1361 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1362 sizeof(res->res_path))) {
1363 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1364 sizeof(res->res_path));
1365 new_path = 1;
1366 }
1367
1368 if (res->sdev && new_path)
1369 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
b3b3b407
BK
1370 ipr_format_res_path(res->ioa_cfg,
1371 res->res_path, buffer, sizeof(buffer)));
3e7ebdfa
WB
1372 } else {
1373 res->flags = cfgtew->u.cfgte->flags;
1374 if (res->flags & IPR_IS_IOA_RESOURCE)
1375 res->type = IPR_RES_TYPE_IOAFP;
1376 else
1377 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1378
1379 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1380 sizeof(struct ipr_std_inq_data));
1381
1382 res->qmodel = IPR_QUEUEING_MODEL(res);
1383 proto = cfgtew->u.cfgte->proto;
1384 res->res_handle = cfgtew->u.cfgte->res_handle;
1385 }
1386
1387 ipr_update_ata_class(res, proto);
1388}
1389
1390/**
1391 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1392 * for the resource.
1393 * @res: resource entry struct
3e7ebdfa
WB
1394 *
1395 * Return value:
1396 * none
1397 **/
1398static void ipr_clear_res_target(struct ipr_resource_entry *res)
1399{
1400 struct ipr_resource_entry *gscsi_res = NULL;
1401 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1402
1403 if (!ioa_cfg->sis64)
1404 return;
1405
1406 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1407 clear_bit(res->target, ioa_cfg->array_ids);
1408 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1409 clear_bit(res->target, ioa_cfg->vset_ids);
1410 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1411 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1412 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1413 return;
1414 clear_bit(res->target, ioa_cfg->target_ids);
1415
1416 } else if (res->bus == 0)
1417 clear_bit(res->target, ioa_cfg->target_ids);
1da177e4
LT
1418}
1419
1420/**
1421 * ipr_handle_config_change - Handle a config change from the adapter
1422 * @ioa_cfg: ioa config struct
1423 * @hostrcb: hostrcb
1424 *
1425 * Return value:
1426 * none
1427 **/
1428static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
3e7ebdfa 1429 struct ipr_hostrcb *hostrcb)
1da177e4
LT
1430{
1431 struct ipr_resource_entry *res = NULL;
3e7ebdfa
WB
1432 struct ipr_config_table_entry_wrapper cfgtew;
1433 __be32 cc_res_handle;
1434
1da177e4
LT
1435 u32 is_ndn = 1;
1436
3e7ebdfa
WB
1437 if (ioa_cfg->sis64) {
1438 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1439 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1440 } else {
1441 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1442 cc_res_handle = cfgtew.u.cfgte->res_handle;
1443 }
1da177e4
LT
1444
1445 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 1446 if (res->res_handle == cc_res_handle) {
1da177e4
LT
1447 is_ndn = 0;
1448 break;
1449 }
1450 }
1451
1452 if (is_ndn) {
1453 if (list_empty(&ioa_cfg->free_res_q)) {
1454 ipr_send_hcam(ioa_cfg,
1455 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1456 hostrcb);
1457 return;
1458 }
1459
1460 res = list_entry(ioa_cfg->free_res_q.next,
1461 struct ipr_resource_entry, queue);
1462
1463 list_del(&res->queue);
3e7ebdfa 1464 ipr_init_res_entry(res, &cfgtew);
1da177e4
LT
1465 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1466 }
1467
3e7ebdfa 1468 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
1469
1470 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1471 if (res->sdev) {
1da177e4 1472 res->del_from_ml = 1;
3e7ebdfa 1473 res->res_handle = IPR_INVALID_RES_HANDLE;
f688f96d 1474 schedule_work(&ioa_cfg->work_q);
3e7ebdfa
WB
1475 } else {
1476 ipr_clear_res_target(res);
1da177e4 1477 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3e7ebdfa 1478 }
5767a1c4 1479 } else if (!res->sdev || res->del_from_ml) {
1da177e4 1480 res->add_to_ml = 1;
f688f96d 1481 schedule_work(&ioa_cfg->work_q);
1da177e4
LT
1482 }
1483
1484 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1485}
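/*
 * Flow of ipr_handle_config_change() above: the resource is looked up
 * by the handle reported in the CCN; if no match exists, an entry is
 * taken from free_res_q and initialized (or the HCAM is simply sent
 * back when no free entries remain).  After the entry is refreshed
 * from the config table data, a remove notification either flags an
 * attached sdev for removal by the worker thread or returns the entry
 * to free_res_q, while an entry not yet exposed to the midlayer is
 * flagged for add.  The HCAM buffer is always returned to the adapter.
 */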
1486
1487/**
1488 * ipr_process_ccn - Op done function for a CCN.
1489 * @ipr_cmd: ipr command struct
1490 *
1491 * This function is the op done function for a configuration
1493 * change notification host controlled async message (HCAM) from the adapter.
1493 *
1494 * Return value:
1495 * none
1496 **/
1497static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1498{
1499 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1500 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 1501 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4 1502
afc3f83c 1503 list_del_init(&hostrcb->queue);
05a6538a 1504 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
1505
1506 if (ioasc) {
4fdd7c7a
BK
1507 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1508 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1da177e4
LT
1509 dev_err(&ioa_cfg->pdev->dev,
1510 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1511
1512 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1513 } else {
1514 ipr_handle_config_change(ioa_cfg, hostrcb);
1515 }
1516}
1517
8cf093e2
BK
1518/**
1519 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1520 * @i: index into buffer
1521 * @buf: string to modify
1522 *
1523 * This function will strip all trailing whitespace, pad the end
1524 * of the string with a single space, and NULL terminate the string.
1525 *
1526 * Return value:
1527 * new length of string
1528 **/
1529static int strip_and_pad_whitespace(int i, char *buf)
1530{
1531 while (i && buf[i] == ' ')
1532 i--;
1533 buf[i+1] = ' ';
1534 buf[i+2] = '\0';
1535 return i + 2;
1536}
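/*
 * For example, with an 8-byte vendor field containing "IBM     " and
 * i = 7, the loop backs up to the 'M', a single pad space is written
 * at index 3, the string is terminated at index 4, and 4 is returned
 * as the offset at which the caller copies in the next field.
 */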
1537
1538/**
1539 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1540 * @prefix: string to print at start of printk
1541 * @hostrcb: hostrcb pointer
1542 * @vpd: vendor/product id/sn struct
1543 *
1544 * Return value:
1545 * none
1546 **/
1547static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1548 struct ipr_vpd *vpd)
1549{
1550 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1551 int i = 0;
1552
1553 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1554 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1555
1556 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1557 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1558
1559 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1560 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1561
1562 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1563}
1564
1da177e4
LT
1565/**
1566 * ipr_log_vpd - Log the passed VPD to the error log.
cfc32139 1567 * @vpd: vendor/product id/sn struct
1da177e4
LT
1568 *
1569 * Return value:
1570 * none
1571 **/
cfc32139 1572static void ipr_log_vpd(struct ipr_vpd *vpd)
1da177e4
LT
1573{
1574 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1575 + IPR_SERIAL_NUM_LEN];
1576
cfc32139 1577 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1578 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1da177e4
LT
1579 IPR_PROD_ID_LEN);
1580 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1581 ipr_err("Vendor/Product ID: %s\n", buffer);
1582
cfc32139 1583 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1da177e4
LT
1584 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1585 ipr_err(" Serial Number: %s\n", buffer);
1586}
1587
8cf093e2
BK
1588/**
1589 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1590 * @prefix: string to print at start of printk
1591 * @hostrcb: hostrcb pointer
1592 * @vpd: vendor/product id/sn/wwn struct
1593 *
1594 * Return value:
1595 * none
1596 **/
1597static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1598 struct ipr_ext_vpd *vpd)
1599{
1600 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1601 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1602 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1603}
1604
ee0f05b8 1605/**
1606 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1607 * @vpd: vendor/product id/sn/wwn struct
1608 *
1609 * Return value:
1610 * none
1611 **/
1612static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1613{
1614 ipr_log_vpd(&vpd->vpd);
1615 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1616 be32_to_cpu(vpd->wwid[1]));
1617}
1618
1619/**
1620 * ipr_log_enhanced_cache_error - Log a cache error.
1621 * @ioa_cfg: ioa config struct
1622 * @hostrcb: hostrcb struct
1623 *
1624 * Return value:
1625 * none
1626 **/
1627static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1628 struct ipr_hostrcb *hostrcb)
1629{
4565e370
WB
1630 struct ipr_hostrcb_type_12_error *error;
1631
1632 if (ioa_cfg->sis64)
1633 error = &hostrcb->hcam.u.error64.u.type_12_error;
1634 else
1635 error = &hostrcb->hcam.u.error.u.type_12_error;
ee0f05b8 1636
1637 ipr_err("-----Current Configuration-----\n");
1638 ipr_err("Cache Directory Card Information:\n");
1639 ipr_log_ext_vpd(&error->ioa_vpd);
1640 ipr_err("Adapter Card Information:\n");
1641 ipr_log_ext_vpd(&error->cfc_vpd);
1642
1643 ipr_err("-----Expected Configuration-----\n");
1644 ipr_err("Cache Directory Card Information:\n");
1645 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1646 ipr_err("Adapter Card Information:\n");
1647 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1648
1649 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1650 be32_to_cpu(error->ioa_data[0]),
1651 be32_to_cpu(error->ioa_data[1]),
1652 be32_to_cpu(error->ioa_data[2]));
1653}
1654
1da177e4
LT
1655/**
1656 * ipr_log_cache_error - Log a cache error.
1657 * @ioa_cfg: ioa config struct
1658 * @hostrcb: hostrcb struct
1659 *
1660 * Return value:
1661 * none
1662 **/
1663static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1664 struct ipr_hostrcb *hostrcb)
1665{
1666 struct ipr_hostrcb_type_02_error *error =
1667 &hostrcb->hcam.u.error.u.type_02_error;
1668
1669 ipr_err("-----Current Configuration-----\n");
1670 ipr_err("Cache Directory Card Information:\n");
cfc32139 1671 ipr_log_vpd(&error->ioa_vpd);
1da177e4 1672 ipr_err("Adapter Card Information:\n");
cfc32139 1673 ipr_log_vpd(&error->cfc_vpd);
1da177e4
LT
1674
1675 ipr_err("-----Expected Configuration-----\n");
1676 ipr_err("Cache Directory Card Information:\n");
cfc32139 1677 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1da177e4 1678 ipr_err("Adapter Card Information:\n");
cfc32139 1679 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1da177e4
LT
1680
1681 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1682 be32_to_cpu(error->ioa_data[0]),
1683 be32_to_cpu(error->ioa_data[1]),
1684 be32_to_cpu(error->ioa_data[2]));
1685}
1686
ee0f05b8 1687/**
1688 * ipr_log_enhanced_config_error - Log a configuration error.
1689 * @ioa_cfg: ioa config struct
1690 * @hostrcb: hostrcb struct
1691 *
1692 * Return value:
1693 * none
1694 **/
1695static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1696 struct ipr_hostrcb *hostrcb)
1697{
1698 int errors_logged, i;
1699 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1700 struct ipr_hostrcb_type_13_error *error;
1701
1702 error = &hostrcb->hcam.u.error.u.type_13_error;
1703 errors_logged = be32_to_cpu(error->errors_logged);
1704
1705 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1706 be32_to_cpu(error->errors_detected), errors_logged);
1707
1708 dev_entry = error->dev;
1709
1710 for (i = 0; i < errors_logged; i++, dev_entry++) {
1711 ipr_err_separator;
1712
1713 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1714 ipr_log_ext_vpd(&dev_entry->vpd);
1715
1716 ipr_err("-----New Device Information-----\n");
1717 ipr_log_ext_vpd(&dev_entry->new_vpd);
1718
1719 ipr_err("Cache Directory Card Information:\n");
1720 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1721
1722 ipr_err("Adapter Card Information:\n");
1723 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1724 }
1725}
1726
4565e370
WB
1727/**
1728 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1729 * @ioa_cfg: ioa config struct
1730 * @hostrcb: hostrcb struct
1731 *
1732 * Return value:
1733 * none
1734 **/
1735static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1736 struct ipr_hostrcb *hostrcb)
1737{
1738 int errors_logged, i;
1739 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1740 struct ipr_hostrcb_type_23_error *error;
1741 char buffer[IPR_MAX_RES_PATH_LENGTH];
1742
1743 error = &hostrcb->hcam.u.error64.u.type_23_error;
1744 errors_logged = be32_to_cpu(error->errors_logged);
1745
1746 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1747 be32_to_cpu(error->errors_detected), errors_logged);
1748
1749 dev_entry = error->dev;
1750
1751 for (i = 0; i < errors_logged; i++, dev_entry++) {
1752 ipr_err_separator;
1753
1754 ipr_err("Device %d : %s", i + 1,
b3b3b407
BK
1755 __ipr_format_res_path(dev_entry->res_path,
1756 buffer, sizeof(buffer)));
4565e370
WB
1757 ipr_log_ext_vpd(&dev_entry->vpd);
1758
1759 ipr_err("-----New Device Information-----\n");
1760 ipr_log_ext_vpd(&dev_entry->new_vpd);
1761
1762 ipr_err("Cache Directory Card Information:\n");
1763 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1764
1765 ipr_err("Adapter Card Information:\n");
1766 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1767 }
1768}
1769
1da177e4
LT
1770/**
1771 * ipr_log_config_error - Log a configuration error.
1772 * @ioa_cfg: ioa config struct
1773 * @hostrcb: hostrcb struct
1774 *
1775 * Return value:
1776 * none
1777 **/
1778static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1779 struct ipr_hostrcb *hostrcb)
1780{
1781 int errors_logged, i;
1782 struct ipr_hostrcb_device_data_entry *dev_entry;
1783 struct ipr_hostrcb_type_03_error *error;
1784
1785 error = &hostrcb->hcam.u.error.u.type_03_error;
1786 errors_logged = be32_to_cpu(error->errors_logged);
1787
1788 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1789 be32_to_cpu(error->errors_detected), errors_logged);
1790
cfc32139 1791 dev_entry = error->dev;
1da177e4
LT
1792
1793 for (i = 0; i < errors_logged; i++, dev_entry++) {
1794 ipr_err_separator;
1795
fa15b1f6 1796 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
cfc32139 1797 ipr_log_vpd(&dev_entry->vpd);
1da177e4
LT
1798
1799 ipr_err("-----New Device Information-----\n");
cfc32139 1800 ipr_log_vpd(&dev_entry->new_vpd);
1da177e4
LT
1801
1802 ipr_err("Cache Directory Card Information:\n");
cfc32139 1803 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1da177e4
LT
1804
1805 ipr_err("Adapter Card Information:\n");
cfc32139 1806 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1da177e4
LT
1807
1808 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1809 be32_to_cpu(dev_entry->ioa_data[0]),
1810 be32_to_cpu(dev_entry->ioa_data[1]),
1811 be32_to_cpu(dev_entry->ioa_data[2]),
1812 be32_to_cpu(dev_entry->ioa_data[3]),
1813 be32_to_cpu(dev_entry->ioa_data[4]));
1814 }
1815}
1816
ee0f05b8 1817/**
1818 * ipr_log_enhanced_array_error - Log an array configuration error.
1819 * @ioa_cfg: ioa config struct
1820 * @hostrcb: hostrcb struct
1821 *
1822 * Return value:
1823 * none
1824 **/
1825static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1826 struct ipr_hostrcb *hostrcb)
1827{
1828 int i, num_entries;
1829 struct ipr_hostrcb_type_14_error *error;
1830 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1831 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1832
1833 error = &hostrcb->hcam.u.error.u.type_14_error;
1834
1835 ipr_err_separator;
1836
1837 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1838 error->protection_level,
1839 ioa_cfg->host->host_no,
1840 error->last_func_vset_res_addr.bus,
1841 error->last_func_vset_res_addr.target,
1842 error->last_func_vset_res_addr.lun);
1843
1844 ipr_err_separator;
1845
1846 array_entry = error->array_member;
1847 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
7262026f 1848 ARRAY_SIZE(error->array_member));
ee0f05b8 1849
1850 for (i = 0; i < num_entries; i++, array_entry++) {
1851 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1852 continue;
1853
1854 if (be32_to_cpu(error->exposed_mode_adn) == i)
1855 ipr_err("Exposed Array Member %d:\n", i);
1856 else
1857 ipr_err("Array Member %d:\n", i);
1858
1859 ipr_log_ext_vpd(&array_entry->vpd);
1860 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1861 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1862 "Expected Location");
1863
1864 ipr_err_separator;
1865 }
1866}
1867
1da177e4
LT
1868/**
1869 * ipr_log_array_error - Log an array configuration error.
1870 * @ioa_cfg: ioa config struct
1871 * @hostrcb: hostrcb struct
1872 *
1873 * Return value:
1874 * none
1875 **/
1876static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1877 struct ipr_hostrcb *hostrcb)
1878{
1879 int i;
1880 struct ipr_hostrcb_type_04_error *error;
1881 struct ipr_hostrcb_array_data_entry *array_entry;
1882 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1883
1884 error = &hostrcb->hcam.u.error.u.type_04_error;
1885
1886 ipr_err_separator;
1887
1888 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1889 error->protection_level,
1890 ioa_cfg->host->host_no,
1891 error->last_func_vset_res_addr.bus,
1892 error->last_func_vset_res_addr.target,
1893 error->last_func_vset_res_addr.lun);
1894
1895 ipr_err_separator;
1896
1897 array_entry = error->array_member;
1898
1899 for (i = 0; i < 18; i++) {
cfc32139 1900 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1da177e4
LT
1901 continue;
1902
fa15b1f6 1903 if (be32_to_cpu(error->exposed_mode_adn) == i)
1da177e4 1904 ipr_err("Exposed Array Member %d:\n", i);
fa15b1f6 1905 else
1da177e4 1906 ipr_err("Array Member %d:\n", i);
1da177e4 1907
cfc32139 1908 ipr_log_vpd(&array_entry->vpd);
1da177e4 1909
fa15b1f6 1910 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1911 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1912 "Expected Location");
1da177e4
LT
1913
1914 ipr_err_separator;
1915
1916 if (i == 9)
1917 array_entry = error->array_member2;
1918 else
1919 array_entry++;
1920 }
1921}
1922
1923/**
b0df54bb 1924 * ipr_log_hex_data - Log additional hex IOA error data.
ac719aba 1925 * @ioa_cfg: ioa config struct
b0df54bb 1926 * @data: IOA error data
1927 * @len: data length
1da177e4
LT
1928 *
1929 * Return value:
1930 * none
1931 **/
359d96e7 1932static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1da177e4
LT
1933{
1934 int i;
1da177e4 1935
b0df54bb 1936 if (len == 0)
1da177e4
LT
1937 return;
1938
ac719aba
BK
1939 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1940 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1941
b0df54bb 1942 for (i = 0; i < len / 4; i += 4) {
1da177e4 1943 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
b0df54bb 1944 be32_to_cpu(data[i]),
1945 be32_to_cpu(data[i+1]),
1946 be32_to_cpu(data[i+2]),
1947 be32_to_cpu(data[i+3]));
1da177e4
LT
1948 }
1949}
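/*
 * Example of the resulting log format (illustrative data): a 32-byte
 * buffer is printed as two lines, each giving the byte offset followed
 * by four big-endian words, e.g.
 *
 *   00000000: 00000001 00000002 00000003 00000004
 *   00000010: 00000005 00000006 00000007 00000008
 */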
1950
ee0f05b8 1951/**
1952 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1953 * @ioa_cfg: ioa config struct
1954 * @hostrcb: hostrcb struct
1955 *
1956 * Return value:
1957 * none
1958 **/
1959static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1960 struct ipr_hostrcb *hostrcb)
1961{
1962 struct ipr_hostrcb_type_17_error *error;
1963
4565e370
WB
1964 if (ioa_cfg->sis64)
1965 error = &hostrcb->hcam.u.error64.u.type_17_error;
1966 else
1967 error = &hostrcb->hcam.u.error.u.type_17_error;
1968
ee0f05b8 1969 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1970 strim(error->failure_reason);
ee0f05b8 1971
8cf093e2
BK
1972 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1973 be32_to_cpu(hostrcb->hcam.u.error.prc));
1974 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 1975 ipr_log_hex_data(ioa_cfg, error->data,
ee0f05b8 1976 be32_to_cpu(hostrcb->hcam.length) -
1977 (offsetof(struct ipr_hostrcb_error, u) +
1978 offsetof(struct ipr_hostrcb_type_17_error, data)));
1979}
1980
b0df54bb 1981/**
1982 * ipr_log_dual_ioa_error - Log a dual adapter error.
1983 * @ioa_cfg: ioa config struct
1984 * @hostrcb: hostrcb struct
1985 *
1986 * Return value:
1987 * none
1988 **/
1989static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1990 struct ipr_hostrcb *hostrcb)
1991{
1992 struct ipr_hostrcb_type_07_error *error;
1993
1994 error = &hostrcb->hcam.u.error.u.type_07_error;
1995 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ca54cb8c 1996 strim(error->failure_reason);
b0df54bb 1997
8cf093e2
BK
1998 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1999 be32_to_cpu(hostrcb->hcam.u.error.prc));
2000 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
ac719aba 2001 ipr_log_hex_data(ioa_cfg, error->data,
b0df54bb 2002 be32_to_cpu(hostrcb->hcam.length) -
2003 (offsetof(struct ipr_hostrcb_error, u) +
2004 offsetof(struct ipr_hostrcb_type_07_error, data)));
2005}
2006
49dc6a18
BK
2007static const struct {
2008 u8 active;
2009 char *desc;
2010} path_active_desc[] = {
2011 { IPR_PATH_NO_INFO, "Path" },
2012 { IPR_PATH_ACTIVE, "Active path" },
2013 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2014};
2015
2016static const struct {
2017 u8 state;
2018 char *desc;
2019} path_state_desc[] = {
2020 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2021 { IPR_PATH_HEALTHY, "is healthy" },
2022 { IPR_PATH_DEGRADED, "is degraded" },
2023 { IPR_PATH_FAILED, "is failed" }
2024};
2025
2026/**
2027 * ipr_log_fabric_path - Log a fabric path error
2028 * @hostrcb: hostrcb struct
2029 * @fabric: fabric descriptor
2030 *
2031 * Return value:
2032 * none
2033 **/
2034static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2035 struct ipr_hostrcb_fabric_desc *fabric)
2036{
2037 int i, j;
2038 u8 path_state = fabric->path_state;
2039 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2040 u8 state = path_state & IPR_PATH_STATE_MASK;
2041
2042 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2043 if (path_active_desc[i].active != active)
2044 continue;
2045
2046 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2047 if (path_state_desc[j].state != state)
2048 continue;
2049
2050 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2051 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2052 path_active_desc[i].desc, path_state_desc[j].desc,
2053 fabric->ioa_port);
2054 } else if (fabric->cascaded_expander == 0xff) {
2055 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2056 path_active_desc[i].desc, path_state_desc[j].desc,
2057 fabric->ioa_port, fabric->phy);
2058 } else if (fabric->phy == 0xff) {
2059 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2060 path_active_desc[i].desc, path_state_desc[j].desc,
2061 fabric->ioa_port, fabric->cascaded_expander);
2062 } else {
2063 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2064 path_active_desc[i].desc, path_state_desc[j].desc,
2065 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2066 }
2067 return;
2068 }
2069 }
2070
2071 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2072 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2073}
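/*
 * Illustrative output: an active, healthy path on IOA port 0, phy 2,
 * with no cascaded expander reported (0xff) is logged as
 * "Active path is healthy: IOA Port=0, Phy=2"; an unrecognized
 * path_state value falls through to the raw "Path state=..." message.
 */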
2074
4565e370
WB
2075/**
2076 * ipr_log64_fabric_path - Log a fabric path error
2077 * @hostrcb: hostrcb struct
2078 * @fabric: fabric descriptor
2079 *
2080 * Return value:
2081 * none
2082 **/
2083static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2084 struct ipr_hostrcb64_fabric_desc *fabric)
2085{
2086 int i, j;
2087 u8 path_state = fabric->path_state;
2088 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2089 u8 state = path_state & IPR_PATH_STATE_MASK;
2090 char buffer[IPR_MAX_RES_PATH_LENGTH];
2091
2092 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2093 if (path_active_desc[i].active != active)
2094 continue;
2095
2096 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2097 if (path_state_desc[j].state != state)
2098 continue;
2099
2100 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2101 path_active_desc[i].desc, path_state_desc[j].desc,
b3b3b407
BK
2102 ipr_format_res_path(hostrcb->ioa_cfg,
2103 fabric->res_path,
2104 buffer, sizeof(buffer)));
4565e370
WB
2105 return;
2106 }
2107 }
2108
2109 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
b3b3b407
BK
2110 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2111 buffer, sizeof(buffer)));
4565e370
WB
2112}
2113
49dc6a18
BK
2114static const struct {
2115 u8 type;
2116 char *desc;
2117} path_type_desc[] = {
2118 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2119 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2120 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2121 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2122};
2123
2124static const struct {
2125 u8 status;
2126 char *desc;
2127} path_status_desc[] = {
2128 { IPR_PATH_CFG_NO_PROB, "Functional" },
2129 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2130 { IPR_PATH_CFG_FAILED, "Failed" },
2131 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2132 { IPR_PATH_NOT_DETECTED, "Missing" },
2133 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2134};
2135
2136static const char *link_rate[] = {
2137 "unknown",
2138 "disabled",
2139 "phy reset problem",
2140 "spinup hold",
2141 "port selector",
2142 "unknown",
2143 "unknown",
2144 "unknown",
2145 "1.5Gbps",
2146 "3.0Gbps",
2147 "unknown",
2148 "unknown",
2149 "unknown",
2150 "unknown",
2151 "unknown",
2152 "unknown"
2153};
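/*
 * link_rate[] is indexed by cfg->link_rate masked with
 * IPR_PHY_LINK_RATE_MASK (the low four bits, given the 16 entries
 * above), so a field value of 8 is reported as "1.5Gbps" and 9 as
 * "3.0Gbps"; reserved encodings are logged as "unknown".
 */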
2154
2155/**
2156 * ipr_log_path_elem - Log a fabric path element.
2157 * @hostrcb: hostrcb struct
2158 * @cfg: fabric path element struct
2159 *
2160 * Return value:
2161 * none
2162 **/
2163static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2164 struct ipr_hostrcb_config_element *cfg)
2165{
2166 int i, j;
2167 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2168 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2169
2170 if (type == IPR_PATH_CFG_NOT_EXIST)
2171 return;
2172
2173 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2174 if (path_type_desc[i].type != type)
2175 continue;
2176
2177 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2178 if (path_status_desc[j].status != status)
2179 continue;
2180
2181 if (type == IPR_PATH_CFG_IOA_PORT) {
2182 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2183 path_status_desc[j].desc, path_type_desc[i].desc,
2184 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2185 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2186 } else {
2187 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2188 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2189 path_status_desc[j].desc, path_type_desc[i].desc,
2190 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2191 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2192 } else if (cfg->cascaded_expander == 0xff) {
2193 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2194 "WWN=%08X%08X\n", path_status_desc[j].desc,
2195 path_type_desc[i].desc, cfg->phy,
2196 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2197 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2198 } else if (cfg->phy == 0xff) {
2199 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2200 "WWN=%08X%08X\n", path_status_desc[j].desc,
2201 path_type_desc[i].desc, cfg->cascaded_expander,
2202 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2203 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2204 } else {
2205 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2206 "WWN=%08X%08X\n", path_status_desc[j].desc,
2207 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2208 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2209 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2210 }
2211 }
2212 return;
2213 }
2214 }
2215
2216 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2217 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2218 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2219 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2220}
2221
4565e370
WB
2222/**
2223 * ipr_log64_path_elem - Log a fabric path element.
2224 * @hostrcb: hostrcb struct
2225 * @cfg: fabric path element struct
2226 *
2227 * Return value:
2228 * none
2229 **/
2230static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2231 struct ipr_hostrcb64_config_element *cfg)
2232{
2233 int i, j;
2234 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2235 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2236 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2237 char buffer[IPR_MAX_RES_PATH_LENGTH];
2238
2239 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2240 return;
2241
2242 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2243 if (path_type_desc[i].type != type)
2244 continue;
2245
2246 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2247 if (path_status_desc[j].status != status)
2248 continue;
2249
2250 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2251 path_status_desc[j].desc, path_type_desc[i].desc,
b3b3b407
BK
2252 ipr_format_res_path(hostrcb->ioa_cfg,
2253 cfg->res_path, buffer, sizeof(buffer)),
2254 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2255 be32_to_cpu(cfg->wwid[0]),
2256 be32_to_cpu(cfg->wwid[1]));
4565e370
WB
2257 return;
2258 }
2259 }
2260 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2261 "WWN=%08X%08X\n", cfg->type_status,
b3b3b407
BK
2262 ipr_format_res_path(hostrcb->ioa_cfg,
2263 cfg->res_path, buffer, sizeof(buffer)),
2264 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2265 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
4565e370
WB
2266}
2267
49dc6a18
BK
2268/**
2269 * ipr_log_fabric_error - Log a fabric error.
2270 * @ioa_cfg: ioa config struct
2271 * @hostrcb: hostrcb struct
2272 *
2273 * Return value:
2274 * none
2275 **/
2276static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2277 struct ipr_hostrcb *hostrcb)
2278{
2279 struct ipr_hostrcb_type_20_error *error;
2280 struct ipr_hostrcb_fabric_desc *fabric;
2281 struct ipr_hostrcb_config_element *cfg;
2282 int i, add_len;
2283
2284 error = &hostrcb->hcam.u.error.u.type_20_error;
2285 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2286 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2287
2288 add_len = be32_to_cpu(hostrcb->hcam.length) -
2289 (offsetof(struct ipr_hostrcb_error, u) +
2290 offsetof(struct ipr_hostrcb_type_20_error, desc));
2291
2292 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2293 ipr_log_fabric_path(hostrcb, fabric);
2294 for_each_fabric_cfg(fabric, cfg)
2295 ipr_log_path_elem(hostrcb, cfg);
2296
2297 add_len -= be16_to_cpu(fabric->length);
2298 fabric = (struct ipr_hostrcb_fabric_desc *)
2299 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2300 }
2301
359d96e7 2302 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
49dc6a18
BK
2303}
2304
4565e370
WB
2305/**
2306 * ipr_log_sis64_array_error - Log a sis64 array error.
2307 * @ioa_cfg: ioa config struct
2308 * @hostrcb: hostrcb struct
2309 *
2310 * Return value:
2311 * none
2312 **/
2313static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2314 struct ipr_hostrcb *hostrcb)
2315{
2316 int i, num_entries;
2317 struct ipr_hostrcb_type_24_error *error;
2318 struct ipr_hostrcb64_array_data_entry *array_entry;
2319 char buffer[IPR_MAX_RES_PATH_LENGTH];
2320 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2321
2322 error = &hostrcb->hcam.u.error64.u.type_24_error;
2323
2324 ipr_err_separator;
2325
2326 ipr_err("RAID %s Array Configuration: %s\n",
2327 error->protection_level,
b3b3b407
BK
2328 ipr_format_res_path(ioa_cfg, error->last_res_path,
2329 buffer, sizeof(buffer)));
4565e370
WB
2330
2331 ipr_err_separator;
2332
2333 array_entry = error->array_member;
7262026f
WB
2334 num_entries = min_t(u32, error->num_entries,
2335 ARRAY_SIZE(error->array_member));
4565e370
WB
2336
2337 for (i = 0; i < num_entries; i++, array_entry++) {
2338
2339 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2340 continue;
2341
2342 if (error->exposed_mode_adn == i)
2343 ipr_err("Exposed Array Member %d:\n", i);
2344 else
2345 ipr_err("Array Member %d:\n", i);
2346
2347
2348 ipr_log_ext_vpd(&array_entry->vpd);
7262026f 2349 ipr_err("Current Location: %s\n",
b3b3b407
BK
2350 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2351 buffer, sizeof(buffer)));
7262026f 2352 ipr_err("Expected Location: %s\n",
b3b3b407
BK
2353 ipr_format_res_path(ioa_cfg,
2354 array_entry->expected_res_path,
2355 buffer, sizeof(buffer)));
4565e370
WB
2356
2357 ipr_err_separator;
2358 }
2359}
2360
2361/**
2362 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2363 * @ioa_cfg: ioa config struct
2364 * @hostrcb: hostrcb struct
2365 *
2366 * Return value:
2367 * none
2368 **/
2369static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2370 struct ipr_hostrcb *hostrcb)
2371{
2372 struct ipr_hostrcb_type_30_error *error;
2373 struct ipr_hostrcb64_fabric_desc *fabric;
2374 struct ipr_hostrcb64_config_element *cfg;
2375 int i, add_len;
2376
2377 error = &hostrcb->hcam.u.error64.u.type_30_error;
2378
2379 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2380 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2381
2382 add_len = be32_to_cpu(hostrcb->hcam.length) -
2383 (offsetof(struct ipr_hostrcb64_error, u) +
2384 offsetof(struct ipr_hostrcb_type_30_error, desc));
2385
2386 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2387 ipr_log64_fabric_path(hostrcb, fabric);
2388 for_each_fabric_cfg(fabric, cfg)
2389 ipr_log64_path_elem(hostrcb, cfg);
2390
2391 add_len -= be16_to_cpu(fabric->length);
2392 fabric = (struct ipr_hostrcb64_fabric_desc *)
2393 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2394 }
2395
359d96e7 2396 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
4565e370
WB
2397}
2398
15c5a5e0
WX
2399/**
2400 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2401 * @ioa_cfg: ioa config struct
2402 * @hostrcb: hostrcb struct
2403 *
2404 * Return value:
2405 * none
2406 **/
2407static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2408 struct ipr_hostrcb *hostrcb)
2409{
2410 struct ipr_hostrcb_type_41_error *error;
2411
2412 error = &hostrcb->hcam.u.error64.u.type_41_error;
2413
2414 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2415 ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
2416 ipr_log_hex_data(ioa_cfg, error->data,
2417 be32_to_cpu(hostrcb->hcam.length) -
2418 (offsetof(struct ipr_hostrcb_error, u) +
2419 offsetof(struct ipr_hostrcb_type_41_error, data)));
2420}
b0df54bb 2421/**
2422 * ipr_log_generic_error - Log an adapter error.
2423 * @ioa_cfg: ioa config struct
2424 * @hostrcb: hostrcb struct
2425 *
2426 * Return value:
2427 * none
2428 **/
2429static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2430 struct ipr_hostrcb *hostrcb)
2431{
ac719aba 2432 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
b0df54bb 2433 be32_to_cpu(hostrcb->hcam.length));
2434}
2435
169b9ec8
WX
2436/**
2437 * ipr_log_sis64_device_error - Log a sis64 device error.
2438 * @ioa_cfg: ioa config struct
2439 * @hostrcb: hostrcb struct
2440 *
2441 * Return value:
2442 * none
2443 **/
2444static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2445 struct ipr_hostrcb *hostrcb)
2446{
2447 struct ipr_hostrcb_type_21_error *error;
2448 char buffer[IPR_MAX_RES_PATH_LENGTH];
2449
2450 error = &hostrcb->hcam.u.error64.u.type_21_error;
2451
2452 ipr_err("-----Failing Device Information-----\n");
2453 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2454 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2455 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2456 ipr_err("Device Resource Path: %s\n",
2457 __ipr_format_res_path(error->res_path,
2458 buffer, sizeof(buffer)));
2459 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2460 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2461 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2462 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2463 ipr_err("SCSI Sense Data:\n");
2464 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2465 ipr_err("SCSI Command Descriptor Block:\n");
2466 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2467
2468 ipr_err("Additional IOA Data:\n");
2469 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2470}
2471
1da177e4
LT
2472/**
2473 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2474 * @ioasc: IOASC
2475 *
2476 * This function will return the index into the ipr_error_table
2477 * for the specified IOASC. If the IOASC is not in the table,
2478 * 0 will be returned, which points to the entry used for unknown errors.
2479 *
2480 * Return value:
2481 * index into the ipr_error_table
2482 **/
2483static u32 ipr_get_error(u32 ioasc)
2484{
2485 int i;
2486
2487 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
35a39691 2488 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
1da177e4
LT
2489 return i;
2490
2491 return 0;
2492}
2493
2494/**
2495 * ipr_handle_log_data - Log an adapter error.
2496 * @ioa_cfg: ioa config struct
2497 * @hostrcb: hostrcb struct
2498 *
2499 * This function logs an adapter error to the system.
2500 *
2501 * Return value:
2502 * none
2503 **/
2504static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2505 struct ipr_hostrcb *hostrcb)
2506{
2507 u32 ioasc;
2508 int error_index;
3185ea63 2509 struct ipr_hostrcb_type_21_error *error;
1da177e4
LT
2510
2511 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2512 return;
2513
2514 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2515 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2516
4565e370
WB
2517 if (ioa_cfg->sis64)
2518 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2519 else
2520 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4 2521
4565e370
WB
2522 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2523 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
1da177e4
LT
2524 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2525 scsi_report_bus_reset(ioa_cfg->host,
4565e370 2526 hostrcb->hcam.u.error.fd_res_addr.bus);
1da177e4
LT
2527 }
2528
2529 error_index = ipr_get_error(ioasc);
2530
2531 if (!ipr_error_table[error_index].log_hcam)
2532 return;
2533
3185ea63 2534 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2535 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2536 error = &hostrcb->hcam.u.error64.u.type_21_error;
2537
2538 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2539 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2540 return;
2541 }
2542
49dc6a18 2543 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
1da177e4
LT
2544
2545 /* Set indication we have logged an error */
2546 ioa_cfg->errors_logged++;
2547
933916f3 2548 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
1da177e4 2549 return;
cf852037 2550 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2551 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1da177e4
LT
2552
2553 switch (hostrcb->hcam.overlay_id) {
1da177e4
LT
2554 case IPR_HOST_RCB_OVERLAY_ID_2:
2555 ipr_log_cache_error(ioa_cfg, hostrcb);
2556 break;
2557 case IPR_HOST_RCB_OVERLAY_ID_3:
2558 ipr_log_config_error(ioa_cfg, hostrcb);
2559 break;
2560 case IPR_HOST_RCB_OVERLAY_ID_4:
2561 case IPR_HOST_RCB_OVERLAY_ID_6:
2562 ipr_log_array_error(ioa_cfg, hostrcb);
2563 break;
b0df54bb 2564 case IPR_HOST_RCB_OVERLAY_ID_7:
2565 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2566 break;
ee0f05b8 2567 case IPR_HOST_RCB_OVERLAY_ID_12:
2568 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2569 break;
2570 case IPR_HOST_RCB_OVERLAY_ID_13:
2571 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2572 break;
2573 case IPR_HOST_RCB_OVERLAY_ID_14:
2574 case IPR_HOST_RCB_OVERLAY_ID_16:
2575 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2576 break;
2577 case IPR_HOST_RCB_OVERLAY_ID_17:
2578 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2579 break;
49dc6a18
BK
2580 case IPR_HOST_RCB_OVERLAY_ID_20:
2581 ipr_log_fabric_error(ioa_cfg, hostrcb);
2582 break;
169b9ec8
WX
2583 case IPR_HOST_RCB_OVERLAY_ID_21:
2584 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2585 break;
4565e370
WB
2586 case IPR_HOST_RCB_OVERLAY_ID_23:
2587 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2588 break;
2589 case IPR_HOST_RCB_OVERLAY_ID_24:
2590 case IPR_HOST_RCB_OVERLAY_ID_26:
2591 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2592 break;
2593 case IPR_HOST_RCB_OVERLAY_ID_30:
2594 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2595 break;
15c5a5e0
WX
2596 case IPR_HOST_RCB_OVERLAY_ID_41:
2597 ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2598 break;
cf852037 2599 case IPR_HOST_RCB_OVERLAY_ID_1:
1da177e4 2600 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1da177e4 2601 default:
a9cfca96 2602 ipr_log_generic_error(ioa_cfg, hostrcb);
1da177e4
LT
2603 break;
2604 }
2605}
2606
afc3f83c
BK
2607static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2608{
2609 struct ipr_hostrcb *hostrcb;
2610
2611 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2612 struct ipr_hostrcb, queue);
2613
2614 if (unlikely(!hostrcb)) {
2615 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
2616 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2617 struct ipr_hostrcb, queue);
2618 }
2619
2620 list_del_init(&hostrcb->queue);
2621 return hostrcb;
2622}
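/*
 * Note: when hostrcb_free_q is exhausted, the oldest buffer still on
 * hostrcb_report_q is reclaimed instead.  As written, this assumes at
 * least one buffer is outstanding on the report queue in that case,
 * since the result is dereferenced without a further NULL check.
 */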
2623
1da177e4
LT
2624/**
2625 * ipr_process_error - Op done function for an adapter error log.
2626 * @ipr_cmd: ipr command struct
2627 *
2628 * This function is the op done function for an error log host
2629 * controlled async message (HCAM) from the adapter. It will log the error and
2630 * send the HCAM back to the adapter.
2631 *
2632 * Return value:
2633 * none
2634 **/
2635static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2636{
2637 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2638 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
96d21f00 2639 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4565e370
WB
2640 u32 fd_ioasc;
2641
2642 if (ioa_cfg->sis64)
2643 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2644 else
2645 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
1da177e4 2646
afc3f83c 2647 list_del_init(&hostrcb->queue);
05a6538a 2648 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
2649
2650 if (!ioasc) {
2651 ipr_handle_log_data(ioa_cfg, hostrcb);
65f56475
BK
2652 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2653 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4fdd7c7a
BK
2654 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2655 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
1da177e4
LT
2656 dev_err(&ioa_cfg->pdev->dev,
2657 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2658 }
2659
afc3f83c 2660 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
8a4236a2 2661 schedule_work(&ioa_cfg->work_q);
afc3f83c 2662 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
afc3f83c 2663
1da177e4
LT
2664 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2665}
2666
2667/**
2668 * ipr_timeout - An internally generated op has timed out.
a96099e2 2669 * @t: Timer context used to fetch ipr command struct
1da177e4
LT
2670 *
2671 * This function blocks host requests and initiates an
2672 * adapter reset.
2673 *
2674 * Return value:
2675 * none
2676 **/
738c6ec5 2677static void ipr_timeout(struct timer_list *t)
1da177e4 2678{
738c6ec5 2679 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
1da177e4
LT
2680 unsigned long lock_flags = 0;
2681 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2682
2683 ENTER;
2684 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2685
2686 ioa_cfg->errors_logged++;
2687 dev_err(&ioa_cfg->pdev->dev,
2688 "Adapter being reset due to command timeout.\n");
2689
2690 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2691 ioa_cfg->sdt_state = GET_DUMP;
2692
2693 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2694 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2695
2696 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2697 LEAVE;
2698}
2699
2700/**
2701 * ipr_oper_timeout - Adapter timed out transitioning to operational
a96099e2 2702 * @t: Timer context used to fetch ipr command struct
1da177e4
LT
2703 *
2704 * This function blocks host requests and initiates an
2705 * adapter reset.
2706 *
2707 * Return value:
2708 * none
2709 **/
738c6ec5 2710static void ipr_oper_timeout(struct timer_list *t)
1da177e4 2711{
738c6ec5 2712 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
1da177e4
LT
2713 unsigned long lock_flags = 0;
2714 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2715
2716 ENTER;
2717 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2718
2719 ioa_cfg->errors_logged++;
2720 dev_err(&ioa_cfg->pdev->dev,
2721 "Adapter timed out transitioning to operational.\n");
2722
2723 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2724 ioa_cfg->sdt_state = GET_DUMP;
2725
2726 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2727 if (ipr_fastfail)
2728 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2729 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2730 }
2731
2732 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2733 LEAVE;
2734}
2735
1da177e4
LT
2736/**
2737 * ipr_find_ses_entry - Find matching SES in SES table
2738 * @res: resource entry struct of SES
2739 *
2740 * Return value:
2741 * pointer to SES table entry / NULL on failure
2742 **/
2743static const struct ipr_ses_table_entry *
2744ipr_find_ses_entry(struct ipr_resource_entry *res)
2745{
2746 int i, j, matches;
3e7ebdfa 2747 struct ipr_std_inq_vpids *vpids;
1da177e4
LT
2748 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2749
2750 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2751 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2752 if (ste->compare_product_id_byte[j] == 'X') {
3e7ebdfa
WB
2753 vpids = &res->std_inq_data.vpids;
2754 if (vpids->product_id[j] == ste->product_id[j])
1da177e4
LT
2755 matches++;
2756 else
2757 break;
2758 } else
2759 matches++;
2760 }
2761
2762 if (matches == IPR_PROD_ID_LEN)
2763 return ste;
2764 }
2765
2766 return NULL;
2767}
2768
2769/**
2770 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2771 * @ioa_cfg: ioa config struct
2772 * @bus: SCSI bus
2773 * @bus_width: bus width
2774 *
2775 * Return value:
2777 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz.
2778 * For a 2-byte (wide) SCSI bus, the maximum transfer rate in
2779 * MB/sec is twice the bus speed (e.g. a wide enabled bus running
2780 * at a max of 160MHz can transfer up to 320MB/sec).
2780 **/
2781static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2782{
2783 struct ipr_resource_entry *res;
2784 const struct ipr_ses_table_entry *ste;
2785 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2786
2787 /* Loop through each config table entry in the config table buffer */
2788 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 2789 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
1da177e4
LT
2790 continue;
2791
3e7ebdfa 2792 if (bus != res->bus)
1da177e4
LT
2793 continue;
2794
2795 if (!(ste = ipr_find_ses_entry(res)))
2796 continue;
2797
2798 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2799 }
2800
2801 return max_xfer_rate;
2802}
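/*
 * Worked example (assuming ste->max_bus_speed_limit is expressed in
 * MB/sec, as the comment above implies): an SES entry limiting a
 * 16-bit wide bus to 320 MB/sec yields (320 * 10) / (16 / 8) = 1600,
 * i.e. 160 MHz in the 100KHz units returned by this function.
 */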
2803
2804/**
2805 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2806 * @ioa_cfg: ioa config struct
2807 * @max_delay: max delay in micro-seconds to wait
2808 *
2809 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2810 *
2811 * Return value:
2812 * 0 on success / other on failure
2813 **/
2814static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2815{
2816 volatile u32 pcii_reg;
2817 int delay = 1;
2818
2819 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2820 while (delay < max_delay) {
2821 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2822
2823 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2824 return 0;
2825
2826 /* udelay cannot be used if delay is more than a few milliseconds */
2827 if ((delay / 1000) > MAX_UDELAY_MS)
2828 mdelay(delay / 1000);
2829 else
2830 udelay(delay);
2831
2832 delay += delay;
2833 }
2834 return -EIO;
2835}
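/*
 * The polling delay doubles on each pass (1us, 2us, 4us, ...), so the
 * total time spent busy-waiting before giving up is bounded by roughly
 * twice max_delay.
 */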
2836
dcbad00e
WB
2837/**
2838 * ipr_get_sis64_dump_data_section - Dump IOA memory
2839 * @ioa_cfg: ioa config struct
2840 * @start_addr: adapter address to dump
2841 * @dest: destination kernel buffer
2842 * @length_in_words: length to dump in 4 byte words
2843 *
2844 * Return value:
2845 * 0 on success
2846 **/
2847static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2848 u32 start_addr,
2849 __be32 *dest, u32 length_in_words)
2850{
2851 int i;
2852
2853 for (i = 0; i < length_in_words; i++) {
2854 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2855 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2856 dest++;
2857 }
2858
2859 return 0;
2860}
2861
1da177e4
LT
2862/**
2863 * ipr_get_ldump_data_section - Dump IOA memory
2864 * @ioa_cfg: ioa config struct
2865 * @start_addr: adapter address to dump
2866 * @dest: destination kernel buffer
2867 * @length_in_words: length to dump in 4 byte words
2868 *
2869 * Return value:
2870 * 0 on success / -EIO on failure
2871 **/
2872static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2873 u32 start_addr,
2874 __be32 *dest, u32 length_in_words)
2875{
2876 volatile u32 temp_pcii_reg;
2877 int i, delay = 0;
2878
dcbad00e
WB
2879 if (ioa_cfg->sis64)
2880 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2881 dest, length_in_words);
2882
1da177e4
LT
2883 /* Write IOA interrupt reg starting LDUMP state */
2884 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
214777ba 2885 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2886
2887 /* Wait for IO debug acknowledge */
2888 if (ipr_wait_iodbg_ack(ioa_cfg,
2889 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2890 dev_err(&ioa_cfg->pdev->dev,
2891 "IOA dump long data transfer timeout\n");
2892 return -EIO;
2893 }
2894
2895 /* Signal LDUMP interlocked - clear IO debug ack */
2896 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2897 ioa_cfg->regs.clr_interrupt_reg);
2898
2899 /* Write Mailbox with starting address */
2900 writel(start_addr, ioa_cfg->ioa_mailbox);
2901
2902 /* Signal address valid - clear IOA Reset alert */
2903 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2904 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2905
2906 for (i = 0; i < length_in_words; i++) {
2907 /* Wait for IO debug acknowledge */
2908 if (ipr_wait_iodbg_ack(ioa_cfg,
2909 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2910 dev_err(&ioa_cfg->pdev->dev,
2911 "IOA dump short data transfer timeout\n");
2912 return -EIO;
2913 }
2914
2915 /* Read data from mailbox and increment destination pointer */
2916 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2917 dest++;
2918
2919 /* For all but the last word of data, signal data received */
2920 if (i < (length_in_words - 1)) {
2921 /* Signal dump data received - Clear IO debug Ack */
2922 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2923 ioa_cfg->regs.clr_interrupt_reg);
2924 }
2925 }
2926
2927 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2928 writel(IPR_UPROCI_RESET_ALERT,
214777ba 2929 ioa_cfg->regs.set_uproc_interrupt_reg32);
1da177e4
LT
2930
2931 writel(IPR_UPROCI_IO_DEBUG_ALERT,
214777ba 2932 ioa_cfg->regs.clr_uproc_interrupt_reg32);
1da177e4
LT
2933
2934 /* Signal dump data received - Clear IO debug Ack */
2935 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2936 ioa_cfg->regs.clr_interrupt_reg);
2937
2938 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2939 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2940 temp_pcii_reg =
214777ba 2941 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
1da177e4
LT
2942
2943 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2944 return 0;
2945
2946 udelay(10);
2947 delay += 10;
2948 }
2949
2950 return 0;
2951}
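/*
 * Summary of the long-dump handshake above: the driver raises
 * RESET_ALERT and IO_DEBUG_ALERT, waits for and clears the IOA's IO
 * debug acknowledge, writes the starting address to the mailbox, and
 * clears RESET_ALERT to mark the address valid.  It then reads one
 * dump word from the mailbox per acknowledge, clearing the acknowledge
 * after every word but the last.  A final RESET_ALERT/acknowledge
 * sequence ends the block transfer, and the routine polls for the IOA
 * to drop RESET_ALERT before returning.
 */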
2952
2953#ifdef CONFIG_SCSI_IPR_DUMP
2954/**
2955 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2956 * @ioa_cfg: ioa config struct
2957 * @pci_address: adapter address
2958 * @length: length of data to copy
2959 *
2960 * Copy data from PCI adapter to kernel buffer.
2961 * Note: length MUST be a 4 byte multiple
2962 * Return value:
2963 * 0 on success / other on failure
2964 **/
2965static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2966 unsigned long pci_address, u32 length)
2967{
2968 int bytes_copied = 0;
4d4dd706 2969 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
1da177e4
LT
2970 __be32 *page;
2971 unsigned long lock_flags = 0;
2972 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2973
4d4dd706
KSS
2974 if (ioa_cfg->sis64)
2975 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2976 else
2977 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2978
1da177e4 2979 while (bytes_copied < length &&
4d4dd706 2980 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
1da177e4
LT
2981 if (ioa_dump->page_offset >= PAGE_SIZE ||
2982 ioa_dump->page_offset == 0) {
2983 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2984
2985 if (!page) {
2986 ipr_trace;
2987 return bytes_copied;
2988 }
2989
2990 ioa_dump->page_offset = 0;
2991 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2992 ioa_dump->next_page_index++;
2993 } else
2994 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2995
2996 rem_len = length - bytes_copied;
2997 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2998 cur_len = min(rem_len, rem_page_len);
2999
3000 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3001 if (ioa_cfg->sdt_state == ABORT_DUMP) {
3002 rc = -EIO;
3003 } else {
3004 rc = ipr_get_ldump_data_section(ioa_cfg,
3005 pci_address + bytes_copied,
3006 &page[ioa_dump->page_offset / 4],
3007 (cur_len / sizeof(u32)));
3008 }
3009 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3010
3011 if (!rc) {
3012 ioa_dump->page_offset += cur_len;
3013 bytes_copied += cur_len;
3014 } else {
3015 ipr_trace;
3016 break;
3017 }
3018 schedule();
3019 }
3020
3021 return bytes_copied;
3022}
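/*
 * Dump data is staged into GFP_ATOMIC pages one section at a time; the
 * host lock is held only around each ipr_get_ldump_data_section() call
 * and schedule() is invoked between chunks so a long dump does not
 * monopolize the CPU.  An ABORT_DUMP request or a failed section read
 * ends the copy early with whatever has been gathered so far.
 */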
3023
3024/**
3025 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3026 * @hdr: dump entry header struct
3027 *
3028 * Return value:
3029 * nothing
3030 **/
3031static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3032{
3033 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3034 hdr->num_elems = 1;
3035 hdr->offset = sizeof(*hdr);
3036 hdr->status = IPR_DUMP_STATUS_SUCCESS;
3037}
3038
3039/**
3040 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3041 * @ioa_cfg: ioa config struct
3042 * @driver_dump: driver dump struct
3043 *
3044 * Return value:
3045 * nothing
3046 **/
3047static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3048 struct ipr_driver_dump *driver_dump)
3049{
3050 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3051
3052 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3053 driver_dump->ioa_type_entry.hdr.len =
3054 sizeof(struct ipr_dump_ioa_type_entry) -
3055 sizeof(struct ipr_dump_entry_header);
3056 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3057 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3058 driver_dump->ioa_type_entry.type = ioa_cfg->type;
3059 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3060 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3061 ucode_vpd->minor_release[1];
3062 driver_dump->hdr.num_entries++;
3063}
3064
3065/**
3066 * ipr_dump_version_data - Fill in the driver version in the dump.
3067 * @ioa_cfg: ioa config struct
3068 * @driver_dump: driver dump struct
3069 *
3070 * Return value:
3071 * nothing
3072 **/
3073static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3074 struct ipr_driver_dump *driver_dump)
3075{
3076 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3077 driver_dump->version_entry.hdr.len =
3078 sizeof(struct ipr_dump_version_entry) -
3079 sizeof(struct ipr_dump_entry_header);
3080 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3081 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3082 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3083 driver_dump->hdr.num_entries++;
3084}
3085
3086/**
3087 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3088 * @ioa_cfg: ioa config struct
3089 * @driver_dump: driver dump struct
3090 *
3091 * Return value:
3092 * nothing
3093 **/
3094static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3095 struct ipr_driver_dump *driver_dump)
3096{
3097 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3098 driver_dump->trace_entry.hdr.len =
3099 sizeof(struct ipr_dump_trace_entry) -
3100 sizeof(struct ipr_dump_entry_header);
3101 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3102 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3103 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3104 driver_dump->hdr.num_entries++;
3105}
3106
3107/**
3108 * ipr_dump_location_data - Fill in the IOA location in the dump.
3109 * @ioa_cfg: ioa config struct
3110 * @driver_dump: driver dump struct
3111 *
3112 * Return value:
3113 * nothing
3114 **/
3115static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3116 struct ipr_driver_dump *driver_dump)
3117{
3118 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3119 driver_dump->location_entry.hdr.len =
3120 sizeof(struct ipr_dump_location_entry) -
3121 sizeof(struct ipr_dump_entry_header);
3122 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3123 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
71610f55 3124 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
1da177e4
LT
3125 driver_dump->hdr.num_entries++;
3126}
3127
3128/**
3129 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3130 * @ioa_cfg: ioa config struct
3131 * @dump: dump struct
3132 *
3133 * Return value:
3134 * nothing
3135 **/
3136static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3137{
3138 unsigned long start_addr, sdt_word;
3139 unsigned long lock_flags = 0;
3140 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3141 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
4d4dd706
KSS
3142 u32 num_entries, max_num_entries, start_off, end_off;
3143 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
1da177e4 3144 struct ipr_sdt *sdt;
dcbad00e 3145 int valid = 1;
1da177e4
LT
3146 int i;
3147
3148 ENTER;
3149
3150 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3151
41e9a696 3152 if (ioa_cfg->sdt_state != READ_DUMP) {
1da177e4
LT
3153 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3154 return;
3155 }
3156
110def85
WB
3157 if (ioa_cfg->sis64) {
3158 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3159 ssleep(IPR_DUMP_DELAY_SECONDS);
3160 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3161 }
3162
1da177e4
LT
3163 start_addr = readl(ioa_cfg->ioa_mailbox);
3164
dcbad00e 3165 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
1da177e4
LT
3166 dev_err(&ioa_cfg->pdev->dev,
3167 "Invalid dump table format: %lx\n", start_addr);
3168 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3169 return;
3170 }
3171
3172 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3173
3174 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3175
3176 /* Initialize the overall dump header */
3177 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3178 driver_dump->hdr.num_entries = 1;
3179 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3180 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3181 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3182 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3183
3184 ipr_dump_version_data(ioa_cfg, driver_dump);
3185 ipr_dump_location_data(ioa_cfg, driver_dump);
3186 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3187 ipr_dump_trace_data(ioa_cfg, driver_dump);
3188
3189 /* Update dump_header */
3190 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3191
3192 /* IOA Dump entry */
3193 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1da177e4
LT
3194 ioa_dump->hdr.len = 0;
3195 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3196 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3197
 3198	/* The first entries in the SDT are a list of dump addresses and
 3199	   lengths used to gather the real dump data. sdt points to the
 3200	   IOA-generated dump table; dump data will be extracted based
 3201	   on the entries in this table. */
3202 sdt = &ioa_dump->sdt;
3203
4d4dd706
KSS
3204 if (ioa_cfg->sis64) {
3205 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3206 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3207 } else {
3208 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3209 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3210 }
3211
3212 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3213 (max_num_entries * sizeof(struct ipr_sdt_entry));
1da177e4 3214 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
4d4dd706 3215 bytes_to_copy / sizeof(__be32));
1da177e4
LT
3216
3217 /* Smart Dump table is ready to use and the first entry is valid */
dcbad00e
WB
3218 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3219 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3220 dev_err(&ioa_cfg->pdev->dev,
3221 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3222 rc, be32_to_cpu(sdt->hdr.state));
3223 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3224 ioa_cfg->sdt_state = DUMP_OBTAINED;
3225 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3226 return;
3227 }
3228
3229 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3230
4d4dd706
KSS
3231 if (num_entries > max_num_entries)
3232 num_entries = max_num_entries;
3233
3234 /* Update dump length to the actual data to be copied */
3235 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3236 if (ioa_cfg->sis64)
3237 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3238 else
3239 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
1da177e4
LT
3240
3241 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3242
3243 for (i = 0; i < num_entries; i++) {
4d4dd706 3244 if (ioa_dump->hdr.len > max_dump_size) {
3245 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3246 break;
3247 }
3248
3249 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3250 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3251 if (ioa_cfg->sis64)
3252 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3253 else {
3254 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3255 end_off = be32_to_cpu(sdt->entry[i].end_token);
3256
3257 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3258 bytes_to_copy = end_off - start_off;
3259 else
3260 valid = 0;
3261 }
3262 if (valid) {
4d4dd706 3263 if (bytes_to_copy > max_dump_size) {
3264 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3265 continue;
3266 }
3267
3268 /* Copy data from adapter to driver buffers */
3269 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3270 bytes_to_copy);
3271
3272 ioa_dump->hdr.len += bytes_copied;
3273
3274 if (bytes_copied != bytes_to_copy) {
3275 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3276 break;
3277 }
3278 }
3279 }
3280 }
3281
3282 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3283
3284 /* Update dump_header */
3285 driver_dump->hdr.len += ioa_dump->hdr.len;
3286 wmb();
3287 ioa_cfg->sdt_state = DUMP_OBTAINED;
3288 LEAVE;
3289}
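/*
 * Summary note (inferred from the code above, not a statement of the
 * hardware spec): the dump is driven by the Smart Dump Table (SDT) whose
 * address is read from the adapter mailbox.  SIS64 adapters use the
 * format 3 SDT, older adapters use format 2.  Each valid SDT entry
 * supplies a start token plus either a length (SIS64) or an end offset
 * (format 2) describing one region that is copied via ipr_sdt_copy().
 */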
3290
3291#else
203fa3fe 3292#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
1da177e4
LT
3293#endif
3294
3295/**
3296 * ipr_release_dump - Free adapter dump memory
3297 * @kref: kref struct
3298 *
3299 * Return value:
3300 * nothing
3301 **/
3302static void ipr_release_dump(struct kref *kref)
3303{
203fa3fe 3304 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
1da177e4
LT
3305 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3306 unsigned long lock_flags = 0;
3307 int i;
3308
3309 ENTER;
3310 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3311 ioa_cfg->dump = NULL;
3312 ioa_cfg->sdt_state = INACTIVE;
3313 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3314
3315 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3316 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3317
4d4dd706 3318 vfree(dump->ioa_dump.ioa_data);
1da177e4
LT
3319 kfree(dump);
3320 LEAVE;
3321}
3322
318ddb34
WX
3323static void ipr_add_remove_thread(struct work_struct *work)
3324{
3325 unsigned long lock_flags;
3326 struct ipr_resource_entry *res;
3327 struct scsi_device *sdev;
3328 struct ipr_ioa_cfg *ioa_cfg =
3329 container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3330 u8 bus, target, lun;
3331 int did_work;
3332
3333 ENTER;
3334 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3335
3336restart:
3337 do {
3338 did_work = 0;
3339 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3340 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3341 return;
3342 }
3343
3344 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3345 if (res->del_from_ml && res->sdev) {
3346 did_work = 1;
3347 sdev = res->sdev;
3348 if (!scsi_device_get(sdev)) {
3349 if (!res->add_to_ml)
3350 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3351 else
3352 res->del_from_ml = 0;
3353 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3354 scsi_remove_device(sdev);
3355 scsi_device_put(sdev);
3356 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3357 }
3358 break;
3359 }
3360 }
3361 } while (did_work);
3362
3363 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3364 if (res->add_to_ml) {
3365 bus = res->bus;
3366 target = res->target;
3367 lun = res->lun;
3368 res->add_to_ml = 0;
3369 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3370 scsi_add_device(ioa_cfg->host, bus, target, lun);
3371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3372 goto restart;
3373 }
3374 }
3375
3376 ioa_cfg->scan_done = 1;
3377 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3378 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3379 LEAVE;
3380}
3381
1da177e4
LT
3382/**
3383 * ipr_worker_thread - Worker thread
c4028958 3384 * @work: ioa config struct
3385 *
3386 * Called at task level from a work thread. This function takes care
 3388 * of adding and removing devices from the mid-layer as configuration
3388 * changes are detected by the adapter.
3389 *
3390 * Return value:
3391 * nothing
3392 **/
c4028958 3393static void ipr_worker_thread(struct work_struct *work)
3394{
3395 unsigned long lock_flags;
1da177e4 3396 struct ipr_dump *dump;
c4028958
DH
3397 struct ipr_ioa_cfg *ioa_cfg =
3398 container_of(work, struct ipr_ioa_cfg, work_q);
1da177e4
LT
3399
3400 ENTER;
3401 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3402
41e9a696 3403 if (ioa_cfg->sdt_state == READ_DUMP) {
3404 dump = ioa_cfg->dump;
3405 if (!dump) {
3406 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3407 return;
3408 }
3409 kref_get(&dump->kref);
3410 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3411 ipr_get_ioa_dump(ioa_cfg, dump);
3412 kref_put(&dump->kref, ipr_release_dump);
3413
3414 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4c647e90 3415 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3416 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3417 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3418 return;
3419 }
3420
b0e17a9b
BK
3421 if (ioa_cfg->scsi_unblock) {
3422 ioa_cfg->scsi_unblock = 0;
3423 ioa_cfg->scsi_blocked = 0;
3424 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3425 scsi_unblock_requests(ioa_cfg->host);
3426 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3427 if (ioa_cfg->scsi_blocked)
3428 scsi_block_requests(ioa_cfg->host);
3429 }
3430
b195d5e2
BK
3431 if (!ioa_cfg->scan_enabled) {
3432 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3433 return;
3434 }
3435
318ddb34 3436 schedule_work(&ioa_cfg->scsi_add_work_q);
1da177e4 3437
1da177e4 3438 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4
LT
3439 LEAVE;
3440}
3441
3442#ifdef CONFIG_SCSI_IPR_TRACE
3443/**
3444 * ipr_read_trace - Dump the adapter trace
2c3c8bea 3445 * @filp: open sysfs file
1da177e4 3446 * @kobj: kobject struct
91a69029 3447 * @bin_attr: bin_attribute struct
3448 * @buf: buffer
3449 * @off: offset
3450 * @count: buffer size
3451 *
3452 * Return value:
3453 * number of bytes printed to buffer
3454 **/
2c3c8bea 3455static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3456 struct bin_attribute *bin_attr,
3457 char *buf, loff_t off, size_t count)
1da177e4 3458{
aabd5fea 3459 struct device *dev = kobj_to_dev(kobj);
ee959b00 3460 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3461 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3462 unsigned long lock_flags = 0;
d777aaf3 3463 ssize_t ret;
1da177e4
LT
3464
3465 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3466 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3467 IPR_TRACE_SIZE);
1da177e4 3468 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
d777aaf3
AM
3469
3470 return ret;
1da177e4
LT
3471}
3472
3473static struct bin_attribute ipr_trace_attr = {
3474 .attr = {
3475 .name = "trace",
3476 .mode = S_IRUGO,
3477 },
3478 .size = 0,
3479 .read = ipr_read_trace,
3480};
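/*
 * Usage sketch (illustrative; the exact path depends on the host number
 * assigned at probe time): with CONFIG_SCSI_IPR_TRACE enabled, the trace
 * buffer exposed by this attribute can be read from user space with
 * something like
 *
 *   cat /sys/class/scsi_host/host0/trace > ipr_trace.bin
 */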
3481#endif
3482
3483/**
3484 * ipr_show_fw_version - Show the firmware version
ee959b00 3485 * @dev: class device struct
a96099e2 3486 * @attr: device attribute (unused)
ee959b00 3487 * @buf: buffer
3488 *
3489 * Return value:
3490 * number of bytes printed to buffer
3491 **/
ee959b00
TJ
3492static ssize_t ipr_show_fw_version(struct device *dev,
3493 struct device_attribute *attr, char *buf)
1da177e4 3494{
ee959b00 3495 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3496 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3497 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3498 unsigned long lock_flags = 0;
3499 int len;
3500
3501 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3502 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3503 ucode_vpd->major_release, ucode_vpd->card_type,
3504 ucode_vpd->minor_release[0],
3505 ucode_vpd->minor_release[1]);
3506 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3507 return len;
3508}
3509
ee959b00 3510static struct device_attribute ipr_fw_version_attr = {
3511 .attr = {
3512 .name = "fw_version",
3513 .mode = S_IRUGO,
3514 },
3515 .show = ipr_show_fw_version,
3516};
3517
3518/**
3519 * ipr_show_log_level - Show the adapter's error logging level
ee959b00 3520 * @dev: class device struct
a96099e2 3521 * @attr: device attribute (unused)
ee959b00 3522 * @buf: buffer
3523 *
3524 * Return value:
3525 * number of bytes printed to buffer
3526 **/
ee959b00
TJ
3527static ssize_t ipr_show_log_level(struct device *dev,
3528 struct device_attribute *attr, char *buf)
1da177e4 3529{
ee959b00 3530 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3531 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3532 unsigned long lock_flags = 0;
3533 int len;
3534
3535 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3536 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3537 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3538 return len;
3539}
3540
3541/**
3542 * ipr_store_log_level - Change the adapter's error logging level
ee959b00 3543 * @dev: class device struct
a96099e2 3544 * @attr: device attribute (unused)
ee959b00 3545 * @buf: buffer
a96099e2 3546 * @count: buffer size
3547 *
3548 * Return value:
 3549 * number of bytes consumed from buffer
3550 **/
ee959b00 3551static ssize_t ipr_store_log_level(struct device *dev,
203fa3fe 3552 struct device_attribute *attr,
3553 const char *buf, size_t count)
3554{
ee959b00 3555 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3556 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3557 unsigned long lock_flags = 0;
3558
3559 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3560 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3561 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3562 return strlen(buf);
3563}
3564
ee959b00 3565static struct device_attribute ipr_log_level_attr = {
3566 .attr = {
3567 .name = "log_level",
3568 .mode = S_IRUGO | S_IWUSR,
3569 },
3570 .show = ipr_show_log_level,
3571 .store = ipr_store_log_level
3572};
3573
3574/**
3575 * ipr_store_diagnostics - IOA Diagnostics interface
ee959b00 3576 * @dev: device struct
a96099e2 3577 * @attr: device attribute (unused)
3578 * @buf: buffer
3579 * @count: buffer size
3580 *
3581 * This function will reset the adapter and wait a reasonable
3582 * amount of time for any errors that the adapter might log.
3583 *
3584 * Return value:
3585 * count on success / other on failure
3586 **/
ee959b00
TJ
3587static ssize_t ipr_store_diagnostics(struct device *dev,
3588 struct device_attribute *attr,
3589 const char *buf, size_t count)
3590{
ee959b00 3591 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3592 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3593 unsigned long lock_flags = 0;
3594 int rc = count;
3595
3596 if (!capable(CAP_SYS_ADMIN))
3597 return -EACCES;
3598
1da177e4 3599 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 3600 while (ioa_cfg->in_reset_reload) {
3601 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3602 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3603 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3604 }
3605
1da177e4
LT
3606 ioa_cfg->errors_logged = 0;
3607 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3608
3609 if (ioa_cfg->in_reset_reload) {
3610 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3611 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3612
3613 /* Wait for a second for any errors to be logged */
3614 msleep(1000);
3615 } else {
3616 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3617 return -EIO;
3618 }
3619
3620 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3621 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3622 rc = -EIO;
3623 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3624
3625 return rc;
3626}
3627
ee959b00 3628static struct device_attribute ipr_diagnostics_attr = {
3629 .attr = {
3630 .name = "run_diagnostics",
3631 .mode = S_IWUSR,
3632 },
3633 .store = ipr_store_diagnostics
3634};
3635
f37eb54b 3636/**
3637 * ipr_show_adapter_state - Show the adapter's state
3638 * @dev: device struct
3639 * @attr: device attribute (unused)
ee959b00 3640 * @buf: buffer
f37eb54b 3641 *
3642 * Return value:
3643 * number of bytes printed to buffer
3644 **/
ee959b00
TJ
3645static ssize_t ipr_show_adapter_state(struct device *dev,
3646 struct device_attribute *attr, char *buf)
f37eb54b 3647{
ee959b00 3648 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b 3649 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3650 unsigned long lock_flags = 0;
3651 int len;
3652
3653 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
56d6aa33 3654 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
f37eb54b 3655 len = snprintf(buf, PAGE_SIZE, "offline\n");
3656 else
3657 len = snprintf(buf, PAGE_SIZE, "online\n");
3658 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3659 return len;
3660}
3661
3662/**
3663 * ipr_store_adapter_state - Change adapter state
ee959b00 3664 * @dev: device struct
a96099e2 3665 * @attr: device attribute (unused)
3666 * @buf: buffer
3667 * @count: buffer size
f37eb54b 3668 *
3669 * This function will change the adapter's state.
3670 *
3671 * Return value:
3672 * count on success / other on failure
3673 **/
ee959b00
TJ
3674static ssize_t ipr_store_adapter_state(struct device *dev,
3675 struct device_attribute *attr,
f37eb54b 3676 const char *buf, size_t count)
3677{
ee959b00 3678 struct Scsi_Host *shost = class_to_shost(dev);
f37eb54b 3679 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3680 unsigned long lock_flags;
56d6aa33 3681 int result = count, i;
f37eb54b 3682
3683 if (!capable(CAP_SYS_ADMIN))
3684 return -EACCES;
3685
3686 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
56d6aa33 3687 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3688 !strncmp(buf, "online", 6)) {
3689 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3690 spin_lock(&ioa_cfg->hrrq[i]._lock);
3691 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3692 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3693 }
3694 wmb();
f37eb54b 3695 ioa_cfg->reset_retries = 0;
3696 ioa_cfg->in_ioa_bringdown = 0;
3697 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3698 }
3699 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3700 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3701
3702 return result;
3703}
3704
ee959b00 3705static struct device_attribute ipr_ioa_state_attr = {
f37eb54b 3706 .attr = {
49dd0961 3707 .name = "online_state",
f37eb54b 3708 .mode = S_IRUGO | S_IWUSR,
3709 },
3710 .show = ipr_show_adapter_state,
3711 .store = ipr_store_adapter_state
3712};
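/*
 * Usage sketch (illustrative): a privileged user can bring an adapter that
 * the driver has marked dead back online by writing "online" to this
 * attribute, e.g.
 *
 *   echo online > /sys/class/scsi_host/host0/online_state
 *
 * Any other string is ignored and the write still returns count.
 */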
3713
1da177e4
LT
3714/**
3715 * ipr_store_reset_adapter - Reset the adapter
ee959b00 3716 * @dev: device struct
a96099e2 3717 * @attr: device attribute (unused)
3718 * @buf: buffer
3719 * @count: buffer size
3720 *
3721 * This function will reset the adapter.
3722 *
3723 * Return value:
3724 * count on success / other on failure
3725 **/
ee959b00
TJ
3726static ssize_t ipr_store_reset_adapter(struct device *dev,
3727 struct device_attribute *attr,
3728 const char *buf, size_t count)
3729{
ee959b00 3730 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
3731 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3732 unsigned long lock_flags;
3733 int result = count;
3734
3735 if (!capable(CAP_SYS_ADMIN))
3736 return -EACCES;
3737
3738 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3739 if (!ioa_cfg->in_reset_reload)
3740 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3741 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3742 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3743
3744 return result;
3745}
3746
ee959b00 3747static struct device_attribute ipr_ioa_reset_attr = {
3748 .attr = {
3749 .name = "reset_host",
3750 .mode = S_IWUSR,
3751 },
3752 .store = ipr_store_reset_adapter
3753};
3754
511cbce2 3755static int ipr_iopoll(struct irq_poll *iop, int budget);
b53d124a 3756 /**
3757 * ipr_show_iopoll_weight - Show ipr polling mode
3758 * @dev: class device struct
a96099e2 3759 * @attr: device attribute (unused)
b53d124a 3760 * @buf: buffer
3761 *
3762 * Return value:
3763 * number of bytes printed to buffer
3764 **/
3765static ssize_t ipr_show_iopoll_weight(struct device *dev,
3766 struct device_attribute *attr, char *buf)
3767{
3768 struct Scsi_Host *shost = class_to_shost(dev);
3769 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3770 unsigned long lock_flags = 0;
3771 int len;
3772
3773 spin_lock_irqsave(shost->host_lock, lock_flags);
3774 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3775 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3776
3777 return len;
3778}
3779
3780/**
3781 * ipr_store_iopoll_weight - Change the adapter's polling mode
3782 * @dev: class device struct
a96099e2 3783 * @attr: device attribute (unused)
b53d124a 3784 * @buf: buffer
a96099e2 3785 * @count: buffer size
b53d124a 3786 *
3787 * Return value:
3788 * number of bytes printed to buffer
3789 **/
3790static ssize_t ipr_store_iopoll_weight(struct device *dev,
3791 struct device_attribute *attr,
3792 const char *buf, size_t count)
3793{
3794 struct Scsi_Host *shost = class_to_shost(dev);
3795 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3796 unsigned long user_iopoll_weight;
3797 unsigned long lock_flags = 0;
3798 int i;
3799
3800 if (!ioa_cfg->sis64) {
511cbce2 3801 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
b53d124a 3802 return -EINVAL;
3803 }
3804 if (kstrtoul(buf, 10, &user_iopoll_weight))
3805 return -EINVAL;
3806
3807 if (user_iopoll_weight > 256) {
 511cbce2 3808 		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
b53d124a 3809 return -EINVAL;
3810 }
3811
3812 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
 511cbce2 3813 		dev_info(&ioa_cfg->pdev->dev, "irq_poll weight unchanged; it already has that value\n");
b53d124a 3814 return strlen(buf);
3815 }
3816
89f8b33c 3817 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 3818 for (i = 1; i < ioa_cfg->hrrq_num; i++)
511cbce2 3819 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
b53d124a 3820 }
3821
3822 spin_lock_irqsave(shost->host_lock, lock_flags);
3823 ioa_cfg->iopoll_weight = user_iopoll_weight;
89f8b33c 3824 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 3825 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
511cbce2 3826 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
b53d124a 3827 ioa_cfg->iopoll_weight, ipr_iopoll);
b53d124a 3828 }
3829 }
3830 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3831
3832 return strlen(buf);
3833}
3834
3835static struct device_attribute ipr_iopoll_weight_attr = {
3836 .attr = {
3837 .name = "iopoll_weight",
3838 .mode = S_IRUGO | S_IWUSR,
3839 },
3840 .show = ipr_show_iopoll_weight,
3841 .store = ipr_store_iopoll_weight
3842};
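/*
 * Usage sketch (illustrative): on SIS64 adapters with more than one MSI-X
 * vector, writing a value between 1 and 256 switches the secondary HRRQs
 * to irq_poll mode with that budget; writing 0 returns them to pure
 * interrupt mode, e.g.
 *
 *   echo 64 > /sys/class/scsi_host/host0/iopoll_weight
 */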
3843
1da177e4
LT
3844/**
3845 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3846 * @buf_len: buffer length
3847 *
3848 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3849 * list to use for microcode download
3850 *
3851 * Return value:
3852 * pointer to sglist / NULL on failure
3853 **/
3854static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3855{
f95dc1bb 3856 int sg_size, order;
1da177e4 3857 struct ipr_sglist *sglist;
1da177e4
LT
3858
3859 /* Get the minimum size per scatter/gather element */
3860 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3861
3862 /* Get the actual size per element */
3863 order = get_order(sg_size);
3864
1da177e4 3865 /* Allocate a scatter/gather list for the DMA */
f95dc1bb 3866 sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
1da177e4
LT
3867 if (sglist == NULL) {
3868 ipr_trace;
3869 return NULL;
3870 }
1da177e4 3871 sglist->order = order;
f95dc1bb
BVA
3872 sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3873 &sglist->num_sg);
3874 if (!sglist->scatterlist) {
3875 kfree(sglist);
3876 return NULL;
1da177e4
LT
3877 }
3878
3879 return sglist;
3880}
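/*
 * Sizing sketch (illustrative numbers only, not values from this driver):
 * for a hypothetical 4 MB microcode image and a scatter/gather limit of
 * 64 elements, sg_size is roughly 4 MB / 63 ~= 66 KB, so get_order()
 * selects 128 KB chunks (order 5 with 4 KB pages) and sgl_alloc_order()
 * builds a list of about 32 such chunks to cover the image.
 */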
3881
3882/**
3883 * ipr_free_ucode_buffer - Frees a microcode download buffer
a96099e2 3884 * @sglist: scatter/gather list pointer
3885 *
3886 * Free a DMA'able ucode download buffer previously allocated with
3887 * ipr_alloc_ucode_buffer
3888 *
3889 * Return value:
3890 * nothing
3891 **/
3892static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3893{
f95dc1bb 3894 sgl_free_order(sglist->scatterlist, sglist->order);
1da177e4
LT
3895 kfree(sglist);
3896}
3897
3898/**
3899 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3900 * @sglist: scatter/gather list pointer
3901 * @buffer: buffer pointer
3902 * @len: buffer length
3903 *
3904 * Copy a microcode image from a user buffer into a buffer allocated by
3905 * ipr_alloc_ucode_buffer
3906 *
3907 * Return value:
3908 * 0 on success / other on failure
3909 **/
3910static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3911 u8 *buffer, u32 len)
3912{
3913 int bsize_elem, i, result = 0;
c71ae886 3914 struct scatterlist *sg;
1da177e4
LT
3915 void *kaddr;
3916
3917 /* Determine the actual number of bytes per element */
3918 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3919
c71ae886 3920 sg = sglist->scatterlist;
1da177e4 3921
c71ae886
ML
3922 for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
3923 buffer += bsize_elem) {
3924 struct page *page = sg_page(sg);
45711f1a
JA
3925
3926 kaddr = kmap(page);
1da177e4 3927 memcpy(kaddr, buffer, bsize_elem);
45711f1a 3928 kunmap(page);
1da177e4 3929
c71ae886 3930 sg->length = bsize_elem;
1da177e4
LT
3931
3932 if (result != 0) {
3933 ipr_trace;
3934 return result;
3935 }
3936 }
3937
3938 if (len % bsize_elem) {
c71ae886 3939 struct page *page = sg_page(sg);
45711f1a
JA
3940
3941 kaddr = kmap(page);
1da177e4 3942 memcpy(kaddr, buffer, len % bsize_elem);
45711f1a 3943 kunmap(page);
1da177e4 3944
c71ae886 3945 sg->length = len % bsize_elem;
1da177e4
LT
3946 }
3947
3948 sglist->buffer_len = len;
3949 return result;
3950}
3951
a32c055f
WB
3952/**
3953 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3954 * @ipr_cmd: ipr command struct
3955 * @sglist: scatter/gather list
3956 *
3957 * Builds a microcode download IOA data list (IOADL).
3958 *
3959 **/
3960static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3961 struct ipr_sglist *sglist)
3962{
3963 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3964 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3965 struct scatterlist *scatterlist = sglist->scatterlist;
c71ae886 3966 struct scatterlist *sg;
a32c055f
WB
3967 int i;
3968
3969 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3970 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3971 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3972
3973 ioarcb->ioadl_len =
3974 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
c71ae886 3975 for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
a32c055f 3976 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
c71ae886
ML
3977 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
3978 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
a32c055f
WB
3979 }
3980
3981 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3982}
3983
1da177e4 3984/**
12baa420 3985 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3986 * @ipr_cmd: ipr command struct
3987 * @sglist: scatter/gather list
1da177e4 3988 *
12baa420 3989 * Builds a microcode download IOA data list (IOADL).
1da177e4 3990 *
1da177e4 3991 **/
12baa420 3992static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3993 struct ipr_sglist *sglist)
1da177e4 3994{
1da177e4 3995 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 3996 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4 3997 struct scatterlist *scatterlist = sglist->scatterlist;
c71ae886 3998 struct scatterlist *sg;
1da177e4
LT
3999 int i;
4000
12baa420 4001 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
1da177e4 4002 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
4003 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
4004
4005 ioarcb->ioadl_len =
4006 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4007
c71ae886 4008 for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
1da177e4 4009 ioadl[i].flags_and_data_len =
c71ae886 4010 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
1da177e4 4011 ioadl[i].address =
c71ae886 4012 cpu_to_be32(sg_dma_address(sg));
1da177e4
LT
4013 }
4014
12baa420 4015 ioadl[i-1].flags_and_data_len |=
4016 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4017}
4018
4019/**
4020 * ipr_update_ioa_ucode - Update IOA's microcode
4021 * @ioa_cfg: ioa config struct
4022 * @sglist: scatter/gather list
4023 *
4024 * Initiate an adapter reset to update the IOA's microcode
4025 *
4026 * Return value:
4027 * 0 on success / -EIO on failure
4028 **/
4029static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4030 struct ipr_sglist *sglist)
4031{
4032 unsigned long lock_flags;
4033
4034 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
203fa3fe 4035 while (ioa_cfg->in_reset_reload) {
4036 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4037 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4038 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4039 }
12baa420 4040
4041 if (ioa_cfg->ucode_sglist) {
4042 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4043 dev_err(&ioa_cfg->pdev->dev,
4044 "Microcode download already in progress\n");
4045 return -EIO;
1da177e4 4046 }
12baa420 4047
d73341bf
AB
4048 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4049 sglist->scatterlist, sglist->num_sg,
4050 DMA_TO_DEVICE);
12baa420 4051
4052 if (!sglist->num_dma_sg) {
4053 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4054 dev_err(&ioa_cfg->pdev->dev,
4055 "Failed to map microcode download buffer!\n");
1da177e4
LT
4056 return -EIO;
4057 }
4058
12baa420 4059 ioa_cfg->ucode_sglist = sglist;
4060 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4061 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4062 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4063
4064 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4065 ioa_cfg->ucode_sglist = NULL;
4066 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4
LT
4067 return 0;
4068}
4069
4070/**
4071 * ipr_store_update_fw - Update the firmware on the adapter
4072 * @dev: device struct
4073 * @attr: device attribute (unused)
4074 * @buf: buffer
4075 * @count: buffer size
4076 *
4077 * This function will update the firmware on the adapter.
4078 *
4079 * Return value:
4080 * count on success / other on failure
4081 **/
ee959b00
TJ
4082static ssize_t ipr_store_update_fw(struct device *dev,
4083 struct device_attribute *attr,
4084 const char *buf, size_t count)
1da177e4 4085{
ee959b00 4086 struct Scsi_Host *shost = class_to_shost(dev);
1da177e4
LT
4087 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4088 struct ipr_ucode_image_header *image_hdr;
4089 const struct firmware *fw_entry;
4090 struct ipr_sglist *sglist;
1da177e4
LT
4091 char fname[100];
4092 char *src;
21b81716 4093 char *endline;
d63c7dd5 4094 int result, dnld_size;
1da177e4
LT
4095
4096 if (!capable(CAP_SYS_ADMIN))
4097 return -EACCES;
4098
d63c7dd5 4099 snprintf(fname, sizeof(fname), "%s", buf);
1da177e4 4100
21b81716
GKB
4101 endline = strchr(fname, '\n');
4102 if (endline)
4103 *endline = '\0';
4104
203fa3fe 4105 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4106 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4107 return -EIO;
4108 }
4109
4110 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4111
1da177e4
LT
4112 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4113 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4114 sglist = ipr_alloc_ucode_buffer(dnld_size);
4115
4116 if (!sglist) {
4117 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4118 release_firmware(fw_entry);
4119 return -ENOMEM;
4120 }
4121
4122 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4123
4124 if (result) {
4125 dev_err(&ioa_cfg->pdev->dev,
4126 "Microcode buffer copy to DMA buffer failed\n");
12baa420 4127 goto out;
1da177e4
LT
4128 }
4129
14ed9cc7
WB
4130 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4131
12baa420 4132 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
1da177e4 4133
12baa420 4134 if (!result)
4135 result = count;
4136out:
1da177e4
LT
4137 ipr_free_ucode_buffer(sglist);
4138 release_firmware(fw_entry);
12baa420 4139 return result;
1da177e4
LT
4140}
4141
ee959b00 4142static struct device_attribute ipr_update_fw_attr = {
4143 .attr = {
4144 .name = "update_fw",
4145 .mode = S_IWUSR,
4146 },
4147 .store = ipr_store_update_fw
4148};
4149
75576bb9
WB
4150/**
4151 * ipr_show_fw_type - Show the adapter's firmware type.
4152 * @dev: class device struct
a96099e2 4153 * @attr: device attribute (unused)
4154 * @buf: buffer
4155 *
4156 * Return value:
4157 * number of bytes printed to buffer
4158 **/
4159static ssize_t ipr_show_fw_type(struct device *dev,
4160 struct device_attribute *attr, char *buf)
4161{
4162 struct Scsi_Host *shost = class_to_shost(dev);
4163 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4164 unsigned long lock_flags = 0;
4165 int len;
4166
4167 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4168 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4169 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4170 return len;
4171}
4172
4173static struct device_attribute ipr_ioa_fw_type_attr = {
4174 .attr = {
4175 .name = "fw_type",
4176 .mode = S_IRUGO,
4177 },
4178 .show = ipr_show_fw_type
4179};
4180
afc3f83c
BK
4181static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4182 struct bin_attribute *bin_attr, char *buf,
4183 loff_t off, size_t count)
4184{
aabd5fea 4185 struct device *cdev = kobj_to_dev(kobj);
afc3f83c
BK
4186 struct Scsi_Host *shost = class_to_shost(cdev);
4187 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4188 struct ipr_hostrcb *hostrcb;
4189 unsigned long lock_flags = 0;
4190 int ret;
4191
4192 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4193 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4194 struct ipr_hostrcb, queue);
4195 if (!hostrcb) {
4196 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4197 return 0;
4198 }
4199 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4200 sizeof(hostrcb->hcam));
4201 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4202 return ret;
4203}
4204
4205static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4206 struct bin_attribute *bin_attr, char *buf,
4207 loff_t off, size_t count)
4208{
aabd5fea 4209 struct device *cdev = kobj_to_dev(kobj);
afc3f83c
BK
4210 struct Scsi_Host *shost = class_to_shost(cdev);
4211 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4212 struct ipr_hostrcb *hostrcb;
4213 unsigned long lock_flags = 0;
4214
4215 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4216 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4217 struct ipr_hostrcb, queue);
4218 if (!hostrcb) {
4219 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4220 return count;
4221 }
4222
4223 /* Reclaim hostrcb before exit */
4224 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4225 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4226 return count;
4227}
4228
4229static struct bin_attribute ipr_ioa_async_err_log = {
4230 .attr = {
4231 .name = "async_err_log",
4232 .mode = S_IRUGO | S_IWUSR,
4233 },
4234 .size = 0,
4235 .read = ipr_read_async_err_log,
4236 .write = ipr_next_async_err_log
4237};
4238
47d1e6ae
BVA
4239static struct attribute *ipr_ioa_attrs[] = {
4240 &ipr_fw_version_attr.attr,
4241 &ipr_log_level_attr.attr,
4242 &ipr_diagnostics_attr.attr,
4243 &ipr_ioa_state_attr.attr,
4244 &ipr_ioa_reset_attr.attr,
4245 &ipr_update_fw_attr.attr,
4246 &ipr_ioa_fw_type_attr.attr,
4247 &ipr_iopoll_weight_attr.attr,
4248 NULL,
4249};
4250
47d1e6ae
BVA
4251ATTRIBUTE_GROUPS(ipr_ioa);
4252
1da177e4
LT
4253#ifdef CONFIG_SCSI_IPR_DUMP
4254/**
4255 * ipr_read_dump - Dump the adapter
2c3c8bea 4256 * @filp: open sysfs file
1da177e4 4257 * @kobj: kobject struct
91a69029 4258 * @bin_attr: bin_attribute struct
4259 * @buf: buffer
4260 * @off: offset
4261 * @count: buffer size
4262 *
4263 * Return value:
4264 * number of bytes printed to buffer
4265 **/
2c3c8bea 4266static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4267 struct bin_attribute *bin_attr,
4268 char *buf, loff_t off, size_t count)
1da177e4 4269{
aabd5fea 4270 struct device *cdev = kobj_to_dev(kobj);
1da177e4
LT
4271 struct Scsi_Host *shost = class_to_shost(cdev);
4272 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4273 struct ipr_dump *dump;
4274 unsigned long lock_flags = 0;
4275 char *src;
4d4dd706 4276 int len, sdt_end;
1da177e4
LT
4277 size_t rc = count;
4278
4279 if (!capable(CAP_SYS_ADMIN))
4280 return -EACCES;
4281
4282 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4283 dump = ioa_cfg->dump;
4284
4285 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4286 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4287 return 0;
4288 }
4289 kref_get(&dump->kref);
4290 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4291
4292 if (off > dump->driver_dump.hdr.len) {
4293 kref_put(&dump->kref, ipr_release_dump);
4294 return 0;
4295 }
4296
4297 if (off + count > dump->driver_dump.hdr.len) {
4298 count = dump->driver_dump.hdr.len - off;
4299 rc = count;
4300 }
4301
4302 if (count && off < sizeof(dump->driver_dump)) {
4303 if (off + count > sizeof(dump->driver_dump))
4304 len = sizeof(dump->driver_dump) - off;
4305 else
4306 len = count;
4307 src = (u8 *)&dump->driver_dump + off;
4308 memcpy(buf, src, len);
4309 buf += len;
4310 off += len;
4311 count -= len;
4312 }
4313
4314 off -= sizeof(dump->driver_dump);
4315
4d4dd706
KSS
4316 if (ioa_cfg->sis64)
4317 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4318 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4319 sizeof(struct ipr_sdt_entry));
4320 else
4321 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4322 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4323
4324 if (count && off < sdt_end) {
4325 if (off + count > sdt_end)
4326 len = sdt_end - off;
4327 else
4328 len = count;
4329 src = (u8 *)&dump->ioa_dump + off;
4330 memcpy(buf, src, len);
4331 buf += len;
4332 off += len;
4333 count -= len;
4334 }
4335
4d4dd706 4336 off -= sdt_end;
1da177e4
LT
4337
4338 while (count) {
4339 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4340 len = PAGE_ALIGN(off) - off;
4341 else
4342 len = count;
4343 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4344 src += off & ~PAGE_MASK;
4345 memcpy(buf, src, len);
4346 buf += len;
4347 off += len;
4348 count -= len;
4349 }
4350
4351 kref_put(&dump->kref, ipr_release_dump);
4352 return rc;
4353}
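/*
 * Layout note (as implied by the offset arithmetic above): the dump image
 * read through sysfs consists of the driver dump header and its entries
 * first, then the IOA dump header including the SDT entries, and finally
 * the IOA dump data itself, which lives in individually allocated pages
 * indexed by ioa_dump.ioa_data[].
 */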
4354
4355/**
4356 * ipr_alloc_dump - Prepare for adapter dump
4357 * @ioa_cfg: ioa config struct
4358 *
4359 * Return value:
4360 * 0 on success / other on failure
4361 **/
4362static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4363{
4364 struct ipr_dump *dump;
4d4dd706 4365 __be32 **ioa_data;
1da177e4
LT
4366 unsigned long lock_flags = 0;
4367
0bc42e35 4368 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
1da177e4
LT
4369
4370 if (!dump) {
4371 ipr_err("Dump memory allocation failed\n");
4372 return -ENOMEM;
4373 }
4374
4d4dd706 4375 if (ioa_cfg->sis64)
4376 ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
4377 sizeof(__be32 *)));
4d4dd706 4378 else
4379 ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
4380 sizeof(__be32 *)));
4d4dd706
KSS
4381
4382 if (!ioa_data) {
4383 ipr_err("Dump memory allocation failed\n");
4384 kfree(dump);
4385 return -ENOMEM;
4386 }
4387
4388 dump->ioa_dump.ioa_data = ioa_data;
4389
1da177e4
LT
4390 kref_init(&dump->kref);
4391 dump->ioa_cfg = ioa_cfg;
4392
4393 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4394
4395 if (INACTIVE != ioa_cfg->sdt_state) {
4396 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4d4dd706 4397 vfree(dump->ioa_dump.ioa_data);
1da177e4
LT
4398 kfree(dump);
4399 return 0;
4400 }
4401
4402 ioa_cfg->dump = dump;
4403 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
56d6aa33 4404 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4405 ioa_cfg->dump_taken = 1;
4406 schedule_work(&ioa_cfg->work_q);
4407 }
4408 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4409
1da177e4
LT
4410 return 0;
4411}
4412
4413/**
4414 * ipr_free_dump - Free adapter dump memory
4415 * @ioa_cfg: ioa config struct
4416 *
4417 * Return value:
4418 * 0 on success / other on failure
4419 **/
4420static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4421{
4422 struct ipr_dump *dump;
4423 unsigned long lock_flags = 0;
4424
4425 ENTER;
4426
4427 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4428 dump = ioa_cfg->dump;
4429 if (!dump) {
4430 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4431 return 0;
4432 }
4433
4434 ioa_cfg->dump = NULL;
4435 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4436
4437 kref_put(&dump->kref, ipr_release_dump);
4438
4439 LEAVE;
4440 return 0;
4441}
4442
4443/**
4444 * ipr_write_dump - Setup dump state of adapter
2c3c8bea 4445 * @filp: open sysfs file
1da177e4 4446 * @kobj: kobject struct
91a69029 4447 * @bin_attr: bin_attribute struct
4448 * @buf: buffer
4449 * @off: offset
4450 * @count: buffer size
4451 *
4452 * Return value:
 4453 * count on success / negative errno on failure
4454 **/
2c3c8bea 4455static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4456 struct bin_attribute *bin_attr,
4457 char *buf, loff_t off, size_t count)
1da177e4 4458{
aabd5fea 4459 struct device *cdev = kobj_to_dev(kobj);
1da177e4
LT
4460 struct Scsi_Host *shost = class_to_shost(cdev);
4461 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4462 int rc;
4463
4464 if (!capable(CAP_SYS_ADMIN))
4465 return -EACCES;
4466
4467 if (buf[0] == '1')
4468 rc = ipr_alloc_dump(ioa_cfg);
4469 else if (buf[0] == '0')
4470 rc = ipr_free_dump(ioa_cfg);
4471 else
4472 return -EINVAL;
4473
4474 if (rc)
4475 return rc;
4476 else
4477 return count;
4478}
4479
4480static struct bin_attribute ipr_dump_attr = {
4481 .attr = {
4482 .name = "dump",
4483 .mode = S_IRUSR | S_IWUSR,
4484 },
4485 .size = 0,
4486 .read = ipr_read_dump,
4487 .write = ipr_write_dump
4488};
4489#else
4490static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4491#endif
4492
4493/**
4494 * ipr_change_queue_depth - Change the device's queue depth
4495 * @sdev: scsi device struct
4496 * @qdepth: depth to set
4497 *
4498 * Return value:
4499 * actual depth set
4500 **/
db5ed4df 4501static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
1da177e4 4502{
35a39691
BK
4503 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4504 struct ipr_resource_entry *res;
4505 unsigned long lock_flags = 0;
4506
4507 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4508 res = (struct ipr_resource_entry *)sdev->hostdata;
4509
4510 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4511 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4512 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4513
db5ed4df 4514 scsi_change_queue_depth(sdev, qdepth);
1da177e4
LT
4515 return sdev->queue_depth;
4516}
4517
1da177e4
LT
4518/**
4519 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4520 * @dev: device struct
46d74563 4521 * @attr: device attribute structure
4522 * @buf: buffer
4523 *
4524 * Return value:
4525 * number of bytes printed to buffer
4526 **/
10523b3b 4527static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4528{
4529 struct scsi_device *sdev = to_scsi_device(dev);
4530 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4531 struct ipr_resource_entry *res;
4532 unsigned long lock_flags = 0;
4533 ssize_t len = -ENXIO;
4534
4535 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4536 res = (struct ipr_resource_entry *)sdev->hostdata;
4537 if (res)
3e7ebdfa 4538 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
1da177e4
LT
4539 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4540 return len;
4541}
4542
4543static struct device_attribute ipr_adapter_handle_attr = {
4544 .attr = {
4545 .name = "adapter_handle",
4546 .mode = S_IRUSR,
4547 },
4548 .show = ipr_show_adapter_handle
4549};
4550
3e7ebdfa 4551/**
4552 * ipr_show_resource_path - Show the resource path or the resource address for
4553 * this device.
3e7ebdfa 4554 * @dev: device struct
46d74563 4555 * @attr: device attribute structure
4556 * @buf: buffer
4557 *
4558 * Return value:
4559 * number of bytes printed to buffer
4560 **/
4561static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4562{
4563 struct scsi_device *sdev = to_scsi_device(dev);
4564 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4565 struct ipr_resource_entry *res;
4566 unsigned long lock_flags = 0;
4567 ssize_t len = -ENXIO;
4568 char buffer[IPR_MAX_RES_PATH_LENGTH];
4569
4570 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4571 res = (struct ipr_resource_entry *)sdev->hostdata;
5adcbeb3 4572 if (res && ioa_cfg->sis64)
3e7ebdfa 4573 len = snprintf(buf, PAGE_SIZE, "%s\n",
4574 __ipr_format_res_path(res->res_path, buffer,
4575 sizeof(buffer)));
4576 else if (res)
4577 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4578 res->bus, res->target, res->lun);
4579
3e7ebdfa
WB
4580 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4581 return len;
4582}
4583
4584static struct device_attribute ipr_resource_path_attr = {
4585 .attr = {
4586 .name = "resource_path",
75576bb9 4587 .mode = S_IRUGO,
4588 },
4589 .show = ipr_show_resource_path
4590};
4591
46d74563
WB
4592/**
4593 * ipr_show_device_id - Show the device_id for this device.
4594 * @dev: device struct
4595 * @attr: device attribute structure
4596 * @buf: buffer
4597 *
4598 * Return value:
4599 * number of bytes printed to buffer
4600 **/
4601static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4602{
4603 struct scsi_device *sdev = to_scsi_device(dev);
4604 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4605 struct ipr_resource_entry *res;
4606 unsigned long lock_flags = 0;
4607 ssize_t len = -ENXIO;
4608
4609 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4610 res = (struct ipr_resource_entry *)sdev->hostdata;
4611 if (res && ioa_cfg->sis64)
bb8647e8 4612 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4613 else if (res)
4614 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4615
4616 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4617 return len;
4618}
4619
4620static struct device_attribute ipr_device_id_attr = {
4621 .attr = {
4622 .name = "device_id",
4623 .mode = S_IRUGO,
4624 },
4625 .show = ipr_show_device_id
4626};
4627
75576bb9
WB
4628/**
4629 * ipr_show_resource_type - Show the resource type for this device.
4630 * @dev: device struct
46d74563 4631 * @attr: device attribute structure
4632 * @buf: buffer
4633 *
4634 * Return value:
4635 * number of bytes printed to buffer
4636 **/
4637static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4638{
4639 struct scsi_device *sdev = to_scsi_device(dev);
4640 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4641 struct ipr_resource_entry *res;
4642 unsigned long lock_flags = 0;
4643 ssize_t len = -ENXIO;
4644
4645 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4646 res = (struct ipr_resource_entry *)sdev->hostdata;
4647
4648 if (res)
4649 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4650
4651 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4652 return len;
4653}
4654
4655static struct device_attribute ipr_resource_type_attr = {
4656 .attr = {
4657 .name = "resource_type",
4658 .mode = S_IRUGO,
4659 },
4660 .show = ipr_show_resource_type
4661};
4662
f8ee25d7
WX
4663/**
4664 * ipr_show_raw_mode - Show the adapter's raw mode
4665 * @dev: class device struct
a96099e2 4666 * @attr: device attribute (unused)
4667 * @buf: buffer
4668 *
4669 * Return value:
4670 * number of bytes printed to buffer
4671 **/
4672static ssize_t ipr_show_raw_mode(struct device *dev,
4673 struct device_attribute *attr, char *buf)
4674{
4675 struct scsi_device *sdev = to_scsi_device(dev);
4676 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4677 struct ipr_resource_entry *res;
4678 unsigned long lock_flags = 0;
4679 ssize_t len;
4680
4681 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4682 res = (struct ipr_resource_entry *)sdev->hostdata;
4683 if (res)
4684 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4685 else
4686 len = -ENXIO;
4687 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4688 return len;
4689}
4690
4691/**
4692 * ipr_store_raw_mode - Change the adapter's raw mode
4693 * @dev: class device struct
a96099e2 4694 * @attr: device attribute (unused)
f8ee25d7 4695 * @buf: buffer
a96099e2 4696 * @count: buffer size
4697 *
4698 * Return value:
 4699 * number of bytes consumed from buffer on success / -EINVAL or -ENXIO on failure
4700 **/
4701static ssize_t ipr_store_raw_mode(struct device *dev,
4702 struct device_attribute *attr,
4703 const char *buf, size_t count)
4704{
4705 struct scsi_device *sdev = to_scsi_device(dev);
4706 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4707 struct ipr_resource_entry *res;
4708 unsigned long lock_flags = 0;
4709 ssize_t len;
4710
4711 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4712 res = (struct ipr_resource_entry *)sdev->hostdata;
4713 if (res) {
e35d7f27 4714 if (ipr_is_af_dasd_device(res)) {
4715 res->raw_mode = simple_strtoul(buf, NULL, 10);
4716 len = strlen(buf);
4717 if (res->sdev)
4718 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4719 res->raw_mode ? "enabled" : "disabled");
4720 } else
4721 len = -EINVAL;
4722 } else
4723 len = -ENXIO;
4724 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4725 return len;
4726}
4727
4728static struct device_attribute ipr_raw_mode_attr = {
4729 .attr = {
4730 .name = "raw_mode",
4731 .mode = S_IRUGO | S_IWUSR,
4732 },
4733 .show = ipr_show_raw_mode,
4734 .store = ipr_store_raw_mode
4735};
4736
47d1e6ae
BVA
4737static struct attribute *ipr_dev_attrs[] = {
4738 &ipr_adapter_handle_attr.attr,
4739 &ipr_resource_path_attr.attr,
4740 &ipr_device_id_attr.attr,
4741 &ipr_resource_type_attr.attr,
4742 &ipr_raw_mode_attr.attr,
4743 NULL,
4744};
4745
47d1e6ae
BVA
4746ATTRIBUTE_GROUPS(ipr_dev);
4747
1da177e4
LT
4748/**
4749 * ipr_biosparam - Return the HSC mapping
4750 * @sdev: scsi device struct
4751 * @block_device: block device pointer
4752 * @capacity: capacity of the device
4753 * @parm: Array containing returned HSC values.
4754 *
4755 * This function generates the HSC parms that fdisk uses.
4756 * We want to make sure we return something that places partitions
4757 * on 4k boundaries for best performance with the IOA.
4758 *
4759 * Return value:
4760 * 0 on success
4761 **/
4762static int ipr_biosparam(struct scsi_device *sdev,
4763 struct block_device *block_device,
4764 sector_t capacity, int *parm)
4765{
4766 int heads, sectors;
4767 sector_t cylinders;
4768
4769 heads = 128;
4770 sectors = 32;
4771
4772 cylinders = capacity;
4773 sector_div(cylinders, (128 * 32));
4774
4775 /* return result */
4776 parm[0] = heads;
4777 parm[1] = sectors;
4778 parm[2] = cylinders;
4779
4780 return 0;
4781}
4782
35a39691
BK
4783/**
4784 * ipr_find_starget - Find target based on bus/target.
4785 * @starget: scsi target struct
4786 *
4787 * Return value:
4788 * resource entry pointer if found / NULL if not found
4789 **/
4790static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4791{
4792 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4793 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4794 struct ipr_resource_entry *res;
4795
4796 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3e7ebdfa 4797 if ((res->bus == starget->channel) &&
0ee1d714 4798 (res->target == starget->id)) {
4799 return res;
4800 }
4801 }
4802
4803 return NULL;
4804}
4805
4806static struct ata_port_info sata_port_info;
4807
4808/**
4809 * ipr_target_alloc - Prepare for commands to a SCSI target
4810 * @starget: scsi target struct
4811 *
4812 * If the device is a SATA device, this function allocates an
4813 * ATA port with libata, else it does nothing.
4814 *
4815 * Return value:
4816 * 0 on success / non-0 on failure
4817 **/
4818static int ipr_target_alloc(struct scsi_target *starget)
4819{
4820 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4821 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4822 struct ipr_sata_port *sata_port;
4823 struct ata_port *ap;
4824 struct ipr_resource_entry *res;
4825 unsigned long lock_flags;
4826
4827 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4828 res = ipr_find_starget(starget);
4829 starget->hostdata = NULL;
4830
4831 if (res && ipr_is_gata(res)) {
4832 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4833 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4834 if (!sata_port)
4835 return -ENOMEM;
4836
4837 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4838 if (ap) {
4839 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4840 sata_port->ioa_cfg = ioa_cfg;
4841 sata_port->ap = ap;
4842 sata_port->res = res;
4843
4844 res->sata_port = sata_port;
4845 ap->private_data = sata_port;
4846 starget->hostdata = sata_port;
4847 } else {
4848 kfree(sata_port);
4849 return -ENOMEM;
4850 }
4851 }
4852 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4853
4854 return 0;
4855}
4856
4857/**
4858 * ipr_target_destroy - Destroy a SCSI target
4859 * @starget: scsi target struct
4860 *
4861 * If the device was a SATA device, this function frees the libata
4862 * ATA port, else it does nothing.
4863 *
4864 **/
4865static void ipr_target_destroy(struct scsi_target *starget)
4866{
4867 struct ipr_sata_port *sata_port = starget->hostdata;
3e7ebdfa
WB
4868 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4869 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4870
4871 if (ioa_cfg->sis64) {
4872 if (!ipr_find_starget(starget)) {
4873 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4874 clear_bit(starget->id, ioa_cfg->array_ids);
4875 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4876 clear_bit(starget->id, ioa_cfg->vset_ids);
4877 else if (starget->channel == 0)
4878 clear_bit(starget->id, ioa_cfg->target_ids);
4879 }
3e7ebdfa 4880 }
35a39691
BK
4881
4882 if (sata_port) {
4883 starget->hostdata = NULL;
4884 ata_sas_port_destroy(sata_port->ap);
4885 kfree(sata_port);
4886 }
4887}
4888
4889/**
4890 * ipr_find_sdev - Find device based on bus/target/lun.
4891 * @sdev: scsi device struct
4892 *
4893 * Return value:
4894 * resource entry pointer if found / NULL if not found
4895 **/
4896static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4897{
4898 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4899 struct ipr_resource_entry *res;
4900
4901 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4902 if ((res->bus == sdev->channel) &&
4903 (res->target == sdev->id) &&
4904 (res->lun == sdev->lun))
4905 return res;
4906 }
4907
4908 return NULL;
4909}
4910
1da177e4
LT
4911/**
4912 * ipr_slave_destroy - Unconfigure a SCSI device
4913 * @sdev: scsi device struct
4914 *
4915 * Return value:
4916 * nothing
4917 **/
4918static void ipr_slave_destroy(struct scsi_device *sdev)
4919{
4920 struct ipr_resource_entry *res;
4921 struct ipr_ioa_cfg *ioa_cfg;
4922 unsigned long lock_flags = 0;
4923
4924 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4925
4926 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4927 res = (struct ipr_resource_entry *) sdev->hostdata;
4928 if (res) {
35a39691 4929 if (res->sata_port)
3e4ec344 4930 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
1da177e4
LT
4931 sdev->hostdata = NULL;
4932 res->sdev = NULL;
35a39691 4933 res->sata_port = NULL;
1da177e4
LT
4934 }
4935 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4936}
4937
4938/**
4939 * ipr_slave_configure - Configure a SCSI device
4940 * @sdev: scsi device struct
4941 *
4942 * This function configures the specified scsi device.
4943 *
4944 * Return value:
4945 * 0 on success
4946 **/
4947static int ipr_slave_configure(struct scsi_device *sdev)
4948{
4949 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4950 struct ipr_resource_entry *res;
dd406ef8 4951 struct ata_port *ap = NULL;
1da177e4 4952 unsigned long lock_flags = 0;
3e7ebdfa 4953 char buffer[IPR_MAX_RES_PATH_LENGTH];
1da177e4
LT
4954
4955 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4956 res = sdev->hostdata;
4957 if (res) {
4958 if (ipr_is_af_dasd_device(res))
4959 sdev->type = TYPE_RAID;
0726ce26 4960 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
1da177e4 4961 sdev->scsi_level = 4;
0726ce26 4962 sdev->no_uld_attach = 1;
4963 }
1da177e4 4964 if (ipr_is_vset_device(res)) {
60654e25 4965 sdev->scsi_level = SCSI_SPC_3;
723cd772 4966 sdev->no_report_opcodes = 1;
242f9dcb
JA
4967 blk_queue_rq_timeout(sdev->request_queue,
4968 IPR_VSET_RW_TIMEOUT);
086fa5ff 4969 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
1da177e4 4970 }
dd406ef8
BK
4971 if (ipr_is_gata(res) && res->sata_port)
4972 ap = res->sata_port->ap;
4973 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4974
4975 if (ap) {
db5ed4df 4976 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
dd406ef8 4977 ata_sas_slave_configure(sdev, ap);
c8b09f6f
CH
4978 }
4979
3e7ebdfa
WB
4980 if (ioa_cfg->sis64)
4981 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
b3b3b407
BK
4982 ipr_format_res_path(ioa_cfg,
4983 res->res_path, buffer, sizeof(buffer)));
dd406ef8 4984 return 0;
1da177e4
LT
4985 }
4986 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4987 return 0;
4988}
4989
35a39691
BK
4990/**
4991 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4992 * @sdev: scsi device struct
4993 *
4994 * This function initializes an ATA port so that future commands
4995 * sent through queuecommand will work.
4996 *
4997 * Return value:
4998 * 0 on success / non-zero on failure
4999 **/
5000static int ipr_ata_slave_alloc(struct scsi_device *sdev)
5001{
5002 struct ipr_sata_port *sata_port = NULL;
5003 int rc = -ENXIO;
5004
5005 ENTER;
5006 if (sdev->sdev_target)
5007 sata_port = sdev->sdev_target->hostdata;
b2024459 5008 if (sata_port) {
35a39691 5009 rc = ata_sas_port_init(sata_port->ap);
b2024459
DW
5010 if (rc == 0)
5011 rc = ata_sas_sync_probe(sata_port->ap);
5012 }
5013
35a39691
BK
5014 if (rc)
5015 ipr_slave_destroy(sdev);
5016
5017 LEAVE;
5018 return rc;
5019}
5020
1da177e4
LT
5021/**
5022 * ipr_slave_alloc - Prepare for commands to a device.
5023 * @sdev: scsi device struct
5024 *
5025 * This function saves a pointer to the resource entry
5026 * in the scsi device struct if the device exists. We
5027 * can then use this pointer in ipr_queuecommand when
5028 * handling new commands.
5029 *
5030 * Return value:
692aebfc 5031 * 0 on success / -ENXIO if device does not exist
1da177e4
LT
5032 **/
5033static int ipr_slave_alloc(struct scsi_device *sdev)
5034{
5035 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5036 struct ipr_resource_entry *res;
5037 unsigned long lock_flags;
692aebfc 5038 int rc = -ENXIO;
1da177e4
LT
5039
5040 sdev->hostdata = NULL;
5041
5042 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5043
35a39691
BK
5044 res = ipr_find_sdev(sdev);
5045 if (res) {
5046 res->sdev = sdev;
5047 res->add_to_ml = 0;
5048 res->in_erp = 0;
5049 sdev->hostdata = res;
5050 if (!ipr_is_naca_model(res))
5051 res->needs_sync_complete = 1;
5052 rc = 0;
5053 if (ipr_is_gata(res)) {
5054 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5055 return ipr_ata_slave_alloc(sdev);
1da177e4
LT
5056 }
5057 }
5058
5059 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5060
692aebfc 5061 return rc;
1da177e4
LT
5062}
5063
6cdb0817
BK
5064/**
5065 * ipr_match_lun - Match function for specified LUN
5066 * @ipr_cmd: ipr command struct
5067 * @device: device to match (sdev)
5068 *
5069 * Returns:
5070 * 1 if command matches sdev / 0 if command does not match sdev
5071 **/
5072static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5073{
5074 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5075 return 1;
5076 return 0;
5077}
5078
439ae285
BK
5079/**
5080 * ipr_cmnd_is_free - Check if a command is free or not
a96099e2 5081 * @ipr_cmd: ipr command struct
439ae285
BK
5082 *
5083 * Returns:
5084 * true if the command is on its hrrq free queue / false otherwise
5085 **/
5086static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5087{
5088 struct ipr_cmnd *loop_cmd;
5089
5090 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5091 if (loop_cmd == ipr_cmd)
5092 return true;
5093 }
5094
5095 return false;
5096}
5097
ef97d8ae
BK
5098/**
5099 * ipr_match_res - Match function for specified resource entry
5100 * @ipr_cmd: ipr command struct
5101 * @resource: resource entry to match
5102 *
5103 * Returns:
5104 * 1 if command matches the resource entry / 0 if it does not
5105 **/
5106static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5107{
5108 struct ipr_resource_entry *res = resource;
5109
5110 if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5111 return 1;
5112 return 0;
5113}
5114
6cdb0817
BK
5115/**
5116 * ipr_wait_for_ops - Wait for matching commands to complete
a96099e2 5117 * @ioa_cfg: ioa config struct
6cdb0817
BK
5118 * @device: device or resource entry to pass to @match
5119 * @match: match function to use
5120 *
5121 * Returns:
5122 * SUCCESS / FAILED
5123 **/
5124static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5125 int (*match)(struct ipr_cmnd *, void *))
5126{
5127 struct ipr_cmnd *ipr_cmd;
439ae285 5128 int wait, i;
6cdb0817
BK
5129 unsigned long flags;
5130 struct ipr_hrr_queue *hrrq;
5131 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5132 DECLARE_COMPLETION_ONSTACK(comp);
5133
5134 ENTER;
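 /*
 * Two pass approach: first attach a completion to every matching
 * command still outstanding, then wait for them to finish. If the
 * wait times out, detach the completions again so a late completion
 * cannot touch the on-stack struct, and report the failure.
 */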
5135 do {
5136 wait = 0;
5137
5138 for_each_hrrq(hrrq, ioa_cfg) {
5139 spin_lock_irqsave(hrrq->lock, flags);
439ae285
BK
5140 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5141 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5142 if (!ipr_cmnd_is_free(ipr_cmd)) {
5143 if (match(ipr_cmd, device)) {
5144 ipr_cmd->eh_comp = &comp;
5145 wait++;
5146 }
6cdb0817
BK
5147 }
5148 }
5149 spin_unlock_irqrestore(hrrq->lock, flags);
5150 }
5151
5152 if (wait) {
5153 timeout = wait_for_completion_timeout(&comp, timeout);
5154
5155 if (!timeout) {
5156 wait = 0;
5157
5158 for_each_hrrq(hrrq, ioa_cfg) {
5159 spin_lock_irqsave(hrrq->lock, flags);
439ae285
BK
5160 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5161 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5162 if (!ipr_cmnd_is_free(ipr_cmd)) {
5163 if (match(ipr_cmd, device)) {
5164 ipr_cmd->eh_comp = NULL;
5165 wait++;
5166 }
6cdb0817
BK
5167 }
5168 }
5169 spin_unlock_irqrestore(hrrq->lock, flags);
5170 }
5171
5172 if (wait)
5173 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5174 LEAVE;
5175 return wait ? FAILED : SUCCESS;
5176 }
5177 }
5178 } while (wait);
5179
5180 LEAVE;
5181 return SUCCESS;
5182}
5183
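/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd: scsi command struct
 *
 * This function initiates an adapter reset if one is not already in
 * progress, waits for any reset/reload to finish, and returns FAILED
 * only if the adapter ends up dead.
 *
 * Return value:
 * SUCCESS / FAILED
 **/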
70233ac5 5184static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
1da177e4
LT
5185{
5186 struct ipr_ioa_cfg *ioa_cfg;
70233ac5 5187 unsigned long lock_flags = 0;
5188 int rc = SUCCESS;
1da177e4
LT
5189
5190 ENTER;
70233ac5 5191 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5192 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1da177e4 5193
96b04db9 5194 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
70233ac5 5195 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
a92fa25c
KSS
5196 dev_err(&ioa_cfg->pdev->dev,
5197 "Adapter being reset as a result of error recovery.\n");
1da177e4 5198
a92fa25c
KSS
5199 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5200 ioa_cfg->sdt_state = GET_DUMP;
5201 }
1da177e4 5202
70233ac5 5203 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5204 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5205 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
df0ae249 5206
70233ac5 5207 /* If we got hit with a host reset while we were already resetting
 5208 the adapter for some reason, and that reset failed, fail this one too. */
5209 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5210 ipr_trace;
5211 rc = FAILED;
5212 }
df0ae249 5213
70233ac5 5214 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5215 LEAVE;
df0ae249
JG
5216 return rc;
5217}
5218
c6513096
BK
5219/**
5220 * ipr_device_reset - Reset the device
5221 * @ioa_cfg: ioa config struct
5222 * @res: resource entry struct
5223 *
5224 * This function issues a device reset to the affected device.
5225 * If the device is a SCSI device, a LUN reset will be sent
5226 * to the device first. If that does not work, a target reset
35a39691
BK
5227 * will be sent. If the device is a SATA device, a PHY reset will
5228 * be sent.
c6513096
BK
5229 *
5230 * Return value:
5231 * 0 on success / non-zero on failure
5232 **/
5233static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5234 struct ipr_resource_entry *res)
5235{
5236 struct ipr_cmnd *ipr_cmd;
5237 struct ipr_ioarcb *ioarcb;
5238 struct ipr_cmd_pkt *cmd_pkt;
35a39691 5239 struct ipr_ioarcb_ata_regs *regs;
c6513096
BK
5240 u32 ioasc;
5241
5242 ENTER;
5243 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5244 ioarcb = &ipr_cmd->ioarcb;
5245 cmd_pkt = &ioarcb->cmd_pkt;
a32c055f
WB
5246
5247 if (ipr_cmd->ioa_cfg->sis64) {
5248 regs = &ipr_cmd->i.ata_ioadl.regs;
5249 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5250 } else
5251 regs = &ioarcb->u.add_data.u.regs;
c6513096 5252
3e7ebdfa 5253 ioarcb->res_handle = res->res_handle;
c6513096
BK
5254 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5255 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
35a39691
BK
5256 if (ipr_is_gata(res)) {
5257 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
a32c055f 5258 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
35a39691
BK
5259 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5260 }
c6513096
BK
5261
5262 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
96d21f00 5263 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
05a6538a 5264 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
96d21f00
WB
5265 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5266 if (ipr_cmd->ioa_cfg->sis64)
5267 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5268 sizeof(struct ipr_ioasa_gata));
5269 else
5270 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5271 sizeof(struct ipr_ioasa_gata));
5272 }
c6513096
BK
5273
5274 LEAVE;
203fa3fe 5275 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
c6513096
BK
5276}
5277
35a39691
BK
5278/**
5279 * ipr_sata_reset - Reset the SATA port
cc0680a5 5280 * @link: SATA link to reset
35a39691 5281 * @classes: class of the attached device
a96099e2 5282 * @deadline: unused
35a39691 5283 *
cc0680a5 5284 * This function issues a SATA phy reset to the affected ATA link.
35a39691
BK
5285 *
5286 * Return value:
5287 * 0 on success / non-zero on failure
5288 **/
cc0680a5 5289static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
120bda35 5290 unsigned long deadline)
35a39691 5291{
cc0680a5 5292 struct ipr_sata_port *sata_port = link->ap->private_data;
35a39691
BK
5293 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5294 struct ipr_resource_entry *res;
5295 unsigned long lock_flags = 0;
ef97d8ae 5296 int rc = -ENXIO, ret;
35a39691
BK
5297
5298 ENTER;
5299 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
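 /* If an adapter reset/reload is already in progress, wait for it to
 * finish before issuing the device reset. */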
203fa3fe 5300 while (ioa_cfg->in_reset_reload) {
73d98ff0
BK
5301 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5302 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5303 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5304 }
5305
35a39691
BK
5306 res = sata_port->res;
5307 if (res) {
5308 rc = ipr_device_reset(ioa_cfg, res);
3e7ebdfa 5309 *classes = res->ata_class;
ef97d8ae
BK
5310 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5311
5312 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5313 if (ret != SUCCESS) {
5314 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5315 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5316 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5317
5318 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5319 }
5320 } else
5321 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
35a39691 5322
35a39691
BK
5323 LEAVE;
5324 return rc;
5325}
5326
1da177e4 5327/**
637b5c3e 5328 * __ipr_eh_dev_reset - Reset the device
1da177e4
LT
5329 * @scsi_cmd: scsi command struct
5330 *
5331 * This function issues a device reset to the affected device.
5332 * A LUN reset will be sent to the device first. If that does
5333 * not work, a target reset will be sent.
5334 *
5335 * Return value:
5336 * SUCCESS / FAILED
5337 **/
203fa3fe 5338static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
1da177e4
LT
5339{
5340 struct ipr_cmnd *ipr_cmd;
5341 struct ipr_ioa_cfg *ioa_cfg;
5342 struct ipr_resource_entry *res;
35a39691 5343 struct ata_port *ap;
439ae285 5344 int rc = 0, i;
05a6538a 5345 struct ipr_hrr_queue *hrrq;
1da177e4
LT
5346
5347 ENTER;
5348 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5349 res = scsi_cmd->device->hostdata;
5350
1da177e4
LT
5351 /*
5352 * If we are currently going through reset/reload, return failed. This will force the
5353 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5354 * reset to complete
5355 */
5356 if (ioa_cfg->in_reset_reload)
5357 return FAILED;
56d6aa33 5358 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
1da177e4
LT
5359 return FAILED;
5360
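 /*
 * For a SATA resource, mark every command still outstanding to this
 * device for libata error handling so ata_std_error_handler() below
 * can recover it.
 */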
05a6538a 5361 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5362 spin_lock(&hrrq->_lock);
439ae285
BK
5363 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5364 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5365
05a6538a 5366 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
960e9648
BK
5367 if (!ipr_cmd->qc)
5368 continue;
439ae285
BK
5369 if (ipr_cmnd_is_free(ipr_cmd))
5370 continue;
960e9648
BK
5371
5372 ipr_cmd->done = ipr_sata_eh_done;
87629312 5373 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_EH)) {
05a6538a 5374 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
87629312 5375 ipr_cmd->qc->flags |= ATA_QCFLAG_EH;
05a6538a 5376 }
7402ecef 5377 }
1da177e4 5378 }
56d6aa33 5379 spin_unlock(&hrrq->_lock);
1da177e4 5380 }
1da177e4 5381 res->resetting_device = 1;
fb3ed3cb 5382 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
35a39691
BK
5383
5384 if (ipr_is_gata(res) && res->sata_port) {
5385 ap = res->sata_port->ap;
5386 spin_unlock_irq(scsi_cmd->device->host->host_lock);
a1efdaba 5387 ata_std_error_handler(ap);
35a39691
BK
5388 spin_lock_irq(scsi_cmd->device->host->host_lock);
5389 } else
5390 rc = ipr_device_reset(ioa_cfg, res);
1da177e4 5391 res->resetting_device = 0;
0b1f8d44 5392 res->reset_occurred = 1;
1da177e4 5393
1da177e4 5394 LEAVE;
203fa3fe 5395 return rc ? FAILED : SUCCESS;
1da177e4
LT
5396}
5397
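/**
 * ipr_eh_dev_reset - Reset the device
 * @cmd: scsi command struct
 *
 * Locked wrapper around __ipr_eh_dev_reset() that also waits for any
 * commands still outstanding to the device (or its SATA resource) to
 * complete before returning.
 *
 * Return value:
 * SUCCESS / FAILED
 **/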
203fa3fe 5398static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
94d0e7b8
JG
5399{
5400 int rc;
6cdb0817 5401 struct ipr_ioa_cfg *ioa_cfg;
ef97d8ae 5402 struct ipr_resource_entry *res;
6cdb0817
BK
5403
5404 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
ef97d8ae
BK
5405 res = cmd->device->hostdata;
5406
5407 if (!res)
5408 return FAILED;
94d0e7b8
JG
5409
5410 spin_lock_irq(cmd->device->host->host_lock);
5411 rc = __ipr_eh_dev_reset(cmd);
5412 spin_unlock_irq(cmd->device->host->host_lock);
5413
ef97d8ae
BK
5414 if (rc == SUCCESS) {
5415 if (ipr_is_gata(res) && res->sata_port)
5416 rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5417 else
5418 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5419 }
6cdb0817 5420
94d0e7b8
JG
5421 return rc;
5422}
5423
1da177e4
LT
5424/**
5425 * ipr_bus_reset_done - Op done function for bus reset.
5426 * @ipr_cmd: ipr command struct
5427 *
5428 * This function is the op done function for a bus reset
5429 *
5430 * Return value:
5431 * none
5432 **/
5433static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5434{
5435 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5436 struct ipr_resource_entry *res;
5437
5438 ENTER;
3e7ebdfa
WB
5439 if (!ioa_cfg->sis64)
5440 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5441 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5442 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5443 break;
5444 }
1da177e4 5445 }
1da177e4
LT
5446
5447 /*
5448 * If abort has not completed, indicate the reset has, else call the
5449 * abort's done function to wake the sleeping eh thread
5450 */
5451 if (ipr_cmd->sibling->sibling)
5452 ipr_cmd->sibling->sibling = NULL;
5453 else
5454 ipr_cmd->sibling->done(ipr_cmd->sibling);
5455
05a6538a 5456 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
5457 LEAVE;
5458}
5459
5460/**
5461 * ipr_abort_timeout - An abort task has timed out
a96099e2 5462 * @t: Timer context used to fetch ipr command struct
1da177e4
LT
5463 *
5464 * This function handles when an abort task times out. If this
5465 * happens we issue a bus reset since we have resources tied
5466 * up that must be freed before returning to the midlayer.
5467 *
5468 * Return value:
5469 * none
5470 **/
738c6ec5 5471static void ipr_abort_timeout(struct timer_list *t)
1da177e4 5472{
738c6ec5 5473 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
1da177e4
LT
5474 struct ipr_cmnd *reset_cmd;
5475 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5476 struct ipr_cmd_pkt *cmd_pkt;
5477 unsigned long lock_flags = 0;
5478
5479 ENTER;
5480 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5481 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5482 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5483 return;
5484 }
5485
fb3ed3cb 5486 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
1da177e4
LT
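 /*
 * Cross link the timed out abort and the bus reset through ->sibling
 * so ipr_bus_reset_done() can tell whether the abort has already
 * completed when the reset finishes.
 */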
5487 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5488 ipr_cmd->sibling = reset_cmd;
5489 reset_cmd->sibling = ipr_cmd;
5490 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5491 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5492 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5493 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5494 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5495
5496 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5497 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5498 LEAVE;
5499}
5500
5501/**
5502 * ipr_cancel_op - Cancel specified op
5503 * @scsi_cmd: scsi command struct
5504 *
5505 * This function cancels specified op.
5506 *
5507 * Return value:
5508 * SUCCESS / FAILED
5509 **/
203fa3fe 5510static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
1da177e4
LT
5511{
5512 struct ipr_cmnd *ipr_cmd;
5513 struct ipr_ioa_cfg *ioa_cfg;
5514 struct ipr_resource_entry *res;
5515 struct ipr_cmd_pkt *cmd_pkt;
4dc83399 5516 u32 ioasc;
439ae285 5517 int i, op_found = 0;
05a6538a 5518 struct ipr_hrr_queue *hrrq;
1da177e4
LT
5519
5520 ENTER;
5521 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5522 res = scsi_cmd->device->hostdata;
5523
8fa728a2
JG
5524 /* If we are currently going through reset/reload, return failed.
5525 * This will force the mid-layer to call ipr_eh_host_reset,
5526 * which will then go to sleep and wait for the reset to complete
5527 */
56d6aa33 5528 if (ioa_cfg->in_reset_reload ||
5529 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8fa728a2 5530 return FAILED;
a92fa25c
KSS
5531 if (!res)
5532 return FAILED;
5533
5534 /*
5535 * If we are aborting a timed out op, chances are that the timeout was caused
5536 * by an EEH error that has not yet been detected. In such cases, reading a register will
5537 * trigger the EEH recovery infrastructure.
5538 */
4dc83399 5539 readl(ioa_cfg->regs.sense_interrupt_reg);
a92fa25c
KSS
5540
5541 if (!ipr_is_gscsi(res))
1da177e4
LT
5542 return FAILED;
5543
05a6538a 5544 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 5545 spin_lock(&hrrq->_lock);
439ae285
BK
5546 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5547 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5548 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5549 op_found = 1;
5550 break;
5551 }
05a6538a 5552 }
1da177e4 5553 }
56d6aa33 5554 spin_unlock(&hrrq->_lock);
1da177e4
LT
5555 }
5556
5557 if (!op_found)
5558 return SUCCESS;
5559
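 /*
 * Build a Cancel All Requests IOA command addressed at the device's
 * resource handle and issue it synchronously; ipr_abort_timeout()
 * escalates to a bus reset if it does not complete in time.
 */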
5560 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3e7ebdfa 5561 ipr_cmd->ioarcb.res_handle = res->res_handle;
1da177e4
LT
5562 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5563 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5564 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5565 ipr_cmd->u.sdev = scsi_cmd->device;
5566
fb3ed3cb
BK
5567 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5568 scsi_cmd->cmnd[0]);
1da177e4 5569 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
96d21f00 5570 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
5571
5572 /*
5573 * If the abort task timed out and we sent a bus reset, we will get
5574 * one of the following responses to the abort
5575 */
5576 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5577 ioasc = 0;
5578 ipr_trace;
5579 }
5580
c4ee22a3 5581 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
ee0a90fa 5582 if (!ipr_is_naca_model(res))
5583 res->needs_sync_complete = 1;
1da177e4
LT
5584
5585 LEAVE;
203fa3fe 5586 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
1da177e4
LT
5587}
5588
5589/**
637b5c3e 5590 * ipr_scan_finished - Report whether scan is done
a96099e2
LJ
5591 * @shost: scsi host struct
5592 * @elapsed_time: elapsed scan time in jiffies
1da177e4
LT
5593 *
5594 * Return value:
f688f96d
BK
5595 * 0 if scan in progress / 1 if scan is complete
5596 **/
5597static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5598{
5599 unsigned long lock_flags;
5600 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5601 int rc = 0;
5602
5603 spin_lock_irqsave(shost->host_lock, lock_flags);
5604 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5605 rc = 1;
5606 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5607 rc = 1;
5608 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5609 return rc;
5610}
5611
5612/**
637b5c3e 5613 * ipr_eh_abort - Abort a single op
f688f96d
BK
5614 * @scsi_cmd: scsi command struct
5615 *
5616 * Return value:
1da177e4
LT
5617 * SUCCESS / FAILED
5618 **/
203fa3fe 5619static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
1da177e4 5620{
8fa728a2
JG
5621 unsigned long flags;
5622 int rc;
6cdb0817 5623 struct ipr_ioa_cfg *ioa_cfg;
1da177e4
LT
5624
5625 ENTER;
1da177e4 5626
6cdb0817
BK
5627 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5628
8fa728a2
JG
5629 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5630 rc = ipr_cancel_op(scsi_cmd);
5631 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
1da177e4 5632
6cdb0817
BK
5633 if (rc == SUCCESS)
5634 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
1da177e4 5635 LEAVE;
8fa728a2 5636 return rc;
1da177e4
LT
5637}
5638
5639/**
5640 * ipr_handle_other_interrupt - Handle "other" interrupts
5641 * @ioa_cfg: ioa config struct
634651fa 5642 * @int_reg: interrupt register
1da177e4
LT
5643 *
5644 * Return value:
5645 * IRQ_NONE / IRQ_HANDLED
5646 **/
634651fa 5647static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
630ad831 5648 u32 int_reg)
1da177e4
LT
5649{
5650 irqreturn_t rc = IRQ_HANDLED;
7dacb64f 5651 u32 int_mask_reg;
56d6aa33 5652
7dacb64f
WB
5653 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5654 int_reg &= ~int_mask_reg;
5655
5656 /* If an interrupt on the adapter did not occur, ignore it.
5657 * Or in the case of SIS 64, check for a stage change interrupt.
5658 */
5659 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5660 if (ioa_cfg->sis64) {
5661 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5662 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5663 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5664
5665 /* clear stage change */
5666 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5667 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5668 list_del(&ioa_cfg->reset_cmd->queue);
5669 del_timer(&ioa_cfg->reset_cmd->timer);
5670 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5671 return IRQ_HANDLED;
5672 }
5673 }
5674
5675 return IRQ_NONE;
5676 }
1da177e4
LT
5677
5678 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5679 /* Mask the interrupt */
5680 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
1da177e4
LT
5681 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5682
5683 list_del(&ioa_cfg->reset_cmd->queue);
5684 del_timer(&ioa_cfg->reset_cmd->timer);
5685 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
7dacb64f 5686 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
7dd21308
BK
5687 if (ioa_cfg->clear_isr) {
5688 if (ipr_debug && printk_ratelimit())
5689 dev_err(&ioa_cfg->pdev->dev,
5690 "Spurious interrupt detected. 0x%08X\n", int_reg);
5691 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5692 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5693 return IRQ_NONE;
5694 }
1da177e4
LT
5695 } else {
5696 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5697 ioa_cfg->ioa_unit_checked = 1;
05a6538a 5698 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5699 dev_err(&ioa_cfg->pdev->dev,
5700 "No Host RRQ. 0x%08X\n", int_reg);
1da177e4
LT
5701 else
5702 dev_err(&ioa_cfg->pdev->dev,
5703 "Permanent IOA failure. 0x%08X\n", int_reg);
5704
5705 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5706 ioa_cfg->sdt_state = GET_DUMP;
5707
5708 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5709 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5710 }
56d6aa33 5711
1da177e4
LT
5712 return rc;
5713}
5714
3feeb89d
WB
5715/**
5716 * ipr_isr_eh - Interrupt service routine error handler
5717 * @ioa_cfg: ioa config struct
5718 * @msg: message to log
a96099e2 5719 * @number: value logged with the message (e.g. a bad response handle or retry count)
3feeb89d
WB
5720 *
5721 * Return value:
5722 * none
5723 **/
05a6538a 5724static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
3feeb89d
WB
5725{
5726 ioa_cfg->errors_logged++;
05a6538a 5727 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
3feeb89d
WB
5728
5729 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5730 ioa_cfg->sdt_state = GET_DUMP;
5731
5732 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5733}
5734
b53d124a 5735static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
05a6538a 5736 struct list_head *doneq)
5737{
5738 u32 ioasc;
5739 u16 cmd_index;
5740 struct ipr_cmnd *ipr_cmd;
5741 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5742 int num_hrrq = 0;
5743
5744 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5745 if (!hrr_queue->allow_interrupts)
05a6538a 5746 return 0;
5747
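 /*
 * Consume response queue entries whose toggle bit matches the value
 * expected for this pass around the ring; the expected value is
 * flipped each time hrrq_curr wraps back to hrrq_start below.
 */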
5748 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5749 hrr_queue->toggle_bit) {
5750
5751 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5752 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5753 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5754
5755 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5756 cmd_index < hrr_queue->min_cmd_id)) {
5757 ipr_isr_eh(ioa_cfg,
5758 "Invalid response handle from IOA: ",
5759 cmd_index);
5760 break;
5761 }
5762
5763 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5764 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5765
5766 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5767
5768 list_move_tail(&ipr_cmd->queue, doneq);
5769
5770 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5771 hrr_queue->hrrq_curr++;
5772 } else {
5773 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5774 hrr_queue->toggle_bit ^= 1u;
5775 }
5776 num_hrrq++;
b53d124a 5777 if (budget > 0 && num_hrrq >= budget)
5778 break;
05a6538a 5779 }
b53d124a 5780
05a6538a 5781 return num_hrrq;
5782}
b53d124a 5783
511cbce2 5784static int ipr_iopoll(struct irq_poll *iop, int budget)
b53d124a 5785{
b53d124a 5786 struct ipr_hrr_queue *hrrq;
5787 struct ipr_cmnd *ipr_cmd, *temp;
5788 unsigned long hrrq_flags;
5789 int completed_ops;
5790 LIST_HEAD(doneq);
5791
5792 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
b53d124a 5793
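 /*
 * Pull at most 'budget' completions off this HRRQ under its lock,
 * then run the done functions with the lock dropped. Completing
 * fewer than 'budget' entries means the queue is drained, so the
 * poll is marked complete.
 */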
5794 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5795 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5796
5797 if (completed_ops < budget)
511cbce2 5798 irq_poll_complete(iop);
b53d124a 5799 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5800
5801 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5802 list_del(&ipr_cmd->queue);
5803 del_timer(&ipr_cmd->timer);
5804 ipr_cmd->fast_done(ipr_cmd);
5805 }
5806
5807 return completed_ops;
5808}
5809
1da177e4
LT
5810/**
5811 * ipr_isr - Interrupt service routine
5812 * @irq: irq number
5813 * @devp: pointer to ioa config struct
1da177e4
LT
5814 *
5815 * Return value:
5816 * IRQ_NONE / IRQ_HANDLED
5817 **/
7d12e780 5818static irqreturn_t ipr_isr(int irq, void *devp)
1da177e4 5819{
05a6538a 5820 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5821 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
56d6aa33 5822 unsigned long hrrq_flags = 0;
7dacb64f 5823 u32 int_reg = 0;
3feeb89d 5824 int num_hrrq = 0;
7dacb64f 5825 int irq_none = 0;
172cd6e1 5826 struct ipr_cmnd *ipr_cmd, *temp;
1da177e4 5827 irqreturn_t rc = IRQ_NONE;
172cd6e1 5828 LIST_HEAD(doneq);
1da177e4 5829
56d6aa33 5830 spin_lock_irqsave(hrrq->lock, hrrq_flags);
1da177e4 5831 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5832 if (!hrrq->allow_interrupts) {
5833 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4
LT
5834 return IRQ_NONE;
5835 }
5836
1da177e4 5837 while (1) {
b53d124a 5838 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5839 rc = IRQ_HANDLED;
1da177e4 5840
b53d124a 5841 if (!ioa_cfg->clear_isr)
5842 break;
7dd21308 5843
1da177e4 5844 /* Clear the PCI interrupt */
a5442ba4 5845 num_hrrq = 0;
3feeb89d 5846 do {
b53d124a 5847 writel(IPR_PCII_HRRQ_UPDATED,
5848 ioa_cfg->regs.clr_interrupt_reg32);
7dacb64f 5849 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
3feeb89d 5850 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
b53d124a 5851 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
3feeb89d 5852
7dacb64f
WB
5853 } else if (rc == IRQ_NONE && irq_none == 0) {
5854 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5855 irq_none++;
a5442ba4
WB
5856 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5857 int_reg & IPR_PCII_HRRQ_UPDATED) {
b53d124a 5858 ipr_isr_eh(ioa_cfg,
5859 "Error clearing HRRQ: ", num_hrrq);
172cd6e1 5860 rc = IRQ_HANDLED;
b53d124a 5861 break;
1da177e4
LT
5862 } else
5863 break;
5864 }
5865
5866 if (unlikely(rc == IRQ_NONE))
634651fa 5867 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
1da177e4 5868
56d6aa33 5869 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
172cd6e1
BK
5870 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5871 list_del(&ipr_cmd->queue);
5872 del_timer(&ipr_cmd->timer);
5873 ipr_cmd->fast_done(ipr_cmd);
5874 }
05a6538a 5875 return rc;
5876}
5877
5878/**
5879 * ipr_isr_mhrrq - Interrupt service routine
5880 * @irq: irq number
5881 * @devp: pointer to ioa config struct
5882 *
5883 * Return value:
5884 * IRQ_NONE / IRQ_HANDLED
5885 **/
5886static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5887{
5888 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
b53d124a 5889 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
56d6aa33 5890 unsigned long hrrq_flags = 0;
05a6538a 5891 struct ipr_cmnd *ipr_cmd, *temp;
5892 irqreturn_t rc = IRQ_NONE;
5893 LIST_HEAD(doneq);
172cd6e1 5894
56d6aa33 5895 spin_lock_irqsave(hrrq->lock, hrrq_flags);
05a6538a 5896
5897 /* If interrupts are disabled, ignore the interrupt */
56d6aa33 5898 if (!hrrq->allow_interrupts) {
5899 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 5900 return IRQ_NONE;
5901 }
5902
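 /*
 * With irq_poll enabled on a SIS-64 adapter using multiple vectors,
 * hand completion processing off to the iopoll handler; otherwise
 * drain the queue directly from the hard interrupt.
 */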
89f8b33c 5903 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 5904 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5905 hrrq->toggle_bit) {
ea51190c 5906 irq_poll_sched(&hrrq->iopoll);
b53d124a 5907 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5908 return IRQ_HANDLED;
5909 }
5910 } else {
5911 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5912 hrrq->toggle_bit)
05a6538a 5913
b53d124a 5914 if (ipr_process_hrrq(hrrq, -1, &doneq))
5915 rc = IRQ_HANDLED;
5916 }
05a6538a 5917
56d6aa33 5918 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 5919
5920 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5921 list_del(&ipr_cmd->queue);
5922 del_timer(&ipr_cmd->timer);
5923 ipr_cmd->fast_done(ipr_cmd);
5924 }
1da177e4
LT
5925 return rc;
5926}
5927
a32c055f
WB
5928/**
5929 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5930 * @ioa_cfg: ioa config struct
5931 * @ipr_cmd: ipr command struct
5932 *
5933 * Return value:
5934 * 0 on success / -1 on failure
5935 **/
5936static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5937 struct ipr_cmnd *ipr_cmd)
5938{
5939 int i, nseg;
5940 struct scatterlist *sg;
5941 u32 length;
5942 u32 ioadl_flags = 0;
5943 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5944 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5945 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5946
5947 length = scsi_bufflen(scsi_cmd);
5948 if (!length)
5949 return 0;
5950
5951 nseg = scsi_dma_map(scsi_cmd);
5952 if (nseg < 0) {
51f52a47 5953 if (printk_ratelimit())
d73341bf 5954 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
a32c055f
WB
5955 return -1;
5956 }
5957
5958 ipr_cmd->dma_use_sg = nseg;
5959
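 /* Describe each DMA mapped segment with a 64-bit IOADL descriptor;
 * the final descriptor is flagged LAST so the adapter knows where
 * the list ends. */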
438b0331 5960 ioarcb->data_transfer_length = cpu_to_be32(length);
b8803b1c
WB
5961 ioarcb->ioadl_len =
5962 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
438b0331 5963
a32c055f
WB
5964 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5965 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5966 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5967 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5968 ioadl_flags = IPR_IOADL_FLAGS_READ;
5969
5970 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5971 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5972 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5973 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5974 }
5975
5976 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5977 return 0;
5978}
5979
1da177e4
LT
5980/**
5981 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5982 * @ioa_cfg: ioa config struct
5983 * @ipr_cmd: ipr command struct
5984 *
5985 * Return value:
5986 * 0 on success / -1 on failure
5987 **/
5988static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5989 struct ipr_cmnd *ipr_cmd)
5990{
63015bc9
FT
5991 int i, nseg;
5992 struct scatterlist *sg;
1da177e4
LT
5993 u32 length;
5994 u32 ioadl_flags = 0;
5995 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5996 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 5997 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1da177e4 5998
63015bc9
FT
5999 length = scsi_bufflen(scsi_cmd);
6000 if (!length)
1da177e4
LT
6001 return 0;
6002
63015bc9
FT
6003 nseg = scsi_dma_map(scsi_cmd);
6004 if (nseg < 0) {
d73341bf 6005 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
63015bc9
FT
6006 return -1;
6007 }
51b1c7e1 6008
63015bc9
FT
6009 ipr_cmd->dma_use_sg = nseg;
6010
6011 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
6012 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6013 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
6014 ioarcb->data_transfer_length = cpu_to_be32(length);
6015 ioarcb->ioadl_len =
63015bc9
FT
6016 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6017 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
6018 ioadl_flags = IPR_IOADL_FLAGS_READ;
6019 ioarcb->read_data_transfer_length = cpu_to_be32(length);
6020 ioarcb->read_ioadl_len =
6021 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6022 }
1da177e4 6023
a32c055f
WB
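 /* If the list is short enough, place the IOADL in the IOARCB's
 * additional data area so it travels with the request itself rather
 * than requiring a separate fetch of the descriptor list. */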
6024 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6025 ioadl = ioarcb->u.add_data.u.ioadl;
6026 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6027 offsetof(struct ipr_ioarcb, u.add_data));
63015bc9
FT
6028 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6029 }
1da177e4 6030
63015bc9
FT
6031 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6032 ioadl[i].flags_and_data_len =
6033 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6034 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
1da177e4
LT
6035 }
6036
63015bc9
FT
6037 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6038 return 0;
1da177e4
LT
6039}
6040
1da177e4 6041/**
f646f325 6042 * __ipr_erp_done - Process completion of ERP for a device
1da177e4
LT
6043 * @ipr_cmd: ipr command struct
6044 *
6045 * This function copies the sense buffer into the scsi_cmd
6046 * struct and pushes the scsi_done function.
6047 *
6048 * Return value:
6049 * nothing
6050 **/
f646f325 6051static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
6052{
6053 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6054 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 6055 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
6056
6057 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6058 scsi_cmd->result |= (DID_ERROR << 16);
fb3ed3cb
BK
6059 scmd_printk(KERN_ERR, scsi_cmd,
6060 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
1da177e4
LT
6061 } else {
6062 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6063 SCSI_SENSE_BUFFERSIZE);
6064 }
6065
6066 if (res) {
ee0a90fa 6067 if (!ipr_is_naca_model(res))
6068 res->needs_sync_complete = 1;
1da177e4
LT
6069 res->in_erp = 0;
6070 }
63015bc9 6071 scsi_dma_unmap(ipr_cmd->scsi_cmd);
acd3c42d 6072 scsi_done(scsi_cmd);
66a0d59c
BK
6073 if (ipr_cmd->eh_comp)
6074 complete(ipr_cmd->eh_comp);
6075 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
6076}
6077
f646f325
BK
6078/**
6079 * ipr_erp_done - Process completion of ERP for a device
6080 * @ipr_cmd: ipr command struct
6081 *
6082 * This function takes the hrrq lock and calls __ipr_erp_done() to copy
6083 * the sense buffer into the scsi_cmd struct and push the scsi_done function.
6084 *
6085 * Return value:
6086 * nothing
6087 **/
6088static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6089{
6090 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6091 unsigned long hrrq_flags;
6092
6093 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6094 __ipr_erp_done(ipr_cmd);
6095 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
1da177e4
LT
6096}
6097
6098/**
6099 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6100 * @ipr_cmd: ipr command struct
6101 *
6102 * Return value:
6103 * none
6104 **/
6105static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6106{
51b1c7e1 6107 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
96d21f00 6108 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
a32c055f 6109 dma_addr_t dma_addr = ipr_cmd->dma_addr;
1da177e4
LT
6110
6111 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
a32c055f 6112 ioarcb->data_transfer_length = 0;
1da177e4 6113 ioarcb->read_data_transfer_length = 0;
a32c055f 6114 ioarcb->ioadl_len = 0;
1da177e4 6115 ioarcb->read_ioadl_len = 0;
96d21f00
WB
6116 ioasa->hdr.ioasc = 0;
6117 ioasa->hdr.residual_data_len = 0;
a32c055f
WB
6118
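 /* Point the IOARCB back at this command's embedded IOADL; SIS-64
 * and legacy adapters keep the address in different fields. */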
6119 if (ipr_cmd->ioa_cfg->sis64)
6120 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6121 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6122 else {
6123 ioarcb->write_ioadl_addr =
6124 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6125 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6126 }
1da177e4
LT
6127}
6128
6129/**
f646f325 6130 * __ipr_erp_request_sense - Send request sense to a device
1da177e4
LT
6131 * @ipr_cmd: ipr command struct
6132 *
6133 * This function sends a request sense to a device as a result
6134 * of a check condition.
6135 *
6136 * Return value:
6137 * nothing
6138 **/
f646f325 6139static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
6140{
6141 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
96d21f00 6142 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1da177e4
LT
6143
6144 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
f646f325 6145 __ipr_erp_done(ipr_cmd);
1da177e4
LT
6146 return;
6147 }
6148
6149 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6150
6151 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6152 cmd_pkt->cdb[0] = REQUEST_SENSE;
6153 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6154 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6155 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6156 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6157
a32c055f
WB
6158 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6159 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
6160
6161 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6162 IPR_REQUEST_SENSE_TIMEOUT * 2);
6163}
6164
f646f325
BK
6165/**
6166 * ipr_erp_request_sense - Send request sense to a device
6167 * @ipr_cmd: ipr command struct
6168 *
6169 * This function takes the hrrq lock and calls __ipr_erp_request_sense()
6170 * to send a request sense to a device as a result of a check condition.
6171 *
6172 * Return value:
6173 * nothing
6174 **/
6175static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6176{
6177 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6178 unsigned long hrrq_flags;
6179
6180 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6181 __ipr_erp_request_sense(ipr_cmd);
6182 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6183}
6184
1da177e4
LT
6185/**
6186 * ipr_erp_cancel_all - Send cancel all to a device
6187 * @ipr_cmd: ipr command struct
6188 *
6189 * This function sends a cancel all to a device to clear the
6190 * queue. If we are running TCQ on the device, QERR is set to 1,
6191 * which means all outstanding ops have been dropped on the floor.
6192 * Cancel all will return them to us.
6193 *
6194 * Return value:
6195 * nothing
6196 **/
6197static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6198{
6199 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6200 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6201 struct ipr_cmd_pkt *cmd_pkt;
6202
6203 res->in_erp = 1;
6204
6205 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6206
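 /* Without tagged queuing there is nothing queued at the device to
 * cancel, so go straight to the request sense. */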
17ea0126 6207 if (!scsi_cmd->device->simple_tags) {
f646f325 6208 __ipr_erp_request_sense(ipr_cmd);
1da177e4
LT
6209 return;
6210 }
6211
6212 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6213 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6214 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6215
6216 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6217 IPR_CANCEL_ALL_TIMEOUT);
6218}
6219
6220/**
6221 * ipr_dump_ioasa - Dump contents of IOASA
6222 * @ioa_cfg: ioa config struct
6223 * @ipr_cmd: ipr command struct
fe964d0a 6224 * @res: resource entry struct
1da177e4
LT
6225 *
6226 * This function is invoked by the interrupt handler when ops
6227 * fail. It will log the IOASA if appropriate. Only called
6228 * for GPDD ops.
6229 *
6230 * Return value:
6231 * none
6232 **/
6233static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
fe964d0a 6234 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
1da177e4
LT
6235{
6236 int i;
6237 u16 data_len;
b0692dd4 6238 u32 ioasc, fd_ioasc;
96d21f00 6239 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
1da177e4
LT
6240 __be32 *ioasa_data = (__be32 *)ioasa;
6241 int error_index;
6242
96d21f00
WB
6243 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6244 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
1da177e4
LT
6245
6246 if (0 == ioasc)
6247 return;
6248
6249 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6250 return;
6251
b0692dd4
BK
6252 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6253 error_index = ipr_get_error(fd_ioasc);
6254 else
6255 error_index = ipr_get_error(ioasc);
1da177e4
LT
6256
6257 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6258 /* Don't log an error if the IOA already logged one */
96d21f00 6259 if (ioasa->hdr.ilid != 0)
1da177e4
LT
6260 return;
6261
cc9bd5d4
BK
6262 if (!ipr_is_gscsi(res))
6263 return;
6264
1da177e4
LT
6265 if (ipr_error_table[error_index].log_ioasa == 0)
6266 return;
6267 }
6268
fe964d0a 6269 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
1da177e4 6270
96d21f00
WB
6271 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6272 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6273 data_len = sizeof(struct ipr_ioasa64);
6274 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
1da177e4 6275 data_len = sizeof(struct ipr_ioasa);
1da177e4
LT
6276
6277 ipr_err("IOASA Dump:\n");
6278
6279 for (i = 0; i < data_len / 4; i += 4) {
6280 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6281 be32_to_cpu(ioasa_data[i]),
6282 be32_to_cpu(ioasa_data[i+1]),
6283 be32_to_cpu(ioasa_data[i+2]),
6284 be32_to_cpu(ioasa_data[i+3]));
6285 }
6286}
6287
6288/**
6289 * ipr_gen_sense - Generate SCSI sense data from an IOASA
a96099e2 6290 * @ipr_cmd: ipr command struct
1da177e4
LT
6291 *
6292 * Return value:
6293 * none
6294 **/
6295static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6296{
6297 u32 failing_lba;
6298 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6299 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
96d21f00
WB
6300 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6301 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
1da177e4
LT
6302
6303 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6304
6305 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6306 return;
6307
6308 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6309
6310 if (ipr_is_vset_device(res) &&
6311 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6312 ioasa->u.vset.failing_lba_hi != 0) {
6313 sense_buf[0] = 0x72;
6314 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6315 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6316 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6317
6318 sense_buf[7] = 12;
6319 sense_buf[8] = 0;
6320 sense_buf[9] = 0x0A;
6321 sense_buf[10] = 0x80;
6322
6323 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6324
6325 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6326 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6327 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6328 sense_buf[15] = failing_lba & 0x000000ff;
6329
6330 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6331
6332 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6333 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6334 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6335 sense_buf[19] = failing_lba & 0x000000ff;
6336 } else {
6337 sense_buf[0] = 0x70;
6338 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6339 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6340 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6341
6342 /* Illegal request */
6343 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
96d21f00 6344 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
1da177e4
LT
6345 sense_buf[7] = 10; /* additional length */
6346
6347 /* IOARCB was in error */
6348 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6349 sense_buf[15] = 0xC0;
6350 else /* Parameter data was invalid */
6351 sense_buf[15] = 0x80;
6352
6353 sense_buf[16] =
6354 ((IPR_FIELD_POINTER_MASK &
96d21f00 6355 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
1da177e4
LT
6356 sense_buf[17] =
6357 (IPR_FIELD_POINTER_MASK &
96d21f00 6358 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
1da177e4
LT
6359 } else {
6360 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6361 if (ipr_is_vset_device(res))
6362 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6363 else
6364 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6365
6366 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6367 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6368 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6369 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6370 sense_buf[6] = failing_lba & 0x000000ff;
6371 }
6372
6373 sense_buf[7] = 6; /* additional length */
6374 }
6375 }
6376}
6377
ee0a90fa 6378/**
6379 * ipr_get_autosense - Copy autosense data to sense buffer
6380 * @ipr_cmd: ipr command struct
6381 *
6382 * This function copies the autosense buffer to the buffer
6383 * in the scsi_cmd, if there is autosense available.
6384 *
6385 * Return value:
6386 * 1 if autosense was available / 0 if not
6387 **/
6388static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6389{
96d21f00
WB
6390 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6391 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
ee0a90fa 6392
96d21f00 6393 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
ee0a90fa 6394 return 0;
6395
96d21f00
WB
6396 if (ipr_cmd->ioa_cfg->sis64)
6397 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6398 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6399 SCSI_SENSE_BUFFERSIZE));
6400 else
6401 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6402 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6403 SCSI_SENSE_BUFFERSIZE));
ee0a90fa 6404 return 1;
6405}
6406
1da177e4
LT
6407/**
6408 * ipr_erp_start - Process an error response for a SCSI op
6409 * @ioa_cfg: ioa config struct
6410 * @ipr_cmd: ipr command struct
6411 *
6412 * This function determines whether or not to initiate ERP
6413 * on the affected device.
6414 *
6415 * Return value:
6416 * nothing
6417 **/
6418static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6419 struct ipr_cmnd *ipr_cmd)
6420{
6421 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6422 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
96d21f00 6423 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8a048994 6424 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
1da177e4
LT
6425
6426 if (!res) {
f646f325 6427 __ipr_scsi_eh_done(ipr_cmd);
1da177e4
LT
6428 return;
6429 }
6430
8a048994 6431 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
1da177e4
LT
6432 ipr_gen_sense(ipr_cmd);
6433
cc9bd5d4
BK
6434 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6435
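 /*
 * Translate the adapter's IOASC into a midlayer result and decide
 * whether further ERP (cancel all / request sense) is required.
 */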
8a048994 6436 switch (masked_ioasc) {
1da177e4 6437 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
ee0a90fa 6438 if (ipr_is_naca_model(res))
6439 scsi_cmd->result |= (DID_ABORT << 16);
6440 else
6441 scsi_cmd->result |= (DID_IMM_RETRY << 16);
1da177e4
LT
6442 break;
6443 case IPR_IOASC_IR_RESOURCE_HANDLE:
b0df54bb 6444 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
1da177e4
LT
6445 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6446 break;
6447 case IPR_IOASC_HW_SEL_TIMEOUT:
6448 scsi_cmd->result |= (DID_NO_CONNECT << 16);
ee0a90fa 6449 if (!ipr_is_naca_model(res))
6450 res->needs_sync_complete = 1;
1da177e4
LT
6451 break;
6452 case IPR_IOASC_SYNC_REQUIRED:
6453 if (!res->in_erp)
6454 res->needs_sync_complete = 1;
6455 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6456 break;
6457 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
b0df54bb 6458 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
785a4704
MFO
6459 /*
6460 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6461 * so SCSI mid-layer and upper layers handle it accordingly.
6462 */
6463 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6464 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
1da177e4
LT
6465 break;
6466 case IPR_IOASC_BUS_WAS_RESET:
6467 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6468 /*
6469 * Report the bus reset and ask for a retry. The device
6470 * will give CC/UA the next command.
6471 */
6472 if (!res->resetting_device)
6473 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6474 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 6475 if (!ipr_is_naca_model(res))
6476 res->needs_sync_complete = 1;
1da177e4
LT
6477 break;
6478 case IPR_IOASC_HW_DEV_BUS_STATUS:
6479 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6480 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
ee0a90fa 6481 if (!ipr_get_autosense(ipr_cmd)) {
6482 if (!ipr_is_naca_model(res)) {
6483 ipr_erp_cancel_all(ipr_cmd);
6484 return;
6485 }
6486 }
1da177e4 6487 }
ee0a90fa 6488 if (!ipr_is_naca_model(res))
6489 res->needs_sync_complete = 1;
1da177e4
LT
6490 break;
6491 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6492 break;
f8ee25d7
WX
6493 case IPR_IOASC_IR_NON_OPTIMIZED:
6494 if (res->raw_mode) {
6495 res->raw_mode = 0;
6496 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6497 } else
6498 scsi_cmd->result |= (DID_ERROR << 16);
6499 break;
1da177e4 6500 default:
5b7304fb
BK
6501 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6502 scsi_cmd->result |= (DID_ERROR << 16);
ee0a90fa 6503 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
1da177e4
LT
6504 res->needs_sync_complete = 1;
6505 break;
6506 }
6507
63015bc9 6508 scsi_dma_unmap(ipr_cmd->scsi_cmd);
acd3c42d 6509 scsi_done(scsi_cmd);
66a0d59c
BK
6510 if (ipr_cmd->eh_comp)
6511 complete(ipr_cmd->eh_comp);
6512 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
6513}
6514
6515/**
6516 * ipr_scsi_done - mid-layer done function
6517 * @ipr_cmd: ipr command struct
6518 *
6519 * This function is invoked by the interrupt handler for
6520 * ops generated by the SCSI mid-layer
6521 *
6522 * Return value:
6523 * none
6524 **/
6525static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6526{
6527 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6528 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
96d21f00 6529 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
36b8e180 6530 unsigned long lock_flags;
1da177e4 6531
96d21f00 6532 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
1da177e4
LT
6533
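 /*
 * Fast path: a clean completion only needs the hrrq lock; anything
 * that reports a sense key is handed to ERP under the host lock.
 */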
6534 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
172cd6e1
BK
6535 scsi_dma_unmap(scsi_cmd);
6536
36b8e180 6537 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
acd3c42d 6538 scsi_done(scsi_cmd);
66a0d59c
BK
6539 if (ipr_cmd->eh_comp)
6540 complete(ipr_cmd->eh_comp);
6541 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
36b8e180 6542 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
172cd6e1 6543 } else {
36b8e180
BK
6544 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6545 spin_lock(&ipr_cmd->hrrq->_lock);
1da177e4 6546 ipr_erp_start(ioa_cfg, ipr_cmd);
36b8e180
BK
6547 spin_unlock(&ipr_cmd->hrrq->_lock);
6548 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
172cd6e1 6549 }
1da177e4
LT
6550}
6551
1da177e4
LT
6552/**
6553 * ipr_queuecommand - Queue a mid-layer request
00bfef2c 6554 * @shost: scsi host struct
1da177e4 6555 * @scsi_cmd: scsi command struct
1da177e4
LT
6556 *
6557 * This function queues a request generated by the mid-layer.
6558 *
6559 * Return value:
6560 * 0 on success
6561 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6562 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6563 **/
00bfef2c
BK
6564static int ipr_queuecommand(struct Scsi_Host *shost,
6565 struct scsi_cmnd *scsi_cmd)
1da177e4
LT
6566{
6567 struct ipr_ioa_cfg *ioa_cfg;
6568 struct ipr_resource_entry *res;
6569 struct ipr_ioarcb *ioarcb;
6570 struct ipr_cmnd *ipr_cmd;
56d6aa33 6571 unsigned long hrrq_flags, lock_flags;
d12f1576 6572 int rc;
05a6538a 6573 struct ipr_hrr_queue *hrrq;
6574 int hrrq_id;
1da177e4 6575
00bfef2c
BK
6576 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6577
1da177e4 6578 scsi_cmd->result = (DID_OK << 16);
00bfef2c 6579 res = scsi_cmd->device->hostdata;
56d6aa33 6580
6581 if (ipr_is_gata(res) && res->sata_port) {
6582 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6583 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6584 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6585 return rc;
6586 }
6587
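 /* Pick one of the host RRQs for this command; using multiple RRQs lets
 * completion processing be spread across the available interrupt vectors. */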
05a6538a 6588 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6589 hrrq = &ioa_cfg->hrrq[hrrq_id];
1da177e4 6590
56d6aa33 6591 spin_lock_irqsave(hrrq->lock, hrrq_flags);
1da177e4
LT
6592 /*
6593 * We are currently blocking all devices due to a host reset
6594 * We have told the host to stop giving us new requests, but
6595 * ERP ops don't count. FIXME
6596 */
bfae7820 6597 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
56d6aa33 6598 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6599 return SCSI_MLQUEUE_HOST_BUSY;
00bfef2c 6600 }
1da177e4
LT
6601
6602 /*
6603 * FIXME - Create scsi_set_host_offline interface
6604 * and the ioa_is_dead check can be removed
6605 */
bfae7820 6606 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
56d6aa33 6607 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c 6608 goto err_nodev;
1da177e4
LT
6609 }
6610
05a6538a 6611 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6612 if (ipr_cmd == NULL) {
56d6aa33 6613 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
05a6538a 6614 return SCSI_MLQUEUE_HOST_BUSY;
6615 }
56d6aa33 6616 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
35a39691 6617
172cd6e1 6618 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
1da177e4 6619 ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
6620
6621 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6622 ipr_cmd->scsi_cmd = scsi_cmd;
172cd6e1 6623 ipr_cmd->done = ipr_scsi_eh_done;
1da177e4 6624
4f92d01a 6625 if (ipr_is_gscsi(res)) {
1da177e4
LT
6626 if (scsi_cmd->underflow == 0)
6627 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6628
4f92d01a 6629 if (res->reset_occurred) {
0b1f8d44 6630 res->reset_occurred = 0;
ab6c10b1 6631 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
0b1f8d44 6632 }
4f92d01a
GKB
6633 }
6634
6635 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6636 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6637
1da177e4 6638 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
50668633
CH
6639 if (scsi_cmd->flags & SCMD_TAGGED)
6640 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6641 else
6642 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
1da177e4
LT
6643 }
6644
6645 if (scsi_cmd->cmnd[0] >= 0xC0 &&
05a6538a 6646 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
1da177e4 6647 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
05a6538a 6648 }
3cb4fc1f 6649 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
f8ee25d7 6650 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
1da177e4 6651
3cb4fc1f
GKB
6652 if (scsi_cmd->underflow == 0)
6653 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6654 }
6655
d12f1576
DC
6656 if (ioa_cfg->sis64)
6657 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6658 else
6659 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
1da177e4 6660
56d6aa33 6661 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6662 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
05a6538a 6663 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6664 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6665 if (!rc)
6666 scsi_dma_unmap(scsi_cmd);
a5fb407e 6667 return SCSI_MLQUEUE_HOST_BUSY;
1da177e4
LT
6668 }
6669
56d6aa33 6670 if (unlikely(hrrq->ioa_is_dead)) {
05a6538a 6671 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
56d6aa33 6672 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6673 scsi_dma_unmap(scsi_cmd);
6674 goto err_nodev;
6675 }
6676
6677 ioarcb->res_handle = res->res_handle;
6678 if (res->needs_sync_complete) {
6679 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6680 res->needs_sync_complete = 0;
6681 }
05a6538a 6682 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
00bfef2c 6683 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
a5fb407e 6684 ipr_send_command(ipr_cmd);
56d6aa33 6685 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
1da177e4 6686 return 0;
1da177e4 6687
00bfef2c 6688err_nodev:
56d6aa33 6689 spin_lock_irqsave(hrrq->lock, hrrq_flags);
00bfef2c
BK
6690 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6691 scsi_cmd->result = (DID_NO_CONNECT << 16);
acd3c42d 6692 scsi_done(scsi_cmd);
56d6aa33 6693 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
00bfef2c
BK
6694 return 0;
6695}
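/*
 * A note on the fast path above, kept as a sketch rather than a spec:
 * the per-HRRQ spinlock is intentionally taken twice.  The first hold
 * only reserves a free ipr_cmnd; the lock is dropped while the IOARCB
 * and IOADL are built, then re-taken to re-check allow_cmds/ioa_is_dead
 * before the command is queued and sent:
 *
 *	spin_lock_irqsave(hrrq->lock, flags);
 *	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
 *	spin_unlock_irqrestore(hrrq->lock, flags);
 *	... build the IOARCB/IOADL with no lock held ...
 *	spin_lock_irqsave(hrrq->lock, flags);
 *	... re-check state, list_add_tail(), ipr_send_command() ...
 *	spin_unlock_irqrestore(hrrq->lock, flags);
 */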
f281233d 6696
35a39691
BK
6697/**
6698 * ipr_ioctl - IOCTL handler
6699 * @sdev: scsi device struct
6700 * @cmd: IOCTL cmd
6701 * @arg: IOCTL arg
6702 *
6703 * Return value:
6704 * 0 on success / other on failure
6705 **/
6f4e626f
NC
6706static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
6707 void __user *arg)
35a39691
BK
6708{
6709 struct ipr_resource_entry *res;
6710
6711 res = (struct ipr_resource_entry *)sdev->hostdata;
0ce3a7e5
BK
6712 if (res && ipr_is_gata(res)) {
6713 if (cmd == HDIO_GET_IDENTITY)
6714 return -ENOTTY;
94be9a58 6715 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
0ce3a7e5 6716 }
35a39691
BK
6717
6718 return -EINVAL;
6719}
6720
1da177e4 6721/**
637b5c3e 6722 * ipr_ioa_info - Get information about the card/driver
a96099e2 6723 * @host: scsi host struct
1da177e4
LT
6724 *
6725 * Return value:
6726 * pointer to buffer with description string
6727 **/
203fa3fe 6728static const char *ipr_ioa_info(struct Scsi_Host *host)
1da177e4
LT
6729{
6730 static char buffer[512];
6731 struct ipr_ioa_cfg *ioa_cfg;
6732 unsigned long lock_flags = 0;
6733
6734 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6735
6736 spin_lock_irqsave(host->host_lock, lock_flags);
6737 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6738 spin_unlock_irqrestore(host->host_lock, lock_flags);
6739
6740 return buffer;
6741}
6742
6743static struct scsi_host_template driver_template = {
6744 .module = THIS_MODULE,
6745 .name = "IPR",
6746 .info = ipr_ioa_info,
35a39691 6747 .ioctl = ipr_ioctl,
75c0b0e1
AB
6748#ifdef CONFIG_COMPAT
6749 .compat_ioctl = ipr_ioctl,
6750#endif
1da177e4 6751 .queuecommand = ipr_queuecommand,
b8f1d1e0 6752 .dma_need_drain = ata_scsi_dma_need_drain,
1da177e4
LT
6753 .eh_abort_handler = ipr_eh_abort,
6754 .eh_device_reset_handler = ipr_eh_dev_reset,
6755 .eh_host_reset_handler = ipr_eh_host_reset,
6756 .slave_alloc = ipr_slave_alloc,
6757 .slave_configure = ipr_slave_configure,
6758 .slave_destroy = ipr_slave_destroy,
f688f96d 6759 .scan_finished = ipr_scan_finished,
35a39691
BK
6760 .target_alloc = ipr_target_alloc,
6761 .target_destroy = ipr_target_destroy,
1da177e4 6762 .change_queue_depth = ipr_change_queue_depth,
1da177e4
LT
6763 .bios_param = ipr_biosparam,
6764 .can_queue = IPR_MAX_COMMANDS,
6765 .this_id = -1,
6766 .sg_tablesize = IPR_MAX_SGLIST,
6767 .max_sectors = IPR_IOA_MAX_SECTORS,
6768 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
47d1e6ae
BVA
6769 .shost_groups = ipr_ioa_groups,
6770 .sdev_groups = ipr_dev_groups,
54b2b50c 6771 .proc_name = IPR_NAME,
1da177e4
LT
6772};
6773
35a39691
BK
6774/**
6775 * ipr_ata_phy_reset - libata phy_reset handler
6776 * @ap: ata port to reset
6777 *
6778 **/
6779static void ipr_ata_phy_reset(struct ata_port *ap)
6780{
6781 unsigned long flags;
6782 struct ipr_sata_port *sata_port = ap->private_data;
6783 struct ipr_resource_entry *res = sata_port->res;
6784 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6785 int rc;
6786
6787 ENTER;
6788 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6789 while (ioa_cfg->in_reset_reload) {
35a39691
BK
6790 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6791 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6792 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6793 }
6794
56d6aa33 6795 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
35a39691
BK
6796 goto out_unlock;
6797
6798 rc = ipr_device_reset(ioa_cfg, res);
6799
6800 if (rc) {
3e4ec344 6801 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
6802 goto out_unlock;
6803 }
6804
3e7ebdfa
WB
6805 ap->link.device[0].class = res->ata_class;
6806 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
3e4ec344 6807 ap->link.device[0].class = ATA_DEV_NONE;
35a39691
BK
6808
6809out_unlock:
6810 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6811 LEAVE;
6812}
6813
6814/**
6815 * ipr_ata_post_internal - Cleanup after an internal command
6816 * @qc: ATA queued command
6817 *
6818 * Return value:
6819 * none
6820 **/
6821static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6822{
6823 struct ipr_sata_port *sata_port = qc->ap->private_data;
6824 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6825 struct ipr_cmnd *ipr_cmd;
05a6538a 6826 struct ipr_hrr_queue *hrrq;
35a39691
BK
6827 unsigned long flags;
6828
6829 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
203fa3fe 6830 while (ioa_cfg->in_reset_reload) {
73d98ff0
BK
6831 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6832 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6833 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6834 }
6835
05a6538a 6836 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 6837 spin_lock(&hrrq->_lock);
05a6538a 6838 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6839 if (ipr_cmd->qc == qc) {
6840 ipr_device_reset(ioa_cfg, sata_port->res);
6841 break;
6842 }
35a39691 6843 }
56d6aa33 6844 spin_unlock(&hrrq->_lock);
35a39691
BK
6845 }
6846 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6847}
6848
35a39691
BK
6849/**
6850 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6851 * @regs: destination
6852 * @tf: source ATA taskfile
6853 *
6854 * Return value:
6855 * none
6856 **/
6857static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6858 struct ata_taskfile *tf)
6859{
6860 regs->feature = tf->feature;
6861 regs->nsect = tf->nsect;
6862 regs->lbal = tf->lbal;
6863 regs->lbam = tf->lbam;
6864 regs->lbah = tf->lbah;
6865 regs->device = tf->device;
6866 regs->command = tf->command;
6867 regs->hob_feature = tf->hob_feature;
6868 regs->hob_nsect = tf->hob_nsect;
6869 regs->hob_lbal = tf->hob_lbal;
6870 regs->hob_lbam = tf->hob_lbam;
6871 regs->hob_lbah = tf->hob_lbah;
6872 regs->ctl = tf->ctl;
6873}
6874
6875/**
6876 * ipr_sata_done - done function for SATA commands
6877 * @ipr_cmd: ipr command struct
6878 *
6879 * This function is invoked by the interrupt handler for
6880 * ops generated by the SCSI mid-layer to SATA devices
6881 *
6882 * Return value:
6883 * none
6884 **/
6885static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6886{
6887 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6888 struct ata_queued_cmd *qc = ipr_cmd->qc;
6889 struct ipr_sata_port *sata_port = qc->ap->private_data;
6890 struct ipr_resource_entry *res = sata_port->res;
96d21f00 6891 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
35a39691 6892
56d6aa33 6893 spin_lock(&ipr_cmd->hrrq->_lock);
96d21f00
WB
6894 if (ipr_cmd->ioa_cfg->sis64)
6895 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6896 sizeof(struct ipr_ioasa_gata));
6897 else
6898 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6899 sizeof(struct ipr_ioasa_gata));
35a39691
BK
6900 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6901
96d21f00 6902 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
3e7ebdfa 6903 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
35a39691
BK
6904
6905 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
96d21f00 6906 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
35a39691 6907 else
96d21f00 6908 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
05a6538a 6909 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
56d6aa33 6910 spin_unlock(&ipr_cmd->hrrq->_lock);
35a39691
BK
6911 ata_qc_complete(qc);
6912}
6913
a32c055f
WB
6914/**
6915 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6916 * @ipr_cmd: ipr command struct
6917 * @qc: ATA queued command
6918 *
6919 **/
6920static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6921 struct ata_queued_cmd *qc)
6922{
6923 u32 ioadl_flags = 0;
6924 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1ac7c26d 6925 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
a32c055f
WB
6926 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6927 int len = qc->nbytes;
6928 struct scatterlist *sg;
6929 unsigned int si;
6930 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6931
6932 if (len == 0)
6933 return;
6934
6935 if (qc->dma_dir == DMA_TO_DEVICE) {
6936 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6937 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6938 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6939 ioadl_flags = IPR_IOADL_FLAGS_READ;
6940
6941 ioarcb->data_transfer_length = cpu_to_be32(len);
6942 ioarcb->ioadl_len =
6943 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6944 ioarcb->u.sis64_addr_data.data_ioadl_addr =
1ac7c26d 6945 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
a32c055f
WB
6946
6947 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6948 ioadl64->flags = cpu_to_be32(ioadl_flags);
6949 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6950 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6951
6952 last_ioadl64 = ioadl64;
6953 ioadl64++;
6954 }
6955
6956 if (likely(last_ioadl64))
6957 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6958}
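/*
 * Shape of the 64-bit ATA IOADL built above (summarized from the loop
 * in this function): one descriptor per DMA-mapped sg element, with the
 * final descriptor carrying the LAST flag so the adapter knows where
 * the list ends:
 *
 *	ioadl64[i].flags    = ioadl_flags (read or write)
 *	ioadl64[i].data_len = sg_dma_len(sg)
 *	ioadl64[i].address  = sg_dma_address(sg)
 *	ioadl64[n-1].flags |= IPR_IOADL_FLAGS_LAST
 */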
6959
35a39691
BK
6960/**
6961 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6962 * @ipr_cmd: ipr command struct
6963 * @qc: ATA queued command
6964 *
6965 **/
6966static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6967 struct ata_queued_cmd *qc)
6968{
6969 u32 ioadl_flags = 0;
6970 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
a32c055f 6971 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3be6cbd7 6972 struct ipr_ioadl_desc *last_ioadl = NULL;
dde20207 6973 int len = qc->nbytes;
35a39691 6974 struct scatterlist *sg;
ff2aeb1e 6975 unsigned int si;
35a39691
BK
6976
6977 if (len == 0)
6978 return;
6979
6980 if (qc->dma_dir == DMA_TO_DEVICE) {
6981 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6982 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
a32c055f
WB
6983 ioarcb->data_transfer_length = cpu_to_be32(len);
6984 ioarcb->ioadl_len =
35a39691
BK
6985 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6986 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6987 ioadl_flags = IPR_IOADL_FLAGS_READ;
6988 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6989 ioarcb->read_ioadl_len =
6990 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6991 }
6992
ff2aeb1e 6993 for_each_sg(qc->sg, sg, qc->n_elem, si) {
35a39691
BK
6994 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6995 ioadl->address = cpu_to_be32(sg_dma_address(sg));
3be6cbd7
JG
6996
6997 last_ioadl = ioadl;
6998 ioadl++;
35a39691 6999 }
3be6cbd7
JG
7000
7001 if (likely(last_ioadl))
7002 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
35a39691
BK
7003}
7004
56d6aa33 7005/**
7006 * ipr_qc_defer - Get a free ipr_cmd
7007 * @qc: queued command
7008 *
7009 * Return value:
7010 * 0 if success
7011 **/
7012static int ipr_qc_defer(struct ata_queued_cmd *qc)
7013{
7014 struct ata_port *ap = qc->ap;
7015 struct ipr_sata_port *sata_port = ap->private_data;
7016 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7017 struct ipr_cmnd *ipr_cmd;
7018 struct ipr_hrr_queue *hrrq;
7019 int hrrq_id;
7020
7021 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7022 hrrq = &ioa_cfg->hrrq[hrrq_id];
7023
7024 qc->lldd_task = NULL;
7025 spin_lock(&hrrq->_lock);
7026 if (unlikely(hrrq->ioa_is_dead)) {
7027 spin_unlock(&hrrq->_lock);
7028 return 0;
7029 }
7030
7031 if (unlikely(!hrrq->allow_cmds)) {
7032 spin_unlock(&hrrq->_lock);
7033 return ATA_DEFER_LINK;
7034 }
7035
7036 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7037 if (ipr_cmd == NULL) {
7038 spin_unlock(&hrrq->_lock);
7039 return ATA_DEFER_LINK;
7040 }
7041
7042 qc->lldd_task = ipr_cmd;
7043 spin_unlock(&hrrq->_lock);
7044 return 0;
7045}
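/*
 * ipr_qc_defer() pre-allocates the ipr_cmnd for a SATA qc and stashes
 * it in qc->lldd_task so that ipr_qc_issue() does not have to fail late
 * when the free queue is empty.  Rough call order as implied by these
 * handlers and the ipr_sata_ops table below (libata drives the calls):
 *
 *	->qc_defer	qc->lldd_task = ipr_cmd, or ATA_DEFER_LINK
 *	->qc_issue	consumes qc->lldd_task, builds the IOARCB,
 *			ipr_send_command()
 *	ipr_sata_done	caches the IOASA, ata_qc_complete(qc)
 */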
7046
35a39691
BK
7047/**
7048 * ipr_qc_issue - Issue a SATA qc to a device
7049 * @qc: queued command
7050 *
7051 * Return value:
7052 * 0 if success
7053 **/
7054static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7055{
7056 struct ata_port *ap = qc->ap;
7057 struct ipr_sata_port *sata_port = ap->private_data;
7058 struct ipr_resource_entry *res = sata_port->res;
7059 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7060 struct ipr_cmnd *ipr_cmd;
7061 struct ipr_ioarcb *ioarcb;
7062 struct ipr_ioarcb_ata_regs *regs;
7063
56d6aa33 7064 if (qc->lldd_task == NULL)
7065 ipr_qc_defer(qc);
7066
7067 ipr_cmd = qc->lldd_task;
7068 if (ipr_cmd == NULL)
0feeed82 7069 return AC_ERR_SYSTEM;
35a39691 7070
56d6aa33 7071 qc->lldd_task = NULL;
7072 spin_lock(&ipr_cmd->hrrq->_lock);
7073 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7074 ipr_cmd->hrrq->ioa_is_dead)) {
7075 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7076 spin_unlock(&ipr_cmd->hrrq->_lock);
7077 return AC_ERR_SYSTEM;
7078 }
7079
05a6538a 7080 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
35a39691 7081 ioarcb = &ipr_cmd->ioarcb;
35a39691 7082
a32c055f
WB
7083 if (ioa_cfg->sis64) {
7084 regs = &ipr_cmd->i.ata_ioadl.regs;
7085 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7086 } else
7087 regs = &ioarcb->u.add_data.u.regs;
7088
7089 memset(regs, 0, sizeof(*regs));
7090 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
35a39691 7091
56d6aa33 7092 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
35a39691
BK
7093 ipr_cmd->qc = qc;
7094 ipr_cmd->done = ipr_sata_done;
3e7ebdfa 7095 ipr_cmd->ioarcb.res_handle = res->res_handle;
35a39691
BK
7096 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7097 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7098 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
dde20207 7099 ipr_cmd->dma_use_sg = qc->n_elem;
35a39691 7100
a32c055f
WB
7101 if (ioa_cfg->sis64)
7102 ipr_build_ata_ioadl64(ipr_cmd, qc);
7103 else
7104 ipr_build_ata_ioadl(ipr_cmd, qc);
7105
35a39691
BK
7106 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7107 ipr_copy_sata_tf(regs, &qc->tf);
7108 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
3e7ebdfa 7109 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
35a39691
BK
7110
7111 switch (qc->tf.protocol) {
7112 case ATA_PROT_NODATA:
7113 case ATA_PROT_PIO:
7114 break;
7115
7116 case ATA_PROT_DMA:
7117 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7118 break;
7119
0dc36888
TH
7120 case ATAPI_PROT_PIO:
7121 case ATAPI_PROT_NODATA:
35a39691
BK
7122 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7123 break;
7124
0dc36888 7125 case ATAPI_PROT_DMA:
35a39691
BK
7126 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7127 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7128 break;
7129
7130 default:
7131 WARN_ON(1);
56d6aa33 7132 spin_unlock(&ipr_cmd->hrrq->_lock);
0feeed82 7133 return AC_ERR_INVALID;
35a39691
BK
7134 }
7135
a32c055f 7136 ipr_send_command(ipr_cmd);
56d6aa33 7137 spin_unlock(&ipr_cmd->hrrq->_lock);
a32c055f 7138
35a39691
BK
7139 return 0;
7140}
7141
4c9bf4e7
TH
7142/**
7143 * ipr_qc_fill_rtf - Read result TF
7144 * @qc: ATA queued command
4c9bf4e7 7145 **/
931139af 7146static void ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
4c9bf4e7
TH
7147{
7148 struct ipr_sata_port *sata_port = qc->ap->private_data;
7149 struct ipr_ioasa_gata *g = &sata_port->ioasa;
7150 struct ata_taskfile *tf = &qc->result_tf;
7151
7152 tf->feature = g->error;
7153 tf->nsect = g->nsect;
7154 tf->lbal = g->lbal;
7155 tf->lbam = g->lbam;
7156 tf->lbah = g->lbah;
7157 tf->device = g->device;
7158 tf->command = g->status;
7159 tf->hob_nsect = g->hob_nsect;
7160 tf->hob_lbal = g->hob_lbal;
7161 tf->hob_lbam = g->hob_lbam;
7162 tf->hob_lbah = g->hob_lbah;
4c9bf4e7
TH
7163}
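/*
 * The result taskfile above is filled from sata_port->ioasa, the copy
 * of the IOASA GATA area that ipr_sata_done() cached at completion
 * time; by the point libata calls ->qc_fill_rtf the ipr_cmnd itself has
 * already been returned to the HRRQ free queue.
 */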
7164
35a39691 7165static struct ata_port_operations ipr_sata_ops = {
35a39691 7166 .phy_reset = ipr_ata_phy_reset,
a1efdaba 7167 .hardreset = ipr_sata_reset,
35a39691 7168 .post_internal_cmd = ipr_ata_post_internal,
35a39691 7169 .qc_prep = ata_noop_qc_prep,
56d6aa33 7170 .qc_defer = ipr_qc_defer,
35a39691 7171 .qc_issue = ipr_qc_issue,
4c9bf4e7 7172 .qc_fill_rtf = ipr_qc_fill_rtf,
35a39691
BK
7173 .port_start = ata_sas_port_start,
7174 .port_stop = ata_sas_port_stop
7175};
7176
7177static struct ata_port_info sata_port_info = {
5067c046
SL
7178 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7179 ATA_FLAG_SAS_HOST,
0f2e0330
SS
7180 .pio_mask = ATA_PIO4_ONLY,
7181 .mwdma_mask = ATA_MWDMA2,
7182 .udma_mask = ATA_UDMA6,
35a39691
BK
7183 .port_ops = &ipr_sata_ops
7184};
7185
1da177e4
LT
7186#ifdef CONFIG_PPC_PSERIES
7187static const u16 ipr_blocked_processors[] = {
d3dbeef6
ME
7188 PVR_NORTHSTAR,
7189 PVR_PULSAR,
7190 PVR_POWER4,
7191 PVR_ICESTAR,
7192 PVR_SSTAR,
7193 PVR_POWER4p,
7194 PVR_630,
7195 PVR_630p
1da177e4
LT
7196};
7197
7198/**
7199 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7200 * @ioa_cfg: ioa cfg struct
7201 *
7202 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7203 * certain pSeries hardware. This function determines if the given
 7204 * adapter is in one of these configurations or not.
7205 *
7206 * Return value:
7207 * 1 if adapter is not supported / 0 if adapter is supported
7208 **/
7209static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7210{
1da177e4
LT
7211 int i;
7212
44c10138 7213 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
203fa3fe 7214 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
d3dbeef6 7215 if (pvr_version_is(ipr_blocked_processors[i]))
44c10138 7216 return 1;
1da177e4
LT
7217 }
7218 }
7219 return 0;
7220}
7221#else
7222#define ipr_invalid_adapter(ioa_cfg) 0
7223#endif
7224
7225/**
7226 * ipr_ioa_bringdown_done - IOA bring down completion.
7227 * @ipr_cmd: ipr command struct
7228 *
7229 * This function processes the completion of an adapter bring down.
7230 * It wakes any reset sleepers.
7231 *
7232 * Return value:
7233 * IPR_RC_JOB_RETURN
7234 **/
7235static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7236{
7237 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96b04db9 7238 int i;
1da177e4
LT
7239
7240 ENTER;
bfae7820
BK
7241 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7242 ipr_trace;
b0e17a9b
BK
7243 ioa_cfg->scsi_unblock = 1;
7244 schedule_work(&ioa_cfg->work_q);
bfae7820
BK
7245 }
7246
1da177e4
LT
7247 ioa_cfg->in_reset_reload = 0;
7248 ioa_cfg->reset_retries = 0;
96b04db9 7249 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7250 spin_lock(&ioa_cfg->hrrq[i]._lock);
7251 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7252 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7253 }
7254 wmb();
7255
05a6538a 7256 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4 7257 wake_up_all(&ioa_cfg->reset_wait_q);
1da177e4
LT
7258 LEAVE;
7259
7260 return IPR_RC_JOB_RETURN;
7261}
7262
7263/**
7264 * ipr_ioa_reset_done - IOA reset completion.
7265 * @ipr_cmd: ipr command struct
7266 *
7267 * This function processes the completion of an adapter reset.
7268 * It schedules any necessary mid-layer add/removes and
7269 * wakes any reset sleepers.
7270 *
7271 * Return value:
7272 * IPR_RC_JOB_RETURN
7273 **/
7274static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7275{
7276 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7277 struct ipr_resource_entry *res;
afc3f83c 7278 int j;
1da177e4
LT
7279
7280 ENTER;
7281 ioa_cfg->in_reset_reload = 0;
56d6aa33 7282 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7283 spin_lock(&ioa_cfg->hrrq[j]._lock);
7284 ioa_cfg->hrrq[j].allow_cmds = 1;
7285 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7286 }
7287 wmb();
1da177e4 7288 ioa_cfg->reset_cmd = NULL;
3d1d0da6 7289 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
1da177e4
LT
7290
7291 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
f688f96d 7292 if (res->add_to_ml || res->del_from_ml) {
1da177e4
LT
7293 ipr_trace;
7294 break;
7295 }
7296 }
7297 schedule_work(&ioa_cfg->work_q);
7298
afc3f83c
BK
7299 for (j = 0; j < IPR_NUM_HCAMS; j++) {
7300 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7301 if (j < IPR_NUM_LOG_HCAMS)
7302 ipr_send_hcam(ioa_cfg,
7303 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7304 ioa_cfg->hostrcb[j]);
1da177e4 7305 else
afc3f83c
BK
7306 ipr_send_hcam(ioa_cfg,
7307 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7308 ioa_cfg->hostrcb[j]);
1da177e4
LT
7309 }
7310
6bb04170 7311 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
1da177e4
LT
7312 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7313
7314 ioa_cfg->reset_retries = 0;
05a6538a 7315 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1da177e4
LT
7316 wake_up_all(&ioa_cfg->reset_wait_q);
7317
b0e17a9b 7318 ioa_cfg->scsi_unblock = 1;
f688f96d 7319 schedule_work(&ioa_cfg->work_q);
1da177e4
LT
7320 LEAVE;
7321 return IPR_RC_JOB_RETURN;
7322}
7323
7324/**
7325 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7326 * @supported_dev: supported device struct
7327 * @vpids: vendor product id struct
7328 *
7329 * Return value:
7330 * none
7331 **/
7332static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7333 struct ipr_std_inq_vpids *vpids)
7334{
7335 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7336 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7337 supported_dev->num_records = 1;
7338 supported_dev->data_length =
7339 cpu_to_be16(sizeof(struct ipr_supported_device));
7340 supported_dev->reserved = 0;
7341}
7342
7343/**
7344 * ipr_set_supported_devs - Send Set Supported Devices for a device
7345 * @ipr_cmd: ipr command struct
7346 *
a32c055f 7347 * This function sends a Set Supported Devices to the adapter
1da177e4
LT
7348 *
7349 * Return value:
7350 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7351 **/
7352static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7353{
7354 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7355 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
1da177e4
LT
7356 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7357 struct ipr_resource_entry *res = ipr_cmd->u.res;
7358
7359 ipr_cmd->job_step = ipr_ioa_reset_done;
7360
7361 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
e4fbf44e 7362 if (!ipr_is_scsi_disk(res))
1da177e4
LT
7363 continue;
7364
7365 ipr_cmd->u.res = res;
3e7ebdfa 7366 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
1da177e4
LT
7367
7368 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7369 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7370 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7371
7372 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
3e7ebdfa 7373 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
1da177e4
LT
7374 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7375 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7376
a32c055f
WB
7377 ipr_init_ioadl(ipr_cmd,
7378 ioa_cfg->vpd_cbs_dma +
7379 offsetof(struct ipr_misc_cbs, supp_dev),
7380 sizeof(struct ipr_supported_device),
7381 IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
7382
7383 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7384 IPR_SET_SUP_DEVICE_TIMEOUT);
7385
3e7ebdfa
WB
7386 if (!ioa_cfg->sis64)
7387 ipr_cmd->job_step = ipr_set_supported_devs;
05a6538a 7388 LEAVE;
1da177e4
LT
7389 return IPR_RC_JOB_RETURN;
7390 }
7391
05a6538a 7392 LEAVE;
1da177e4
LT
7393 return IPR_RC_JOB_CONTINUE;
7394}
7395
7396/**
7397 * ipr_get_mode_page - Locate specified mode page
7398 * @mode_pages: mode page buffer
7399 * @page_code: page code to find
7400 * @len: minimum required length for mode page
7401 *
7402 * Return value:
7403 * pointer to mode page / NULL on failure
7404 **/
7405static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7406 u32 page_code, u32 len)
7407{
7408 struct ipr_mode_page_hdr *mode_hdr;
7409 u32 page_length;
7410 u32 length;
7411
7412 if (!mode_pages || (mode_pages->hdr.length == 0))
7413 return NULL;
7414
7415 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7416 mode_hdr = (struct ipr_mode_page_hdr *)
7417 (mode_pages->data + mode_pages->hdr.block_desc_len);
7418
7419 while (length) {
7420 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7421 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7422 return mode_hdr;
7423 break;
7424 } else {
7425 page_length = (sizeof(struct ipr_mode_page_hdr) +
7426 mode_hdr->page_length);
7427 length -= page_length;
7428 mode_hdr = (struct ipr_mode_page_hdr *)
7429 ((unsigned long)mode_hdr + page_length);
7430 }
7431 }
7432 return NULL;
7433}
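/*
 * Typical use, as seen later in this file: walk the mode sense buffer
 * for a specific page and require a minimum page length, e.g.
 *
 *	mode_page = ipr_get_mode_page(mode_pages, 0x28,
 *				      sizeof(struct ipr_mode_page28));
 *
 * A NULL return means the page was absent or shorter than requested.
 */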
7434
7435/**
7436 * ipr_check_term_power - Check for term power errors
7437 * @ioa_cfg: ioa config struct
7438 * @mode_pages: IOAFP mode pages buffer
7439 *
7440 * Check the IOAFP's mode page 28 for term power errors
7441 *
7442 * Return value:
7443 * nothing
7444 **/
7445static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7446 struct ipr_mode_pages *mode_pages)
7447{
7448 int i;
7449 int entry_length;
7450 struct ipr_dev_bus_entry *bus;
7451 struct ipr_mode_page28 *mode_page;
7452
7453 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7454 sizeof(struct ipr_mode_page28));
7455
7456 entry_length = mode_page->entry_length;
7457
7458 bus = mode_page->bus;
7459
7460 for (i = 0; i < mode_page->num_entries; i++) {
7461 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7462 dev_err(&ioa_cfg->pdev->dev,
7463 "Term power is absent on scsi bus %d\n",
7464 bus->res_addr.bus);
7465 }
7466
7467 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7468 }
7469}
7470
7471/**
7472 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7473 * @ioa_cfg: ioa config struct
7474 *
7475 * Looks through the config table checking for SES devices. If
7476 * the SES device is in the SES table indicating a maximum SCSI
7477 * bus speed, the speed is limited for the bus.
7478 *
7479 * Return value:
7480 * none
7481 **/
7482static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7483{
7484 u32 max_xfer_rate;
7485 int i;
7486
7487 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7488 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7489 ioa_cfg->bus_attr[i].bus_width);
7490
7491 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7492 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7493 }
7494}
7495
7496/**
7497 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7498 * @ioa_cfg: ioa config struct
7499 * @mode_pages: mode page 28 buffer
7500 *
7501 * Updates mode page 28 based on driver configuration
7502 *
7503 * Return value:
7504 * none
7505 **/
7506static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
203fa3fe 7507 struct ipr_mode_pages *mode_pages)
1da177e4
LT
7508{
7509 int i, entry_length;
7510 struct ipr_dev_bus_entry *bus;
7511 struct ipr_bus_attributes *bus_attr;
7512 struct ipr_mode_page28 *mode_page;
7513
7514 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7515 sizeof(struct ipr_mode_page28));
7516
7517 entry_length = mode_page->entry_length;
7518
7519 /* Loop for each device bus entry */
7520 for (i = 0, bus = mode_page->bus;
7521 i < mode_page->num_entries;
7522 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7523 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7524 dev_err(&ioa_cfg->pdev->dev,
7525 "Invalid resource address reported: 0x%08X\n",
7526 IPR_GET_PHYS_LOC(bus->res_addr));
7527 continue;
7528 }
7529
7530 bus_attr = &ioa_cfg->bus_attr[i];
7531 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7532 bus->bus_width = bus_attr->bus_width;
7533 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7534 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7535 if (bus_attr->qas_enabled)
7536 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7537 else
7538 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7539 }
7540}
7541
7542/**
7543 * ipr_build_mode_select - Build a mode select command
7544 * @ipr_cmd: ipr command struct
7545 * @res_handle: resource handle to send command to
7546 * @parm: Byte 2 of Mode Sense command
7547 * @dma_addr: DMA buffer address
7548 * @xfer_len: data transfer length
7549 *
7550 * Return value:
7551 * none
7552 **/
7553static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
a32c055f
WB
7554 __be32 res_handle, u8 parm,
7555 dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7556{
1da177e4
LT
7557 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7558
7559 ioarcb->res_handle = res_handle;
7560 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7561 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7562 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7563 ioarcb->cmd_pkt.cdb[1] = parm;
7564 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7565
a32c055f 7566 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
1da177e4
LT
7567}
7568
7569/**
7570 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7571 * @ipr_cmd: ipr command struct
7572 *
7573 * This function sets up the SCSI bus attributes and sends
7574 * a Mode Select for Page 28 to activate them.
7575 *
7576 * Return value:
7577 * IPR_RC_JOB_RETURN
7578 **/
7579static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7580{
7581 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7582 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7583 int length;
7584
7585 ENTER;
4733804c
BK
7586 ipr_scsi_bus_speed_limit(ioa_cfg);
7587 ipr_check_term_power(ioa_cfg, mode_pages);
7588 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7589 length = mode_pages->hdr.length + 1;
7590 mode_pages->hdr.length = 0;
1da177e4
LT
7591
7592 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7593 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7594 length);
7595
f72919ec
WB
7596 ipr_cmd->job_step = ipr_set_supported_devs;
7597 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7598 struct ipr_resource_entry, queue);
1da177e4
LT
7599 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7600
7601 LEAVE;
7602 return IPR_RC_JOB_RETURN;
7603}
7604
7605/**
7606 * ipr_build_mode_sense - Builds a mode sense command
7607 * @ipr_cmd: ipr command struct
a96099e2 7608 * @res_handle: resource handle to send command to
1da177e4
LT
7609 * @parm: Byte 2 of mode sense command
7610 * @dma_addr: DMA address of mode sense buffer
7611 * @xfer_len: Size of DMA buffer
7612 *
7613 * Return value:
7614 * none
7615 **/
7616static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7617 __be32 res_handle,
a32c055f 7618 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
1da177e4 7619{
1da177e4
LT
7620 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7621
7622 ioarcb->res_handle = res_handle;
7623 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7624 ioarcb->cmd_pkt.cdb[2] = parm;
7625 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7626 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7627
a32c055f 7628 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7629}
7630
dfed823e 7631/**
7632 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7633 * @ipr_cmd: ipr command struct
7634 *
7635 * This function handles the failure of an IOA bringup command.
7636 *
7637 * Return value:
7638 * IPR_RC_JOB_RETURN
7639 **/
7640static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7641{
7642 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7643 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e 7644
7645 dev_err(&ioa_cfg->pdev->dev,
7646 "0x%02X failed with IOASC: 0x%08X\n",
7647 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7648
7649 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
05a6538a 7650 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
dfed823e 7651 return IPR_RC_JOB_RETURN;
7652}
7653
7654/**
7655 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7656 * @ipr_cmd: ipr command struct
7657 *
7658 * This function handles the failure of a Mode Sense to the IOAFP.
7659 * Some adapters do not handle all mode pages.
7660 *
7661 * Return value:
7662 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7663 **/
7664static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7665{
f72919ec 7666 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
96d21f00 7667 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
dfed823e 7668
7669 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
f72919ec
WB
7670 ipr_cmd->job_step = ipr_set_supported_devs;
7671 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7672 struct ipr_resource_entry, queue);
dfed823e 7673 return IPR_RC_JOB_CONTINUE;
7674 }
7675
7676 return ipr_reset_cmd_failed(ipr_cmd);
7677}
7678
1da177e4
LT
7679/**
7680 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7681 * @ipr_cmd: ipr command struct
7682 *
 7683 * This function sends a Page 28 mode sense to the IOA to
7684 * retrieve SCSI bus attributes.
7685 *
7686 * Return value:
7687 * IPR_RC_JOB_RETURN
7688 **/
7689static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7690{
7691 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7692
7693 ENTER;
7694 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7695 0x28, ioa_cfg->vpd_cbs_dma +
7696 offsetof(struct ipr_misc_cbs, mode_pages),
7697 sizeof(struct ipr_mode_pages));
7698
7699 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
dfed823e 7700 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
1da177e4
LT
7701
7702 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7703
7704 LEAVE;
7705 return IPR_RC_JOB_RETURN;
7706}
7707
ac09c349
BK
7708/**
7709 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7710 * @ipr_cmd: ipr command struct
7711 *
7712 * This function enables dual IOA RAID support if possible.
7713 *
7714 * Return value:
7715 * IPR_RC_JOB_RETURN
7716 **/
7717static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7718{
7719 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7720 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7721 struct ipr_mode_page24 *mode_page;
7722 int length;
7723
7724 ENTER;
7725 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7726 sizeof(struct ipr_mode_page24));
7727
7728 if (mode_page)
7729 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7730
7731 length = mode_pages->hdr.length + 1;
7732 mode_pages->hdr.length = 0;
7733
7734 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7735 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7736 length);
7737
7738 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7739 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7740
7741 LEAVE;
7742 return IPR_RC_JOB_RETURN;
7743}
7744
7745/**
7746 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7747 * @ipr_cmd: ipr command struct
7748 *
7749 * This function handles the failure of a Mode Sense to the IOAFP.
7750 * Some adapters do not handle all mode pages.
7751 *
7752 * Return value:
7753 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7754 **/
7755static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7756{
96d21f00 7757 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
ac09c349
BK
7758
7759 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7760 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7761 return IPR_RC_JOB_CONTINUE;
7762 }
7763
7764 return ipr_reset_cmd_failed(ipr_cmd);
7765}
7766
7767/**
7768 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7769 * @ipr_cmd: ipr command struct
7770 *
 7771 * This function sends a mode sense to the IOA to retrieve
7772 * the IOA Advanced Function Control mode page.
7773 *
7774 * Return value:
7775 * IPR_RC_JOB_RETURN
7776 **/
7777static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7778{
7779 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7780
7781 ENTER;
7782 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7783 0x24, ioa_cfg->vpd_cbs_dma +
7784 offsetof(struct ipr_misc_cbs, mode_pages),
7785 sizeof(struct ipr_mode_pages));
7786
7787 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7788 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7789
7790 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7791
7792 LEAVE;
7793 return IPR_RC_JOB_RETURN;
7794}
7795
1da177e4
LT
7796/**
7797 * ipr_init_res_table - Initialize the resource table
7798 * @ipr_cmd: ipr command struct
7799 *
7800 * This function looks through the existing resource table, comparing
7801 * it with the config table. This function will take care of old/new
7802 * devices and schedule adding/removing them from the mid-layer
7803 * as appropriate.
7804 *
7805 * Return value:
7806 * IPR_RC_JOB_CONTINUE
7807 **/
7808static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7809{
7810 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7811 struct ipr_resource_entry *res, *temp;
3e7ebdfa
WB
7812 struct ipr_config_table_entry_wrapper cfgtew;
7813 int entries, found, flag, i;
1da177e4
LT
7814 LIST_HEAD(old_res);
7815
7816 ENTER;
3e7ebdfa
WB
7817 if (ioa_cfg->sis64)
7818 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7819 else
7820 flag = ioa_cfg->u.cfg_table->hdr.flags;
7821
7822 if (flag & IPR_UCODE_DOWNLOAD_REQ)
1da177e4
LT
7823 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7824
7825 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7826 list_move_tail(&res->queue, &old_res);
7827
3e7ebdfa 7828 if (ioa_cfg->sis64)
438b0331 7829 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
3e7ebdfa
WB
7830 else
7831 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7832
7833 for (i = 0; i < entries; i++) {
7834 if (ioa_cfg->sis64)
7835 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7836 else
7837 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
1da177e4
LT
7838 found = 0;
7839
7840 list_for_each_entry_safe(res, temp, &old_res, queue) {
3e7ebdfa 7841 if (ipr_is_same_device(res, &cfgtew)) {
1da177e4
LT
7842 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7843 found = 1;
7844 break;
7845 }
7846 }
7847
7848 if (!found) {
7849 if (list_empty(&ioa_cfg->free_res_q)) {
7850 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7851 break;
7852 }
7853
7854 found = 1;
7855 res = list_entry(ioa_cfg->free_res_q.next,
7856 struct ipr_resource_entry, queue);
7857 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
3e7ebdfa 7858 ipr_init_res_entry(res, &cfgtew);
1da177e4 7859 res->add_to_ml = 1;
56115598
WB
7860 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7861 res->sdev->allow_restart = 1;
1da177e4
LT
7862
7863 if (found)
3e7ebdfa 7864 ipr_update_res_entry(res, &cfgtew);
1da177e4
LT
7865 }
7866
7867 list_for_each_entry_safe(res, temp, &old_res, queue) {
7868 if (res->sdev) {
7869 res->del_from_ml = 1;
3e7ebdfa 7870 res->res_handle = IPR_INVALID_RES_HANDLE;
1da177e4 7871 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
1da177e4
LT
7872 }
7873 }
7874
3e7ebdfa
WB
7875 list_for_each_entry_safe(res, temp, &old_res, queue) {
7876 ipr_clear_res_target(res);
7877 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7878 }
7879
ac09c349
BK
7880 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7881 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7882 else
7883 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
1da177e4
LT
7884
7885 LEAVE;
7886 return IPR_RC_JOB_CONTINUE;
7887}
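/*
 * Reconciliation performed above, in brief: every known resource is
 * first moved to the local old_res list, then each config table entry
 * is matched against it.  Matches move back to used_res_q and are
 * updated; unmatched table entries consume a free resource entry and
 * are flagged add_to_ml; leftover old_res entries with an sdev are
 * flagged del_from_ml, and the remainder go back to free_res_q.
 */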
7888
7889/**
7890 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7891 * @ipr_cmd: ipr command struct
7892 *
7893 * This function sends a Query IOA Configuration command
7894 * to the adapter to retrieve the IOA configuration table.
7895 *
7896 * Return value:
7897 * IPR_RC_JOB_RETURN
7898 **/
7899static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7900{
7901 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7902 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4 7903 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
ac09c349 7904 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
1da177e4
LT
7905
7906 ENTER;
ac09c349
BK
7907 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7908 ioa_cfg->dual_raid = 1;
1da177e4
LT
7909 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7910 ucode_vpd->major_release, ucode_vpd->card_type,
7911 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7912 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7913 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7914
7915 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
438b0331 7916 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
3e7ebdfa
WB
7917 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7918 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
1da177e4 7919
3e7ebdfa 7920 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
a32c055f 7921 IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
7922
7923 ipr_cmd->job_step = ipr_init_res_table;
7924
7925 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7926
7927 LEAVE;
7928 return IPR_RC_JOB_RETURN;
7929}
7930
1a47af26
GKB
7931static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7932{
7933 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7934
7935 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7936 return IPR_RC_JOB_CONTINUE;
7937
7938 return ipr_reset_cmd_failed(ipr_cmd);
7939}
7940
7941static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7942 __be32 res_handle, u8 sa_code)
7943{
7944 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7945
7946 ioarcb->res_handle = res_handle;
7947 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7948 ioarcb->cmd_pkt.cdb[1] = sa_code;
7949 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7950}
7951
7952/**
7953 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7954 * action
a96099e2 7955 * @ipr_cmd: ipr command struct
1a47af26
GKB
7956 *
7957 * Return value:
 7958 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7959 **/
7960static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7961{
7962 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7963 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7964 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7965
7966 ENTER;
7967
7968 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7969
7970 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7971 ipr_build_ioa_service_action(ipr_cmd,
7972 cpu_to_be32(IPR_IOA_RES_HANDLE),
7973 IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7974
7975 ioarcb->cmd_pkt.cdb[2] = 0x40;
7976
7977 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7978 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7979 IPR_SET_SUP_DEVICE_TIMEOUT);
7980
7981 LEAVE;
7982 return IPR_RC_JOB_RETURN;
7983 }
7984
7985 LEAVE;
7986 return IPR_RC_JOB_CONTINUE;
7987}
7988
1da177e4
LT
7989/**
7990 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7991 * @ipr_cmd: ipr command struct
a96099e2
LJ
7992 * @flags: flags to send
7993 * @page: page to inquire
7994 * @dma_addr: DMA address
7995 * @xfer_len: transfer data length
1da177e4
LT
7996 *
7997 * This utility function sends an inquiry to the adapter.
7998 *
7999 * Return value:
8000 * none
8001 **/
8002static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
a32c055f 8003 dma_addr_t dma_addr, u8 xfer_len)
1da177e4
LT
8004{
8005 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
1da177e4
LT
8006
8007 ENTER;
8008 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8009 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8010
8011 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
8012 ioarcb->cmd_pkt.cdb[1] = flags;
8013 ioarcb->cmd_pkt.cdb[2] = page;
8014 ioarcb->cmd_pkt.cdb[4] = xfer_len;
8015
a32c055f 8016 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
1da177e4
LT
8017
8018 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
8019 LEAVE;
8020}
8021
62275040 8022/**
8023 * ipr_inquiry_page_supported - Is the given inquiry page supported
8024 * @page0: inquiry page 0 buffer
8025 * @page: page code.
8026 *
8027 * This function determines if the specified inquiry page is supported.
8028 *
8029 * Return value:
8030 * 1 if page is supported / 0 if not
8031 **/
8032static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8033{
8034 int i;
8035
8036 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8037 if (page0->page[i] == page)
8038 return 1;
8039
8040 return 0;
8041}
8042
1021b3ff
GKB
8043/**
8044 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8045 * @ipr_cmd: ipr command struct
8046 *
8047 * This function sends a Page 0xC4 inquiry to the adapter
8048 * to retrieve software VPD information.
8049 *
8050 * Return value:
8051 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8052 **/
8053static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8054{
8055 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8056 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8057 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8058
8059 ENTER;
1a47af26 8060 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
1021b3ff
GKB
8061 memset(pageC4, 0, sizeof(*pageC4));
8062
8063 if (ipr_inquiry_page_supported(page0, 0xC4)) {
8064 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8065 (ioa_cfg->vpd_cbs_dma
8066 + offsetof(struct ipr_misc_cbs,
8067 pageC4_data)),
8068 sizeof(struct ipr_inquiry_pageC4));
8069 return IPR_RC_JOB_RETURN;
8070 }
8071
8072 LEAVE;
8073 return IPR_RC_JOB_CONTINUE;
8074}
8075
ac09c349
BK
8076/**
8077 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8078 * @ipr_cmd: ipr command struct
8079 *
8080 * This function sends a Page 0xD0 inquiry to the adapter
8081 * to retrieve adapter capabilities.
8082 *
8083 * Return value:
8084 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8085 **/
8086static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8087{
8088 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8089 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8090 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8091
8092 ENTER;
1021b3ff 8093 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
ac09c349
BK
8094 memset(cap, 0, sizeof(*cap));
8095
8096 if (ipr_inquiry_page_supported(page0, 0xD0)) {
8097 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8098 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8099 sizeof(struct ipr_inquiry_cap));
8100 return IPR_RC_JOB_RETURN;
8101 }
8102
8103 LEAVE;
8104 return IPR_RC_JOB_CONTINUE;
8105}
8106
1da177e4
LT
8107/**
8108 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8109 * @ipr_cmd: ipr command struct
8110 *
8111 * This function sends a Page 3 inquiry to the adapter
8112 * to retrieve software VPD information.
8113 *
8114 * Return value:
8115 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8116 **/
8117static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
62275040 8118{
8119 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
62275040 8120
8121 ENTER;
8122
ac09c349 8123 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
62275040 8124
8125 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8126 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8127 sizeof(struct ipr_inquiry_page3));
8128
8129 LEAVE;
8130 return IPR_RC_JOB_RETURN;
8131}
8132
8133/**
8134 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8135 * @ipr_cmd: ipr command struct
8136 *
8137 * This function sends a Page 0 inquiry to the adapter
8138 * to retrieve supported inquiry pages.
8139 *
8140 * Return value:
8141 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8142 **/
8143static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
8144{
8145 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8146 char type[5];
8147
8148 ENTER;
8149
8150 /* Grab the type out of the VPD and store it away */
8151 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8152 type[4] = '\0';
8153 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8154
f688f96d
BK
8155 if (ipr_invalid_adapter(ioa_cfg)) {
8156 dev_err(&ioa_cfg->pdev->dev,
8157 "Adapter not supported in this hardware configuration.\n");
8158
8159 if (!ipr_testmode) {
8160 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8161 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8162 list_add_tail(&ipr_cmd->queue,
8163 &ioa_cfg->hrrq->hrrq_free_q);
8164 return IPR_RC_JOB_RETURN;
8165 }
8166 }
8167
62275040 8168 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
1da177e4 8169
62275040 8170 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8171 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8172 sizeof(struct ipr_inquiry_page0));
1da177e4
LT
8173
8174 LEAVE;
8175 return IPR_RC_JOB_RETURN;
8176}
8177
8178/**
8179 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8180 * @ipr_cmd: ipr command struct
8181 *
8182 * This function sends a standard inquiry to the adapter.
8183 *
8184 * Return value:
8185 * IPR_RC_JOB_RETURN
8186 **/
8187static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8188{
8189 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8190
8191 ENTER;
62275040 8192 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
1da177e4
LT
8193
8194 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8195 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8196 sizeof(struct ipr_ioa_vpd));
8197
8198 LEAVE;
8199 return IPR_RC_JOB_RETURN;
8200}
8201
8202/**
214777ba 8203 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
1da177e4
LT
8204 * @ipr_cmd: ipr command struct
8205 *
 8206 * This function sends an Identify Host Request Response Queue
8207 * command to establish the HRRQ with the adapter.
8208 *
8209 * Return value:
8210 * IPR_RC_JOB_RETURN
8211 **/
214777ba 8212static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
1da177e4
LT
8213{
8214 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8215 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
05a6538a 8216 struct ipr_hrr_queue *hrrq;
1da177e4
LT
8217
8218 ENTER;
05a6538a 8219 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
87adbe08
BK
8220 if (ioa_cfg->identify_hrrq_index == 0)
8221 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
1da177e4 8222
56d6aa33 8223 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8224 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
1da177e4 8225
05a6538a 8226 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8227 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1da177e4 8228
05a6538a 8229 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8230 if (ioa_cfg->sis64)
8231 ioarcb->cmd_pkt.cdb[1] = 0x1;
214777ba 8232
05a6538a 8233 if (ioa_cfg->nvectors == 1)
8234 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8235 else
8236 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8237
8238 ioarcb->cmd_pkt.cdb[2] =
8239 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8240 ioarcb->cmd_pkt.cdb[3] =
8241 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8242 ioarcb->cmd_pkt.cdb[4] =
8243 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8244 ioarcb->cmd_pkt.cdb[5] =
8245 ((u64) hrrq->host_rrq_dma) & 0xff;
8246 ioarcb->cmd_pkt.cdb[7] =
8247 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8248 ioarcb->cmd_pkt.cdb[8] =
8249 (sizeof(u32) * hrrq->size) & 0xff;
8250
8251 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 8252 ioarcb->cmd_pkt.cdb[9] =
8253 ioa_cfg->identify_hrrq_index;
1da177e4 8254
05a6538a 8255 if (ioa_cfg->sis64) {
8256 ioarcb->cmd_pkt.cdb[10] =
8257 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8258 ioarcb->cmd_pkt.cdb[11] =
8259 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8260 ioarcb->cmd_pkt.cdb[12] =
8261 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8262 ioarcb->cmd_pkt.cdb[13] =
8263 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8264 }
8265
8266 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
56d6aa33 8267 ioarcb->cmd_pkt.cdb[14] =
8268 ioa_cfg->identify_hrrq_index;
05a6538a 8269
8270 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8271 IPR_INTERNAL_TIMEOUT);
8272
56d6aa33 8273 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8274 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
05a6538a 8275
8276 LEAVE;
8277 return IPR_RC_JOB_RETURN;
05a6538a 8278 }
8279
1da177e4 8280 LEAVE;
05a6538a 8281 return IPR_RC_JOB_CONTINUE;
1da177e4
LT
8282}
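/*
 * CDB layout used by the Identify Host RRQ command above, derived from
 * the byte assignments in this function (offsets are CDB byte indices):
 *
 *	cdb[2..5]	host_rrq_dma bits 31..0 (MSB first)
 *	cdb[7..8]	queue size in bytes (sizeof(u32) * hrrq->size)
 *	cdb[9], cdb[14]	HRRQ index, when multi-queue select is enabled
 *	cdb[10..13]	host_rrq_dma bits 63..32 (SIS-64 adapters only)
 */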
8283
8284/**
8285 * ipr_reset_timer_done - Adapter reset timer function
a96099e2 8286 * @t: Timer context used to fetch ipr command struct
1da177e4
LT
8287 *
8288 * Description: This function is used in adapter reset processing
8289 * for timing events. If the reset_cmd pointer in the IOA
 8290 * config struct is not this adapter's, we are doing nested
8291 * resets and fail_all_ops will take care of freeing the
8292 * command block.
8293 *
8294 * Return value:
8295 * none
8296 **/
738c6ec5 8297static void ipr_reset_timer_done(struct timer_list *t)
1da177e4 8298{
738c6ec5 8299 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
1da177e4
LT
8300 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8301 unsigned long lock_flags = 0;
8302
8303 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8304
8305 if (ioa_cfg->reset_cmd == ipr_cmd) {
8306 list_del(&ipr_cmd->queue);
8307 ipr_cmd->done(ipr_cmd);
8308 }
8309
8310 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8311}
8312
8313/**
8314 * ipr_reset_start_timer - Start a timer for adapter reset job
8315 * @ipr_cmd: ipr command struct
8316 * @timeout: timeout value
8317 *
8318 * Description: This function is used in adapter reset processing
8319 * for timing events. If the reset_cmd pointer in the IOA
 8320 * config struct is not this adapter's, we are doing nested
8321 * resets and fail_all_ops will take care of freeing the
8322 * command block.
8323 *
8324 * Return value:
8325 * none
8326 **/
8327static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8328 unsigned long timeout)
8329{
05a6538a 8330
8331 ENTER;
8332 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1da177e4
LT
8333 ipr_cmd->done = ipr_reset_ioa_job;
8334
1da177e4 8335 ipr_cmd->timer.expires = jiffies + timeout;
841b86f3 8336 ipr_cmd->timer.function = ipr_reset_timer_done;
1da177e4
LT
8337 add_timer(&ipr_cmd->timer);
8338}
8339
8340/**
8341 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8342 * @ioa_cfg: ioa cfg struct
8343 *
8344 * Return value:
8345 * nothing
8346 **/
8347static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8348{
05a6538a 8349 struct ipr_hrr_queue *hrrq;
1da177e4 8350
05a6538a 8351 for_each_hrrq(hrrq, ioa_cfg) {
56d6aa33 8352 spin_lock(&hrrq->_lock);
05a6538a 8353 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8354
8355 /* Initialize Host RRQ pointers */
8356 hrrq->hrrq_start = hrrq->host_rrq;
8357 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8358 hrrq->hrrq_curr = hrrq->hrrq_start;
8359 hrrq->toggle_bit = 1;
56d6aa33 8360 spin_unlock(&hrrq->_lock);
05a6538a 8361 }
56d6aa33 8362 wmb();
05a6538a 8363
56d6aa33 8364 ioa_cfg->identify_hrrq_index = 0;
8365 if (ioa_cfg->hrrq_num == 1)
8366 atomic_set(&ioa_cfg->hrrq_index, 0);
8367 else
8368 atomic_set(&ioa_cfg->hrrq_index, 1);
8369
8370 /* Zero out config table */
3e7ebdfa 8371 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8372}
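
/*
 * Illustrative sketch (not part of the driver): each host RRQ entry
 * carries a toggle bit in its low-order bit.  The adapter flips the bit
 * it writes every time it wraps the ring, so a consumer can tell new
 * responses from stale ones by comparing against hrrq->toggle_bit,
 * which ipr_init_ioa_mem() resets to 1 above.  Simplified consumer,
 * ignoring the real ISR's locking and command lookup:
 */
static void example_drain_hrrq(struct ipr_hrr_queue *hrrq)
{
	while ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
	       hrrq->toggle_bit) {
		/* ... handle the response word at *hrrq->hrrq_curr ... */
		if (hrrq->hrrq_curr < hrrq->hrrq_end) {
			hrrq->hrrq_curr++;
		} else {
			hrrq->hrrq_curr = hrrq->hrrq_start;
			hrrq->toggle_bit ^= 1u;	/* wrapped: expect the other bit */
		}
	}
}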
8373
8374/**
8375 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8376 * @ipr_cmd: ipr command struct
8377 *
8378 * Return value:
8379 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8380 **/
8381static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8382{
8383 unsigned long stage, stage_time;
8384 u32 feedback;
8385 volatile u32 int_reg;
8386 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8387 u64 maskval = 0;
8388
8389 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8390 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8391 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8392
8393 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8394
8395 /* sanity check the stage_time value */
8396 if (stage_time == 0)
8397 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8398 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8399 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8400 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8401 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8402
8403 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8404 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8405 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8406 stage_time = ioa_cfg->transop_timeout;
8407 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8408 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8409 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8410 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8411 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8412 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8413 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8414 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8415 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8416 return IPR_RC_JOB_CONTINUE;
8417 }
8418 }
8419
214777ba 8420 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
841b86f3 8421 ipr_cmd->timer.function = ipr_oper_timeout;
8422 ipr_cmd->done = ipr_reset_ioa_job;
8423 add_timer(&ipr_cmd->timer);
05a6538a 8424
8425 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8426
8427 return IPR_RC_JOB_RETURN;
8428}
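
/*
 * Illustrative sketch (not part of the driver): the stage time reported
 * in the feedback register is only a hint, so it is clamped before it is
 * used as a watchdog timeout, exactly as done above.  Hypothetical helper
 * with the same bounds:
 */
static unsigned long example_clamp_stage_time(u32 feedback)
{
	unsigned long t = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;

	if (t == 0)
		t = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
	else if (t < IPR_IPL_INIT_MIN_STAGE_TIME)
		t = IPR_IPL_INIT_MIN_STAGE_TIME;
	else if (t > IPR_LONG_OPERATIONAL_TIMEOUT)
		t = IPR_LONG_OPERATIONAL_TIMEOUT;
	return t;
}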
8429
8430/**
8431 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8432 * @ipr_cmd: ipr command struct
8433 *
8434 * This function reinitializes some control blocks and
8435 * enables destructive diagnostics on the adapter.
8436 *
8437 * Return value:
8438 * IPR_RC_JOB_RETURN
8439 **/
8440static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8441{
8442 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8443 volatile u32 int_reg;
7be96900 8444 volatile u64 maskval;
56d6aa33 8445 int i;
8446
8447 ENTER;
214777ba 8448 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8449 ipr_init_ioa_mem(ioa_cfg);
8450
56d6aa33 8451 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8452 spin_lock(&ioa_cfg->hrrq[i]._lock);
8453 ioa_cfg->hrrq[i].allow_interrupts = 1;
8454 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8455 }
8456 if (ioa_cfg->sis64) {
8457 /* Set the adapter to the correct endian mode. */
8458 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8459 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8460 }
8461
7be96900 8462 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8463
8464 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8465 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
214777ba 8466 ioa_cfg->regs.clr_interrupt_mask_reg32);
8467 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8468 return IPR_RC_JOB_CONTINUE;
8469 }
8470
8471 /* Enable destructive diagnostics on IOA */
8472 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8473
8474 if (ioa_cfg->sis64) {
8475 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8476 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8477 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8478 } else
8479 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
1da177e4 8480
8481 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8482
8483 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8484
8485 if (ioa_cfg->sis64) {
8486 ipr_cmd->job_step = ipr_reset_next_stage;
8487 return IPR_RC_JOB_CONTINUE;
8488 }
8489
5469cb5b 8490 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
841b86f3 8491 ipr_cmd->timer.function = ipr_oper_timeout;
8492 ipr_cmd->done = ipr_reset_ioa_job;
8493 add_timer(&ipr_cmd->timer);
05a6538a 8494 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8495
8496 LEAVE;
8497 return IPR_RC_JOB_RETURN;
8498}
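
/*
 * Illustrative sketch (not part of the driver): on SIS64 adapters the
 * function above clears two 32-bit interrupt-mask words with a single
 * writeq() by packing them into one 64-bit value, upper word first.
 * Hypothetical helper showing the composition:
 */
static void example_clear_mask64(void __iomem *reg, u32 upper, u32 lower)
{
	u64 maskval = ((u64) upper << 32) | lower;

	writeq(maskval, reg);
	/* the driver then reads a sense register to flush the write */
}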
8499
8500/**
8501 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8502 * @ipr_cmd: ipr command struct
8503 *
8504 * This function is invoked when an adapter dump has run out
8505 * of processing time.
8506 *
8507 * Return value:
8508 * IPR_RC_JOB_CONTINUE
8509 **/
8510static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8511{
8512 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8513
8514 if (ioa_cfg->sdt_state == GET_DUMP)
8515 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8516 else if (ioa_cfg->sdt_state == READ_DUMP)
8517 ioa_cfg->sdt_state = ABORT_DUMP;
8518
4c647e90 8519 ioa_cfg->dump_timeout = 1;
8520 ipr_cmd->job_step = ipr_reset_alert;
8521
8522 return IPR_RC_JOB_CONTINUE;
8523}
8524
8525/**
8526 * ipr_unit_check_no_data - Log a unit check/no data error log
8527 * @ioa_cfg: ioa config struct
8528 *
8529 * Logs an error indicating the adapter unit checked, but for some
8530 * reason, we were unable to fetch the unit check buffer.
8531 *
8532 * Return value:
8533 * nothing
8534 **/
8535static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8536{
8537 ioa_cfg->errors_logged++;
8538 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8539}
8540
8541/**
8542 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8543 * @ioa_cfg: ioa config struct
8544 *
8545 * Fetches the unit check buffer from the adapter by clocking the data
8546 * through the mailbox register.
8547 *
8548 * Return value:
8549 * nothing
8550 **/
8551static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8552{
8553 unsigned long mailbox;
8554 struct ipr_hostrcb *hostrcb;
8555 struct ipr_uc_sdt sdt;
8556 int rc, length;
65f56475 8557 u32 ioasc;
8558
8559 mailbox = readl(ioa_cfg->ioa_mailbox);
8560
dcbad00e 8561 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8562 ipr_unit_check_no_data(ioa_cfg);
8563 return;
8564 }
8565
8566 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8567 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8568 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8569
8570 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8571 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8572 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8573 ipr_unit_check_no_data(ioa_cfg);
8574 return;
8575 }
8576
8577 /* Find length of the first sdt entry (UC buffer) */
8578 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8579 length = be32_to_cpu(sdt.entry[0].end_token);
8580 else
8581 length = (be32_to_cpu(sdt.entry[0].end_token) -
8582 be32_to_cpu(sdt.entry[0].start_token)) &
8583 IPR_FMT2_MBX_ADDR_MASK;
8584
8585 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8586 struct ipr_hostrcb, queue);
afc3f83c 8587 list_del_init(&hostrcb->queue);
8588 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8589
8590 rc = ipr_get_ldump_data_section(ioa_cfg,
dcbad00e 8591 be32_to_cpu(sdt.entry[0].start_token),
8592 (__be32 *)&hostrcb->hcam,
8593 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8594
65f56475 8595 if (!rc) {
1da177e4 8596 ipr_handle_log_data(ioa_cfg, hostrcb);
4565e370 8597 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8598 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8599 ioa_cfg->sdt_state == GET_DUMP)
8600 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8601 } else
8602 ipr_unit_check_no_data(ioa_cfg);
8603
8604 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8605}
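
/*
 * Illustrative sketch (not part of the driver): the length of the unit
 * check buffer is taken from the first SDT entry.  Format 3 tables store
 * the byte length directly in end_token; format 2 tables store start and
 * end addresses, so the difference is masked down to the mailbox address
 * width, as in the function above.  Hypothetical helper:
 */
static int example_uc_buffer_len(const struct ipr_uc_sdt *sdt, int fmt3)
{
	if (fmt3)
		return be32_to_cpu(sdt->entry[0].end_token);

	return (be32_to_cpu(sdt->entry[0].end_token) -
		be32_to_cpu(sdt->entry[0].start_token)) & IPR_FMT2_MBX_ADDR_MASK;
}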
8606
8607/**
8608 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8609 * @ipr_cmd: ipr command struct
8610 *
8611 * Description: This function retrieves the unit check buffer from the adapter.
8612 *
8613 * Return value:
8614 * IPR_RC_JOB_RETURN
8615 **/
8616static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8617{
8618 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8619
8620 ENTER;
8621 ioa_cfg->ioa_unit_checked = 0;
8622 ipr_get_unit_check_buffer(ioa_cfg);
8623 ipr_cmd->job_step = ipr_reset_alert;
8624 ipr_reset_start_timer(ipr_cmd, 0);
8625
8626 LEAVE;
8627 return IPR_RC_JOB_RETURN;
8628}
8629
8630static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8631{
8632 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8633
8634 ENTER;
8635
8636 if (ioa_cfg->sdt_state != GET_DUMP)
8637 return IPR_RC_JOB_RETURN;
8638
8639 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8640 (readl(ioa_cfg->regs.sense_interrupt_reg) &
8641 IPR_PCII_MAILBOX_STABLE)) {
8642
8643 if (!ipr_cmd->u.time_left)
8644 dev_err(&ioa_cfg->pdev->dev,
8645 "Timed out waiting for Mailbox register.\n");
8646
8647 ioa_cfg->sdt_state = READ_DUMP;
8648 ioa_cfg->dump_timeout = 0;
8649 if (ioa_cfg->sis64)
8650 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8651 else
8652 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8653 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8654 schedule_work(&ioa_cfg->work_q);
8655
8656 } else {
8657 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8658 ipr_reset_start_timer(ipr_cmd,
8659 IPR_CHECK_FOR_RESET_TIMEOUT);
8660 }
8661
8662 LEAVE;
8663 return IPR_RC_JOB_RETURN;
8664}
8665
8666/**
8667 * ipr_reset_restore_cfg_space - Restore PCI config space.
8668 * @ipr_cmd: ipr command struct
8669 *
8670 * Description: This function restores the saved PCI config space of
8671 * the adapter, fails all outstanding ops back to the callers, and
8672 * fetches the dump/unit check if applicable to this reset.
8673 *
8674 * Return value:
8675 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8676 **/
8677static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8678{
8679 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8680
8681 ENTER;
99c965dd 8682 ioa_cfg->pdev->state_saved = true;
1d3c16a8 8683 pci_restore_state(ioa_cfg->pdev);
8684
8685 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
96d21f00 8686 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8687 return IPR_RC_JOB_CONTINUE;
8688 }
8689
8690 ipr_fail_all_ops(ioa_cfg);
8691
8692 if (ioa_cfg->sis64) {
8693 /* Set the adapter to the correct endian mode. */
8694 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
4dc83399 8695 readl(ioa_cfg->regs.endian_swap_reg);
8696 }
8697
1da177e4 8698 if (ioa_cfg->ioa_unit_checked) {
8699 if (ioa_cfg->sis64) {
8700 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8701 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8702 return IPR_RC_JOB_RETURN;
8703 } else {
8704 ioa_cfg->ioa_unit_checked = 0;
8705 ipr_get_unit_check_buffer(ioa_cfg);
8706 ipr_cmd->job_step = ipr_reset_alert;
8707 ipr_reset_start_timer(ipr_cmd, 0);
8708 return IPR_RC_JOB_RETURN;
8709 }
8710 }
8711
8712 if (ioa_cfg->in_ioa_bringdown) {
8713 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8714 } else if (ioa_cfg->sdt_state == GET_DUMP) {
8715 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8716 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
8717 } else {
8718 ipr_cmd->job_step = ipr_reset_enable_ioa;
8719 }
8720
438b0331 8721 LEAVE;
8722 return IPR_RC_JOB_CONTINUE;
8723}
8724
8725/**
8726 * ipr_reset_bist_done - BIST has completed on the adapter.
8727 * @ipr_cmd: ipr command struct
8728 *
8729 * Description: Unblock config space and resume the reset process.
8730 *
8731 * Return value:
8732 * IPR_RC_JOB_CONTINUE
8733 **/
8734static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8735{
8736 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8737
e619e1a7 8738 ENTER;
8739 if (ioa_cfg->cfg_locked)
8740 pci_cfg_access_unlock(ioa_cfg->pdev);
8741 ioa_cfg->cfg_locked = 0;
8742 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8743 LEAVE;
8744 return IPR_RC_JOB_CONTINUE;
8745}
8746
8747/**
8748 * ipr_reset_start_bist - Run BIST on the adapter.
8749 * @ipr_cmd: ipr command struct
8750 *
8751 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8752 *
8753 * Return value:
8754 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8755 **/
8756static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8757{
8758 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
cb237ef7 8759 int rc = PCIBIOS_SUCCESSFUL;
8760
8761 ENTER;
8762 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8763 writel(IPR_UPROCI_SIS64_START_BIST,
8764 ioa_cfg->regs.set_uproc_interrupt_reg32);
8765 else
8766 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8767
8768 if (rc == PCIBIOS_SUCCESSFUL) {
e619e1a7 8769 ipr_cmd->job_step = ipr_reset_bist_done;
8770 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8771 rc = IPR_RC_JOB_RETURN;
cb237ef7 8772 } else {
8773 if (ioa_cfg->cfg_locked)
8774 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8775 ioa_cfg->cfg_locked = 0;
8776 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8777 rc = IPR_RC_JOB_CONTINUE;
8778 }
8779
8780 LEAVE;
8781 return rc;
8782}
8783
8784/**
8785 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8786 * @ipr_cmd: ipr command struct
8787 *
8788 * Description: This clears PCI reset to the adapter and delays two seconds.
8789 *
8790 * Return value:
8791 * IPR_RC_JOB_RETURN
8792 **/
8793static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8794{
8795 ENTER;
8796 ipr_cmd->job_step = ipr_reset_bist_done;
8797 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8798 LEAVE;
8799 return IPR_RC_JOB_RETURN;
8800}
8801
8802/**
8803 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8804 * @work: work struct
8805 *
8806 * Description: This pulses a warm reset to the slot.
8807 *
8808 **/
8809static void ipr_reset_reset_work(struct work_struct *work)
8810{
8811 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8812 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8813 struct pci_dev *pdev = ioa_cfg->pdev;
8814 unsigned long lock_flags = 0;
8815
8816 ENTER;
8817 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8818 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8819 pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8820
8821 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8822 if (ioa_cfg->reset_cmd == ipr_cmd)
8823 ipr_reset_ioa_job(ipr_cmd);
8824 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8825 LEAVE;
8826}
8827
8828/**
8829 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8830 * @ipr_cmd: ipr command struct
8831 *
8832 * Description: This asserts PCI reset to the adapter.
8833 *
8834 * Return value:
8835 * IPR_RC_JOB_RETURN
8836 **/
8837static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8838{
8839 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8840
8841 ENTER;
8842 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8843 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
463fc696 8844 ipr_cmd->job_step = ipr_reset_slot_reset_done;
8845 LEAVE;
8846 return IPR_RC_JOB_RETURN;
8847}
8848
8849/**
8850 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8851 * @ipr_cmd: ipr command struct
8852 *
8853 * Description: This attempts to block config access to the IOA.
8854 *
8855 * Return value:
8856 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8857 **/
8858static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8859{
8860 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8861 int rc = IPR_RC_JOB_CONTINUE;
8862
8863 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8864 ioa_cfg->cfg_locked = 1;
8865 ipr_cmd->job_step = ioa_cfg->reset;
8866 } else {
8867 if (ipr_cmd->u.time_left) {
8868 rc = IPR_RC_JOB_RETURN;
8869 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8870 ipr_reset_start_timer(ipr_cmd,
8871 IPR_CHECK_FOR_RESET_TIMEOUT);
8872 } else {
8873 ipr_cmd->job_step = ioa_cfg->reset;
8874 dev_err(&ioa_cfg->pdev->dev,
8875 "Timed out waiting to lock config access. Resetting anyway.\n");
8876 }
8877 }
8878
8879 return rc;
8880}
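
/*
 * Illustrative sketch (not part of the driver): the step above is one
 * instance of the reset job's "poll with a timer" idiom -- attempt an
 * operation that may not yet be possible, and if it fails, keep the same
 * job_step, re-arm the timer and retry until the time budget in
 * ipr_cmd->u.time_left runs out.  Skeleton of the idiom with a
 * hypothetical try_operation():
 */
static int example_poll_step(struct ipr_cmnd *ipr_cmd, bool (*try_operation)(void))
{
	if (try_operation() || !ipr_cmd->u.time_left)
		return IPR_RC_JOB_CONTINUE;	/* succeeded, or give up and move on */

	ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	return IPR_RC_JOB_RETURN;		/* same step runs again when the timer fires */
}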
8881
8882/**
8883 * ipr_reset_block_config_access - Block config access to the IOA
8884 * @ipr_cmd: ipr command struct
8885 *
8886 * Description: This attempts to block config access to the IOA
8887 *
8888 * Return value:
8889 * IPR_RC_JOB_CONTINUE
8890 **/
8891static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8892{
8893 ipr_cmd->ioa_cfg->cfg_locked = 0;
8894 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8895 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8896 return IPR_RC_JOB_CONTINUE;
8897}
8898
8899/**
8900 * ipr_reset_allowed - Query whether or not IOA can be reset
8901 * @ioa_cfg: ioa config struct
8902 *
8903 * Return value:
8904 * 0 if reset not allowed / non-zero if reset is allowed
8905 **/
8906static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8907{
8908 volatile u32 temp_reg;
8909
8910 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8911 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8912}
8913
8914/**
8915 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8916 * @ipr_cmd: ipr command struct
8917 *
8918 * Description: This function waits for adapter permission to run BIST,
8919 * then runs BIST. If the adapter does not give permission after a
8920 * reasonable time, we will reset the adapter anyway. The impact of
8921 * resetting the adapter without warning the adapter is the risk of
8922 * losing the persistent error log on the adapter. If the adapter is
8923 * reset while it is writing to the flash on the adapter, the flash
8924 * segment will have bad ECC and be zeroed.
8925 *
8926 * Return value:
8927 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8928 **/
8929static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8930{
8931 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8932 int rc = IPR_RC_JOB_RETURN;
8933
8934 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8935 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8936 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8937 } else {
fb51ccbf 8938 ipr_cmd->job_step = ipr_reset_block_config_access;
8939 rc = IPR_RC_JOB_CONTINUE;
8940 }
8941
8942 return rc;
8943}
8944
8945/**
8701f185 8946 * ipr_reset_alert - Alert the adapter of a pending reset
8947 * @ipr_cmd: ipr command struct
8948 *
8949 * Description: This function alerts the adapter that it will be reset.
8950 * If memory space is not currently enabled, proceed directly
8951 * to running BIST on the adapter. The timer must always be started
8952 * so we guarantee we do not run BIST from ipr_isr.
8953 *
8954 * Return value:
8955 * IPR_RC_JOB_RETURN
8956 **/
8957static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8958{
8959 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8960 u16 cmd_reg;
8961 int rc;
8962
8963 ENTER;
8964 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8965
8966 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8967 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
214777ba 8968 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8969 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8970 } else {
fb51ccbf 8971 ipr_cmd->job_step = ipr_reset_block_config_access;
8972 }
8973
8974 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8975 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8976
8977 LEAVE;
8978 return IPR_RC_JOB_RETURN;
8979}
8980
8981/**
8982 * ipr_reset_quiesce_done - Complete IOA disconnect
8983 * @ipr_cmd: ipr command struct
8984 *
8985 * Description: Freeze the adapter to complete quiesce processing
8986 *
8987 * Return value:
8988 * IPR_RC_JOB_CONTINUE
8989 **/
8990static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8991{
8992 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8993
8994 ENTER;
8995 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8996 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8997 LEAVE;
8998 return IPR_RC_JOB_CONTINUE;
8999}
9000
9001/**
9002 * ipr_reset_cancel_hcam_done - Check for outstanding commands
9003 * @ipr_cmd: ipr command struct
9004 *
9005 * Description: Ensure nothing is outstanding to the IOA and
9006 * proceed with IOA disconnect. Otherwise reset the IOA.
9007 *
9008 * Return value:
9009 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
9010 **/
9011static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
9012{
9013 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9014 struct ipr_cmnd *loop_cmd;
9015 struct ipr_hrr_queue *hrrq;
9016 int rc = IPR_RC_JOB_CONTINUE;
9017 int count = 0;
9018
9019 ENTER;
9020 ipr_cmd->job_step = ipr_reset_quiesce_done;
9021
9022 for_each_hrrq(hrrq, ioa_cfg) {
9023 spin_lock(&hrrq->_lock);
9024 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9025 count++;
9026 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9027 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9028 rc = IPR_RC_JOB_RETURN;
9029 break;
9030 }
9031 spin_unlock(&hrrq->_lock);
9032
9033 if (count)
9034 break;
9035 }
9036
9037 LEAVE;
9038 return rc;
9039}
9040
9041/**
9042 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9043 * @ipr_cmd: ipr command struct
9044 *
9045 * Description: Cancel any outstanding HCAMs to the IOA.
9046 *
9047 * Return value:
9048 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9049 **/
9050static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9051{
9052 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9053 int rc = IPR_RC_JOB_CONTINUE;
9054 struct ipr_cmd_pkt *cmd_pkt;
9055 struct ipr_cmnd *hcam_cmd;
9056 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9057
9058 ENTER;
9059 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9060
9061 if (!hrrq->ioa_is_dead) {
9062 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9063 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9064 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9065 continue;
9066
9067 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9068 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9069 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9070 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9071 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9072 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
9073 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9074 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9075 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9076 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9077 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9078 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9079 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9080 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9081
9082 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9083 IPR_CANCEL_TIMEOUT);
9084
9085 rc = IPR_RC_JOB_RETURN;
9086 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9087 break;
9088 }
9089 }
9090 } else
9091 ipr_cmd->job_step = ipr_reset_alert;
9092
9093 LEAVE;
9094 return rc;
9095}
9096
9097/**
9098 * ipr_reset_ucode_download_done - Microcode download completion
9099 * @ipr_cmd: ipr command struct
9100 *
9101 * Description: This function unmaps the microcode download buffer.
9102 *
9103 * Return value:
9104 * IPR_RC_JOB_CONTINUE
9105 **/
9106static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9107{
9108 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9109 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9110
d73341bf 9111 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
9112 sglist->num_sg, DMA_TO_DEVICE);
9113
9114 ipr_cmd->job_step = ipr_reset_alert;
9115 return IPR_RC_JOB_CONTINUE;
9116}
9117
9118/**
9119 * ipr_reset_ucode_download - Download microcode to the adapter
9120 * @ipr_cmd: ipr command struct
9121 *
9122 * Description: This function checks to see if it there is microcode
9123 * to download to the adapter. If there is, a download is performed.
9124 *
9125 * Return value:
9126 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9127 **/
9128static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9129{
9130 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9131 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9132
9133 ENTER;
9134 ipr_cmd->job_step = ipr_reset_alert;
9135
9136 if (!sglist)
9137 return IPR_RC_JOB_CONTINUE;
9138
9139 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9140 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9141 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9142 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9143 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9144 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9145 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9146
9147 if (ioa_cfg->sis64)
9148 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9149 else
9150 ipr_build_ucode_ioadl(ipr_cmd, sglist);
9151 ipr_cmd->job_step = ipr_reset_ucode_download_done;
9152
9153 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9154 IPR_WRITE_BUFFER_TIMEOUT);
9155
9156 LEAVE;
9157 return IPR_RC_JOB_RETURN;
9158}
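
/*
 * Illustrative sketch (not part of the driver): WRITE BUFFER carries a
 * 24-bit parameter list length in CDB bytes 6-8, most significant byte
 * first, which is what the shifts above compute.  Hypothetical helper:
 */
static void example_set_write_buffer_len(u8 *cdb, u32 len)
{
	cdb[6] = (len >> 16) & 0xff;	/* bits 23..16 */
	cdb[7] = (len >> 8) & 0xff;	/* bits 15..8 */
	cdb[8] = len & 0xff;		/* bits 7..0 */
}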
9159
9160/**
9161 * ipr_reset_shutdown_ioa - Shutdown the adapter
9162 * @ipr_cmd: ipr command struct
9163 *
9164 * Description: This function issues an adapter shutdown of the
9165 * specified type to the specified adapter as part of the
9166 * adapter reset job.
9167 *
9168 * Return value:
9169 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9170 **/
9171static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9172{
9173 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9174 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9175 unsigned long timeout;
9176 int rc = IPR_RC_JOB_CONTINUE;
9177
9178 ENTER;
9179 if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9180 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9181 else if (shutdown_type != IPR_SHUTDOWN_NONE &&
56d6aa33 9182 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
9183 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9184 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9185 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9186 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9187
9188 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9189 timeout = IPR_SHUTDOWN_TIMEOUT;
9190 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9191 timeout = IPR_INTERNAL_TIMEOUT;
9192 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9193 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
1da177e4 9194 else
ac09c349 9195 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
9196
9197 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9198
9199 rc = IPR_RC_JOB_RETURN;
9200 ipr_cmd->job_step = ipr_reset_ucode_download;
9201 } else
9202 ipr_cmd->job_step = ipr_reset_alert;
9203
9204 LEAVE;
9205 return rc;
9206}
9207
9208/**
9209 * ipr_reset_ioa_job - Adapter reset job
9210 * @ipr_cmd: ipr command struct
9211 *
9212 * Description: This function is the job router for the adapter reset job.
9213 *
9214 * Return value:
9215 * none
9216 **/
9217static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9218{
9219 u32 rc, ioasc;
9220 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9221
9222 do {
96d21f00 9223 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
9224
9225 if (ioa_cfg->reset_cmd != ipr_cmd) {
9226 /*
9227 * We are doing nested adapter resets and this is
9228 * not the current reset job.
9229 */
05a6538a 9230 list_add_tail(&ipr_cmd->queue,
9231 &ipr_cmd->hrrq->hrrq_free_q);
9232 return;
9233 }
9234
9235 if (IPR_IOASC_SENSE_KEY(ioasc)) {
dfed823e 9236 rc = ipr_cmd->job_step_failed(ipr_cmd);
9237 if (rc == IPR_RC_JOB_RETURN)
9238 return;
9239 }
9240
9241 ipr_reinit_ipr_cmnd(ipr_cmd);
dfed823e 9242 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
1da177e4 9243 rc = ipr_cmd->job_step(ipr_cmd);
203fa3fe 9244 } while (rc == IPR_RC_JOB_CONTINUE);
9245}
9246
9247/**
9248 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9249 * @ioa_cfg: ioa config struct
9250 * @job_step: first job step of reset job
9251 * @shutdown_type: shutdown type
9252 *
9253 * Description: This function will initiate the reset of the given adapter
9254 * starting at the selected job step.
9255 * If the caller needs to wait on the completion of the reset,
9256 * the caller must sleep on the reset_wait_q.
9257 *
9258 * Return value:
9259 * none
9260 **/
9261static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9262 int (*job_step) (struct ipr_cmnd *),
9263 enum ipr_shutdown_type shutdown_type)
9264{
9265 struct ipr_cmnd *ipr_cmd;
56d6aa33 9266 int i;
9267
9268 ioa_cfg->in_reset_reload = 1;
56d6aa33 9269 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9270 spin_lock(&ioa_cfg->hrrq[i]._lock);
9271 ioa_cfg->hrrq[i].allow_cmds = 0;
9272 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9273 }
9274 wmb();
9275 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9276 ioa_cfg->scsi_unblock = 0;
9277 ioa_cfg->scsi_blocked = 1;
bfae7820 9278 scsi_block_requests(ioa_cfg->host);
b0e17a9b 9279 }
9280
9281 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9282 ioa_cfg->reset_cmd = ipr_cmd;
9283 ipr_cmd->job_step = job_step;
9284 ipr_cmd->u.shutdown_type = shutdown_type;
9285
9286 ipr_reset_ioa_job(ipr_cmd);
9287}
9288
9289/**
9290 * ipr_initiate_ioa_reset - Initiate an adapter reset
9291 * @ioa_cfg: ioa config struct
9292 * @shutdown_type: shutdown type
9293 *
9294 * Description: This function will initiate the reset of the given adapter.
9295 * If the caller needs to wait on the completion of the reset,
9296 * the caller must sleep on the reset_wait_q.
9297 *
9298 * Return value:
9299 * none
9300 **/
9301static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9302 enum ipr_shutdown_type shutdown_type)
9303{
56d6aa33 9304 int i;
9305
9306 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
9307 return;
9308
9309 if (ioa_cfg->in_reset_reload) {
9310 if (ioa_cfg->sdt_state == GET_DUMP)
9311 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9312 else if (ioa_cfg->sdt_state == READ_DUMP)
9313 ioa_cfg->sdt_state = ABORT_DUMP;
9314 }
9315
9316 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9317 dev_err(&ioa_cfg->pdev->dev,
9318 "IOA taken offline - error recovery failed\n");
9319
9320 ioa_cfg->reset_retries = 0;
56d6aa33 9321 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9322 spin_lock(&ioa_cfg->hrrq[i]._lock);
9323 ioa_cfg->hrrq[i].ioa_is_dead = 1;
9324 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9325 }
9326 wmb();
9327
9328 if (ioa_cfg->in_ioa_bringdown) {
9329 ioa_cfg->reset_cmd = NULL;
9330 ioa_cfg->in_reset_reload = 0;
9331 ipr_fail_all_ops(ioa_cfg);
9332 wake_up_all(&ioa_cfg->reset_wait_q);
9333
bfae7820 9334 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9335 ioa_cfg->scsi_unblock = 1;
9336 schedule_work(&ioa_cfg->work_q);
bfae7820 9337 }
9338 return;
9339 } else {
9340 ioa_cfg->in_ioa_bringdown = 1;
9341 shutdown_type = IPR_SHUTDOWN_NONE;
9342 }
9343 }
9344
9345 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9346 shutdown_type);
9347}
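
/*
 * Illustrative sketch (not part of the driver): as the comment above
 * says, a caller that must wait for the reset to finish starts it under
 * the host lock and then sleeps on reset_wait_q until in_reset_reload
 * clears.  Hypothetical caller using a normal shutdown:
 */
static void example_reset_and_wait(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}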
9348
9349/**
9350 * ipr_reset_freeze - Hold off all I/O activity
9351 * @ipr_cmd: ipr command struct
9352 *
9353 * Description: If the PCI slot is frozen, hold off all I/O
9354 * activity; then, as soon as the slot is available again,
9355 * initiate an adapter reset.
9356 */
9357static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9358{
56d6aa33 9359 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9360 int i;
9361
f8a88b19 9362 /* Disallow new interrupts, avoid loop */
56d6aa33 9363 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9364 spin_lock(&ioa_cfg->hrrq[i]._lock);
9365 ioa_cfg->hrrq[i].allow_interrupts = 0;
9366 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9367 }
9368 wmb();
05a6538a 9369 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
9370 ipr_cmd->done = ipr_reset_ioa_job;
9371 return IPR_RC_JOB_RETURN;
9372}
9373
9374/**
9375 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9376 * @pdev: PCI device struct
9377 *
9378 * Description: This routine is called to tell us that the MMIO
9379 * access to the IOA has been restored
9380 */
9381static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9382{
9383 unsigned long flags = 0;
9384 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9385
9386 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9387 if (!ioa_cfg->probe_done)
9388 pci_save_state(pdev);
9389 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9390 return PCI_ERS_RESULT_NEED_RESET;
9391}
9392
9393/**
9394 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9395 * @pdev: PCI device struct
9396 *
9397 * Description: This routine is called to tell us that the PCI bus
9398 * is down. Can't do anything here, except put the device driver
9399 * into a holding pattern, waiting for the PCI bus to come back.
9400 */
9401static void ipr_pci_frozen(struct pci_dev *pdev)
9402{
9403 unsigned long flags = 0;
9404 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9405
9406 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9407 if (ioa_cfg->probe_done)
9408 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9409 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9410}
9411
9412/**
9413 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9414 * @pdev: PCI device struct
9415 *
9416 * Description: This routine is called by the pci error recovery
9417 * code after the PCI slot has been reset, just before we
9418 * should resume normal operations.
9419 */
9420static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9421{
9422 unsigned long flags = 0;
9423 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9424
9425 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9426 if (ioa_cfg->probe_done) {
9427 if (ioa_cfg->needs_warm_reset)
9428 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9429 else
9430 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9431 IPR_SHUTDOWN_NONE);
9432 } else
9433 wake_up_all(&ioa_cfg->eeh_wait_q);
9434 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9435 return PCI_ERS_RESULT_RECOVERED;
9436}
9437
9438/**
9439 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9440 * @pdev: PCI device struct
9441 *
9442 * Description: This routine is called when the PCI bus has
9443 * permanently failed.
9444 */
9445static void ipr_pci_perm_failure(struct pci_dev *pdev)
9446{
9447 unsigned long flags = 0;
9448 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
56d6aa33 9449 int i;
9450
9451 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9452 if (ioa_cfg->probe_done) {
9453 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9454 ioa_cfg->sdt_state = ABORT_DUMP;
9455 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9456 ioa_cfg->in_ioa_bringdown = 1;
9457 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9458 spin_lock(&ioa_cfg->hrrq[i]._lock);
9459 ioa_cfg->hrrq[i].allow_cmds = 0;
9460 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9461 }
9462 wmb();
9463 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9464 } else
9465 wake_up_all(&ioa_cfg->eeh_wait_q);
9466 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9467}
9468
9469/**
9470 * ipr_pci_error_detected - Called when a PCI error is detected.
9471 * @pdev: PCI device struct
9472 * @state: PCI channel state
9473 *
9474 * Description: Called when a PCI error is detected.
9475 *
9476 * Return value:
9477 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9478 */
9479static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9480 pci_channel_state_t state)
9481{
9482 switch (state) {
9483 case pci_channel_io_frozen:
9484 ipr_pci_frozen(pdev);
6270e593 9485 return PCI_ERS_RESULT_CAN_RECOVER;
9486 case pci_channel_io_perm_failure:
9487 ipr_pci_perm_failure(pdev);
9488 return PCI_ERS_RESULT_DISCONNECT;
9489 default:
9490 break;
9491 }
9492 return PCI_ERS_RESULT_NEED_RESET;
9493}
9494
9495/**
9496 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9497 * @ioa_cfg: ioa cfg struct
9498 *
183b8021 9499 * Description: This is the second phase of adapter initialization.
9500 * This function takes care of initializing the adapter to the point
9501 * where it can accept new commands.
1da177e4 9502 * Return value:
b1c11812 9503 * 0 on success / -EIO on failure
1da177e4 9504 **/
6f039790 9505static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9506{
9507 int rc = 0;
9508 unsigned long host_lock_flags = 0;
9509
9510 ENTER;
9511 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9512 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
6270e593 9513 ioa_cfg->probe_done = 1;
ce155cce 9514 if (ioa_cfg->needs_hard_reset) {
9515 ioa_cfg->needs_hard_reset = 0;
9516 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9517 } else
9518 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9519 IPR_SHUTDOWN_NONE);
1da177e4 9520 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9521
9522 LEAVE;
9523 return rc;
9524}
9525
9526/**
9527 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9528 * @ioa_cfg: ioa config struct
9529 *
9530 * Return value:
9531 * none
9532 **/
9533static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9534{
9535 int i;
9536
9537 if (ioa_cfg->ipr_cmnd_list) {
9538 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9539 if (ioa_cfg->ipr_cmnd_list[i])
9540 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9541 ioa_cfg->ipr_cmnd_list[i],
9542 ioa_cfg->ipr_cmnd_list_dma[i]);
1da177e4 9543
9544 ioa_cfg->ipr_cmnd_list[i] = NULL;
9545 }
9546 }
9547
2e9ef0fc 9548 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
1da177e4 9549
9550 kfree(ioa_cfg->ipr_cmnd_list);
9551 kfree(ioa_cfg->ipr_cmnd_list_dma);
9552 ioa_cfg->ipr_cmnd_list = NULL;
9553 ioa_cfg->ipr_cmnd_list_dma = NULL;
9554 ioa_cfg->ipr_cmd_pool = NULL;
9555}
9556
9557/**
9558 * ipr_free_mem - Frees memory allocated for an adapter
9559 * @ioa_cfg: ioa cfg struct
9560 *
9561 * Return value:
9562 * nothing
9563 **/
9564static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9565{
9566 int i;
9567
9568 kfree(ioa_cfg->res_entries);
9569 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9570 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
1da177e4 9571 ipr_free_cmd_blks(ioa_cfg);
05a6538a 9572
9573 for (i = 0; i < ioa_cfg->hrrq_num; i++)
9574 dma_free_coherent(&ioa_cfg->pdev->dev,
9575 sizeof(u32) * ioa_cfg->hrrq[i].size,
9576 ioa_cfg->hrrq[i].host_rrq,
9577 ioa_cfg->hrrq[i].host_rrq_dma);
05a6538a 9578
9579 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9580 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
1da177e4 9581
afc3f83c 9582 for (i = 0; i < IPR_MAX_HCAMS; i++) {
9583 dma_free_coherent(&ioa_cfg->pdev->dev,
9584 sizeof(struct ipr_hostrcb),
9585 ioa_cfg->hostrcb[i],
9586 ioa_cfg->hostrcb_dma[i]);
9587 }
9588
9589 ipr_free_dump(ioa_cfg);
9590 kfree(ioa_cfg->trace);
9591}
9592
9593/**
9594 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9595 * @ioa_cfg: ipr cfg struct
1da177e4 9596 *
2796ca5e 9597 * This function frees all allocated IRQs for the
9598 * specified adapter.
9599 *
9600 * Return value:
9601 * none
9602 **/
2796ca5e 9603static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9604{
9605 struct pci_dev *pdev = ioa_cfg->pdev;
a299ee62 9606 int i;
1da177e4 9607
9608 for (i = 0; i < ioa_cfg->nvectors; i++)
9609 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9610 pci_free_irq_vectors(pdev);
2796ca5e 9611}
05a6538a 9612
9613/**
9614 * ipr_free_all_resources - Free all allocated resources for an adapter.
a96099e2 9615 * @ioa_cfg: ioa config struct
9616 *
9617 * This function frees all allocated resources for the
9618 * specified adapter.
9619 *
9620 * Return value:
9621 * none
9622 **/
9623static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9624{
9625 struct pci_dev *pdev = ioa_cfg->pdev;
05a6538a 9626
9627 ENTER;
9628 ipr_free_irqs(ioa_cfg);
9629 if (ioa_cfg->reset_work_q)
9630 destroy_workqueue(ioa_cfg->reset_work_q);
9631 iounmap(ioa_cfg->hdw_dma_regs);
9632 pci_release_regions(pdev);
9633 ipr_free_mem(ioa_cfg);
9634 scsi_host_put(ioa_cfg->host);
9635 pci_disable_device(pdev);
9636 LEAVE;
9637}
9638
9639/**
9640 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9641 * @ioa_cfg: ioa config struct
9642 *
9643 * Return value:
9644 * 0 on success / -ENOMEM on allocation failure
9645 **/
6f039790 9646static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9647{
9648 struct ipr_cmnd *ipr_cmd;
9649 struct ipr_ioarcb *ioarcb;
9650 dma_addr_t dma_addr;
05a6538a 9651 int i, entries_each_hrrq, hrrq_id = 0;
1da177e4 9652
d73341bf 9653 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
203fa3fe 9654 sizeof(struct ipr_cmnd), 512, 0);
9655
9656 if (!ioa_cfg->ipr_cmd_pool)
9657 return -ENOMEM;
9658
9659 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9660 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9661
9662 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9663 ipr_free_cmd_blks(ioa_cfg);
9664 return -ENOMEM;
9665 }
9666
05a6538a 9667 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9668 if (ioa_cfg->hrrq_num > 1) {
9669 if (i == 0) {
9670 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9671 ioa_cfg->hrrq[i].min_cmd_id = 0;
9672 ioa_cfg->hrrq[i].max_cmd_id =
9673 (entries_each_hrrq - 1);
05a6538a 9674 } else {
9675 entries_each_hrrq =
9676 IPR_NUM_BASE_CMD_BLKS/
9677 (ioa_cfg->hrrq_num - 1);
9678 ioa_cfg->hrrq[i].min_cmd_id =
9679 IPR_NUM_INTERNAL_CMD_BLKS +
9680 (i - 1) * entries_each_hrrq;
9681 ioa_cfg->hrrq[i].max_cmd_id =
9682 (IPR_NUM_INTERNAL_CMD_BLKS +
9683 i * entries_each_hrrq - 1);
9684 }
9685 } else {
9686 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9687 ioa_cfg->hrrq[i].min_cmd_id = 0;
9688 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9689 }
9690 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9691 }
9692
9693 BUG_ON(ioa_cfg->hrrq_num == 0);
9694
9695 i = IPR_NUM_CMD_BLKS -
9696 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9697 if (i > 0) {
9698 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9699 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9700 }
9701
1da177e4 9702 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9703 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9704 GFP_KERNEL, &dma_addr);
9705
9706 if (!ipr_cmd) {
9707 ipr_free_cmd_blks(ioa_cfg);
9708 return -ENOMEM;
9709 }
9710
9711 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9712 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9713
9714 ioarcb = &ipr_cmd->ioarcb;
9715 ipr_cmd->dma_addr = dma_addr;
9716 if (ioa_cfg->sis64)
9717 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9718 else
9719 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9720
1da177e4 9721 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9722 if (ioa_cfg->sis64) {
9723 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9724 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9725 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
96d21f00 9726 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9727 } else {
9728 ioarcb->write_ioadl_addr =
9729 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9730 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9731 ioarcb->ioasa_host_pci_addr =
96d21f00 9732 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
a32c055f 9733 }
9734 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9735 ipr_cmd->cmd_index = i;
9736 ipr_cmd->ioa_cfg = ioa_cfg;
9737 ipr_cmd->sense_buffer_dma = dma_addr +
9738 offsetof(struct ipr_cmnd, sense_buffer);
9739
05a6538a 9740 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9741 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9742 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9743 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9744 hrrq_id++;
9745 }
9746
9747 return 0;
9748}
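
/*
 * Illustrative worked example (hypothetical numbers, not the real
 * constants): with 4 internal command blocks, 101 base command blocks
 * and 5 HRRQs, the partitioning above gives 101 / 4 = 25 ids per extra
 * queue:
 *
 *   hrrq[0]: ids 0..3     (internal commands only)
 *   hrrq[1]: ids 4..28
 *   hrrq[2]: ids 29..53
 *   hrrq[3]: ids 54..78
 *   hrrq[4]: ids 79..103, then the one leftover id (104) is folded in,
 *            giving 79..104 so that every command block is reachable.
 */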
9749
9750/**
9751 * ipr_alloc_mem - Allocate memory for an adapter
9752 * @ioa_cfg: ioa config struct
9753 *
9754 * Return value:
9755 * 0 on success / non-zero for error
9756 **/
6f039790 9757static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9758{
9759 struct pci_dev *pdev = ioa_cfg->pdev;
9760 int i, rc = -ENOMEM;
9761
9762 ENTER;
9763 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9764 sizeof(struct ipr_resource_entry),
9765 GFP_KERNEL);
9766
9767 if (!ioa_cfg->res_entries)
9768 goto out;
9769
3e7ebdfa 9770 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
1da177e4 9771 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9772 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9773 }
1da177e4 9774
9775 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9776 sizeof(struct ipr_misc_cbs),
9777 &ioa_cfg->vpd_cbs_dma,
9778 GFP_KERNEL);
9779
9780 if (!ioa_cfg->vpd_cbs)
9781 goto out_free_res_entries;
9782
9783 if (ipr_alloc_cmd_blks(ioa_cfg))
9784 goto out_free_vpd_cbs;
9785
05a6538a 9786 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
d73341bf 9787 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
05a6538a 9788 sizeof(u32) * ioa_cfg->hrrq[i].size,
9789 &ioa_cfg->hrrq[i].host_rrq_dma,
9790 GFP_KERNEL);
05a6538a 9791
9792 if (!ioa_cfg->hrrq[i].host_rrq) {
d64c4919 9793 while (--i >= 0)
d73341bf 9794 dma_free_coherent(&pdev->dev,
05a6538a 9795 sizeof(u32) * ioa_cfg->hrrq[i].size,
9796 ioa_cfg->hrrq[i].host_rrq,
9797 ioa_cfg->hrrq[i].host_rrq_dma);
9798 goto out_ipr_free_cmd_blocks;
9799 }
9800 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9801 }
1da177e4 9802
9803 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9804 ioa_cfg->cfg_table_size,
9805 &ioa_cfg->cfg_table_dma,
9806 GFP_KERNEL);
1da177e4 9807
3e7ebdfa 9808 if (!ioa_cfg->u.cfg_table)
9809 goto out_free_host_rrq;
9810
afc3f83c 9811 for (i = 0; i < IPR_MAX_HCAMS; i++) {
9812 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9813 sizeof(struct ipr_hostrcb),
9814 &ioa_cfg->hostrcb_dma[i],
9815 GFP_KERNEL);
9816
9817 if (!ioa_cfg->hostrcb[i])
9818 goto out_free_hostrcb_dma;
9819
9820 ioa_cfg->hostrcb[i]->hostrcb_dma =
9821 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
49dc6a18 9822 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9823 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9824 }
9825
9826 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9827 sizeof(struct ipr_trace_entry),
9828 GFP_KERNEL);
9829
9830 if (!ioa_cfg->trace)
9831 goto out_free_hostrcb_dma;
9832
9833 rc = 0;
9834out:
9835 LEAVE;
9836 return rc;
9837
9838out_free_hostrcb_dma:
9839 while (i-- > 0) {
9840 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9841 ioa_cfg->hostrcb[i],
9842 ioa_cfg->hostrcb_dma[i]);
1da177e4 9843 }
9844 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9845 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
1da177e4 9846out_free_host_rrq:
05a6538a 9847 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9848 dma_free_coherent(&pdev->dev,
9849 sizeof(u32) * ioa_cfg->hrrq[i].size,
9850 ioa_cfg->hrrq[i].host_rrq,
9851 ioa_cfg->hrrq[i].host_rrq_dma);
05a6538a 9852 }
9853out_ipr_free_cmd_blocks:
9854 ipr_free_cmd_blks(ioa_cfg);
9855out_free_vpd_cbs:
9856 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9857 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9858out_free_res_entries:
9859 kfree(ioa_cfg->res_entries);
9860 goto out;
9861}
9862
9863/**
9864 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9865 * @ioa_cfg: ioa config struct
9866 *
9867 * Return value:
9868 * none
9869 **/
6f039790 9870static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9871{
9872 int i;
9873
9874 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9875 ioa_cfg->bus_attr[i].bus = i;
9876 ioa_cfg->bus_attr[i].qas_enabled = 0;
9877 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9878 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9879 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9880 else
9881 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9882 }
9883}
9884
9885/**
9886 * ipr_init_regs - Initialize IOA registers
9887 * @ioa_cfg: ioa config struct
9888 *
9889 * Return value:
9890 * none
9891 **/
9892static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9893{
9894 const struct ipr_interrupt_offsets *p;
9895 struct ipr_interrupts *t;
9896 void __iomem *base;
9897
9898 p = &ioa_cfg->chip_cfg->regs;
9899 t = &ioa_cfg->regs;
9900 base = ioa_cfg->hdw_dma_regs;
9901
9902 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9903 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9904 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9905 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9906 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9907 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9908 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9909 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9910 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9911 t->ioarrin_reg = base + p->ioarrin_reg;
9912 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9913 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9914 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9915 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9916 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9917 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9918
9919 if (ioa_cfg->sis64) {
9920 t->init_feedback_reg = base + p->init_feedback_reg;
9921 t->dump_addr_reg = base + p->dump_addr_reg;
9922 t->dump_data_reg = base + p->dump_data_reg;
9923 t->endian_swap_reg = base + p->endian_swap_reg;
9924 }
9925}
9926
9927/**
9928 * ipr_init_ioa_cfg - Initialize IOA config struct
9929 * @ioa_cfg: ioa config struct
9930 * @host: scsi host struct
9931 * @pdev: PCI dev struct
9932 *
9933 * Return value:
9934 * none
9935 **/
9936static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9937 struct Scsi_Host *host, struct pci_dev *pdev)
1da177e4 9938{
6270e593 9939 int i;
9940
9941 ioa_cfg->host = host;
9942 ioa_cfg->pdev = pdev;
9943 ioa_cfg->log_level = ipr_log_level;
3d1d0da6 9944 ioa_cfg->doorbell = IPR_DOORBELL;
9945 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9946 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9947 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9948 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9949 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9950 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9951
9952 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9953 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
afc3f83c 9954 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9955 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9956 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
c4028958 9957 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
318ddb34 9958 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
1da177e4 9959 init_waitqueue_head(&ioa_cfg->reset_wait_q);
95fecd90 9960 init_waitqueue_head(&ioa_cfg->msi_wait_q);
6270e593 9961 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9962 ioa_cfg->sdt_state = INACTIVE;
9963
9964 ipr_initialize_bus_attr(ioa_cfg);
3e7ebdfa 9965 ioa_cfg->max_devs_supported = ipr_max_devs;
1da177e4 9966
3e7ebdfa 9967 if (ioa_cfg->sis64) {
394b6171 9968 host->max_channel = IPR_MAX_SIS64_BUSES;
9969 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9970 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9971 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9972 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9973 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9974 + ((sizeof(struct ipr_config_table_entry64)
9975 * ioa_cfg->max_devs_supported)));
3e7ebdfa 9976 } else {
394b6171 9977 host->max_channel = IPR_VSET_BUS;
9978 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9979 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9980 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9981 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9982 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9983 + ((sizeof(struct ipr_config_table_entry)
9984 * ioa_cfg->max_devs_supported)));
3e7ebdfa 9985 }
6270e593 9986
9987 host->unique_id = host->host_no;
9988 host->max_cmd_len = IPR_MAX_CDB_LEN;
89aad428 9989 host->can_queue = ioa_cfg->max_cmds;
9990 pci_set_drvdata(pdev, ioa_cfg);
9991
9992 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9993 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9994 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9995 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9996 if (i == 0)
9997 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9998 else
9999 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
dcbad00e 10000 }
10001}
10002
10003/**
1be7bd82 10004 * ipr_get_chip_info - Find adapter chip information
10005 * @dev_id: PCI device id struct
10006 *
10007 * Return value:
1be7bd82 10008 * ptr to chip information on success / NULL on failure
1da177e4 10009 **/
6f039790 10010static const struct ipr_chip_t *
1be7bd82 10011ipr_get_chip_info(const struct pci_device_id *dev_id)
10012{
10013 int i;
10014
10015 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
10016 if (ipr_chip[i].vendor == dev_id->vendor &&
10017 ipr_chip[i].device == dev_id->device)
1be7bd82 10018 return &ipr_chip[i];
10019 return NULL;
10020}
10021
10022/**
10023 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10024 * during probe time
10025 * @ioa_cfg: ioa config struct
10026 *
10027 * Return value:
10028 * None
10029 **/
10030static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10031{
10032 struct pci_dev *pdev = ioa_cfg->pdev;
10033
10034 if (pci_channel_offline(pdev)) {
10035 wait_event_timeout(ioa_cfg->eeh_wait_q,
10036 !pci_channel_offline(pdev),
10037 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10038 pci_restore_state(pdev);
10039 }
10040}
10041
05a6538a 10042static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10043{
10044 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10045
10046 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10047 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10048 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10049 ioa_cfg->vectors_info[vec_idx].
10050 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10051 }
10052}
10053
10054static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10055 struct pci_dev *pdev)
05a6538a 10056{
10057 int i, rc;
10058
10059 for (i = 1; i < ioa_cfg->nvectors; i++) {
a299ee62 10060 rc = request_irq(pci_irq_vector(pdev, i),
05a6538a 10061 ipr_isr_mhrrq,
10062 0,
10063 ioa_cfg->vectors_info[i].desc,
10064 &ioa_cfg->hrrq[i]);
10065 if (rc) {
d64c4919 10066 while (--i > 0)
a299ee62 10067 free_irq(pci_irq_vector(pdev, i),
05a6538a 10068 &ioa_cfg->hrrq[i]);
10069 return rc;
10070 }
10071 }
10072 return 0;
10073}
10074
95fecd90
WB
10075/**
10076 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
a96099e2
LJ
 10077 * @devp: ioa config struct (the dev_id cookie passed to request_irq())
10078 * @irq: IRQ number
95fecd90
WB
10079 *
10080 * Description: Simply set the msi_received flag to 1 indicating that
10081 * Message Signaled Interrupts are supported.
10082 *
10083 * Return value:
 10084 * IRQ_HANDLED
10085 **/
6f039790 10086static irqreturn_t ipr_test_intr(int irq, void *devp)
95fecd90
WB
10087{
10088 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10089 unsigned long lock_flags = 0;
95fecd90 10090
05a6538a 10091 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
95fecd90
WB
10092 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10093
10094 ioa_cfg->msi_received = 1;
10095 wake_up(&ioa_cfg->msi_wait_q);
10096
10097 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
391b8dac 10098 return IRQ_HANDLED;
95fecd90
WB
10099}
10100
10101/**
10102 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
a96099e2 10103 * @ioa_cfg: ioa config struct
95fecd90
WB
10104 * @pdev: PCI device struct
10105 *
a299ee62 10106 * Description: This routine sets up and initiates a test interrupt to determine
95fecd90
WB
10107 * if the interrupt is received via the ipr_test_intr() service routine.
 10108 * If the test fails, the driver will fall back to LSI.
10109 *
10110 * Return value:
10111 * 0 on success / non-zero on failure
10112 **/
6f039790 10113static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
95fecd90
WB
10114{
10115 int rc;
95fecd90 10116 unsigned long lock_flags = 0;
a299ee62 10117 int irq = pci_irq_vector(pdev, 0);
95fecd90
WB
10118
10119 ENTER;
10120
10121 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10122 init_waitqueue_head(&ioa_cfg->msi_wait_q);
10123 ioa_cfg->msi_received = 0;
10124 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
214777ba 10125 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
4dc83399 10126 readl(ioa_cfg->regs.sense_interrupt_mask_reg);
95fecd90
WB
10127 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10128
a299ee62 10129 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
95fecd90 10130 if (rc) {
a299ee62 10131 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
95fecd90
WB
10132 return rc;
10133 } else if (ipr_debug)
a299ee62 10134 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
95fecd90 10135
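	/* Raise the debug-acknowledge test interrupt and wait up to one
	 * second for ipr_test_intr() to set msi_received. */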
214777ba 10136 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
4dc83399 10137 readl(ioa_cfg->regs.sense_interrupt_reg);
95fecd90 10138 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
56d6aa33 10139 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
95fecd90
WB
10140 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10141
95fecd90
WB
10142 if (!ioa_cfg->msi_received) {
10143 /* MSI test failed */
10144 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
10145 rc = -EOPNOTSUPP;
10146 } else if (ipr_debug)
10147 dev_info(&pdev->dev, "MSI test succeeded.\n");
10148
10149 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10150
a299ee62 10151 free_irq(irq, ioa_cfg);
95fecd90
WB
10152
10153 LEAVE;
10154
10155 return rc;
10156}
10157
05a6538a 10158/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
1da177e4
LT
10159 * @pdev: PCI device struct
10160 * @dev_id: PCI device id struct
10161 *
10162 * Return value:
10163 * 0 on success / non-zero on failure
10164 **/
6f039790
GKH
10165static int ipr_probe_ioa(struct pci_dev *pdev,
10166 const struct pci_device_id *dev_id)
1da177e4
LT
10167{
10168 struct ipr_ioa_cfg *ioa_cfg;
10169 struct Scsi_Host *host;
10170 unsigned long ipr_regs_pci;
10171 void __iomem *ipr_regs;
a2a65a3e 10172 int rc = PCIBIOS_SUCCESSFUL;
473b1e8e 10173 volatile u32 mask, uproc, interrupts;
feccada9 10174 unsigned long lock_flags, driver_lock_flags;
a299ee62 10175 unsigned int irq_flag;
1da177e4
LT
10176
10177 ENTER;
10178
1da177e4 10179 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
1da177e4
LT
10180 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10181
10182 if (!host) {
10183 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10184 rc = -ENOMEM;
6270e593 10185 goto out;
1da177e4
LT
10186 }
10187
10188 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10189 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8d8e7d13 10190 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
1da177e4 10191
1be7bd82 10192 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
1da177e4 10193
1be7bd82 10194 if (!ioa_cfg->ipr_chip) {
1da177e4
LT
10195 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10196 dev_id->vendor, dev_id->device);
10197 goto out_scsi_host_put;
10198 }
10199
a32c055f
WB
10200 /* set SIS 32 or SIS 64 */
10201 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
1be7bd82 10202 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
7dd21308 10203 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
89aad428 10204 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
1be7bd82 10205
5469cb5b
BK
10206 if (ipr_transop_timeout)
10207 ioa_cfg->transop_timeout = ipr_transop_timeout;
10208 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10209 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10210 else
10211 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10212
44c10138 10213 ioa_cfg->revid = pdev->revision;
463fc696 10214
6270e593
BK
10215 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10216
1da177e4
LT
10217 ipr_regs_pci = pci_resource_start(pdev, 0);
10218
10219 rc = pci_request_regions(pdev, IPR_NAME);
10220 if (rc < 0) {
10221 dev_err(&pdev->dev,
10222 "Couldn't register memory range of registers\n");
10223 goto out_scsi_host_put;
10224 }
10225
6270e593
BK
10226 rc = pci_enable_device(pdev);
10227
10228 if (rc || pci_channel_offline(pdev)) {
10229 if (pci_channel_offline(pdev)) {
10230 ipr_wait_for_pci_err_recovery(ioa_cfg);
10231 rc = pci_enable_device(pdev);
10232 }
10233
10234 if (rc) {
10235 dev_err(&pdev->dev, "Cannot enable adapter\n");
10236 ipr_wait_for_pci_err_recovery(ioa_cfg);
10237 goto out_release_regions;
10238 }
10239 }
10240
25729a7f 10241 ipr_regs = pci_ioremap_bar(pdev, 0);
1da177e4
LT
10242
10243 if (!ipr_regs) {
10244 dev_err(&pdev->dev,
10245 "Couldn't map memory range of registers\n");
10246 rc = -ENOMEM;
6270e593 10247 goto out_disable;
1da177e4
LT
10248 }
10249
10250 ioa_cfg->hdw_dma_regs = ipr_regs;
10251 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10252 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10253
6270e593 10254 ipr_init_regs(ioa_cfg);
1da177e4 10255
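	/* SIS-64 adapters try a 64-bit DMA mask first and fall back to 32-bit */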
a32c055f 10256 if (ioa_cfg->sis64) {
869404cb 10257 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
a32c055f 10258 if (rc < 0) {
869404cb
AB
10259 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10260 rc = dma_set_mask_and_coherent(&pdev->dev,
10261 DMA_BIT_MASK(32));
a32c055f 10262 }
a32c055f 10263 } else
869404cb 10264 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
a32c055f 10265
1da177e4 10266 if (rc < 0) {
869404cb 10267 dev_err(&pdev->dev, "Failed to set DMA mask\n");
1da177e4
LT
10268 goto cleanup_nomem;
10269 }
10270
10271 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10272 ioa_cfg->chip_cfg->cache_line_size);
10273
10274 if (rc != PCIBIOS_SUCCESSFUL) {
10275 dev_err(&pdev->dev, "Write of cache line size failed\n");
6270e593 10276 ipr_wait_for_pci_err_recovery(ioa_cfg);
1da177e4
LT
10277 rc = -EIO;
10278 goto cleanup_nomem;
10279 }
10280
6270e593
BK
10281 /* Issue MMIO read to ensure card is not in EEH */
10282 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10283 ipr_wait_for_pci_err_recovery(ioa_cfg);
10284
05a6538a 10285 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10286 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10287 IPR_MAX_MSIX_VECTORS);
10288 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10289 }
10290
a299ee62
CH
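	/* Legacy INTx is always allowed; MSI/MSI-X are requested only on chips that support MSI */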
10291 irq_flag = PCI_IRQ_LEGACY;
10292 if (ioa_cfg->ipr_chip->has_msi)
10293 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10294 rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10295 if (rc < 0) {
10296 ipr_wait_for_pci_err_recovery(ioa_cfg);
10297 goto cleanup_nomem;
05a6538a 10298 }
a299ee62
CH
10299 ioa_cfg->nvectors = rc;
10300
10301 if (!pdev->msi_enabled && !pdev->msix_enabled)
10302 ioa_cfg->clear_isr = 1;
05a6538a 10303
6270e593
BK
10304 pci_set_master(pdev);
10305
10306 if (pci_channel_offline(pdev)) {
10307 ipr_wait_for_pci_err_recovery(ioa_cfg);
10308 pci_set_master(pdev);
10309 if (pci_channel_offline(pdev)) {
10310 rc = -EIO;
10311 goto out_msi_disable;
10312 }
10313 }
10314
a299ee62 10315 if (pdev->msi_enabled || pdev->msix_enabled) {
95fecd90 10316 rc = ipr_test_msi(ioa_cfg, pdev);
a299ee62
CH
10317 switch (rc) {
10318 case 0:
10319 dev_info(&pdev->dev,
10320 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
10321 pdev->msix_enabled ? "-X" : "");
10322 break;
10323 case -EOPNOTSUPP:
6270e593 10324 ipr_wait_for_pci_err_recovery(ioa_cfg);
a299ee62 10325 pci_free_irq_vectors(pdev);
05a6538a 10326
05a6538a 10327 ioa_cfg->nvectors = 1;
9dadfb97 10328 ioa_cfg->clear_isr = 1;
a299ee62
CH
10329 break;
10330 default:
95fecd90 10331 goto out_msi_disable;
05a6538a 10332 }
10333 }
10334
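	/* Cap the number of HRRQs at the allocated vectors, the online CPU count, and the driver maximum */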
10335 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10336 (unsigned int)num_online_cpus(),
10337 (unsigned int)IPR_MAX_HRRQ_NUM);
95fecd90 10338
1da177e4 10339 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
f170c684 10340 goto out_msi_disable;
1da177e4
LT
10341
10342 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
f170c684 10343 goto out_msi_disable;
1da177e4
LT
10344
10345 rc = ipr_alloc_mem(ioa_cfg);
10346 if (rc < 0) {
10347 dev_err(&pdev->dev,
10348 "Couldn't allocate enough memory for device driver!\n");
f170c684 10349 goto out_msi_disable;
1da177e4
LT
10350 }
10351
6270e593
BK
10352 /* Save away PCI config space for use following IOA reset */
10353 rc = pci_save_state(pdev);
10354
10355 if (rc != PCIBIOS_SUCCESSFUL) {
10356 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10357 rc = -EIO;
10358 goto cleanup_nolog;
10359 }
10360
ce155cce 10361 /*
10362 * If HRRQ updated interrupt is not masked, or reset alert is set,
10363 * the card is in an unknown state and needs a hard reset
10364 */
214777ba
WB
10365 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10366 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10367 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
ce155cce 10368 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10369 ioa_cfg->needs_hard_reset = 1;
5d7c20b7 10370 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
473b1e8e
BK
10371 ioa_cfg->needs_hard_reset = 1;
10372 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10373 ioa_cfg->ioa_unit_checked = 1;
ce155cce 10374
56d6aa33 10375 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1da177e4 10376 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
56d6aa33 10377 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1da177e4 10378
a299ee62 10379 if (pdev->msi_enabled || pdev->msix_enabled) {
05a6538a 10380 name_msi_vectors(ioa_cfg);
a299ee62 10381 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
05a6538a 10382 ioa_cfg->vectors_info[0].desc,
10383 &ioa_cfg->hrrq[0]);
10384 if (!rc)
a299ee62 10385 rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
05a6538a 10386 } else {
10387 rc = request_irq(pdev->irq, ipr_isr,
10388 IRQF_SHARED,
10389 IPR_NAME, &ioa_cfg->hrrq[0]);
10390 }
1da177e4
LT
10391 if (rc) {
10392 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10393 pdev->irq, rc);
10394 goto cleanup_nolog;
10395 }
10396
463fc696
BK
10397 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10398 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10399 ioa_cfg->needs_warm_reset = 1;
10400 ioa_cfg->reset = ipr_reset_slot_reset;
2796ca5e
BK
10401
10402 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10403 WQ_MEM_RECLAIM, host->host_no);
10404
10405 if (!ioa_cfg->reset_work_q) {
10406 dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
c8e18acc 10407 rc = -ENOMEM;
2796ca5e
BK
10408 goto out_free_irq;
10409 }
463fc696
BK
10410 } else
10411 ioa_cfg->reset = ipr_reset_start_bist;
10412
feccada9 10413 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
1da177e4 10414 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
feccada9 10415 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
1da177e4
LT
10416
10417 LEAVE;
10418out:
10419 return rc;
10420
2796ca5e
BK
10421out_free_irq:
10422 ipr_free_irqs(ioa_cfg);
1da177e4
LT
10423cleanup_nolog:
10424 ipr_free_mem(ioa_cfg);
95fecd90 10425out_msi_disable:
6270e593 10426 ipr_wait_for_pci_err_recovery(ioa_cfg);
a299ee62 10427 pci_free_irq_vectors(pdev);
f170c684
JL
10428cleanup_nomem:
10429 iounmap(ipr_regs);
6270e593
BK
10430out_disable:
10431 pci_disable_device(pdev);
1da177e4
LT
10432out_release_regions:
10433 pci_release_regions(pdev);
10434out_scsi_host_put:
10435 scsi_host_put(host);
1da177e4
LT
10436 goto out;
10437}
10438
1da177e4
LT
10439/**
10440 * ipr_initiate_ioa_bringdown - Bring down an adapter
10441 * @ioa_cfg: ioa config struct
10442 * @shutdown_type: shutdown type
10443 *
10444 * Description: This function will initiate bringing down the adapter.
10445 * This consists of issuing an IOA shutdown to the adapter
10446 * to flush the cache, and running BIST.
10447 * If the caller needs to wait on the completion of the reset,
10448 * the caller must sleep on the reset_wait_q.
10449 *
10450 * Return value:
10451 * none
10452 **/
10453static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10454 enum ipr_shutdown_type shutdown_type)
10455{
10456 ENTER;
10457 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10458 ioa_cfg->sdt_state = ABORT_DUMP;
10459 ioa_cfg->reset_retries = 0;
10460 ioa_cfg->in_ioa_bringdown = 1;
10461 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10462 LEAVE;
10463}
10464
10465/**
10466 * __ipr_remove - Remove a single adapter
10467 * @pdev: pci device struct
10468 *
10469 * Adapter hot plug remove entry point.
10470 *
10471 * Return value:
10472 * none
10473 **/
10474static void __ipr_remove(struct pci_dev *pdev)
10475{
10476 unsigned long host_lock_flags = 0;
10477 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
bfae7820 10478 int i;
feccada9 10479 unsigned long driver_lock_flags;
1da177e4
LT
10480 ENTER;
10481
10482 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
203fa3fe 10483 while (ioa_cfg->in_reset_reload) {
970ea294
BK
10484 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10485 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10486 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10487 }
10488
bfae7820
BK
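	/* Flag every HRRQ as being removed and order the stores before starting the bringdown */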
10489 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10490 spin_lock(&ioa_cfg->hrrq[i]._lock);
10491 ioa_cfg->hrrq[i].removing_ioa = 1;
10492 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10493 }
10494 wmb();
1da177e4
LT
10495 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10496
10497 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10498 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
43829731 10499 flush_work(&ioa_cfg->work_q);
2796ca5e
BK
10500 if (ioa_cfg->reset_work_q)
10501 flush_workqueue(ioa_cfg->reset_work_q);
9077a944 10502 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
1da177e4
LT
10503 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10504
feccada9 10505 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
1da177e4 10506 list_del(&ioa_cfg->queue);
feccada9 10507 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
1da177e4
LT
10508
10509 if (ioa_cfg->sdt_state == ABORT_DUMP)
10510 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10511 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10512
10513 ipr_free_all_resources(ioa_cfg);
10514
10515 LEAVE;
10516}
10517
10518/**
10519 * ipr_remove - IOA hot plug remove entry point
10520 * @pdev: pci device struct
10521 *
10522 * Adapter hot plug remove entry point.
10523 *
10524 * Return value:
10525 * none
10526 **/
6f039790 10527static void ipr_remove(struct pci_dev *pdev)
1da177e4
LT
10528{
10529 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10530
10531 ENTER;
10532
ee959b00 10533 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4 10534 &ipr_trace_attr);
ee959b00 10535 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4 10536 &ipr_dump_attr);
afc3f83c
BK
10537 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10538 &ipr_ioa_async_err_log);
1da177e4
LT
10539 scsi_remove_host(ioa_cfg->host);
10540
10541 __ipr_remove(pdev);
10542
10543 LEAVE;
10544}
10545
10546/**
10547 * ipr_probe - Adapter hot plug add entry point
a96099e2
LJ
10548 * @pdev: pci device struct
10549 * @dev_id: pci device ID
1da177e4
LT
10550 *
10551 * Return value:
10552 * 0 on success / non-zero on failure
10553 **/
6f039790 10554static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
1da177e4
LT
10555{
10556 struct ipr_ioa_cfg *ioa_cfg;
b195d5e2 10557 unsigned long flags;
b53d124a 10558 int rc, i;
1da177e4
LT
10559
10560 rc = ipr_probe_ioa(pdev, dev_id);
10561
10562 if (rc)
10563 return rc;
10564
10565 ioa_cfg = pci_get_drvdata(pdev);
10566 rc = ipr_probe_ioa_part2(ioa_cfg);
10567
10568 if (rc) {
10569 __ipr_remove(pdev);
10570 return rc;
10571 }
10572
10573 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10574
10575 if (rc) {
10576 __ipr_remove(pdev);
10577 return rc;
10578 }
10579
ee959b00 10580 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
10581 &ipr_trace_attr);
10582
10583 if (rc) {
10584 scsi_remove_host(ioa_cfg->host);
10585 __ipr_remove(pdev);
10586 return rc;
10587 }
10588
afc3f83c
BK
10589 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10590 &ipr_ioa_async_err_log);
10591
10592 if (rc) {
10593 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10594 &ipr_dump_attr);
10595 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10596 &ipr_trace_attr);
10597 scsi_remove_host(ioa_cfg->host);
10598 __ipr_remove(pdev);
10599 return rc;
10600 }
10601
ee959b00 10602 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
10603 &ipr_dump_attr);
10604
10605 if (rc) {
afc3f83c
BK
10606 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10607 &ipr_ioa_async_err_log);
ee959b00 10608 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
1da177e4
LT
10609 &ipr_trace_attr);
10610 scsi_remove_host(ioa_cfg->host);
10611 __ipr_remove(pdev);
10612 return rc;
10613 }
a3d1ddd9
BK
10614 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10615 ioa_cfg->scan_enabled = 1;
10616 schedule_work(&ioa_cfg->work_q);
10617 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
1da177e4 10618
b53d124a 10619 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10620
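	/* Set up irq_poll on the secondary HRRQs when a SIS-64 adapter has more than one vector */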
89f8b33c 10621 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 10622 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
511cbce2 10623 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
b53d124a 10624 ioa_cfg->iopoll_weight, ipr_iopoll);
b53d124a 10625 }
10626 }
10627
a3d1ddd9
BK
10628 scsi_scan_host(ioa_cfg->host);
10629
1da177e4
LT
10630 return 0;
10631}
10632
10633/**
10634 * ipr_shutdown - Shutdown handler.
d18c3db5 10635 * @pdev: pci device struct
1da177e4
LT
10636 *
10637 * This function is invoked upon system shutdown/reboot. It will issue
10638 * an adapter shutdown to the adapter to flush the write cache.
10639 *
10640 * Return value:
10641 * none
10642 **/
d18c3db5 10643static void ipr_shutdown(struct pci_dev *pdev)
1da177e4 10644{
d18c3db5 10645 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
1da177e4 10646 unsigned long lock_flags = 0;
4fdd7c7a 10647 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
b53d124a 10648 int i;
1da177e4
LT
10649
10650 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
89f8b33c 10651 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
b53d124a 10652 ioa_cfg->iopoll_weight = 0;
10653 for (i = 1; i < ioa_cfg->hrrq_num; i++)
511cbce2 10654 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
b53d124a 10655 }
10656
203fa3fe 10657 while (ioa_cfg->in_reset_reload) {
970ea294
BK
10658 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10659 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10660 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10661 }
10662
4fdd7c7a
BK
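	/* With ipr_fast_reboot set, SIS-64 adapters are only quiesced on restart instead of a normal shutdown */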
10663 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10664 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10665
10666 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
1da177e4
LT
10667 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10668 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4fdd7c7a 10669 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
2796ca5e 10670 ipr_free_irqs(ioa_cfg);
4fdd7c7a
BK
10671 pci_disable_device(ioa_cfg->pdev);
10672 }
1da177e4
LT
10673}
10674
6f039790 10675static struct pci_device_id ipr_pci_table[] = {
1da177e4 10676 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 10677 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
1da177e4 10678 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 10679 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
1da177e4 10680 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 10681 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
1da177e4 10682 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6d84c944 10683 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
1da177e4 10684 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 10685 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
1da177e4 10686 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 10687 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
1da177e4 10688 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6d84c944 10689 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
86f51436 10690 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
5469cb5b
BK
10691 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10692 IPR_USE_LONG_TRANSOP_TIMEOUT },
86f51436 10693 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6d84c944 10694 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
86f51436 10695 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
22d2e402
BK
10696 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10697 IPR_USE_LONG_TRANSOP_TIMEOUT },
60e7486b 10698 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
5469cb5b
BK
10699 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10700 IPR_USE_LONG_TRANSOP_TIMEOUT },
86f51436 10701 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6d84c944 10702 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
86f51436 10703 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
22d2e402
BK
10704 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10705 IPR_USE_LONG_TRANSOP_TIMEOUT},
60e7486b 10706 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
5469cb5b
BK
10707 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10708 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c 10709 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
22d2e402
BK
10710 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10711 IPR_USE_LONG_TRANSOP_TIMEOUT },
185eb31c
BK
10712 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10713 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
b0f56d3d
WB
10714 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
10715 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
60e7486b 10716 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
5469cb5b 10717 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
463fc696 10718 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
1da177e4 10719 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6d84c944 10720 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
1da177e4 10721 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6d84c944 10722 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
86f51436 10723 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
5469cb5b
BK
10724 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10725 IPR_USE_LONG_TRANSOP_TIMEOUT },
60e7486b 10726 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
5469cb5b
BK
10727 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10728 IPR_USE_LONG_TRANSOP_TIMEOUT },
d7b4627f
WB
10729 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10730 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10731 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10732 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10733 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10734 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
b8d5d568 10735 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10736 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
5a918353
WB
10737 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10738 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
32622bde
WB
10739 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10740 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
cd9b3d04 10741 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 10742 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
cd9b3d04 10743 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 10744 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
cd9b3d04 10745 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 10746 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
cd9b3d04
WB
10747 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10748 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10749 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
d7b4627f 10750 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
b8d5d568 10751 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10752 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10753 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10754 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10755 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10756 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10757 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10758 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
43c5fdaf 10759 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10760 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10761 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
f94d9964
WX
10762 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10763 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
43c5fdaf 10764 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10765 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10766 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10767 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10768 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10769 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10770 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10771 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10772 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10773 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10774 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
5eeac3e9
WX
10775 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10776 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10777 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10778 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10779 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10780 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
00da9ffa
WX
10781 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10782 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10783 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10784 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
1da177e4
LT
10785 { }
10786};
10787MODULE_DEVICE_TABLE(pci, ipr_pci_table);
10788
a55b2d21 10789static const struct pci_error_handlers ipr_err_handler = {
f8a88b19 10790 .error_detected = ipr_pci_error_detected,
6270e593 10791 .mmio_enabled = ipr_pci_mmio_enabled,
f8a88b19
LV
10792 .slot_reset = ipr_pci_slot_reset,
10793};
10794
1da177e4
LT
10795static struct pci_driver ipr_driver = {
10796 .name = IPR_NAME,
10797 .id_table = ipr_pci_table,
10798 .probe = ipr_probe,
6f039790 10799 .remove = ipr_remove,
d18c3db5 10800 .shutdown = ipr_shutdown,
f8a88b19 10801 .err_handler = &ipr_err_handler,
1da177e4
LT
10802};
10803
f72919ec
WB
10804/**
10805 * ipr_halt_done - Shutdown prepare completion
a96099e2 10806 * @ipr_cmd: ipr command struct
f72919ec
WB
10807 *
10808 * Return value:
10809 * none
10810 **/
10811static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10812{
05a6538a 10813 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
f72919ec
WB
10814}
10815
10816/**
10817 * ipr_halt - Issue shutdown prepare to all adapters
a96099e2
LJ
10818 * @nb: Notifier block
10819 * @event: Notifier event
10820 * @buf: Notifier data (unused)
f72919ec
WB
10821 *
10822 * Return value:
10823 * NOTIFY_OK on success / NOTIFY_DONE on failure
10824 **/
10825static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10826{
10827 struct ipr_cmnd *ipr_cmd;
10828 struct ipr_ioa_cfg *ioa_cfg;
feccada9 10829 unsigned long flags = 0, driver_lock_flags;
f72919ec
WB
10830
10831 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10832 return NOTIFY_DONE;
10833
feccada9 10834 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
f72919ec
WB
10835
10836 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10837 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
4fdd7c7a
BK
10838 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10839 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
f72919ec
WB
10840 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10841 continue;
10842 }
10843
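		/* Send a SHUTDOWN PREPARE FOR NORMAL IOA command to each active adapter before halt/reboot/power-off */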
10844 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10845 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10846 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10847 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10848 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10849
10850 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10851 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10852 }
feccada9 10853 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
f72919ec
WB
10854
10855 return NOTIFY_OK;
10856}
10857
10858static struct notifier_block ipr_notifier = {
10859 ipr_halt, NULL, 0
10860};
10861
1da177e4
LT
10862/**
10863 * ipr_init - Module entry point
10864 *
10865 * Return value:
10866 * 0 on success / negative value on failure
10867 **/
10868static int __init ipr_init(void)
10869{
e6f108bf
SX
10870 int rc;
10871
1da177e4
LT
10872 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10873 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10874
f72919ec 10875 register_reboot_notifier(&ipr_notifier);
e6f108bf
SX
10876 rc = pci_register_driver(&ipr_driver);
10877 if (rc) {
10878 unregister_reboot_notifier(&ipr_notifier);
10879 return rc;
10880 }
10881
10882 return 0;
1da177e4
LT
10883}
10884
10885/**
10886 * ipr_exit - Module unload
10887 *
10888 * Module unload entry point.
10889 *
10890 * Return value:
10891 * none
10892 **/
10893static void __exit ipr_exit(void)
10894{
f72919ec 10895 unregister_reboot_notifier(&ipr_notifier);
1da177e4
LT
10896 pci_unregister_driver(&ipr_driver);
10897}
10898
10899module_init(ipr_init);
10900module_exit(ipr_exit);