drivers/scsi/mpt3sas/mpt3sas_scsih.c
1/*
2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3 *
4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5 * Copyright (C) 2012-2014 LSI Corporation
6 * Copyright (C) 2013-2014 Avago Technologies
7 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
42 * USA.
43 */
44
45#include <linux/module.h>
46#include <linux/kernel.h>
47#include <linux/init.h>
48#include <linux/errno.h>
49#include <linux/blkdev.h>
50#include <linux/sched.h>
51#include <linux/workqueue.h>
52#include <linux/delay.h>
53#include <linux/pci.h>
54#include <linux/interrupt.h>
55#include <linux/aer.h>
56#include <linux/raid_class.h>
57#include <linux/blk-mq-pci.h>
58#include <asm/unaligned.h>
59
60#include "mpt3sas_base.h"
61
62#define RAID_CHANNEL 1
63
64#define PCIE_CHANNEL 2
65
66/* forward proto's */
67static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
68 struct _sas_node *sas_expander);
69static void _firmware_event_work(struct work_struct *work);
70
71static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
72 struct _sas_device *sas_device);
73static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
74 u8 retry_count, u8 is_pd);
75static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
76static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
77 struct _pcie_device *pcie_device);
78static void
79_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
80static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
81static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
82
83/* global parameters */
84LIST_HEAD(mpt3sas_ioc_list);
85/* global ioc lock for list operations */
86DEFINE_SPINLOCK(gioc_lock);
87
88MODULE_AUTHOR(MPT3SAS_AUTHOR);
89MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
90MODULE_LICENSE("GPL");
91MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
92MODULE_ALIAS("mpt2sas");
93
94/* local parameters */
95static u8 scsi_io_cb_idx = -1;
96static u8 tm_cb_idx = -1;
97static u8 ctl_cb_idx = -1;
98static u8 base_cb_idx = -1;
99static u8 port_enable_cb_idx = -1;
100static u8 transport_cb_idx = -1;
101static u8 scsih_cb_idx = -1;
102static u8 config_cb_idx = -1;
103static int mpt2_ids;
104static int mpt3_ids;
105
106static u8 tm_tr_cb_idx = -1 ;
107static u8 tm_tr_volume_cb_idx = -1 ;
108static u8 tm_sas_control_cb_idx = -1;
109
110/* command line options */
111static u32 logging_level;
112MODULE_PARM_DESC(logging_level,
113 " bits for enabling additional logging info (default=0)");
114
115
116static ushort max_sectors = 0xFFFF;
117module_param(max_sectors, ushort, 0444);
118MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
119
120
121static int missing_delay[2] = {-1, -1};
122module_param_array(missing_delay, int, NULL, 0444);
123MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
124
125/* scsi-mid layer global parameter is max_report_luns, which is 511 */
126#define MPT3SAS_MAX_LUN (16895)
127static u64 max_lun = MPT3SAS_MAX_LUN;
128module_param(max_lun, ullong, 0444);
129MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
130
131static ushort hbas_to_enumerate;
132module_param(hbas_to_enumerate, ushort, 0444);
133MODULE_PARM_DESC(hbas_to_enumerate,
134 " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
135 1 - enumerates only SAS 2.0 generation HBAs\n \
136 2 - enumerates only SAS 3.0 generation HBAs (default=0)");
137
138/* diag_buffer_enable is bitwise
139 * bit 0 set = TRACE
140 * bit 1 set = SNAPSHOT
141 * bit 2 set = EXTENDED
142 *
143 * Either bit can be set, or both
144 */
145static int diag_buffer_enable = -1;
146module_param(diag_buffer_enable, int, 0444);
147MODULE_PARM_DESC(diag_buffer_enable,
148 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
149static int disable_discovery = -1;
150module_param(disable_discovery, int, 0444);
151MODULE_PARM_DESC(disable_discovery, " disable discovery ");
152
153
154/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
155static int prot_mask = -1;
156module_param(prot_mask, int, 0444);
157MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
158
159static bool enable_sdev_max_qd;
160module_param(enable_sdev_max_qd, bool, 0444);
161MODULE_PARM_DESC(enable_sdev_max_qd,
162 "Enable sdev max qd as can_queue, def=disabled(0)");
163
164static int multipath_on_hba = -1;
165module_param(multipath_on_hba, int, 0);
166MODULE_PARM_DESC(multipath_on_hba,
167 "Multipath support to add same target device\n\t\t"
168 "as many times as it is visible to HBA from various paths\n\t\t"
169 "(by default:\n\t\t"
170 "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
171 "\t SAS 3.5 HBA - This will be enabled)");
172
173static int host_tagset_enable = 1;
174module_param(host_tagset_enable, int, 0444);
175MODULE_PARM_DESC(host_tagset_enable,
176 "Shared host tagset enable/disable Default: enable(1)");
177
178/* raid transport support */
179static struct raid_template *mpt3sas_raid_template;
180static struct raid_template *mpt2sas_raid_template;
181
182
183/**
184 * struct sense_info - common structure for obtaining sense keys
185 * @skey: sense key
186 * @asc: additional sense code
187 * @ascq: additional sense code qualifier
188 */
189struct sense_info {
190 u8 skey;
191 u8 asc;
192 u8 ascq;
193};
194
195#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
196#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
197#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
198#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
199#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
200/**
201 * struct fw_event_work - firmware event struct
202 * @list: link list framework
203 * @work: work object (ioc->fault_reset_work_q)
204 * @ioc: per adapter object
205 * @device_handle: device handle
206 * @VF_ID: virtual function id
207 * @VP_ID: virtual port id
208 * @ignore: flag meaning this event has been marked to ignore
209 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
210 * @refcount: kref for this event
211 * @event_data: reply event data payload follows
212 *
213 * This object stored on ioc->fw_event_list.
214 */
215struct fw_event_work {
216 struct list_head list;
217 struct work_struct work;
218
219 struct MPT3SAS_ADAPTER *ioc;
220 u16 device_handle;
221 u8 VF_ID;
222 u8 VP_ID;
223 u8 ignore;
224 u16 event;
225 struct kref refcount;
226 char event_data[] __aligned(4);
227};
228
229static void fw_event_work_free(struct kref *r)
230{
231 kfree(container_of(r, struct fw_event_work, refcount));
232}
233
234static void fw_event_work_get(struct fw_event_work *fw_work)
235{
236 kref_get(&fw_work->refcount);
237}
238
239static void fw_event_work_put(struct fw_event_work *fw_work)
240{
241 kref_put(&fw_work->refcount, fw_event_work_free);
242}
243
244static struct fw_event_work *alloc_fw_event_work(int len)
245{
246 struct fw_event_work *fw_event;
247
248 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
249 if (!fw_event)
250 return NULL;
251
252 kref_init(&fw_event->refcount);
253 return fw_event;
254}
255
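/*
 * Illustrative sketch (not part of the original driver): the lifetime
 * pattern intended by the kref helpers above.  alloc_fw_event_work()
 * returns an object whose refcount is 1; every additional user takes a
 * reference with fw_event_work_get() and drops it with fw_event_work_put(),
 * and the object is freed only when the last reference is released.
 */
#if 0	/* example only */
static void example_fw_event_lifetime(int data_len)
{
	struct fw_event_work *fw_event;

	fw_event = alloc_fw_event_work(data_len);	/* refcount == 1 */
	if (!fw_event)
		return;

	fw_event_work_get(fw_event);	/* extra reference for a second user */
	/* ... both users operate on fw_event ... */
	fw_event_work_put(fw_event);	/* second user is done */
	fw_event_work_put(fw_event);	/* last put frees the object */
}
#endif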
256/**
257 * struct _scsi_io_transfer - scsi io transfer
258 * @handle: sas device handle (assigned by firmware)
259 * @is_raid: flag set for hidden raid components
260 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
261 * @data_length: data transfer length
262 * @data_dma: dma pointer to data
263 * @sense: sense data
264 * @lun: lun number
265 * @cdb_length: cdb length
266 * @cdb: cdb contents
267 * @timeout: timeout for this command
268 * @VF_ID: virtual function id
269 * @VP_ID: virtual port id
270 * @valid_reply: flag set for reply message
271 * @sense_length: sense length
272 * @ioc_status: ioc status
273 * @scsi_state: scsi state
274 * @scsi_status: scsi status
275 * @log_info: log information
276 * @transfer_length: data length transfer when there is a reply message
277 *
278 * Used for sending internal scsi commands to devices within this module.
279 * Refer to _scsi_send_scsi_io().
280 */
281struct _scsi_io_transfer {
282 u16 handle;
283 u8 is_raid;
284 enum dma_data_direction dir;
285 u32 data_length;
286 dma_addr_t data_dma;
287 u8 sense[SCSI_SENSE_BUFFERSIZE];
288 u32 lun;
289 u8 cdb_length;
290 u8 cdb[32];
291 u8 timeout;
292 u8 VF_ID;
293 u8 VP_ID;
294 u8 valid_reply;
295 /* the following bits are only valid when 'valid_reply = 1' */
296 u32 sense_length;
297 u16 ioc_status;
298 u8 scsi_state;
299 u8 scsi_status;
300 u32 log_info;
301 u32 transfer_length;
302};
303
304/**
305 * _scsih_set_debug_level - global setting of ioc->logging_level.
306 * @val: ?
307 * @kp: ?
308 *
309 * Note: The logging levels are defined in mpt3sas_debug.h.
310 */
311static int
312_scsih_set_debug_level(const char *val, const struct kernel_param *kp)
313{
314 int ret = param_set_int(val, kp);
315 struct MPT3SAS_ADAPTER *ioc;
316
317 if (ret)
318 return ret;
319
320 pr_info("setting logging_level(0x%08x)\n", logging_level);
321 spin_lock(&gioc_lock);
322 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
323 ioc->logging_level = logging_level;
324 spin_unlock(&gioc_lock);
325 return 0;
326}
327module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
328 &logging_level, 0644);
329
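/*
 * Usage note (illustrative, not part of the original source): since
 * logging_level is registered through module_param_call() with mode 0644,
 * it can be updated at runtime via sysfs and the callback above pushes the
 * new value to every registered IOC, e.g.:
 *
 *	echo 0x3f8 > /sys/module/mpt3sas/parameters/logging_level
 *
 * The value is a bitmask; the individual debug bits are defined in
 * mpt3sas_debug.h (see the note in the kernel-doc above).
 */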
330/**
331 * _scsih_srch_boot_sas_address - search based on sas_address
332 * @sas_address: sas address
333 * @boot_device: boot device object from bios page 2
334 *
335 * Return: 1 when there's a match, 0 means no match.
336 */
337static inline int
338_scsih_srch_boot_sas_address(u64 sas_address,
339 Mpi2BootDeviceSasWwid_t *boot_device)
340{
341 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
342}
343
344/**
345 * _scsih_srch_boot_device_name - search based on device name
346 * @device_name: device name specified in IDENTIFY frame
347 * @boot_device: boot device object from bios page 2
348 *
349 * Return: 1 when there's a match, 0 means no match.
350 */
351static inline int
352_scsih_srch_boot_device_name(u64 device_name,
353 Mpi2BootDeviceDeviceName_t *boot_device)
354{
355 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
356}
357
358/**
359 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
360 * @enclosure_logical_id: enclosure logical id
361 * @slot_number: slot number
362 * @boot_device: boot device object from bios page 2
363 *
364 * Return: 1 when there's a match, 0 means no match.
365 */
366static inline int
367_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
368 Mpi2BootDeviceEnclosureSlot_t *boot_device)
369{
370 return (enclosure_logical_id == le64_to_cpu(boot_device->
371 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
372 SlotNumber)) ? 1 : 0;
373}
374
375/**
376 * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
377 * port number from port list
378 * @ioc: per adapter object
379 * @port_id: port number
380 * @bypass_dirty_port_flag: when set, look for the matching hba port entry even
381 * if hba port entry is marked as dirty.
382 *
383 * Search for hba port entry corresponding to provided port number,
384 * if available return port object otherwise return NULL.
385 */
386struct hba_port *
387mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
388 u8 port_id, u8 bypass_dirty_port_flag)
389{
390 struct hba_port *port, *port_next;
391
392 /*
393 * When multipath_on_hba is disabled then
394 * search the hba_port entry using default
395 * port id i.e. 255
396 */
397 if (!ioc->multipath_on_hba)
398 port_id = MULTIPATH_DISABLED_PORT_ID;
399
400 list_for_each_entry_safe(port, port_next,
401 &ioc->port_table_list, list) {
402 if (port->port_id != port_id)
403 continue;
404 if (bypass_dirty_port_flag)
405 return port;
406 if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
407 continue;
408 return port;
409 }
410
411 /*
412 * Allocate hba_port object for default port id (i.e. 255)
413 * when multipath_on_hba is disabled for the HBA.
414 * And add this object to port_table_list.
415 */
416 if (!ioc->multipath_on_hba) {
417 port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
418 if (!port)
419 return NULL;
420
421 port->port_id = port_id;
422 ioc_info(ioc,
423 "hba_port entry: %p, port: %d is added to hba_port list\n",
424 port, port->port_id);
425 list_add_tail(&port->list,
426 &ioc->port_table_list);
427 return port;
428 }
429 return NULL;
430}
431
432/**
433 * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
434 * @ioc: per adapter object
435 * @port: hba_port object
436 * @phy: phy number
437 *
438 * Return virtual_phy object corresponding to phy number.
439 */
440struct virtual_phy *
441mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
442 struct hba_port *port, u32 phy)
443{
444 struct virtual_phy *vphy, *vphy_next;
445
446 if (!port->vphys_mask)
447 return NULL;
448
449 list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
450 if (vphy->phy_mask & (1 << phy))
451 return vphy;
452 }
453 return NULL;
454}
455
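/*
 * Illustrative sketch (not part of the original driver): how the two lookup
 * helpers above are typically combined.  A port number from a firmware
 * event is first resolved to an hba_port object, and a phy number is then
 * mapped to a virtual_phy entry on that port (NULL when the phy is not part
 * of a virtual port).
 */
#if 0	/* example only */
static void example_port_vphy_lookup(struct MPT3SAS_ADAPTER *ioc,
	u8 port_id, u32 phy)
{
	struct hba_port *port;
	struct virtual_phy *vphy;

	port = mpt3sas_get_port_by_id(ioc, port_id, 0);
	if (!port)
		return;

	vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy);
	if (vphy)
		pr_info("phy %u belongs to vphy mask 0x%x\n", phy, vphy->phy_mask);
}
#endif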
456/**
457 * _scsih_is_boot_device - search for matching boot device.
458 * @sas_address: sas address
459 * @device_name: device name specified in IDENTIFY frame
460 * @enclosure_logical_id: enclosure logical id
461 * @slot: slot number
462 * @form: specifies boot device form
463 * @boot_device: boot device object from bios page 2
464 *
465 * Return: 1 when there's a match, 0 means no match.
466 */
467static int
468_scsih_is_boot_device(u64 sas_address, u64 device_name,
469 u64 enclosure_logical_id, u16 slot, u8 form,
470 Mpi2BiosPage2BootDevice_t *boot_device)
471{
472 int rc = 0;
473
474 switch (form) {
475 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
476 if (!sas_address)
477 break;
478 rc = _scsih_srch_boot_sas_address(
479 sas_address, &boot_device->SasWwid);
480 break;
481 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
482 if (!enclosure_logical_id)
483 break;
484 rc = _scsih_srch_boot_encl_slot(
485 enclosure_logical_id,
486 slot, &boot_device->EnclosureSlot);
487 break;
488 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
489 if (!device_name)
490 break;
491 rc = _scsih_srch_boot_device_name(
492 device_name, &boot_device->DeviceName);
493 break;
494 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
495 break;
496 }
497
498 return rc;
499}
500
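/*
 * Illustrative sketch (not part of the original driver): matching a SAS end
 * device against the requested boot device from BIOS page 2; the form field
 * selects which of the three comparison helpers above gets used.  This
 * mirrors the calls made from _scsih_determine_boot_device() below.
 */
#if 0	/* example only */
static int example_is_requested_boot_device(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	return _scsih_is_boot_device(sas_device->sas_address,
	    sas_device->device_name, sas_device->enclosure_logical_id,
	    sas_device->slot,
	    (ioc->bios_pg2.ReqBootDeviceForm & MPI2_BIOSPAGE2_FORM_MASK),
	    &ioc->bios_pg2.RequestedBootDevice);
}
#endif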
501/**
502 * _scsih_get_sas_address - set the sas_address for given device handle
503 * @ioc: ?
504 * @handle: device handle
505 * @sas_address: sas address
506 *
507 * Return: 0 success, non-zero when failure
508 */
509static int
510_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
511 u64 *sas_address)
512{
513 Mpi2SasDevicePage0_t sas_device_pg0;
514 Mpi2ConfigReply_t mpi_reply;
515 u32 ioc_status;
516
517 *sas_address = 0;
518
519 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
520 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
521 ioc_err(ioc, "failure at %s:%d/%s()!\n",
522 __FILE__, __LINE__, __func__);
523 return -ENXIO;
524 }
525
526 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
527 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
528 /* For HBA, vSES doesn't return HBA SAS address. Instead return
529 * vSES's sas address.
530 */
531 if ((handle <= ioc->sas_hba.num_phys) &&
532 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
533 MPI2_SAS_DEVICE_INFO_SEP)))
534 *sas_address = ioc->sas_hba.sas_address;
535 else
536 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
537 return 0;
538 }
539
540 /* we hit this because the given parent handle doesn't exist */
541 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
542 return -ENXIO;
543
544 /* else error case */
545 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
546 handle, ioc_status, __FILE__, __LINE__, __func__);
547 return -EIO;
548}
549
550/**
551 * _scsih_determine_boot_device - determine boot device.
552 * @ioc: per adapter object
553 * @device: sas_device or pcie_device object
554 * @channel: SAS or PCIe channel
555 *
556 * Determines whether this device should be first reported device to
557 * scsi-ml or sas transport; this is for persistent boot device support.
558 * There are primary, alternate, and current entries in bios page 2. The order
559 * priority is primary, alternate, then current. This routine saves
560 * the corresponding device object.
561 * The saved data to be used later in _scsih_probe_boot_devices().
562 */
563static void
564_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
565 u32 channel)
566{
567 struct _sas_device *sas_device;
568 struct _pcie_device *pcie_device;
569 struct _raid_device *raid_device;
570 u64 sas_address;
571 u64 device_name;
572 u64 enclosure_logical_id;
573 u16 slot;
574
575 /* only process this function when driver loads */
576 if (!ioc->is_driver_loading)
577 return;
578
579 /* no Bios, return immediately */
580 if (!ioc->bios_pg3.BiosVersion)
581 return;
582
583 if (channel == RAID_CHANNEL) {
584 raid_device = device;
585 sas_address = raid_device->wwid;
586 device_name = 0;
587 enclosure_logical_id = 0;
588 slot = 0;
589 } else if (channel == PCIE_CHANNEL) {
590 pcie_device = device;
591 sas_address = pcie_device->wwid;
592 device_name = 0;
593 enclosure_logical_id = 0;
594 slot = 0;
595 } else {
596 sas_device = device;
597 sas_address = sas_device->sas_address;
598 device_name = sas_device->device_name;
599 enclosure_logical_id = sas_device->enclosure_logical_id;
600 slot = sas_device->slot;
601 }
602
603 if (!ioc->req_boot_device.device) {
604 if (_scsih_is_boot_device(sas_address, device_name,
605 enclosure_logical_id, slot,
606 (ioc->bios_pg2.ReqBootDeviceForm &
607 MPI2_BIOSPAGE2_FORM_MASK),
608 &ioc->bios_pg2.RequestedBootDevice)) {
609 dinitprintk(ioc,
610 ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
611 __func__, (u64)sas_address));
612 ioc->req_boot_device.device = device;
613 ioc->req_boot_device.channel = channel;
614 }
615 }
616
617 if (!ioc->req_alt_boot_device.device) {
618 if (_scsih_is_boot_device(sas_address, device_name,
619 enclosure_logical_id, slot,
620 (ioc->bios_pg2.ReqAltBootDeviceForm &
621 MPI2_BIOSPAGE2_FORM_MASK),
622 &ioc->bios_pg2.RequestedAltBootDevice)) {
623 dinitprintk(ioc,
624 ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
625 __func__, (u64)sas_address));
626 ioc->req_alt_boot_device.device = device;
627 ioc->req_alt_boot_device.channel = channel;
628 }
629 }
630
631 if (!ioc->current_boot_device.device) {
632 if (_scsih_is_boot_device(sas_address, device_name,
633 enclosure_logical_id, slot,
634 (ioc->bios_pg2.CurrentBootDeviceForm &
635 MPI2_BIOSPAGE2_FORM_MASK),
636 &ioc->bios_pg2.CurrentBootDevice)) {
637 dinitprintk(ioc,
638 ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
639 __func__, (u64)sas_address));
640 ioc->current_boot_device.device = device;
641 ioc->current_boot_device.channel = channel;
642 }
643 }
644}
645
646static struct _sas_device *
647__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
648 struct MPT3SAS_TARGET *tgt_priv)
649{
650 struct _sas_device *ret;
651
652 assert_spin_locked(&ioc->sas_device_lock);
653
654 ret = tgt_priv->sas_dev;
655 if (ret)
656 sas_device_get(ret);
657
658 return ret;
659}
660
661static struct _sas_device *
662mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
663 struct MPT3SAS_TARGET *tgt_priv)
664{
665 struct _sas_device *ret;
666 unsigned long flags;
667
668 spin_lock_irqsave(&ioc->sas_device_lock, flags);
669 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
670 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
671
672 return ret;
673}
674
675static struct _pcie_device *
676__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
677 struct MPT3SAS_TARGET *tgt_priv)
678{
679 struct _pcie_device *ret;
680
681 assert_spin_locked(&ioc->pcie_device_lock);
682
683 ret = tgt_priv->pcie_dev;
684 if (ret)
685 pcie_device_get(ret);
686
687 return ret;
688}
689
690/**
691 * mpt3sas_get_pdev_from_target - pcie device search
692 * @ioc: per adapter object
693 * @tgt_priv: starget private object
694 *
695 * Context: This function will acquire ioc->pcie_device_lock and will release
696 * before returning the pcie_device object.
697 *
698 * This searches for pcie_device from target, then return pcie_device object.
699 */
700static struct _pcie_device *
701mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
702 struct MPT3SAS_TARGET *tgt_priv)
703{
704 struct _pcie_device *ret;
705 unsigned long flags;
706
707 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
708 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
709 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
710
711 return ret;
712}
713
714
715/**
716 * __mpt3sas_get_sdev_by_rphy - sas device search
717 * @ioc: per adapter object
718 * @rphy: sas_rphy pointer
719 *
720 * Context: This function will acquire ioc->sas_device_lock and will release
721 * before returning the sas_device object.
722 *
723 * This searches for sas_device from rphy object
724 * then return sas_device object.
725 */
726struct _sas_device *
727__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
728 struct sas_rphy *rphy)
729{
730 struct _sas_device *sas_device;
731
732 assert_spin_locked(&ioc->sas_device_lock);
733
734 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
735 if (sas_device->rphy != rphy)
736 continue;
737 sas_device_get(sas_device);
738 return sas_device;
739 }
740
741 sas_device = NULL;
742 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
743 if (sas_device->rphy != rphy)
744 continue;
745 sas_device_get(sas_device);
746 return sas_device;
747 }
748
749 return NULL;
750}
751
752/**
753 * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
754 * sas address from sas_device_list list
755 * @ioc: per adapter object
756 * @sas_address: device sas address
757 * @port: port number
758 *
759 * Search for _sas_device object corresponding to provided sas address,
760 * if available return _sas_device object address otherwise return NULL.
761 */
762struct _sas_device *
763__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
764 u64 sas_address, struct hba_port *port)
765{
766 struct _sas_device *sas_device;
767
768 if (!port)
769 return NULL;
770
771 assert_spin_locked(&ioc->sas_device_lock);
772
773 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
774 if (sas_device->sas_address != sas_address)
775 continue;
776 if (sas_device->port != port)
777 continue;
778 sas_device_get(sas_device);
779 return sas_device;
780 }
781
782 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
783 if (sas_device->sas_address != sas_address)
784 continue;
785 if (sas_device->port != port)
786 continue;
787 sas_device_get(sas_device);
788 return sas_device;
789 }
790
791 return NULL;
792}
793
794/**
795 * mpt3sas_get_sdev_by_addr - sas device search
796 * @ioc: per adapter object
797 * @sas_address: sas address
798 * @port: hba port entry
799 * Context: Calling function should acquire ioc->sas_device_lock
800 *
801 * This searches for sas_device based on sas_address & port number,
802 * then return sas_device object.
803 */
804struct _sas_device *
805mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
806 u64 sas_address, struct hba_port *port)
807{
808 struct _sas_device *sas_device;
809 unsigned long flags;
810
811 spin_lock_irqsave(&ioc->sas_device_lock, flags);
812 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
813 sas_address, port);
814 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
815
816 return sas_device;
817}
818
819static struct _sas_device *
820__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
821{
822 struct _sas_device *sas_device;
823
824 assert_spin_locked(&ioc->sas_device_lock);
825
826 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
827 if (sas_device->handle == handle)
828 goto found_device;
829
830 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
831 if (sas_device->handle == handle)
832 goto found_device;
833
834 return NULL;
835
836found_device:
837 sas_device_get(sas_device);
838 return sas_device;
839}
840
841/**
842 * mpt3sas_get_sdev_by_handle - sas device search
843 * @ioc: per adapter object
844 * @handle: sas device handle (assigned by firmware)
845 * Context: Calling function should acquire ioc->sas_device_lock
846 *
847 * This searches for sas_device based on handle, then return sas_device
848 * object.
849 */
850struct _sas_device *
851mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
852{
853 struct _sas_device *sas_device;
854 unsigned long flags;
855
856 spin_lock_irqsave(&ioc->sas_device_lock, flags);
857 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
858 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
859
860 return sas_device;
861}
862
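/*
 * Illustrative sketch (not part of the original driver): the reference
 * counting contract of the lookup helpers above.  A non-NULL return from
 * mpt3sas_get_sdev_by_handle() (or the _by_addr variant) carries a
 * reference that the caller must release with sas_device_put() once it is
 * done with the object.
 */
#if 0	/* example only */
static void example_sdev_lookup(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;

	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
	if (!sas_device)
		return;

	/* ... use sas_device ... */
	sas_device_put(sas_device);
}
#endif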
863/**
864 * _scsih_display_enclosure_chassis_info - display device location info
865 * @ioc: per adapter object
866 * @sas_device: per sas device object
867 * @sdev: scsi device struct
868 * @starget: scsi target struct
869 */
870static void
871_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
872 struct _sas_device *sas_device, struct scsi_device *sdev,
873 struct scsi_target *starget)
874{
875 if (sdev) {
876 if (sas_device->enclosure_handle != 0)
877 sdev_printk(KERN_INFO, sdev,
878 "enclosure logical id (0x%016llx), slot(%d) \n",
879 (unsigned long long)
880 sas_device->enclosure_logical_id,
881 sas_device->slot);
882 if (sas_device->connector_name[0] != '\0')
883 sdev_printk(KERN_INFO, sdev,
884 "enclosure level(0x%04x), connector name( %s)\n",
885 sas_device->enclosure_level,
886 sas_device->connector_name);
887 if (sas_device->is_chassis_slot_valid)
888 sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
889 sas_device->chassis_slot);
890 } else if (starget) {
891 if (sas_device->enclosure_handle != 0)
892 starget_printk(KERN_INFO, starget,
893 "enclosure logical id(0x%016llx), slot(%d) \n",
894 (unsigned long long)
895 sas_device->enclosure_logical_id,
896 sas_device->slot);
897 if (sas_device->connector_name[0] != '\0')
898 starget_printk(KERN_INFO, starget,
899 "enclosure level(0x%04x), connector name( %s)\n",
900 sas_device->enclosure_level,
901 sas_device->connector_name);
902 if (sas_device->is_chassis_slot_valid)
903 starget_printk(KERN_INFO, starget,
904 "chassis slot(0x%04x)\n",
905 sas_device->chassis_slot);
906 } else {
907 if (sas_device->enclosure_handle != 0)
908 ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
909 (u64)sas_device->enclosure_logical_id,
910 sas_device->slot);
911 if (sas_device->connector_name[0] != '\0')
912 ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
913 sas_device->enclosure_level,
914 sas_device->connector_name);
915 if (sas_device->is_chassis_slot_valid)
916 ioc_info(ioc, "chassis slot(0x%04x)\n",
917 sas_device->chassis_slot);
918 }
919}
920
921/**
922 * _scsih_sas_device_remove - remove sas_device from list.
923 * @ioc: per adapter object
924 * @sas_device: the sas_device object
925 * Context: This function will acquire ioc->sas_device_lock.
926 *
927 * If sas_device is on the list, remove it and decrement its reference count.
928 */
929static void
930_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
931 struct _sas_device *sas_device)
932{
933 unsigned long flags;
934
935 if (!sas_device)
936 return;
937 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
938 sas_device->handle, (u64)sas_device->sas_address);
939
940 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
941
942 /*
943 * The lock serializes access to the list, but we still need to verify
944 * that nobody removed the entry while we were waiting on the lock.
945 */
946 spin_lock_irqsave(&ioc->sas_device_lock, flags);
947 if (!list_empty(&sas_device->list)) {
948 list_del_init(&sas_device->list);
949 sas_device_put(sas_device);
950 }
951 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
952}
953
954/**
955 * _scsih_device_remove_by_handle - removing device object by handle
956 * @ioc: per adapter object
957 * @handle: device handle
958 */
959static void
960_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
961{
962 struct _sas_device *sas_device;
963 unsigned long flags;
964
965 if (ioc->shost_recovery)
966 return;
967
968 spin_lock_irqsave(&ioc->sas_device_lock, flags);
969 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
970 if (sas_device) {
971 list_del_init(&sas_device->list);
972 sas_device_put(sas_device);
973 }
974 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
975 if (sas_device) {
976 _scsih_remove_device(ioc, sas_device);
977 sas_device_put(sas_device);
978 }
979}
980
981/**
982 * mpt3sas_device_remove_by_sas_address - removing device object by
983 * sas address & port number
984 * @ioc: per adapter object
985 * @sas_address: device sas_address
986 * @port: hba port entry
987 *
988 * Return nothing.
989 */
990void
991mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
992 u64 sas_address, struct hba_port *port)
993{
994 struct _sas_device *sas_device;
995 unsigned long flags;
996
997 if (ioc->shost_recovery)
998 return;
999
1000 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1001 sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
1002 if (sas_device) {
1003 list_del_init(&sas_device->list);
1004 sas_device_put(sas_device);
1005 }
1006 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1007 if (sas_device) {
1008 _scsih_remove_device(ioc, sas_device);
1009 sas_device_put(sas_device);
1010 }
1011}
1012
1013/**
1014 * _scsih_sas_device_add - insert sas_device to the list.
1015 * @ioc: per adapter object
1016 * @sas_device: the sas_device object
1017 * Context: This function will acquire ioc->sas_device_lock.
1018 *
1019 * Adding new object to the ioc->sas_device_list.
1020 */
1021static void
1022_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
1023 struct _sas_device *sas_device)
1024{
1025 unsigned long flags;
1026
1027 dewtprintk(ioc,
1028 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1029 __func__, sas_device->handle,
1030 (u64)sas_device->sas_address));
1031
1032 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1033 NULL, NULL));
1034
1035 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1036 sas_device_get(sas_device);
1037 list_add_tail(&sas_device->list, &ioc->sas_device_list);
1038 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1039
1040 if (ioc->hide_drives) {
1041 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1042 return;
1043 }
1044
1045 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
1046 sas_device->sas_address_parent, sas_device->port)) {
1047 _scsih_sas_device_remove(ioc, sas_device);
1048 } else if (!sas_device->starget) {
1049 /*
1050 * When async scanning is enabled, it's not possible to remove
1051 * devices while scanning is turned on due to an oops in
1052 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
1053 */
1054 if (!ioc->is_driver_loading) {
1055 mpt3sas_transport_port_remove(ioc,
1056 sas_device->sas_address,
1057 sas_device->sas_address_parent,
1058 sas_device->port);
1059 _scsih_sas_device_remove(ioc, sas_device);
1060 }
1061 } else
1062 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1063}
1064
1065/**
1066 * _scsih_sas_device_init_add - insert sas_device to the list.
1067 * @ioc: per adapter object
1068 * @sas_device: the sas_device object
1069 * Context: This function will acquire ioc->sas_device_lock.
1070 *
1071 * Adding new object at driver load time to the ioc->sas_device_init_list.
1072 */
1073static void
1074_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1075 struct _sas_device *sas_device)
1076{
1077 unsigned long flags;
1078
1079 dewtprintk(ioc,
1080 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1081 __func__, sas_device->handle,
1082 (u64)sas_device->sas_address));
1083
1084 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1085 NULL, NULL));
1086
1087 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1088 sas_device_get(sas_device);
1089 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
1090 _scsih_determine_boot_device(ioc, sas_device, 0);
1091 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1092}
1093
1094
1095static struct _pcie_device *
1096__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1097{
1098 struct _pcie_device *pcie_device;
1099
1100 assert_spin_locked(&ioc->pcie_device_lock);
1101
1102 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1103 if (pcie_device->wwid == wwid)
1104 goto found_device;
1105
1106 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1107 if (pcie_device->wwid == wwid)
1108 goto found_device;
1109
1110 return NULL;
1111
1112found_device:
1113 pcie_device_get(pcie_device);
1114 return pcie_device;
1115}
1116
1117
1118/**
1119 * mpt3sas_get_pdev_by_wwid - pcie device search
1120 * @ioc: per adapter object
1121 * @wwid: wwid
1122 *
1123 * Context: This function will acquire ioc->pcie_device_lock and will release
1124 * before returning the pcie_device object.
1125 *
1126 * This searches for pcie_device based on wwid, then return pcie_device object.
1127 */
1128static struct _pcie_device *
1129mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1130{
1131 struct _pcie_device *pcie_device;
1132 unsigned long flags;
1133
1134 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1135 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1136 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1137
1138 return pcie_device;
1139}
1140
1141
1142static struct _pcie_device *
1143__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1144 int channel)
1145{
1146 struct _pcie_device *pcie_device;
1147
1148 assert_spin_locked(&ioc->pcie_device_lock);
1149
1150 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1151 if (pcie_device->id == id && pcie_device->channel == channel)
1152 goto found_device;
1153
1154 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1155 if (pcie_device->id == id && pcie_device->channel == channel)
1156 goto found_device;
1157
1158 return NULL;
1159
1160found_device:
1161 pcie_device_get(pcie_device);
1162 return pcie_device;
1163}
1164
1165static struct _pcie_device *
1166__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1167{
1168 struct _pcie_device *pcie_device;
1169
1170 assert_spin_locked(&ioc->pcie_device_lock);
1171
1172 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1173 if (pcie_device->handle == handle)
1174 goto found_device;
1175
1176 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1177 if (pcie_device->handle == handle)
1178 goto found_device;
1179
1180 return NULL;
1181
1182found_device:
1183 pcie_device_get(pcie_device);
1184 return pcie_device;
1185}
1186
1187
1188/**
1189 * mpt3sas_get_pdev_by_handle - pcie device search
1190 * @ioc: per adapter object
1191 * @handle: Firmware device handle
1192 *
1193 * Context: This function will acquire ioc->pcie_device_lock and will release
1194 * before returning the pcie_device object.
1195 *
1196 * This searches for pcie_device based on handle, then return pcie_device
1197 * object.
1198 */
1199struct _pcie_device *
1200mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1201{
1202 struct _pcie_device *pcie_device;
1203 unsigned long flags;
1204
1205 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1206 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1207 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1208
1209 return pcie_device;
1210}
1211
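/*
 * Note (illustrative, not part of the original source): the NVMe/PCIe
 * lookup helpers above follow the same reference counting contract as the
 * SAS device lookups - a non-NULL pcie_device returned by
 * mpt3sas_get_pdev_by_wwid()/mpt3sas_get_pdev_by_handle() holds a reference
 * that the caller releases with pcie_device_put() when finished.
 */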
1212/**
1213 * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1214 * @ioc: per adapter object
1215 * Context: This function will acquire ioc->pcie_device_lock
1216 *
1217 * Update ioc->max_shutdown_latency to the highest RTD3 Entry Latency
1218 * reported among all available NVMe drives.
1219 * Minimum max_shutdown_latency will be six seconds.
1220 */
1221static void
1222_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1223{
1224 struct _pcie_device *pcie_device;
1225 unsigned long flags;
1226 u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1227
1228 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1229 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1230 if (pcie_device->shutdown_latency) {
1231 if (shutdown_latency < pcie_device->shutdown_latency)
1232 shutdown_latency =
1233 pcie_device->shutdown_latency;
1234 }
1235 }
1236 ioc->max_shutdown_latency = shutdown_latency;
1237 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1238}
1239
1240/**
1241 * _scsih_pcie_device_remove - remove pcie_device from list.
1242 * @ioc: per adapter object
1243 * @pcie_device: the pcie_device object
1244 * Context: This function will acquire ioc->pcie_device_lock.
1245 *
1246 * If pcie_device is on the list, remove it and decrement its reference count.
1247 */
1248static void
1249_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1250 struct _pcie_device *pcie_device)
1251{
1252 unsigned long flags;
1253 int was_on_pcie_device_list = 0;
1254 u8 update_latency = 0;
1255
1256 if (!pcie_device)
1257 return;
1258 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1259 pcie_device->handle, (u64)pcie_device->wwid);
1260 if (pcie_device->enclosure_handle != 0)
1261 ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1262 (u64)pcie_device->enclosure_logical_id,
1263 pcie_device->slot);
1264 if (pcie_device->connector_name[0] != '\0')
1265 ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1266 pcie_device->enclosure_level,
1267 pcie_device->connector_name);
1268
1269 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1270 if (!list_empty(&pcie_device->list)) {
1271 list_del_init(&pcie_device->list);
1272 was_on_pcie_device_list = 1;
1273 }
1274 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1275 update_latency = 1;
1276 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1277 if (was_on_pcie_device_list) {
1278 kfree(pcie_device->serial_number);
1279 pcie_device_put(pcie_device);
1280 }
1281
1282 /*
1283 * This device's RTD3 Entry Latency matches IOC's
1284 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1285 * from the available drives as current drive is getting removed.
1286 */
1287 if (update_latency)
1288 _scsih_set_nvme_max_shutdown_latency(ioc);
1289}
1290
1291
1292/**
1293 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1294 * @ioc: per adapter object
1295 * @handle: device handle
1296 */
1297static void
1298_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1299{
1300 struct _pcie_device *pcie_device;
1301 unsigned long flags;
1302 int was_on_pcie_device_list = 0;
1303 u8 update_latency = 0;
1304
1305 if (ioc->shost_recovery)
1306 return;
1307
1308 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1309 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1310 if (pcie_device) {
1311 if (!list_empty(&pcie_device->list)) {
1312 list_del_init(&pcie_device->list);
1313 was_on_pcie_device_list = 1;
1314 pcie_device_put(pcie_device);
1315 }
1316 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1317 update_latency = 1;
1318 }
1319 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1320 if (was_on_pcie_device_list) {
1321 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1322 pcie_device_put(pcie_device);
1323 }
1324
1325 /*
1326 * This device's RTD3 Entry Latency matches IOC's
1327 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1328 * from the available drives as current drive is getting removed.
1329 */
1330 if (update_latency)
1331 _scsih_set_nvme_max_shutdown_latency(ioc);
1332}
1333
1334/**
1335 * _scsih_pcie_device_add - add pcie_device object
1336 * @ioc: per adapter object
1337 * @pcie_device: pcie_device object
1338 *
1339 * This is added to the pcie_device_list link list.
1340 */
1341static void
1342_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1343 struct _pcie_device *pcie_device)
1344{
1345 unsigned long flags;
1346
1347 dewtprintk(ioc,
1348 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1349 __func__,
1350 pcie_device->handle, (u64)pcie_device->wwid));
1351 if (pcie_device->enclosure_handle != 0)
1352 dewtprintk(ioc,
1353 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1354 __func__,
1355 (u64)pcie_device->enclosure_logical_id,
1356 pcie_device->slot));
1357 if (pcie_device->connector_name[0] != '\0')
1358 dewtprintk(ioc,
1359 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1360 __func__, pcie_device->enclosure_level,
1361 pcie_device->connector_name));
1362
1363 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1364 pcie_device_get(pcie_device);
1365 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1366 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1367
1368 if (pcie_device->access_status ==
1369 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1370 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1371 return;
1372 }
1373 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1374 _scsih_pcie_device_remove(ioc, pcie_device);
1375 } else if (!pcie_device->starget) {
1376 if (!ioc->is_driver_loading) {
1377/*TODO-- Need to find out whether this condition will occur or not*/
1378 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1379 }
1380 } else
1381 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1382}
1383
1384/*
1385 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1386 * @ioc: per adapter object
1387 * @pcie_device: the pcie_device object
1388 * Context: This function will acquire ioc->pcie_device_lock.
1389 *
1390 * Adding new object at driver load time to the ioc->pcie_device_init_list.
1391 */
1392static void
1393_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1394 struct _pcie_device *pcie_device)
1395{
1396 unsigned long flags;
1397
1398 dewtprintk(ioc,
1399 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1400 __func__,
1401 pcie_device->handle, (u64)pcie_device->wwid));
1402 if (pcie_device->enclosure_handle != 0)
1403 dewtprintk(ioc,
1404 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1405 __func__,
1406 (u64)pcie_device->enclosure_logical_id,
1407 pcie_device->slot));
1408 if (pcie_device->connector_name[0] != '\0')
1409 dewtprintk(ioc,
1410 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1411 __func__, pcie_device->enclosure_level,
1412 pcie_device->connector_name));
1413
1414 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1415 pcie_device_get(pcie_device);
1416 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1417 if (pcie_device->access_status !=
1418 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1419 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1420 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1421}
1422/**
1423 * _scsih_raid_device_find_by_id - raid device search
1424 * @ioc: per adapter object
1425 * @id: sas device target id
1426 * @channel: sas device channel
1427 * Context: Calling function should acquire ioc->raid_device_lock
1428 *
1429 * This searches for raid_device based on target id, then return raid_device
1430 * object.
1431 */
1432static struct _raid_device *
1433_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1434{
1435 struct _raid_device *raid_device, *r;
1436
1437 r = NULL;
1438 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1439 if (raid_device->id == id && raid_device->channel == channel) {
1440 r = raid_device;
1441 goto out;
1442 }
1443 }
1444
1445 out:
1446 return r;
1447}
1448
1449/**
1450 * mpt3sas_raid_device_find_by_handle - raid device search
1451 * @ioc: per adapter object
1452 * @handle: sas device handle (assigned by firmware)
1453 * Context: Calling function should acquire ioc->raid_device_lock
1454 *
1455 * This searches for raid_device based on handle, then return raid_device
1456 * object.
1457 */
1458struct _raid_device *
1459mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1460{
1461 struct _raid_device *raid_device, *r;
1462
1463 r = NULL;
1464 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1465 if (raid_device->handle != handle)
1466 continue;
1467 r = raid_device;
1468 goto out;
1469 }
1470
1471 out:
1472 return r;
1473}
1474
1475/**
1476 * _scsih_raid_device_find_by_wwid - raid device search
1477 * @ioc: per adapter object
1478 * @wwid: ?
1479 * Context: Calling function should acquire ioc->raid_device_lock
1480 *
1481 * This searches for raid_device based on wwid, then return raid_device
1482 * object.
1483 */
1484static struct _raid_device *
1485_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1486{
1487 struct _raid_device *raid_device, *r;
1488
1489 r = NULL;
1490 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1491 if (raid_device->wwid != wwid)
1492 continue;
1493 r = raid_device;
1494 goto out;
1495 }
1496
1497 out:
1498 return r;
1499}
1500
1501/**
1502 * _scsih_raid_device_add - add raid_device object
1503 * @ioc: per adapter object
1504 * @raid_device: raid_device object
1505 *
1506 * This is added to the raid_device_list link list.
1507 */
1508static void
1509_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1510 struct _raid_device *raid_device)
1511{
1512 unsigned long flags;
1513
1514 dewtprintk(ioc,
1515 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1516 __func__,
1517 raid_device->handle, (u64)raid_device->wwid));
1518
1519 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1520 list_add_tail(&raid_device->list, &ioc->raid_device_list);
1521 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1522}
1523
1524/**
1525 * _scsih_raid_device_remove - delete raid_device object
1526 * @ioc: per adapter object
1527 * @raid_device: raid_device object
1528 *
1529 */
1530static void
1531_scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1532 struct _raid_device *raid_device)
1533{
1534 unsigned long flags;
1535
1536 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1537 list_del(&raid_device->list);
1538 kfree(raid_device);
1539 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1540}
1541
1542/**
1543 * mpt3sas_scsih_expander_find_by_handle - expander device search
1544 * @ioc: per adapter object
1545 * @handle: expander handle (assigned by firmware)
1546 * Context: Calling function should acquire ioc->sas_device_lock
1547 *
1548 * This searches for expander device based on handle, then returns the
1549 * sas_node object.
1550 */
1551struct _sas_node *
1552mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1553{
1554 struct _sas_node *sas_expander, *r;
1555
1556 r = NULL;
1557 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1558 if (sas_expander->handle != handle)
1559 continue;
1560 r = sas_expander;
1561 goto out;
1562 }
1563 out:
1564 return r;
1565}
1566
1567/**
1568 * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1569 * @ioc: per adapter object
1570 * @handle: enclosure handle (assigned by firmware)
1571 * Context: Calling function should acquire ioc->sas_device_lock
1572 *
1573 * This searches for enclosure device based on handle, then returns the
1574 * enclosure object.
1575 */
1576static struct _enclosure_node *
1577mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1578{
1579 struct _enclosure_node *enclosure_dev, *r;
1580
1581 r = NULL;
1582 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1583 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1584 continue;
1585 r = enclosure_dev;
1586 goto out;
1587 }
1588out:
1589 return r;
1590}
1591/**
1592 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1593 * @ioc: per adapter object
1594 * @sas_address: sas address
1595 * @port: hba port entry
1596 * Context: Calling function should acquire ioc->sas_node_lock.
1597 *
1598 * This searches for expander device based on sas_address & port number,
1599 * then returns the sas_node object.
1600 */
1601struct _sas_node *
1602mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1603 u64 sas_address, struct hba_port *port)
1604{
1605 struct _sas_node *sas_expander, *r = NULL;
1606
1607 if (!port)
1608 return r;
1609
1610 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1611 if (sas_expander->sas_address != sas_address)
1612 continue;
1613 if (sas_expander->port != port)
1614 continue;
1615 r = sas_expander;
1616 goto out;
1617 }
1618 out:
1619 return r;
1620}
1621
1622/**
1623 * _scsih_expander_node_add - insert expander device to the list.
1624 * @ioc: per adapter object
1625 * @sas_expander: the sas_device object
1626 * Context: This function will acquire ioc->sas_node_lock.
1627 *
1628 * Adding new object to the ioc->sas_expander_list.
1629 */
1630static void
1631_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1632 struct _sas_node *sas_expander)
1633{
1634 unsigned long flags;
1635
1636 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1637 list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1638 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1639}
1640
1641/**
1642 * _scsih_is_end_device - determines if device is an end device
1643 * @device_info: bitfield providing information about the device.
1644 * Context: none
1645 *
1646 * Return: 1 if end device.
1647 */
1648static int
1649_scsih_is_end_device(u32 device_info)
1650{
1651 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1652 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1653 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1654 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1655 return 1;
1656 else
1657 return 0;
1658}
1659
1660/**
1661 * _scsih_is_nvme_pciescsi_device - determines if
1662 * device is a pcie nvme/scsi device
1663 * @device_info: bitfield providing information about the device.
1664 * Context: none
1665 *
1666 * Returns 1 if device is pcie device type nvme/scsi.
1667 */
1668static int
1669_scsih_is_nvme_pciescsi_device(u32 device_info)
1670{
1671 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1672 == MPI26_PCIE_DEVINFO_NVME) ||
1673 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1674 == MPI26_PCIE_DEVINFO_SCSI))
1675 return 1;
1676 else
1677 return 0;
1678}
1679
1680/**
1681 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1682 * @ioc: per adapter object
1683 * @id: target id
1684 * @channel: channel
1685 * Context: This function will acquire ioc->scsi_lookup_lock.
1686 *
1687 * This will search for a matching channel:id in the scsi_lookup array,
1688 * returning 1 if found.
1689 */
1690static u8
1691_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1692 int channel)
1693{
1694 int smid;
1695 struct scsi_cmnd *scmd;
1696
1697 for (smid = 1;
1698 smid <= ioc->shost->can_queue; smid++) {
1699 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1700 if (!scmd)
1701 continue;
1702 if (scmd->device->id == id &&
1703 scmd->device->channel == channel)
1704 return 1;
1705 }
1706 return 0;
1707}
1708
1709/**
1710 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1711 * @ioc: per adapter object
1712 * @id: target id
1713 * @lun: lun number
1714 * @channel: channel
1715 * Context: This function will acquire ioc->scsi_lookup_lock.
1716 *
1717 * This will search for a matching channel:id:lun in the scsi_lookup array,
1718 * returning 1 if found.
1719 */
1720static u8
1721_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1722 unsigned int lun, int channel)
1723{
1724 int smid;
1725 struct scsi_cmnd *scmd;
1726
1727 for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1728
1729 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1730 if (!scmd)
1731 continue;
1732 if (scmd->device->id == id &&
1733 scmd->device->channel == channel &&
1734 scmd->device->lun == lun)
1735 return 1;
1736 }
1737 return 0;
1738}
1739
459325c4 1740/**
dbec4c90 1741 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
459325c4
C
1742 * @ioc: per adapter object
1743 * @smid: system request message index
1744 *
4beb4867 1745 * Return: the scmd pointer stored against the smid, or NULL.
459325c4
C
1746 * The stored scmd pointer is validated before it is returned.
1747 */
dbec4c90
SPS
1748struct scsi_cmnd *
1749mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
459325c4
C
1750{
1751 struct scsi_cmnd *scmd = NULL;
dbec4c90 1752 struct scsiio_tracker *st;
c2fe742f 1753 Mpi25SCSIIORequest_t *mpi_request;
664f0dce 1754 u16 tag = smid - 1;
459325c4 1755
dbec4c90
SPS
1756 if (smid > 0 &&
1757 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
664f0dce
SR
1758 u32 unique_tag =
1759 ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
f92363d1 1760
c2fe742f
SR
1761 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1762
1763 /*
1764 * If SCSI IO request is outstanding at driver level then
1765 * DevHandle field must be non-zero. If DevHandle is zero
1766 * then it means that this smid is free at driver level,
1767 * so return NULL.
1768 */
1769 if (!mpi_request->DevHandle)
1770 return scmd;
1771
dbec4c90
SPS
1772 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1773 if (scmd) {
1774 st = scsi_cmd_priv(scmd);
e7018314 1775 if (st->cb_idx == 0xFF || st->smid == 0)
dbec4c90 1776 scmd = NULL;
f92363d1
SR
1777 }
1778 }
dbec4c90 1779 return scmd;
f92363d1
SR
1780}
1781
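/*
 * Illustrative only (not part of this file): the unique_tag built above
 * packs the hardware queue number into the upper bits and the per-queue
 * tag (smid - 1) into the lower bits, mirroring blk_mq_unique_tag().  A
 * hedged sketch of the inverse decomposition using the standard blk-mq
 * helpers; the function name is hypothetical.
 */
static void example_decode_unique_tag(u32 unique_tag)
{
	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);	/* hw queue index */
	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);	/* tag == smid - 1 */

	pr_info("hwq(%u), tag(%u), smid(%u)\n", hwq, tag, tag + 1);
}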
cf75d5d6 1782/**
8a7e4c24 1783 * scsih_change_queue_depth - setting device queue depth
cf75d5d6
CH
1784 * @sdev: scsi device struct
1785 * @qdepth: requested queue depth
1786 *
4beb4867 1787 * Return: queue depth.
cf75d5d6 1788 */
8bbb1cf6 1789static int
8a7e4c24 1790scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
f92363d1
SR
1791{
1792 struct Scsi_Host *shost = sdev->host;
1793 int max_depth;
1794 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1795 struct MPT3SAS_DEVICE *sas_device_priv_data;
1796 struct MPT3SAS_TARGET *sas_target_priv_data;
1797 struct _sas_device *sas_device;
1798 unsigned long flags;
1799
1800 max_depth = shost->can_queue;
1801
8dc8d29a
SR
1802 /*
1803 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1804 * is disabled.
1805 */
787f2448 1806 if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
8dc8d29a
SR
1807 goto not_sata;
1808
f92363d1
SR
1809 sas_device_priv_data = sdev->hostdata;
1810 if (!sas_device_priv_data)
1811 goto not_sata;
1812 sas_target_priv_data = sas_device_priv_data->sas_target;
1813 if (!sas_target_priv_data)
1814 goto not_sata;
1815 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1816 goto not_sata;
d1cb5e49 1817
f92363d1 1818 spin_lock_irqsave(&ioc->sas_device_lock, flags);
d1cb5e49
SR
1819 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1820 if (sas_device) {
1821 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1822 max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1823
1824 sas_device_put(sas_device);
1825 }
f92363d1
SR
1826 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1827
1828 not_sata:
1829
1830 if (!sdev->tagged_supported)
1831 max_depth = 1;
1832 if (qdepth > max_depth)
1833 qdepth = max_depth;
5b061980
SR
1834 scsi_change_queue_depth(sdev, qdepth);
1835 sdev_printk(KERN_INFO, sdev,
1836 "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1837 sdev->queue_depth, sdev->tagged_supported,
1838 sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1839 return sdev->queue_depth;
f92363d1
SR
1840}
1841
8dc8d29a
SR
1842/**
1843 * mpt3sas_scsih_change_queue_depth - setting device queue depth
1844 * @sdev: scsi device struct
1845 * @qdepth: requested queue depth
1846 *
1847 * Returns nothing.
1848 */
1849void
1850mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1851{
1852 struct Scsi_Host *shost = sdev->host;
1853 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1854
1855 if (ioc->enable_sdev_max_qd)
1856 qdepth = shost->can_queue;
1857
1858 scsih_change_queue_depth(sdev, qdepth);
1859}
1860
f92363d1 1861/**
8a7e4c24 1862 * scsih_target_alloc - target add routine
f92363d1
SR
1863 * @starget: scsi target struct
1864 *
4beb4867 1865 * Return: 0 if ok. Any other return is assumed to be an error and
f92363d1
SR
1866 * the device is ignored.
1867 */
8bbb1cf6 1868static int
8a7e4c24 1869scsih_target_alloc(struct scsi_target *starget)
f92363d1
SR
1870{
1871 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1872 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1873 struct MPT3SAS_TARGET *sas_target_priv_data;
1874 struct _sas_device *sas_device;
1875 struct _raid_device *raid_device;
d88e1eab 1876 struct _pcie_device *pcie_device;
f92363d1
SR
1877 unsigned long flags;
1878 struct sas_rphy *rphy;
1879
62c4da44
JL
1880 sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1881 GFP_KERNEL);
f92363d1
SR
1882 if (!sas_target_priv_data)
1883 return -ENOMEM;
1884
1885 starget->hostdata = sas_target_priv_data;
1886 sas_target_priv_data->starget = starget;
1887 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1888
1889 /* RAID volumes */
1890 if (starget->channel == RAID_CHANNEL) {
1891 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1892 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1893 starget->channel);
1894 if (raid_device) {
1895 sas_target_priv_data->handle = raid_device->handle;
1896 sas_target_priv_data->sas_address = raid_device->wwid;
1897 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
7786ab6a 1898 if (ioc->is_warpdrive)
6d3a56ed
SR
1899 sas_target_priv_data->raid_device = raid_device;
1900 raid_device->starget = starget;
f92363d1
SR
1901 }
1902 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1903 return 0;
1904 }
1905
d88e1eab
SPS
1906 /* PCIe devices */
1907 if (starget->channel == PCIE_CHANNEL) {
1908 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1909 pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1910 starget->channel);
1911 if (pcie_device) {
1912 sas_target_priv_data->handle = pcie_device->handle;
1913 sas_target_priv_data->sas_address = pcie_device->wwid;
e2f0cdf7 1914 sas_target_priv_data->port = NULL;
d88e1eab
SPS
1915 sas_target_priv_data->pcie_dev = pcie_device;
1916 pcie_device->starget = starget;
1917 pcie_device->id = starget->id;
1918 pcie_device->channel = starget->channel;
1919 sas_target_priv_data->flags |=
1920 MPT_TARGET_FLAGS_PCIE_DEVICE;
1921 if (pcie_device->fast_path)
1922 sas_target_priv_data->flags |=
1923 MPT_TARGET_FASTPATH_IO;
1924 }
1925 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1926 return 0;
1927 }
1928
f92363d1
SR
1929 /* sas/sata devices */
1930 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1931 rphy = dev_to_rphy(starget->dev.parent);
6df6be91 1932 sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
f92363d1
SR
1933
1934 if (sas_device) {
1935 sas_target_priv_data->handle = sas_device->handle;
1936 sas_target_priv_data->sas_address = sas_device->sas_address;
e2f0cdf7 1937 sas_target_priv_data->port = sas_device->port;
d88e1eab 1938 sas_target_priv_data->sas_dev = sas_device;
f92363d1
SR
1939 sas_device->starget = starget;
1940 sas_device->id = starget->id;
1941 sas_device->channel = starget->channel;
1942 if (test_bit(sas_device->handle, ioc->pd_handles))
1943 sas_target_priv_data->flags |=
1944 MPT_TARGET_FLAGS_RAID_COMPONENT;
1945 if (sas_device->fast_path)
d88e1eab
SPS
1946 sas_target_priv_data->flags |=
1947 MPT_TARGET_FASTPATH_IO;
f92363d1
SR
1948 }
1949 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1950
1951 return 0;
1952}
1953
1954/**
8a7e4c24 1955 * scsih_target_destroy - target destroy routine
f92363d1 1956 * @starget: scsi target struct
f92363d1 1957 */
8bbb1cf6 1958static void
8a7e4c24 1959scsih_target_destroy(struct scsi_target *starget)
f92363d1
SR
1960{
1961 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1962 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1963 struct MPT3SAS_TARGET *sas_target_priv_data;
1964 struct _sas_device *sas_device;
1965 struct _raid_device *raid_device;
d88e1eab 1966 struct _pcie_device *pcie_device;
f92363d1 1967 unsigned long flags;
f92363d1
SR
1968
1969 sas_target_priv_data = starget->hostdata;
1970 if (!sas_target_priv_data)
1971 return;
1972
1973 if (starget->channel == RAID_CHANNEL) {
1974 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1975 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1976 starget->channel);
1977 if (raid_device) {
1978 raid_device->starget = NULL;
1979 raid_device->sdev = NULL;
1980 }
1981 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1982 goto out;
1983 }
1984
d88e1eab
SPS
1985 if (starget->channel == PCIE_CHANNEL) {
1986 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1987 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1988 sas_target_priv_data);
1989 if (pcie_device && (pcie_device->starget == starget) &&
1990 (pcie_device->id == starget->id) &&
1991 (pcie_device->channel == starget->channel))
1992 pcie_device->starget = NULL;
1993
1994 if (pcie_device) {
1995 /*
1996 * Corresponding get() is in _scsih_target_alloc()
1997 */
1998 sas_target_priv_data->pcie_dev = NULL;
1999 pcie_device_put(pcie_device);
2000 pcie_device_put(pcie_device);
2001 }
2002 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2003 goto out;
2004 }
2005
f92363d1 2006 spin_lock_irqsave(&ioc->sas_device_lock, flags);
d1cb5e49 2007 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
f92363d1
SR
2008 if (sas_device && (sas_device->starget == starget) &&
2009 (sas_device->id == starget->id) &&
2010 (sas_device->channel == starget->channel))
2011 sas_device->starget = NULL;
2012
d1cb5e49
SR
2013 if (sas_device) {
2014 /*
2015 * Corresponding get() is in _scsih_target_alloc()
2016 */
d88e1eab 2017 sas_target_priv_data->sas_dev = NULL;
d1cb5e49
SR
2018 sas_device_put(sas_device);
2019
2020 sas_device_put(sas_device);
2021 }
f92363d1
SR
2022 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2023
2024 out:
2025 kfree(sas_target_priv_data);
2026 starget->hostdata = NULL;
2027}
2028
2029/**
8a7e4c24 2030 * scsih_slave_alloc - device add routine
f92363d1
SR
2031 * @sdev: scsi device struct
2032 *
4beb4867 2033 * Return: 0 if ok. Any other return is assumed to be an error and
f92363d1
SR
2034 * the device is ignored.
2035 */
8bbb1cf6 2036static int
8a7e4c24 2037scsih_slave_alloc(struct scsi_device *sdev)
f92363d1
SR
2038{
2039 struct Scsi_Host *shost;
2040 struct MPT3SAS_ADAPTER *ioc;
2041 struct MPT3SAS_TARGET *sas_target_priv_data;
2042 struct MPT3SAS_DEVICE *sas_device_priv_data;
2043 struct scsi_target *starget;
2044 struct _raid_device *raid_device;
b65cfedf 2045 struct _sas_device *sas_device;
d88e1eab 2046 struct _pcie_device *pcie_device;
f92363d1
SR
2047 unsigned long flags;
2048
62c4da44
JL
2049 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
2050 GFP_KERNEL);
f92363d1
SR
2051 if (!sas_device_priv_data)
2052 return -ENOMEM;
2053
2054 sas_device_priv_data->lun = sdev->lun;
2055 sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
2056
2057 starget = scsi_target(sdev);
2058 sas_target_priv_data = starget->hostdata;
2059 sas_target_priv_data->num_luns++;
2060 sas_device_priv_data->sas_target = sas_target_priv_data;
2061 sdev->hostdata = sas_device_priv_data;
2062 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
2063 sdev->no_uld_attach = 1;
2064
2065 shost = dev_to_shost(&starget->dev);
2066 ioc = shost_priv(shost);
2067 if (starget->channel == RAID_CHANNEL) {
2068 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2069 raid_device = _scsih_raid_device_find_by_id(ioc,
2070 starget->id, starget->channel);
2071 if (raid_device)
2072 raid_device->sdev = sdev; /* raid is single lun */
2073 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2074 }
d88e1eab
SPS
2075 if (starget->channel == PCIE_CHANNEL) {
2076 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2077 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2078 sas_target_priv_data->sas_address);
2079 if (pcie_device && (pcie_device->starget == NULL)) {
2080 sdev_printk(KERN_INFO, sdev,
2081 "%s : pcie_device->starget set to starget @ %d\n",
2082 __func__, __LINE__);
2083 pcie_device->starget = starget;
2084 }
2085
2086 if (pcie_device)
2087 pcie_device_put(pcie_device);
2088 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
f92363d1 2089
d88e1eab 2090 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
b65cfedf 2091 spin_lock_irqsave(&ioc->sas_device_lock, flags);
d1cb5e49 2092 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7d310f24
SR
2093 sas_target_priv_data->sas_address,
2094 sas_target_priv_data->port);
b65cfedf
SR
2095 if (sas_device && (sas_device->starget == NULL)) {
2096 sdev_printk(KERN_INFO, sdev,
2097 "%s : sas_device->starget set to starget @ %d\n",
d1cb5e49 2098 __func__, __LINE__);
b65cfedf
SR
2099 sas_device->starget = starget;
2100 }
d1cb5e49
SR
2101
2102 if (sas_device)
2103 sas_device_put(sas_device);
2104
b65cfedf
SR
2105 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2106 }
2107
f92363d1
SR
2108 return 0;
2109}
2110
2111/**
8a7e4c24 2112 * scsih_slave_destroy - device destroy routine
f92363d1 2113 * @sdev: scsi device struct
f92363d1 2114 */
8bbb1cf6 2115static void
8a7e4c24 2116scsih_slave_destroy(struct scsi_device *sdev)
f92363d1
SR
2117{
2118 struct MPT3SAS_TARGET *sas_target_priv_data;
2119 struct scsi_target *starget;
2120 struct Scsi_Host *shost;
2121 struct MPT3SAS_ADAPTER *ioc;
2122 struct _sas_device *sas_device;
d88e1eab 2123 struct _pcie_device *pcie_device;
f92363d1
SR
2124 unsigned long flags;
2125
2126 if (!sdev->hostdata)
2127 return;
2128
2129 starget = scsi_target(sdev);
2130 sas_target_priv_data = starget->hostdata;
2131 sas_target_priv_data->num_luns--;
2132
2133 shost = dev_to_shost(&starget->dev);
2134 ioc = shost_priv(shost);
2135
d88e1eab
SPS
2136 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2137 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2138 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
2139 sas_target_priv_data);
2140 if (pcie_device && !sas_target_priv_data->num_luns)
2141 pcie_device->starget = NULL;
2142
2143 if (pcie_device)
2144 pcie_device_put(pcie_device);
2145
2146 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2147
2148 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
f92363d1 2149 spin_lock_irqsave(&ioc->sas_device_lock, flags);
d1cb5e49
SR
2150 sas_device = __mpt3sas_get_sdev_from_target(ioc,
2151 sas_target_priv_data);
f92363d1
SR
2152 if (sas_device && !sas_target_priv_data->num_luns)
2153 sas_device->starget = NULL;
d1cb5e49
SR
2154
2155 if (sas_device)
2156 sas_device_put(sas_device);
f92363d1
SR
2157 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2158 }
2159
2160 kfree(sdev->hostdata);
2161 sdev->hostdata = NULL;
2162}
2163
2164/**
2165 * _scsih_display_sata_capabilities - sata capabilities
2166 * @ioc: per adapter object
2167 * @handle: device handle
2168 * @sdev: scsi device struct
2169 */
2170static void
2171_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2172 u16 handle, struct scsi_device *sdev)
2173{
2174 Mpi2ConfigReply_t mpi_reply;
2175 Mpi2SasDevicePage0_t sas_device_pg0;
2176 u32 ioc_status;
2177 u16 flags;
2178 u32 device_info;
2179
2180 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2181 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
919d8a3f
JP
2182 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2183 __FILE__, __LINE__, __func__);
f92363d1
SR
2184 return;
2185 }
2186
2187 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2188 MPI2_IOCSTATUS_MASK;
2189 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
2190 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2191 __FILE__, __LINE__, __func__);
f92363d1
SR
2192 return;
2193 }
2194
2195 flags = le16_to_cpu(sas_device_pg0.Flags);
2196 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2197
2198 sdev_printk(KERN_INFO, sdev,
2199 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2200 "sw_preserve(%s)\n",
2201 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2202 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2203 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2204 "n",
2205 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2206 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2207 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2208}
2209
2210/*
2211 * raid transport support -
2212 * Enabled for SLES11 and newer; in older kernels the driver will panic when
6c7abffc 2213 * the driver is unloaded and then loaded again - I believe that the subroutine
f92363d1
SR
2214 * raid_class_release() is not cleaning up properly.
2215 */
2216
2217/**
8a7e4c24 2218 * scsih_is_raid - return boolean indicating device is raid volume
4beb4867 2219 * @dev: the device struct object
f92363d1 2220 */
8bbb1cf6 2221static int
8a7e4c24 2222scsih_is_raid(struct device *dev)
f92363d1
SR
2223{
2224 struct scsi_device *sdev = to_scsi_device(dev);
7786ab6a 2225 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
f92363d1 2226
7786ab6a
SR
2227 if (ioc->is_warpdrive)
2228 return 0;
f92363d1
SR
2229 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2230}
2231
cd5897ed
SPS
2232static int
2233scsih_is_nvme(struct device *dev)
2234{
2235 struct scsi_device *sdev = to_scsi_device(dev);
2236
2237 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2238}
2239
f92363d1 2240/**
8a7e4c24 2241 * scsih_get_resync - get raid volume resync percent complete
4beb4867 2242 * @dev: the device struct object
f92363d1 2243 */
8bbb1cf6 2244static void
8a7e4c24 2245scsih_get_resync(struct device *dev)
f92363d1
SR
2246{
2247 struct scsi_device *sdev = to_scsi_device(dev);
2248 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2249 static struct _raid_device *raid_device;
2250 unsigned long flags;
2251 Mpi2RaidVolPage0_t vol_pg0;
2252 Mpi2ConfigReply_t mpi_reply;
2253 u32 volume_status_flags;
2254 u8 percent_complete;
2255 u16 handle;
2256
2257 percent_complete = 0;
2258 handle = 0;
7786ab6a
SR
2259 if (ioc->is_warpdrive)
2260 goto out;
2261
f92363d1
SR
2262 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2263 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2264 sdev->channel);
2265 if (raid_device) {
2266 handle = raid_device->handle;
2267 percent_complete = raid_device->percent_complete;
2268 }
2269 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2270
2271 if (!handle)
2272 goto out;
2273
2274 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2275 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2276 sizeof(Mpi2RaidVolPage0_t))) {
919d8a3f
JP
2277 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2278 __FILE__, __LINE__, __func__);
f92363d1
SR
2279 percent_complete = 0;
2280 goto out;
2281 }
2282
2283 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2284 if (!(volume_status_flags &
2285 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2286 percent_complete = 0;
2287
2288 out:
b130b0d5
SS
2289
2290 switch (ioc->hba_mpi_version_belonged) {
2291 case MPI2_VERSION:
c84b06a4 2292 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
b130b0d5
SS
2293 break;
2294 case MPI25_VERSION:
2295 case MPI26_VERSION:
c84b06a4 2296 raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
b130b0d5
SS
2297 break;
2298 }
f92363d1
SR
2299}
2300
2301/**
8a7e4c24 2302 * scsih_get_state - get raid volume level
4beb4867 2303 * @dev: the device struct object
f92363d1 2304 */
8bbb1cf6 2305static void
8a7e4c24 2306scsih_get_state(struct device *dev)
f92363d1
SR
2307{
2308 struct scsi_device *sdev = to_scsi_device(dev);
2309 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2310 static struct _raid_device *raid_device;
2311 unsigned long flags;
2312 Mpi2RaidVolPage0_t vol_pg0;
2313 Mpi2ConfigReply_t mpi_reply;
2314 u32 volstate;
2315 enum raid_state state = RAID_STATE_UNKNOWN;
2316 u16 handle = 0;
2317
2318 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2319 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2320 sdev->channel);
2321 if (raid_device)
2322 handle = raid_device->handle;
2323 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2324
2325 if (!raid_device)
2326 goto out;
2327
2328 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2329 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2330 sizeof(Mpi2RaidVolPage0_t))) {
919d8a3f
JP
2331 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2332 __FILE__, __LINE__, __func__);
f92363d1
SR
2333 goto out;
2334 }
2335
2336 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2337 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2338 state = RAID_STATE_RESYNCING;
2339 goto out;
2340 }
2341
2342 switch (vol_pg0.VolumeState) {
2343 case MPI2_RAID_VOL_STATE_OPTIMAL:
2344 case MPI2_RAID_VOL_STATE_ONLINE:
2345 state = RAID_STATE_ACTIVE;
2346 break;
2347 case MPI2_RAID_VOL_STATE_DEGRADED:
2348 state = RAID_STATE_DEGRADED;
2349 break;
2350 case MPI2_RAID_VOL_STATE_FAILED:
2351 case MPI2_RAID_VOL_STATE_MISSING:
2352 state = RAID_STATE_OFFLINE;
2353 break;
2354 }
2355 out:
b130b0d5
SS
2356 switch (ioc->hba_mpi_version_belonged) {
2357 case MPI2_VERSION:
c84b06a4 2358 raid_set_state(mpt2sas_raid_template, dev, state);
b130b0d5
SS
2359 break;
2360 case MPI25_VERSION:
2361 case MPI26_VERSION:
c84b06a4 2362 raid_set_state(mpt3sas_raid_template, dev, state);
b130b0d5
SS
2363 break;
2364 }
f92363d1
SR
2365}
2366
2367/**
2368 * _scsih_set_level - set raid level
4beb4867 2369 * @ioc: ?
f92363d1
SR
2370 * @sdev: scsi device struct
2371 * @volume_type: volume type
2372 */
2373static void
c84b06a4
SR
2374_scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2375 struct scsi_device *sdev, u8 volume_type)
f92363d1
SR
2376{
2377 enum raid_level level = RAID_LEVEL_UNKNOWN;
2378
2379 switch (volume_type) {
2380 case MPI2_RAID_VOL_TYPE_RAID0:
2381 level = RAID_LEVEL_0;
2382 break;
2383 case MPI2_RAID_VOL_TYPE_RAID10:
2384 level = RAID_LEVEL_10;
2385 break;
2386 case MPI2_RAID_VOL_TYPE_RAID1E:
2387 level = RAID_LEVEL_1E;
2388 break;
2389 case MPI2_RAID_VOL_TYPE_RAID1:
2390 level = RAID_LEVEL_1;
2391 break;
2392 }
2393
b130b0d5
SS
2394 switch (ioc->hba_mpi_version_belonged) {
2395 case MPI2_VERSION:
c84b06a4 2396 raid_set_level(mpt2sas_raid_template,
b130b0d5
SS
2397 &sdev->sdev_gendev, level);
2398 break;
2399 case MPI25_VERSION:
2400 case MPI26_VERSION:
c84b06a4 2401 raid_set_level(mpt3sas_raid_template,
b130b0d5
SS
2402 &sdev->sdev_gendev, level);
2403 break;
2404 }
f92363d1
SR
2405}
2406
2407
2408/**
2409 * _scsih_get_volume_capabilities - volume capabilities
2410 * @ioc: per adapter object
4beb4867 2411 * @raid_device: the raid_device object
f92363d1 2412 *
4beb4867 2413 * Return: 0 for success, else 1
f92363d1
SR
2414 */
2415static int
2416_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2417 struct _raid_device *raid_device)
2418{
2419 Mpi2RaidVolPage0_t *vol_pg0;
2420 Mpi2RaidPhysDiskPage0_t pd_pg0;
2421 Mpi2SasDevicePage0_t sas_device_pg0;
2422 Mpi2ConfigReply_t mpi_reply;
2423 u16 sz;
2424 u8 num_pds;
2425
2426 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2427 &num_pds)) || !num_pds) {
919d8a3f
JP
2428 dfailprintk(ioc,
2429 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2430 __FILE__, __LINE__, __func__));
f92363d1
SR
2431 return 1;
2432 }
2433
2434 raid_device->num_pds = num_pds;
2435 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2436 sizeof(Mpi2RaidVol0PhysDisk_t));
2437 vol_pg0 = kzalloc(sz, GFP_KERNEL);
2438 if (!vol_pg0) {
919d8a3f
JP
2439 dfailprintk(ioc,
2440 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2441 __FILE__, __LINE__, __func__));
f92363d1
SR
2442 return 1;
2443 }
2444
2445 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2446 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
919d8a3f
JP
2447 dfailprintk(ioc,
2448 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2449 __FILE__, __LINE__, __func__));
f92363d1
SR
2450 kfree(vol_pg0);
2451 return 1;
2452 }
2453
2454 raid_device->volume_type = vol_pg0->VolumeType;
2455
2456 /* figure out what the underlying devices are by
2457 * obtaining the device_info bits for the 1st device
2458 */
2459 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2460 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2461 vol_pg0->PhysDisk[0].PhysDiskNum))) {
2462 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2463 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2464 le16_to_cpu(pd_pg0.DevHandle)))) {
2465 raid_device->device_info =
2466 le32_to_cpu(sas_device_pg0.DeviceInfo);
2467 }
2468 }
2469
2470 kfree(vol_pg0);
2471 return 0;
2472}
2473
f92363d1
SR
2474/**
2475 * _scsih_enable_tlr - setting TLR flags
2476 * @ioc: per adapter object
2477 * @sdev: scsi device struct
2478 *
2479 * Enabling Transaction Layer Retries for tape devices when
2480 * vpd page 0x90 is present
2481 *
2482 */
2483static void
2484_scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2485{
2486
2487 /* only for TAPE */
2488 if (sdev->type != TYPE_TAPE)
2489 return;
2490
2491 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2492 return;
2493
2494 sas_enable_tlr(sdev);
2495 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2496 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2497 return;
2498
2499}
2500
2501/**
8a7e4c24 2502 * scsih_slave_configure - device configure routine.
f92363d1
SR
2503 * @sdev: scsi device struct
2504 *
4beb4867 2505 * Return: 0 if ok. Any other return is assumed to be an error and
f92363d1
SR
2506 * the device is ignored.
2507 */
8bbb1cf6 2508static int
8a7e4c24 2509scsih_slave_configure(struct scsi_device *sdev)
f92363d1
SR
2510{
2511 struct Scsi_Host *shost = sdev->host;
2512 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2513 struct MPT3SAS_DEVICE *sas_device_priv_data;
2514 struct MPT3SAS_TARGET *sas_target_priv_data;
2515 struct _sas_device *sas_device;
d1b01d14 2516 struct _pcie_device *pcie_device;
f92363d1
SR
2517 struct _raid_device *raid_device;
2518 unsigned long flags;
2519 int qdepth;
2520 u8 ssp_target = 0;
2521 char *ds = "";
2522 char *r_level = "";
2523 u16 handle, volume_handle = 0;
2524 u64 volume_wwid = 0;
2525
2526 qdepth = 1;
2527 sas_device_priv_data = sdev->hostdata;
2528 sas_device_priv_data->configured_lun = 1;
2529 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2530 sas_target_priv_data = sas_device_priv_data->sas_target;
2531 handle = sas_target_priv_data->handle;
2532
2533 /* raid volume handling */
2534 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2535
2536 spin_lock_irqsave(&ioc->raid_device_lock, flags);
c84b06a4 2537 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
f92363d1
SR
2538 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2539 if (!raid_device) {
919d8a3f
JP
2540 dfailprintk(ioc,
2541 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2542 __FILE__, __LINE__, __func__));
f92363d1
SR
2543 return 1;
2544 }
2545
2546 if (_scsih_get_volume_capabilities(ioc, raid_device)) {
919d8a3f
JP
2547 dfailprintk(ioc,
2548 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2549 __FILE__, __LINE__, __func__));
f92363d1
SR
2550 return 1;
2551 }
2552
7786ab6a
SR
2553 /*
2554 * WARPDRIVE: Initialize the required data for Direct IO
2555 */
c84b06a4 2556 mpt3sas_init_warpdrive_properties(ioc, raid_device);
f92363d1
SR
2557
2558 /* RAID Queue Depth Support
2559 * IS volume = underlying qdepth of drive type, either
2560 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2561 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2562 */
2563 if (raid_device->device_info &
2564 MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2565 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2566 ds = "SSP";
2567 } else {
2568 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
199fd79a 2569 if (raid_device->device_info &
f92363d1
SR
2570 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2571 ds = "SATA";
2572 else
2573 ds = "STP";
2574 }
2575
2576 switch (raid_device->volume_type) {
2577 case MPI2_RAID_VOL_TYPE_RAID0:
2578 r_level = "RAID0";
2579 break;
2580 case MPI2_RAID_VOL_TYPE_RAID1E:
2581 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2582 if (ioc->manu_pg10.OEMIdentifier &&
2583 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2584 MFG10_GF0_R10_DISPLAY) &&
2585 !(raid_device->num_pds % 2))
2586 r_level = "RAID10";
2587 else
2588 r_level = "RAID1E";
2589 break;
2590 case MPI2_RAID_VOL_TYPE_RAID1:
2591 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2592 r_level = "RAID1";
2593 break;
2594 case MPI2_RAID_VOL_TYPE_RAID10:
2595 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2596 r_level = "RAID10";
2597 break;
2598 case MPI2_RAID_VOL_TYPE_UNKNOWN:
2599 default:
2600 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2601 r_level = "RAIDX";
2602 break;
2603 }
2604
7786ab6a
SR
2605 if (!ioc->hide_ir_msg)
2606 sdev_printk(KERN_INFO, sdev,
2607 "%s: handle(0x%04x), wwid(0x%016llx),"
2608 " pd_count(%d), type(%s)\n",
2609 r_level, raid_device->handle,
2610 (unsigned long long)raid_device->wwid,
2611 raid_device->num_pds, ds);
f92363d1 2612
6c197093
C
2613 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2614 blk_queue_max_hw_sectors(sdev->request_queue,
2615 MPT3SAS_RAID_MAX_SECTORS);
2616 sdev_printk(KERN_INFO, sdev,
2617 "Set queue's max_sector to: %u\n",
2618 MPT3SAS_RAID_MAX_SECTORS);
2619 }
2620
8dc8d29a 2621 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
f92363d1 2622
7786ab6a
SR
2623 /* raid transport support */
2624 if (!ioc->is_warpdrive)
c84b06a4 2625 _scsih_set_level(ioc, sdev, raid_device->volume_type);
f92363d1
SR
2626 return 0;
2627 }
2628
2629 /* non-raid handling */
2630 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2631 if (mpt3sas_config_get_volume_handle(ioc, handle,
2632 &volume_handle)) {
919d8a3f
JP
2633 dfailprintk(ioc,
2634 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2635 __FILE__, __LINE__, __func__));
f92363d1
SR
2636 return 1;
2637 }
2638 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2639 volume_handle, &volume_wwid)) {
919d8a3f
JP
2640 dfailprintk(ioc,
2641 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2642 __FILE__, __LINE__, __func__));
f92363d1
SR
2643 return 1;
2644 }
2645 }
2646
d1b01d14
SPS
2647 /* PCIe handling */
2648 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2649 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2650 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2651 sas_device_priv_data->sas_target->sas_address);
2652 if (!pcie_device) {
2653 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
919d8a3f
JP
2654 dfailprintk(ioc,
2655 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2656 __FILE__, __LINE__, __func__));
d1b01d14
SPS
2657 return 1;
2658 }
2659
787f2448 2660 qdepth = ioc->max_nvme_qd;
d1b01d14
SPS
2661 ds = "NVMe";
2662 sdev_printk(KERN_INFO, sdev,
2663 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2664 ds, handle, (unsigned long long)pcie_device->wwid,
2665 pcie_device->port_num);
2666 if (pcie_device->enclosure_handle != 0)
2667 sdev_printk(KERN_INFO, sdev,
2668 "%s: enclosure logical id(0x%016llx), slot(%d)\n",
2669 ds,
2670 (unsigned long long)pcie_device->enclosure_logical_id,
2671 pcie_device->slot);
2672 if (pcie_device->connector_name[0] != '\0')
2673 sdev_printk(KERN_INFO, sdev,
2674 "%s: enclosure level(0x%04x),"
2675 "connector name( %s)\n", ds,
2676 pcie_device->enclosure_level,
2677 pcie_device->connector_name);
d1b01d14
SPS
2678
2679 if (pcie_device->nvme_mdts)
2680 blk_queue_max_hw_sectors(sdev->request_queue,
2681 pcie_device->nvme_mdts/512);
20a04433
C
2682
2683 pcie_device_put(pcie_device);
2684 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8dc8d29a 2685 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
d1b01d14
SPS
2686 /* Enable the QUEUE_FLAG_NOMERGES flag so that IOs won't be
2687 ** merged, which avoids the holes that would otherwise be
2688 ** created by the merging operation.
2689 **/
8b904b5b 2690 blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
d1b01d14
SPS
2691 sdev->request_queue);
2692 blk_queue_virt_boundary(sdev->request_queue,
2693 ioc->page_size - 1);
2694 return 0;
2695 }
2696
f92363d1 2697 spin_lock_irqsave(&ioc->sas_device_lock, flags);
d1cb5e49 2698 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7d310f24
SR
2699 sas_device_priv_data->sas_target->sas_address,
2700 sas_device_priv_data->sas_target->port);
f92363d1
SR
2701 if (!sas_device) {
2702 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
919d8a3f
JP
2703 dfailprintk(ioc,
2704 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2705 __FILE__, __LINE__, __func__));
f92363d1
SR
2706 return 1;
2707 }
2708
2709 sas_device->volume_handle = volume_handle;
2710 sas_device->volume_wwid = volume_wwid;
2711 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
787f2448
SP
2712 qdepth = (sas_device->port_type > 1) ?
2713 ioc->max_wideport_qd : ioc->max_narrowport_qd;
f92363d1 2714 ssp_target = 1;
30158dc9
SS
2715 if (sas_device->device_info &
2716 MPI2_SAS_DEVICE_INFO_SEP) {
2717 sdev_printk(KERN_WARNING, sdev,
2718 "set ignore_delay_remove for handle(0x%04x)\n",
2719 sas_device_priv_data->sas_target->handle);
2720 sas_device_priv_data->ignore_delay_remove = 1;
2721 ds = "SES";
2722 } else
2723 ds = "SSP";
f92363d1 2724 } else {
787f2448 2725 qdepth = ioc->max_sata_qd;
f92363d1
SR
2726 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2727 ds = "STP";
2728 else if (sas_device->device_info &
2729 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2730 ds = "SATA";
2731 }
2732
2733 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2734 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2735 ds, handle, (unsigned long long)sas_device->sas_address,
2736 sas_device->phy, (unsigned long long)sas_device->device_name);
75888956
SR
2737
2738 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
f92363d1 2739
d1cb5e49 2740 sas_device_put(sas_device);
f92363d1
SR
2741 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2742
2743 if (!ssp_target)
2744 _scsih_display_sata_capabilities(ioc, handle, sdev);
2745
2746
8dc8d29a 2747 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
f92363d1
SR
2748
2749 if (ssp_target) {
2750 sas_read_port_mode_page(sdev);
2751 _scsih_enable_tlr(ioc, sdev);
2752 }
2753
2754 return 0;
2755}
2756
2757/**
8a7e4c24 2758 * scsih_bios_param - fetch head, sector, cylinder info for a disk
f92363d1
SR
2759 * @sdev: scsi device struct
2760 * @bdev: pointer to block device context
2761 * @capacity: device size (in 512 byte sectors)
2762 * @params: three element array to place output:
2763 * params[0] number of heads (max 255)
2764 * params[1] number of sectors (max 63)
2765 * params[2] number of cylinders
f92363d1 2766 */
8bbb1cf6 2767static int
8a7e4c24 2768scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
f92363d1
SR
2769 sector_t capacity, int params[])
2770{
2771 int heads;
2772 int sectors;
2773 sector_t cylinders;
2774 ulong dummy;
2775
2776 heads = 64;
2777 sectors = 32;
2778
2779 dummy = heads * sectors;
2780 cylinders = capacity;
2781 sector_div(cylinders, dummy);
2782
2783 /*
2784 * Handle extended translation size for logical drives
2785 * > 1Gb
2786 */
2787 if ((ulong)capacity >= 0x200000) {
2788 heads = 255;
2789 sectors = 63;
2790 dummy = heads * sectors;
2791 cylinders = capacity;
2792 sector_div(cylinders, dummy);
2793 }
2794
2795 /* return result */
2796 params[0] = heads;
2797 params[1] = sectors;
2798 params[2] = cylinders;
2799
2800 return 0;
2801}
2802
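/*
 * Illustrative only (not part of this file): a worked example of the
 * geometry math above, assuming a hypothetical capacity of 16,065,000
 * 512-byte sectors (~7.7 GiB).  Since capacity >= 0x200000 sectors (1 GiB),
 * the extended translation applies:
 *
 *	params[0] = 255 heads
 *	params[1] = 63 sectors
 *	params[2] = 16,065,000 / (255 * 63) = 1000 cylinders
 *
 * Below 0x200000 sectors the default 64 x 32 geometry is kept instead.
 */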
2803/**
2804 * _scsih_response_code - translation of device response code
2805 * @ioc: per adapter object
2806 * @response_code: response code returned by the device
f92363d1
SR
2807 */
2808static void
2809_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2810{
2811 char *desc;
2812
2813 switch (response_code) {
2814 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2815 desc = "task management request completed";
2816 break;
2817 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2818 desc = "invalid frame";
2819 break;
2820 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2821 desc = "task management request not supported";
2822 break;
2823 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2824 desc = "task management request failed";
2825 break;
2826 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2827 desc = "task management request succeeded";
2828 break;
2829 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2830 desc = "invalid lun";
2831 break;
2832 case 0xA:
2833 desc = "overlapped tag attempted";
2834 break;
2835 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2836 desc = "task queued, however not sent to target";
2837 break;
2838 default:
2839 desc = "unknown";
2840 break;
2841 }
919d8a3f 2842 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
f92363d1
SR
2843}
2844
2845/**
2846 * _scsih_tm_done - tm completion routine
2847 * @ioc: per adapter object
2848 * @smid: system request message index
2849 * @msix_index: MSIX table index supplied by the OS
2850 * @reply: reply message frame(lower 32bit addr)
2851 * Context: none.
2852 *
2853 * The callback handler when using scsih_issue_tm.
2854 *
4beb4867
BVA
2855 * Return: 1 meaning mf should be freed from _base_interrupt
2856 * 0 means the mf is freed from this function.
f92363d1
SR
2857 */
2858static u8
2859_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2860{
2861 MPI2DefaultReply_t *mpi_reply;
2862
2863 if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2864 return 1;
2865 if (ioc->tm_cmds.smid != smid)
2866 return 1;
f92363d1
SR
2867 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2868 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
2869 if (mpi_reply) {
2870 memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2871 ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2872 }
2873 ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2874 complete(&ioc->tm_cmds.done);
2875 return 1;
2876}
2877
2878/**
2879 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2880 * @ioc: per adapter object
2881 * @handle: device handle
2882 *
2883 * During a task management request, we need to freeze the device queue.
2884 */
2885void
2886mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2887{
2888 struct MPT3SAS_DEVICE *sas_device_priv_data;
2889 struct scsi_device *sdev;
2890 u8 skip = 0;
2891
2892 shost_for_each_device(sdev, ioc->shost) {
2893 if (skip)
2894 continue;
2895 sas_device_priv_data = sdev->hostdata;
2896 if (!sas_device_priv_data)
2897 continue;
2898 if (sas_device_priv_data->sas_target->handle == handle) {
2899 sas_device_priv_data->sas_target->tm_busy = 1;
2900 skip = 1;
2901 ioc->ignore_loginfos = 1;
2902 }
2903 }
2904}
2905
2906/**
2907 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2908 * @ioc: per adapter object
2909 * @handle: device handle
2910 *
2911 * During a task management request, we need to freeze the device queue.
2912 */
2913void
2914mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2915{
2916 struct MPT3SAS_DEVICE *sas_device_priv_data;
2917 struct scsi_device *sdev;
2918 u8 skip = 0;
2919
2920 shost_for_each_device(sdev, ioc->shost) {
2921 if (skip)
2922 continue;
2923 sas_device_priv_data = sdev->hostdata;
2924 if (!sas_device_priv_data)
2925 continue;
2926 if (sas_device_priv_data->sas_target->handle == handle) {
2927 sas_device_priv_data->sas_target->tm_busy = 0;
2928 skip = 1;
2929 ioc->ignore_loginfos = 0;
2930 }
2931 }
2932}
2933
711a923c
SP
2934/**
2935 * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
e330c969
LJ
2936 * @ioc: per adapter object
2937 * @channel: the channel assigned by the OS
711a923c
SP
2938 * @id: the id assigned by the OS
2939 * @lun: lun number
2940 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2941 * @smid_task: smid assigned to the task
2942 *
2943 * Check whether the TM has aborted the timed out SCSI command; if
2944 * it has, return SUCCESS, otherwise return FAILED.
2945 */
2946static int
2947scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2948 uint id, uint lun, u8 type, u16 smid_task)
2949{
2950
2951 if (smid_task <= ioc->shost->can_queue) {
2952 switch (type) {
2953 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2954 if (!(_scsih_scsi_lookup_find_by_target(ioc,
2955 id, channel)))
2956 return SUCCESS;
2957 break;
2958 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2959 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2960 if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2961 lun, channel)))
2962 return SUCCESS;
2963 break;
2964 default:
2965 return SUCCESS;
2966 }
2967 } else if (smid_task == ioc->scsih_cmds.smid) {
2968 if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2969 (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2970 return SUCCESS;
2971 } else if (smid_task == ioc->ctl_cmds.smid) {
2972 if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2973 (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2974 return SUCCESS;
2975 }
2976
2977 return FAILED;
2978}
2979
2980/**
2981 * scsih_tm_post_processing - post processing of target & LUN reset
e330c969 2982 * @ioc: per adapter object
711a923c 2983 * @handle: device handle
e330c969 2984 * @channel: the channel assigned by the OS
711a923c
SP
2985 * @id: the id assigned by the OS
2986 * @lun: lun number
2987 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2988 * @smid_task: smid assigned to the task
2989 *
2990 * Post processing of target & LUN reset. Due to interrupt latency
2991 * issues it is possible that the interrupt for the aborted IO has
2992 * not been received yet. So before returning a failure status, poll
2993 * the reply descriptor pools for the reply of the timed out SCSI
2994 * command. Return FAILED if that reply is not received, otherwise
2995 * return SUCCESS.
2996 */
2997static int
2998scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2999 uint channel, uint id, uint lun, u8 type, u16 smid_task)
3000{
3001 int rc;
3002
3003 rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3004 if (rc == SUCCESS)
3005 return rc;
3006
3007 ioc_info(ioc,
3008 "Poll ReplyDescriptor queues for completion of"
3009 " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
3010 smid_task, type, handle);
3011
3012 /*
3013 * Due to interrupt latency issues, driver may receive interrupt for
3014 * TM first and then for aborted SCSI IO command. So, poll all the
3015 * ReplyDescriptor pools before returning the FAILED status to SML.
3016 */
3017 mpt3sas_base_mask_interrupts(ioc);
3018 mpt3sas_base_sync_reply_irqs(ioc, 1);
3019 mpt3sas_base_unmask_interrupts(ioc);
3020
3021 return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3022}
3023
f92363d1
SR
3024/**
3025 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
3026 * @ioc: per adapter struct
6da999fe 3027 * @handle: device handle
521e9c0b
SP
3028 * @channel: the channel assigned by the OS
3029 * @id: the id assigned by the OS
f92363d1
SR
3030 * @lun: lun number
3031 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
3032 * @smid_task: smid assigned to the task
6da999fe 3033 * @msix_task: MSIX table index supplied by the OS
f92363d1 3034 * @timeout: timeout in seconds
c1a6c5ac 3035 * @tr_method: Target Reset Method
f92363d1
SR
3036 * Context: user
3037 *
3038 * A generic API for sending task management requests to firmware.
3039 *
3040 * The callback index is set inside `ioc->tm_cb_idx`.
6da999fe 3041 * The caller is responsible for checking for outstanding commands.
f92363d1 3042 *
4beb4867 3043 * Return: SUCCESS or FAILED.
f92363d1
SR
3044 */
3045int
521e9c0b
SP
3046mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
3047 uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
3048 u8 timeout, u8 tr_method)
f92363d1
SR
3049{
3050 Mpi2SCSITaskManagementRequest_t *mpi_request;
3051 Mpi2SCSITaskManagementReply_t *mpi_reply;
711a923c 3052 Mpi25SCSIIORequest_t *request;
f92363d1
SR
3053 u16 smid = 0;
3054 u32 ioc_state;
f92363d1 3055 int rc;
c6bdb6a1 3056 u8 issue_reset = 0;
f92363d1 3057
96902835
CO
3058 lockdep_assert_held(&ioc->tm_cmds.mutex);
3059
f92363d1 3060 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
4dc74b2e 3061 ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
96902835 3062 return FAILED;
f92363d1
SR
3063 }
3064
3065 if (ioc->shost_recovery || ioc->remove_host ||
3066 ioc->pci_error_recovery) {
4dc74b2e 3067 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
96902835 3068 return FAILED;
f92363d1
SR
3069 }
3070
3071 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3072 if (ioc_state & MPI2_DOORBELL_USED) {
919d8a3f 3073 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
98c56ad3 3074 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
96902835 3075 return (!rc) ? SUCCESS : FAILED;
f92363d1
SR
3076 }
3077
3078 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
c5977718 3079 mpt3sas_print_fault_code(ioc, ioc_state &
f92363d1 3080 MPI2_DOORBELL_DATA_MASK);
98c56ad3 3081 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
96902835 3082 return (!rc) ? SUCCESS : FAILED;
fce0aa08
SR
3083 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3084 MPI2_IOC_STATE_COREDUMP) {
c5977718 3085 mpt3sas_print_coredump_info(ioc, ioc_state &
fce0aa08
SR
3086 MPI2_DOORBELL_DATA_MASK);
3087 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3088 return (!rc) ? SUCCESS : FAILED;
f92363d1
SR
3089 }
3090
3091 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
3092 if (!smid) {
919d8a3f 3093 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
96902835 3094 return FAILED;
f92363d1
SR
3095 }
3096
919d8a3f
JP
3097 dtmprintk(ioc,
3098 ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
3099 handle, type, smid_task, timeout, tr_method));
f92363d1
SR
3100 ioc->tm_cmds.status = MPT3_CMD_PENDING;
3101 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3102 ioc->tm_cmds.smid = smid;
3103 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3104 memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
3105 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3106 mpi_request->DevHandle = cpu_to_le16(handle);
3107 mpi_request->TaskType = type;
711a923c
SP
3108 if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
3109 type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
3110 mpi_request->MsgFlags = tr_method;
f92363d1
SR
3111 mpi_request->TaskMID = cpu_to_le16(smid_task);
3112 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
3113 mpt3sas_scsih_set_tm_flag(ioc, handle);
3114 init_completion(&ioc->tm_cmds.done);
078a4cc1 3115 ioc->put_smid_hi_priority(ioc, smid, msix_task);
8bbb1cf6 3116 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
f92363d1 3117 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
c6bdb6a1
SR
3118 mpt3sas_check_cmd_timeout(ioc,
3119 ioc->tm_cmds.status, mpi_request,
3120 sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
3121 if (issue_reset) {
98c56ad3
CO
3122 rc = mpt3sas_base_hard_reset_handler(ioc,
3123 FORCE_BIG_HAMMER);
f92363d1 3124 rc = (!rc) ? SUCCESS : FAILED;
96902835 3125 goto out;
f92363d1
SR
3126 }
3127 }
3128
5f0dfb7a 3129 /* sync IRQs in case those were busy during flush. */
711a923c 3130 mpt3sas_base_sync_reply_irqs(ioc, 0);
5f0dfb7a 3131
f92363d1
SR
3132 if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
3133 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3134 mpi_reply = ioc->tm_cmds.reply;
919d8a3f
JP
3135 dtmprintk(ioc,
3136 ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
3137 le16_to_cpu(mpi_reply->IOCStatus),
3138 le32_to_cpu(mpi_reply->IOCLogInfo),
3139 le32_to_cpu(mpi_reply->TerminationCount)));
f92363d1
SR
3140 if (ioc->logging_level & MPT_DEBUG_TM) {
3141 _scsih_response_code(ioc, mpi_reply->ResponseCode);
3142 if (mpi_reply->IOCStatus)
3143 _debug_dump_mf(mpi_request,
3144 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
3145 }
3146 }
711a923c
SP
3147
3148 switch (type) {
3149 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
3150 rc = SUCCESS;
3151 /*
3152 * If the DevHandle field in smid_task's entry of the request pool
3153 * doesn't match the device handle on which this task abort TM was
3154 * received, the TM has successfully aborted the timed out command:
3155 * smid_task's entry in the request pool is memset to zero once the
3156 * timed out command is returned to the SML. If the command was not
3157 * aborted, smid_task's entry won't be cleared, it will still carry
3158 * the same DevHandle value on which this task abort TM was
3159 * received, and the driver will return the TM status as FAILED.
3160 *
3161 */
3162 request = mpt3sas_base_get_msg_frame(ioc, smid_task);
3163 if (le16_to_cpu(request->DevHandle) != handle)
3164 break;
3165
3166 ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
3167 "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
3168 handle, timeout, tr_method, smid_task, msix_task);
3169 rc = FAILED;
3170 break;
3171
3172 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3173 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3174 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
3175 rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
3176 type, smid_task);
3177 break;
3178 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
3179 rc = SUCCESS;
3180 break;
3181 default:
3182 rc = FAILED;
3183 break;
3184 }
f92363d1 3185
96902835 3186out:
f92363d1
SR
3187 mpt3sas_scsih_clear_tm_flag(ioc, handle);
3188 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
f92363d1 3189 return rc;
96902835 3190}
f92363d1 3191
96902835 3192int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
521e9c0b
SP
3193 uint channel, uint id, u64 lun, u8 type, u16 smid_task,
3194 u16 msix_task, u8 timeout, u8 tr_method)
96902835
CO
3195{
3196 int ret;
3197
3198 mutex_lock(&ioc->tm_cmds.mutex);
521e9c0b
SP
3199 ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
3200 smid_task, msix_task, timeout, tr_method);
96902835
CO
3201 mutex_unlock(&ioc->tm_cmds.mutex);
3202
3203 return ret;
f92363d1
SR
3204}
3205
3206/**
3207 * _scsih_tm_display_info - displays info about the device
3208 * @ioc: per adapter struct
3209 * @scmd: pointer to scsi command object
3210 *
3211 * Called by task management callback handlers.
3212 */
3213static void
3214_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
3215{
3216 struct scsi_target *starget = scmd->device->sdev_target;
3217 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
3218 struct _sas_device *sas_device = NULL;
6ce2f1d1 3219 struct _pcie_device *pcie_device = NULL;
f92363d1
SR
3220 unsigned long flags;
3221 char *device_str = NULL;
3222
3223 if (!priv_target)
3224 return;
7786ab6a
SR
3225 if (ioc->hide_ir_msg)
3226 device_str = "WarpDrive";
3227 else
3228 device_str = "volume";
f92363d1
SR
3229
3230 scsi_print_command(scmd);
3231 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3232 starget_printk(KERN_INFO, starget,
3233 "%s handle(0x%04x), %s wwid(0x%016llx)\n",
3234 device_str, priv_target->handle,
3235 device_str, (unsigned long long)priv_target->sas_address);
6ce2f1d1
SPS
3236
3237 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
3238 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3239 pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
3240 if (pcie_device) {
3241 starget_printk(KERN_INFO, starget,
3242 "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
3243 pcie_device->handle,
3244 (unsigned long long)pcie_device->wwid,
3245 pcie_device->port_num);
3246 if (pcie_device->enclosure_handle != 0)
3247 starget_printk(KERN_INFO, starget,
3248 "enclosure logical id(0x%016llx), slot(%d)\n",
3249 (unsigned long long)
3250 pcie_device->enclosure_logical_id,
3251 pcie_device->slot);
3252 if (pcie_device->connector_name[0] != '\0')
3253 starget_printk(KERN_INFO, starget,
3254 "enclosure level(0x%04x), connector name( %s)\n",
3255 pcie_device->enclosure_level,
3256 pcie_device->connector_name);
3257 pcie_device_put(pcie_device);
3258 }
3259 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3260
f92363d1
SR
3261 } else {
3262 spin_lock_irqsave(&ioc->sas_device_lock, flags);
d1cb5e49 3263 sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
f92363d1
SR
3264 if (sas_device) {
3265 if (priv_target->flags &
3266 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3267 starget_printk(KERN_INFO, starget,
3268 "volume handle(0x%04x), "
3269 "volume wwid(0x%016llx)\n",
3270 sas_device->volume_handle,
3271 (unsigned long long)sas_device->volume_wwid);
3272 }
3273 starget_printk(KERN_INFO, starget,
3274 "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
3275 sas_device->handle,
3276 (unsigned long long)sas_device->sas_address,
3277 sas_device->phy);
75888956
SR
3278
3279 _scsih_display_enclosure_chassis_info(NULL, sas_device,
3280 NULL, starget);
d1cb5e49
SR
3281
3282 sas_device_put(sas_device);
f92363d1
SR
3283 }
3284 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3285 }
3286}
3287
3288/**
8a7e4c24 3289 * scsih_abort - eh threads main abort routine
f92363d1
SR
3290 * @scmd: pointer to scsi command object
3291 *
4beb4867 3292 * Return: SUCCESS if command aborted else FAILED
f92363d1 3293 */
8bbb1cf6 3294static int
8a7e4c24 3295scsih_abort(struct scsi_cmnd *scmd)
f92363d1
SR
3296{
3297 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3298 struct MPT3SAS_DEVICE *sas_device_priv_data;
dbec4c90 3299 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
f92363d1
SR
3300 u16 handle;
3301 int r;
3302
c1a6c5ac
C
3303 u8 timeout = 30;
3304 struct _pcie_device *pcie_device = NULL;
5b061980
SR
3305 sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
3306 "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
3307 scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
24b3c922 3308 (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
f92363d1
SR
3309 _scsih_tm_display_info(ioc, scmd);
3310
3311 sas_device_priv_data = scmd->device->hostdata;
9ff549ff
MFO
3312 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3313 ioc->remove_host) {
f92363d1 3314 sdev_printk(KERN_INFO, scmd->device,
5b061980 3315 "device been deleted! scmd(0x%p)\n", scmd);
f92363d1 3316 scmd->result = DID_NO_CONNECT << 16;
b0c30079 3317 scsi_done(scmd);
f92363d1
SR
3318 r = SUCCESS;
3319 goto out;
3320 }
3321
dbec4c90
SPS
3322 /* check for completed command */
3323 if (st == NULL || st->cb_idx == 0xFF) {
5b061980
SR
3324 sdev_printk(KERN_INFO, scmd->device, "No reference found at "
3325 "driver, assuming scmd(0x%p) might have completed\n", scmd);
f92363d1
SR
3326 scmd->result = DID_RESET << 16;
3327 r = SUCCESS;
3328 goto out;
3329 }
3330
3331 /* for hidden raid components and volumes this is not supported */
3332 if (sas_device_priv_data->sas_target->flags &
3333 MPT_TARGET_FLAGS_RAID_COMPONENT ||
3334 sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3335 scmd->result = DID_RESET << 16;
3336 r = FAILED;
3337 goto out;
3338 }
3339
3340 mpt3sas_halt_firmware(ioc);
3341
3342 handle = sas_device_priv_data->sas_target->handle;
c1a6c5ac 3343 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
5bb309db
SP
3344 if (pcie_device && (!ioc->tm_custom_handling) &&
3345 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
c1a6c5ac 3346 timeout = ioc->nvme_abort_timeout;
521e9c0b
SP
3347 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3348 scmd->device->id, scmd->device->lun,
6da999fe 3349 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
c1a6c5ac 3350 st->smid, st->msix_io, timeout, 0);
6da999fe 3351 /* Command must be cleared after abort */
dbec4c90 3352 if (r == SUCCESS && st->cb_idx != 0xFF)
6da999fe 3353 r = FAILED;
f92363d1 3354 out:
5b061980 3355 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
f92363d1 3356 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
c1a6c5ac
C
3357 if (pcie_device)
3358 pcie_device_put(pcie_device);
f92363d1
SR
3359 return r;
3360}
3361
3362/**
8a7e4c24 3363 * scsih_dev_reset - eh threads main device reset routine
f92363d1
SR
3364 * @scmd: pointer to scsi command object
3365 *
4beb4867 3366 * Return: SUCCESS if the device was reset else FAILED
f92363d1 3367 */
8bbb1cf6 3368static int
8a7e4c24 3369scsih_dev_reset(struct scsi_cmnd *scmd)
f92363d1
SR
3370{
3371 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3372 struct MPT3SAS_DEVICE *sas_device_priv_data;
d1cb5e49 3373 struct _sas_device *sas_device = NULL;
c1a6c5ac 3374 struct _pcie_device *pcie_device = NULL;
f92363d1 3375 u16 handle;
c1a6c5ac
C
3376 u8 tr_method = 0;
3377 u8 tr_timeout = 30;
f92363d1
SR
3378 int r;
3379
d1cb5e49
SR
3380 struct scsi_target *starget = scmd->device->sdev_target;
3381 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3382
f92363d1 3383 sdev_printk(KERN_INFO, scmd->device,
5b061980 3384 "attempting device reset! scmd(0x%p)\n", scmd);
f92363d1
SR
3385 _scsih_tm_display_info(ioc, scmd);
3386
3387 sas_device_priv_data = scmd->device->hostdata;
9ff549ff
MFO
3388 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3389 ioc->remove_host) {
f92363d1 3390 sdev_printk(KERN_INFO, scmd->device,
5b061980 3391 "device been deleted! scmd(0x%p)\n", scmd);
f92363d1 3392 scmd->result = DID_NO_CONNECT << 16;
b0c30079 3393 scsi_done(scmd);
f92363d1
SR
3394 r = SUCCESS;
3395 goto out;
3396 }
3397
3398 /* for hidden raid components obtain the volume_handle */
3399 handle = 0;
3400 if (sas_device_priv_data->sas_target->flags &
3401 MPT_TARGET_FLAGS_RAID_COMPONENT) {
d1cb5e49
SR
3402 sas_device = mpt3sas_get_sdev_from_target(ioc,
3403 target_priv_data);
f92363d1
SR
3404 if (sas_device)
3405 handle = sas_device->volume_handle;
f92363d1
SR
3406 } else
3407 handle = sas_device_priv_data->sas_target->handle;
3408
3409 if (!handle) {
3410 scmd->result = DID_RESET << 16;
3411 r = FAILED;
3412 goto out;
3413 }
3414
c1a6c5ac
C
3415 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3416
5bb309db
SP
3417 if (pcie_device && (!ioc->tm_custom_handling) &&
3418 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
c1a6c5ac
C
3419 tr_timeout = pcie_device->reset_timeout;
3420 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3421 } else
3422 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
5bb309db 3423
521e9c0b
SP
3424 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3425 scmd->device->id, scmd->device->lun,
c1a6c5ac
C
3426 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
3427 tr_timeout, tr_method);
74fcfa53 3428 /* Check for busy commands after reset */
8278807a 3429 if (r == SUCCESS && scsi_device_busy(scmd->device))
74fcfa53 3430 r = FAILED;
f92363d1 3431 out:
5b061980 3432 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
f92363d1 3433 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
d1cb5e49
SR
3434
3435 if (sas_device)
3436 sas_device_put(sas_device);
c1a6c5ac
C
3437 if (pcie_device)
3438 pcie_device_put(pcie_device);
d1cb5e49 3439
f92363d1
SR
3440 return r;
3441}
3442
3443/**
8a7e4c24 3444 * scsih_target_reset - eh thread's main target reset routine
f92363d1
SR
3445 * @scmd: pointer to scsi command object
3446 *
4beb4867 3447 * Return: SUCCESS if the target was reset else FAILED
f92363d1 3448 */
8bbb1cf6 3449static int
8a7e4c24 3450scsih_target_reset(struct scsi_cmnd *scmd)
f92363d1
SR
3451{
3452 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3453 struct MPT3SAS_DEVICE *sas_device_priv_data;
d1cb5e49 3454 struct _sas_device *sas_device = NULL;
c1a6c5ac 3455 struct _pcie_device *pcie_device = NULL;
f92363d1 3456 u16 handle;
c1a6c5ac
C
3457 u8 tr_method = 0;
3458 u8 tr_timeout = 30;
f92363d1
SR
3459 int r;
3460 struct scsi_target *starget = scmd->device->sdev_target;
d1cb5e49 3461 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
f92363d1 3462
5b061980
SR
3463 starget_printk(KERN_INFO, starget,
3464 "attempting target reset! scmd(0x%p)\n", scmd);
f92363d1
SR
3465 _scsih_tm_display_info(ioc, scmd);
3466
3467 sas_device_priv_data = scmd->device->hostdata;
9ff549ff
MFO
3468 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3469 ioc->remove_host) {
5b061980
SR
3470 starget_printk(KERN_INFO, starget,
3471 "target been deleted! scmd(0x%p)\n", scmd);
f92363d1 3472 scmd->result = DID_NO_CONNECT << 16;
b0c30079 3473 scsi_done(scmd);
f92363d1
SR
3474 r = SUCCESS;
3475 goto out;
3476 }
3477
3478 /* for hidden raid components obtain the volume_handle */
3479 handle = 0;
3480 if (sas_device_priv_data->sas_target->flags &
3481 MPT_TARGET_FLAGS_RAID_COMPONENT) {
d1cb5e49
SR
3482 sas_device = mpt3sas_get_sdev_from_target(ioc,
3483 target_priv_data);
f92363d1
SR
3484 if (sas_device)
3485 handle = sas_device->volume_handle;
f92363d1
SR
3486 } else
3487 handle = sas_device_priv_data->sas_target->handle;
3488
3489 if (!handle) {
3490 scmd->result = DID_RESET << 16;
3491 r = FAILED;
3492 goto out;
3493 }
3494
c1a6c5ac
C
3495 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3496
5bb309db
SP
3497 if (pcie_device && (!ioc->tm_custom_handling) &&
3498 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
c1a6c5ac
C
3499 tr_timeout = pcie_device->reset_timeout;
3500 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3501 } else
3502 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
521e9c0b
SP
3503 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3504 scmd->device->id, 0,
c1a6c5ac
C
3505 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3506 tr_timeout, tr_method);
74fcfa53
HR
3507 /* Check for busy commands after reset */
3508 if (r == SUCCESS && atomic_read(&starget->target_busy))
3509 r = FAILED;
f92363d1 3510 out:
5b061980 3511 starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
f92363d1 3512 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
d1cb5e49
SR
3513
3514 if (sas_device)
3515 sas_device_put(sas_device);
c1a6c5ac
C
3516 if (pcie_device)
3517 pcie_device_put(pcie_device);
f92363d1
SR
3518 return r;
3519}
3520
3521
3522/**
8a7e4c24 3523 * scsih_host_reset - eh thread's main host reset routine
f92363d1
SR
3524 * @scmd: pointer to scsi command object
3525 *
4beb4867 3526 * Return: SUCCESS if the host was reset else FAILED
f92363d1 3527 */
8bbb1cf6 3528static int
8a7e4c24 3529scsih_host_reset(struct scsi_cmnd *scmd)
f92363d1
SR
3530{
3531 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3532 int r, retval;
3533
5b061980 3534 ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
f92363d1
SR
3535 scsi_print_command(scmd);
3536
9ff549ff 3537 if (ioc->is_driver_loading || ioc->remove_host) {
919d8a3f 3538 ioc_info(ioc, "Blocking the host reset\n");
ddb588be
SR
3539 r = FAILED;
3540 goto out;
3541 }
3542
98c56ad3 3543 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
f92363d1 3544 r = (retval < 0) ? FAILED : SUCCESS;
ddb588be 3545out:
5b061980 3546 ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
919d8a3f 3547 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
f92363d1
SR
3548
3549 return r;
3550}
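/*
 * Editorial note (sketch, not part of the original file): the four
 * error-handler entry points above are the ones a low-level driver
 * normally wires into its struct scsi_host_template. The template name
 * below is illustrative only; the driver's real template lives elsewhere
 * in this file:
 *
 *	static struct scsi_host_template example_driver_template = {
 *		...
 *		.eh_abort_handler	 = scsih_abort,
 *		.eh_device_reset_handler = scsih_dev_reset,
 *		.eh_target_reset_handler = scsih_target_reset,
 *		.eh_host_reset_handler	 = scsih_host_reset,
 *		...
 *	};
 */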
3551
3552/**
3553 * _scsih_fw_event_add - insert and queue up fw_event
3554 * @ioc: per adapter object
3555 * @fw_event: object describing the event
3556 * Context: This function will acquire ioc->fw_event_lock.
3557 *
3558 * This adds the firmware event object to the linked list, then queues it up
3559 * to be processed in user context.
f92363d1
SR
3560 */
3561static void
3562_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3563{
3564 unsigned long flags;
3565
3566 if (ioc->firmware_event_thread == NULL)
3567 return;
3568
3569 spin_lock_irqsave(&ioc->fw_event_lock, flags);
146b16c8 3570 fw_event_work_get(fw_event);
f92363d1
SR
3571 INIT_LIST_HEAD(&fw_event->list);
3572 list_add_tail(&fw_event->list, &ioc->fw_event_list);
3573 INIT_WORK(&fw_event->work, _firmware_event_work);
146b16c8 3574 fw_event_work_get(fw_event);
f92363d1
SR
3575 queue_work(ioc->firmware_event_thread, &fw_event->work);
3576 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3577}
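/*
 * Note on reference counting in _scsih_fw_event_add(): the first
 * fw_event_work_get() pins the reference owned by the fw_event_list
 * entry (dropped in _scsih_fw_event_del_from_list()), while the second
 * pins the reference owned by the queued work (dropped once the work
 * has run, or by _scsih_fw_event_cleanup_queue() if it never ran).
 */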
3578
3579/**
146b16c8 3580 * _scsih_fw_event_del_from_list - delete fw_event from the list
f92363d1
SR
3581 * @ioc: per adapter object
3582 * @fw_event: object describing the event
3583 * Context: This function will acquire ioc->fw_event_lock.
3584 *
146b16c8 3585 * If the fw_event is on the fw_event_list, remove it and do a put.
f92363d1
SR
3586 */
3587static void
146b16c8 3588_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
f92363d1
SR
3589 *fw_event)
3590{
3591 unsigned long flags;
3592
3593 spin_lock_irqsave(&ioc->fw_event_lock, flags);
146b16c8
SR
3594 if (!list_empty(&fw_event->list)) {
3595 list_del_init(&fw_event->list);
3596 fw_event_work_put(fw_event);
3597 }
f92363d1
SR
3598 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3599}
3600
3601
3602 /**
3603 * mpt3sas_send_trigger_data_event - send event for processing trigger data
3604 * @ioc: per adapter object
3605 * @event_data: trigger event data
f92363d1
SR
3606 */
3607void
3608mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3609 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3610{
3611 struct fw_event_work *fw_event;
146b16c8 3612 u16 sz;
f92363d1
SR
3613
3614 if (ioc->is_driver_loading)
3615 return;
146b16c8
SR
3616 sz = sizeof(*event_data);
3617 fw_event = alloc_fw_event_work(sz);
f92363d1
SR
3618 if (!fw_event)
3619 return;
f92363d1
SR
3620 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3621 fw_event->ioc = ioc;
3622 memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3623 _scsih_fw_event_add(ioc, fw_event);
146b16c8 3624 fw_event_work_put(fw_event);
f92363d1
SR
3625}
3626
3627/**
3628 * _scsih_error_recovery_delete_devices - remove devices not responding
3629 * @ioc: per adapter object
f92363d1
SR
3630 */
3631static void
3632_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3633{
3634 struct fw_event_work *fw_event;
3635
146b16c8 3636 fw_event = alloc_fw_event_work(0);
f92363d1
SR
3637 if (!fw_event)
3638 return;
3639 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3640 fw_event->ioc = ioc;
3641 _scsih_fw_event_add(ioc, fw_event);
146b16c8 3642 fw_event_work_put(fw_event);
f92363d1
SR
3643}
3644
3645/**
3646 * mpt3sas_port_enable_complete - port enable completed (fake event)
3647 * @ioc: per adapter object
f92363d1
SR
3648 */
3649void
3650mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3651{
3652 struct fw_event_work *fw_event;
3653
146b16c8 3654 fw_event = alloc_fw_event_work(0);
f92363d1
SR
3655 if (!fw_event)
3656 return;
3657 fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3658 fw_event->ioc = ioc;
3659 _scsih_fw_event_add(ioc, fw_event);
146b16c8
SR
3660 fw_event_work_put(fw_event);
3661}
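/*
 * The three helpers above follow the same submission pattern:
 * alloc_fw_event_work() appears to return the event with one caller
 * reference held, _scsih_fw_event_add() takes its own list and work
 * references, and the caller then drops its allocation reference with
 * fw_event_work_put() once the event has been queued.
 */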
3662
3663static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3664{
3665 unsigned long flags;
3666 struct fw_event_work *fw_event = NULL;
3667
3668 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3669 if (!list_empty(&ioc->fw_event_list)) {
3670 fw_event = list_first_entry(&ioc->fw_event_list,
3671 struct fw_event_work, list);
3672 list_del_init(&fw_event->list);
3673 }
3674 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3675
3676 return fw_event;
f92363d1
SR
3677}
3678
3679/**
3680 * _scsih_fw_event_cleanup_queue - cleanup event queue
3681 * @ioc: per adapter object
3682 *
3683 * Walk the firmware event queue, either killing timers, or waiting
3684 * for outstanding events to complete
547c0d1a
AD
3685 *
3686 * Context: task, can sleep
f92363d1
SR
3687 */
3688static void
3689_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3690{
146b16c8 3691 struct fw_event_work *fw_event;
f92363d1 3692
9e73ed2e 3693 if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
547c0d1a 3694 !ioc->firmware_event_thread)
f92363d1 3695 return;
a0815c45
SP
3696 /*
 3697 * Mark the currently running event as ignored, so that
 3698 * it will exit quickly.
 3699 * As a diag reset has occurred, it is of no use
 3700 * to process the remaining stale event data entries.
3701 */
3702 if (ioc->shost_recovery && ioc->current_event)
3703 ioc->current_event->ignore = 1;
f92363d1 3704
9e73ed2e
SP
3705 ioc->fw_events_cleanup = 1;
3706 while ((fw_event = dequeue_next_fw_event(ioc)) ||
3707 (fw_event = ioc->current_event)) {
e2fac6c4
SP
3708
3709 /*
3710 * Don't call cancel_work_sync() for current_event
3711 * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
 3712 * otherwise we may observe a deadlock if a hard reset is
 3713 * issued as part of processing the current_event.
 3714 *
 3715 * The original logic of cleaning up the current_event was added
 3716 * to handle back-to-back host resets issued by the user,
 3717 * i.e. during back-to-back host resets the driver used to process
 3718 * the two instances of the MPT3SAS_REMOVE_UNRESPONDING_DEVICES
 3719 * event back to back, which made the driver unregister
 3720 * the devices from the SML.
3721 */
3722
3723 if (fw_event == ioc->current_event &&
3724 ioc->current_event->event !=
3725 MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
3726 ioc->current_event = NULL;
3727 continue;
3728 }
3729
a0815c45
SP
3730 /*
3731 * Driver has to clear ioc->start_scan flag when
3732 * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
3733 * otherwise scsi_scan_host() API waits for the
3734 * 5 minute timer to expire. If we exit from
3735 * scsi_scan_host() early then we can issue the
3736 * new port enable request as part of current diag reset.
3737 */
3738 if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
3739 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
3740 ioc->start_scan = 0;
3741 }
3742
146b16c8
SR
3743 /*
3744 * Wait on the fw_event to complete. If this returns 1, then
3745 * the event was never executed, and we need a put for the
b8ac0cc7 3746 * reference the work had on the fw_event.
146b16c8
SR
3747 *
3748 * If it did execute, we wait for it to finish, and the put will
3749 * happen from _firmware_event_work()
3750 */
b8ac0cc7 3751 if (cancel_work_sync(&fw_event->work))
146b16c8
SR
3752 fw_event_work_put(fw_event);
3753
3754 fw_event_work_put(fw_event);
f92363d1 3755 }
9e73ed2e 3756 ioc->fw_events_cleanup = 0;
f92363d1
SR
3757}
3758
df838f92
SR
3759/**
3760 * _scsih_internal_device_block - block the sdev device
3761 * @sdev: per device object
3762 * @sas_device_priv_data : per device driver private data
3763 *
 3764 * Make sure the device is blocked without error; if not,
 3765 * print an error.
3766 */
3767static void
3768_scsih_internal_device_block(struct scsi_device *sdev,
3769 struct MPT3SAS_DEVICE *sas_device_priv_data)
3770{
3771 int r = 0;
3772
3773 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3774 sas_device_priv_data->sas_target->handle);
3775 sas_device_priv_data->block = 1;
3776
551eb598 3777 r = scsi_internal_device_block_nowait(sdev);
df838f92
SR
3778 if (r == -EINVAL)
3779 sdev_printk(KERN_WARNING, sdev,
3780 "device_block failed with return(%d) for handle(0x%04x)\n",
b2fe6be7 3781 r, sas_device_priv_data->sas_target->handle);
df838f92
SR
3782}
3783
3784/**
3785 * _scsih_internal_device_unblock - unblock the sdev device
3786 * @sdev: per device object
3787 * @sas_device_priv_data : per device driver private data
 3788 * Make sure the device is unblocked without error; if not, retry
 3789 * by blocking and then unblocking.
3790 */
3791
3792static void
3793_scsih_internal_device_unblock(struct scsi_device *sdev,
3794 struct MPT3SAS_DEVICE *sas_device_priv_data)
3795{
3796 int r = 0;
3797
3798 sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3799 "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3800 sas_device_priv_data->block = 0;
43f7571b 3801 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
df838f92
SR
3802 if (r == -EINVAL) {
3803 /* The device has been set to SDEV_RUNNING by SD layer during
3804 * device addition but the request queue is still stopped by
3805 * our earlier block call. We need to perform a block again
3806 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3807
3808 sdev_printk(KERN_WARNING, sdev,
3809 "device_unblock failed with return(%d) for handle(0x%04x) "
3810 "performing a block followed by an unblock\n",
b2fe6be7 3811 r, sas_device_priv_data->sas_target->handle);
df838f92 3812 sas_device_priv_data->block = 1;
551eb598 3813 r = scsi_internal_device_block_nowait(sdev);
df838f92
SR
3814 if (r)
3815 sdev_printk(KERN_WARNING, sdev, "retried device_block "
3816 "failed with return(%d) for handle(0x%04x)\n",
b2fe6be7 3817 r, sas_device_priv_data->sas_target->handle);
df838f92
SR
3818
3819 sas_device_priv_data->block = 0;
43f7571b 3820 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
df838f92
SR
3821 if (r)
3822 sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3823 " failed with return(%d) for handle(0x%04x)\n",
b2fe6be7 3824 r, sas_device_priv_data->sas_target->handle);
df838f92
SR
3825 }
3826}
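/*
 * Both helpers above use the *_nowait variants of the midlayer
 * block/unblock primitives, presumably because they are called from
 * firmware-event and device-removal paths where waiting for the
 * request queue to quiesce would stall event processing.
 */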
3827
f92363d1
SR
3828/**
3829 * _scsih_ublock_io_all_device - unblock every device
3830 * @ioc: per adapter object
3831 *
3832 * change the device state from block to running
3833 */
3834static void
3835_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3836{
3837 struct MPT3SAS_DEVICE *sas_device_priv_data;
3838 struct scsi_device *sdev;
3839
3840 shost_for_each_device(sdev, ioc->shost) {
3841 sas_device_priv_data = sdev->hostdata;
3842 if (!sas_device_priv_data)
3843 continue;
3844 if (!sas_device_priv_data->block)
3845 continue;
3846
f92363d1
SR
3847 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3848 "device_running, handle(0x%04x)\n",
3849 sas_device_priv_data->sas_target->handle));
df838f92 3850 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
f92363d1
SR
3851 }
3852}
3853
3854
3855/**
3856 * _scsih_ublock_io_device - prepare device to be deleted
3857 * @ioc: per adapter object
4beb4867 3858 * @sas_address: sas address
7d310f24 3859 * @port: hba port entry
f92363d1
SR
3860 *
3861 * unblock then put device in offline state
3862 */
3863static void
7d310f24
SR
3864_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3865 u64 sas_address, struct hba_port *port)
f92363d1
SR
3866{
3867 struct MPT3SAS_DEVICE *sas_device_priv_data;
3868 struct scsi_device *sdev;
3869
3870 shost_for_each_device(sdev, ioc->shost) {
3871 sas_device_priv_data = sdev->hostdata;
0ee4ba13 3872 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
f92363d1
SR
3873 continue;
3874 if (sas_device_priv_data->sas_target->sas_address
3875 != sas_address)
3876 continue;
7d310f24
SR
3877 if (sas_device_priv_data->sas_target->port != port)
3878 continue;
df838f92
SR
3879 if (sas_device_priv_data->block)
3880 _scsih_internal_device_unblock(sdev,
3881 sas_device_priv_data);
f92363d1
SR
3882 }
3883}
3884
3885/**
3886 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3887 * @ioc: per adapter object
f92363d1 3888 *
6c7abffc 3889 * During device pull we need to appropriately set the sdev state.
f92363d1
SR
3890 */
3891static void
3892_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3893{
3894 struct MPT3SAS_DEVICE *sas_device_priv_data;
3895 struct scsi_device *sdev;
3896
3897 shost_for_each_device(sdev, ioc->shost) {
3898 sas_device_priv_data = sdev->hostdata;
3899 if (!sas_device_priv_data)
3900 continue;
3901 if (sas_device_priv_data->block)
3902 continue;
30158dc9
SS
3903 if (sas_device_priv_data->ignore_delay_remove) {
3904 sdev_printk(KERN_INFO, sdev,
3905 "%s skip device_block for SES handle(0x%04x)\n",
3906 __func__, sas_device_priv_data->sas_target->handle);
3907 continue;
3908 }
df838f92 3909 _scsih_internal_device_block(sdev, sas_device_priv_data);
f92363d1
SR
3910 }
3911}
3912
3913/**
3914 * _scsih_block_io_device - set the device state to SDEV_BLOCK
3915 * @ioc: per adapter object
3916 * @handle: device handle
3917 *
6c7abffc 3918 * During device pull we need to appropriately set the sdev state.
f92363d1
SR
3919 */
3920static void
3921_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3922{
3923 struct MPT3SAS_DEVICE *sas_device_priv_data;
3924 struct scsi_device *sdev;
e4bc7f5c
SR
3925 struct _sas_device *sas_device;
3926
d1cb5e49 3927 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
f92363d1
SR
3928
3929 shost_for_each_device(sdev, ioc->shost) {
3930 sas_device_priv_data = sdev->hostdata;
3931 if (!sas_device_priv_data)
3932 continue;
3933 if (sas_device_priv_data->sas_target->handle != handle)
3934 continue;
3935 if (sas_device_priv_data->block)
3936 continue;
4318c734 3937 if (sas_device && sas_device->pend_sas_rphy_add)
e4bc7f5c 3938 continue;
30158dc9
SS
3939 if (sas_device_priv_data->ignore_delay_remove) {
3940 sdev_printk(KERN_INFO, sdev,
3941 "%s skip device_block for SES handle(0x%04x)\n",
3942 __func__, sas_device_priv_data->sas_target->handle);
3943 continue;
3944 }
df838f92 3945 _scsih_internal_device_block(sdev, sas_device_priv_data);
f92363d1 3946 }
d1cb5e49 3947
4318c734
SPS
3948 if (sas_device)
3949 sas_device_put(sas_device);
f92363d1
SR
3950}
3951
3952/**
3953 * _scsih_block_io_to_children_attached_to_ex
3954 * @ioc: per adapter object
3955 * @sas_expander: the sas_device object
3956 *
 3957 * This routine sets the sdev state to SDEV_BLOCK for all devices
 3958 * attached to this expander. This function is called when the
 3959 * expander is pulled.
3960 */
3961static void
3962_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3963 struct _sas_node *sas_expander)
3964{
3965 struct _sas_port *mpt3sas_port;
3966 struct _sas_device *sas_device;
3967 struct _sas_node *expander_sibling;
3968 unsigned long flags;
3969
3970 if (!sas_expander)
3971 return;
3972
3973 list_for_each_entry(mpt3sas_port,
3974 &sas_expander->sas_port_list, port_list) {
3975 if (mpt3sas_port->remote_identify.device_type ==
3976 SAS_END_DEVICE) {
3977 spin_lock_irqsave(&ioc->sas_device_lock, flags);
d1cb5e49 3978 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7d310f24
SR
3979 mpt3sas_port->remote_identify.sas_address,
3980 mpt3sas_port->hba_port);
d1cb5e49 3981 if (sas_device) {
f92363d1 3982 set_bit(sas_device->handle,
d1cb5e49
SR
3983 ioc->blocking_handles);
3984 sas_device_put(sas_device);
3985 }
f92363d1
SR
3986 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3987 }
3988 }
3989
3990 list_for_each_entry(mpt3sas_port,
3991 &sas_expander->sas_port_list, port_list) {
3992
3993 if (mpt3sas_port->remote_identify.device_type ==
3994 SAS_EDGE_EXPANDER_DEVICE ||
3995 mpt3sas_port->remote_identify.device_type ==
3996 SAS_FANOUT_EXPANDER_DEVICE) {
3997 expander_sibling =
3998 mpt3sas_scsih_expander_find_by_sas_address(
7d310f24
SR
3999 ioc, mpt3sas_port->remote_identify.sas_address,
4000 mpt3sas_port->hba_port);
f92363d1
SR
4001 _scsih_block_io_to_children_attached_to_ex(ioc,
4002 expander_sibling);
4003 }
4004 }
4005}
4006
4007/**
4008 * _scsih_block_io_to_children_attached_directly
4009 * @ioc: per adapter object
4010 * @event_data: topology change event data
4011 *
 4012 * This routine sets the sdev state to SDEV_BLOCK for all directly
 4013 * attached devices during device pull.
4014 */
4015static void
4016_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4017 Mpi2EventDataSasTopologyChangeList_t *event_data)
4018{
4019 int i;
4020 u16 handle;
4021 u16 reason_code;
f92363d1
SR
4022
4023 for (i = 0; i < event_data->NumEntries; i++) {
4024 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4025 if (!handle)
4026 continue;
f92363d1
SR
4027 reason_code = event_data->PHY[i].PhyStatus &
4028 MPI2_EVENT_SAS_TOPO_RC_MASK;
4029 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
4030 _scsih_block_io_device(ioc, handle);
4031 }
4032}
4033
4318c734
SPS
4034/**
4035 * _scsih_block_io_to_pcie_children_attached_directly
4036 * @ioc: per adapter object
4037 * @event_data: topology change event data
4038 *
 4039 * This routine sets the sdev state to SDEV_BLOCK for all directly
 4040 * attached devices during device pull/reconnect.
4041 */
4042static void
4043_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4044 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4045{
4046 int i;
4047 u16 handle;
4048 u16 reason_code;
4049
4050 for (i = 0; i < event_data->NumEntries; i++) {
4051 handle =
4052 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4053 if (!handle)
4054 continue;
4055 reason_code = event_data->PortEntry[i].PortStatus;
4056 if (reason_code ==
4057 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4058 _scsih_block_io_device(ioc, handle);
4059 }
4060}
f92363d1
SR
4061/**
4062 * _scsih_tm_tr_send - send task management request
4063 * @ioc: per adapter object
4064 * @handle: device handle
4065 * Context: interrupt time.
4066 *
4067 * This code is to initiate the device removal handshake protocol
 4068 * with controller firmware. This function will issue a target reset
 4069 * using the high priority request queue. It will send a sas iounit
 4070 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from its completion.
 4071 *
 4072 * This is designed to send multiple task management requests at the
 4073 * same time to the fifo. If the fifo is full, we will append the
 4074 * request, and process it in a future completion.
4075 */
4076static void
4077_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4078{
4079 Mpi2SCSITaskManagementRequest_t *mpi_request;
4080 u16 smid;
d1cb5e49 4081 struct _sas_device *sas_device = NULL;
6ce2f1d1 4082 struct _pcie_device *pcie_device = NULL;
f92363d1
SR
4083 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
4084 u64 sas_address = 0;
4085 unsigned long flags;
4086 struct _tr_list *delayed_tr;
4087 u32 ioc_state;
c1a6c5ac 4088 u8 tr_method = 0;
7d310f24 4089 struct hba_port *port = NULL;
f92363d1 4090
6ce2f1d1 4091 if (ioc->pci_error_recovery) {
4dc74b2e
JP
4092 dewtprintk(ioc,
4093 ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
4094 __func__, handle));
f92363d1
SR
4095 return;
4096 }
4097 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4098 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4dc74b2e
JP
4099 dewtprintk(ioc,
4100 ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
4101 __func__, handle));
f92363d1
SR
4102 return;
4103 }
4104
4105 /* if PD, then return */
4106 if (test_bit(handle, ioc->pd_handles))
4107 return;
4108
c696f7b8
SPS
4109 clear_bit(handle, ioc->pend_os_device_add);
4110
f92363d1 4111 spin_lock_irqsave(&ioc->sas_device_lock, flags);
d1cb5e49 4112 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
f92363d1
SR
4113 if (sas_device && sas_device->starget &&
4114 sas_device->starget->hostdata) {
4115 sas_target_priv_data = sas_device->starget->hostdata;
4116 sas_target_priv_data->deleted = 1;
4117 sas_address = sas_device->sas_address;
7d310f24 4118 port = sas_device->port;
f92363d1
SR
4119 }
4120 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6ce2f1d1
SPS
4121 if (!sas_device) {
4122 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
4123 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
4124 if (pcie_device && pcie_device->starget &&
4125 pcie_device->starget->hostdata) {
4126 sas_target_priv_data = pcie_device->starget->hostdata;
4127 sas_target_priv_data->deleted = 1;
4128 sas_address = pcie_device->wwid;
4129 }
4130 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
5bb309db
SP
4131 if (pcie_device && (!ioc->tm_custom_handling) &&
4132 (!(mpt3sas_scsih_is_pcie_scsi_device(
4133 pcie_device->device_info))))
c1a6c5ac
C
4134 tr_method =
4135 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
4136 else
4137 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
6ce2f1d1 4138 }
f92363d1 4139 if (sas_target_priv_data) {
919d8a3f
JP
4140 dewtprintk(ioc,
4141 ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
4142 handle, (u64)sas_address));
6ce2f1d1
SPS
4143 if (sas_device) {
4144 if (sas_device->enclosure_handle != 0)
919d8a3f
JP
4145 dewtprintk(ioc,
4146 ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
4147 (u64)sas_device->enclosure_logical_id,
4148 sas_device->slot));
6ce2f1d1 4149 if (sas_device->connector_name[0] != '\0')
919d8a3f
JP
4150 dewtprintk(ioc,
4151 ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4152 sas_device->enclosure_level,
4153 sas_device->connector_name));
6ce2f1d1
SPS
4154 } else if (pcie_device) {
4155 if (pcie_device->enclosure_handle != 0)
919d8a3f
JP
4156 dewtprintk(ioc,
4157 ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
4158 (u64)pcie_device->enclosure_logical_id,
4159 pcie_device->slot));
6ce2f1d1 4160 if (pcie_device->connector_name[0] != '\0')
919d8a3f
JP
4161 dewtprintk(ioc,
4162 ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
4163 pcie_device->enclosure_level,
4164 pcie_device->connector_name));
6ce2f1d1 4165 }
7d310f24 4166 _scsih_ublock_io_device(ioc, sas_address, port);
f92363d1
SR
4167 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
4168 }
4169
4170 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
4171 if (!smid) {
4172 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4173 if (!delayed_tr)
d1cb5e49 4174 goto out;
f92363d1
SR
4175 INIT_LIST_HEAD(&delayed_tr->list);
4176 delayed_tr->handle = handle;
4177 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
919d8a3f
JP
4178 dewtprintk(ioc,
4179 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4180 handle));
d1cb5e49 4181 goto out;
f92363d1
SR
4182 }
4183
919d8a3f
JP
4184 dewtprintk(ioc,
4185 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4186 handle, smid, ioc->tm_tr_cb_idx));
f92363d1
SR
4187 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4188 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4189 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4190 mpi_request->DevHandle = cpu_to_le16(handle);
4191 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
c1a6c5ac 4192 mpi_request->MsgFlags = tr_method;
c696f7b8 4193 set_bit(handle, ioc->device_remove_in_progress);
078a4cc1 4194 ioc->put_smid_hi_priority(ioc, smid, 0);
f92363d1 4195 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
d1cb5e49
SR
4196
4197out:
4198 if (sas_device)
4199 sas_device_put(sas_device);
6ce2f1d1
SPS
4200 if (pcie_device)
4201 pcie_device_put(pcie_device);
f92363d1
SR
4202}
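/*
 * Summary of the device-removal handshake driven by the code above and
 * its completion routines below:
 *
 *	_scsih_tm_tr_send(handle)
 *	    -> high-priority MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET
 *	_scsih_tm_tr_complete()
 *	    -> MPI2_SAS_OP_REMOVE_DEVICE sas iounit control request
 *	_scsih_sas_control_complete()
 *	    -> clears the handle in ioc->device_remove_in_progress
 *
 * If no smid is available at either step the request is parked on
 * ioc->delayed_tr_list or ioc->delayed_sc_list and replayed from a
 * later completion via _scsih_check_for_pending_tm() or
 * mpt3sas_check_for_pending_internal_cmds().
 */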
4203
4204/**
 4205 * _scsih_tm_tr_complete - target reset completion routine
4206 * @ioc: per adapter object
4207 * @smid: system request message index
4208 * @msix_index: MSIX table index supplied by the OS
4209 * @reply: reply message frame(lower 32bit addr)
4210 * Context: interrupt time.
4211 *
4212 * This is the target reset completion routine.
4213 * This code is part of the code to initiate the device removal
4214 * handshake protocol with controller firmware.
4215 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
4216 *
4beb4867
BVA
4217 * Return: 1 meaning mf should be freed from _base_interrupt
4218 * 0 means the mf is freed from this function.
f92363d1
SR
4219 */
4220static u8
4221_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4222 u32 reply)
4223{
4224 u16 handle;
4225 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4226 Mpi2SCSITaskManagementReply_t *mpi_reply =
4227 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4228 Mpi2SasIoUnitControlRequest_t *mpi_request;
4229 u16 smid_sas_ctrl;
4230 u32 ioc_state;
fd0331b3 4231 struct _sc_list *delayed_sc;
f92363d1 4232
79eb96d6 4233 if (ioc->pci_error_recovery) {
919d8a3f
JP
4234 dewtprintk(ioc,
4235 ioc_info(ioc, "%s: host in pci error recovery\n",
4236 __func__));
f92363d1
SR
4237 return 1;
4238 }
4239 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4240 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
919d8a3f
JP
4241 dewtprintk(ioc,
4242 ioc_info(ioc, "%s: host is not operational\n",
4243 __func__));
f92363d1
SR
4244 return 1;
4245 }
4246 if (unlikely(!mpi_reply)) {
919d8a3f
JP
4247 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4248 __FILE__, __LINE__, __func__);
f92363d1
SR
4249 return 1;
4250 }
4251 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4252 handle = le16_to_cpu(mpi_request_tm->DevHandle);
4253 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
919d8a3f
JP
4254 dewtprintk(ioc,
4255 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4256 handle,
4257 le16_to_cpu(mpi_reply->DevHandle), smid));
f92363d1
SR
4258 return 0;
4259 }
4260
4261 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
919d8a3f
JP
4262 dewtprintk(ioc,
4263 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4264 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4265 le32_to_cpu(mpi_reply->IOCLogInfo),
4266 le32_to_cpu(mpi_reply->TerminationCount)));
f92363d1
SR
4267
4268 smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
4269 if (!smid_sas_ctrl) {
fd0331b3
SS
4270 delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
4271 if (!delayed_sc)
4272 return _scsih_check_for_pending_tm(ioc, smid);
4273 INIT_LIST_HEAD(&delayed_sc->list);
cf6bf971 4274 delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
fd0331b3 4275 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
919d8a3f
JP
4276 dewtprintk(ioc,
4277 ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
4278 handle));
fd0331b3 4279 return _scsih_check_for_pending_tm(ioc, smid);
f92363d1
SR
4280 }
4281
919d8a3f
JP
4282 dewtprintk(ioc,
4283 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4284 handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
f92363d1
SR
4285 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
4286 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4287 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4288 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4289 mpi_request->DevHandle = mpi_request_tm->DevHandle;
078a4cc1 4290 ioc->put_smid_default(ioc, smid_sas_ctrl);
f92363d1
SR
4291
4292 return _scsih_check_for_pending_tm(ioc, smid);
4293}
4294
9029a725
SP
4295/** _scsih_allow_scmd_to_device - check whether the scmd can be
4296 * issued to the IOC or not.
4297 * @ioc: per adapter object
4298 * @scmd: pointer to scsi command object
4299 *
4300 * Returns true if scmd can be issued to IOC otherwise returns false.
4301 */
4302inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4303 struct scsi_cmnd *scmd)
4304{
4305
4306 if (ioc->pci_error_recovery)
4307 return false;
4308
4309 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4310 if (ioc->remove_host)
4311 return false;
4312
4313 return true;
4314 }
4315
4316 if (ioc->remove_host) {
4317
4318 switch (scmd->cmnd[0]) {
4319 case SYNCHRONIZE_CACHE:
4320 case START_STOP:
4321 return true;
4322 default:
4323 return false;
4324 }
4325 }
4326
4327 return true;
4328}
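/*
 * Illustrative use (a sketch, not a quote of this driver's queuecommand
 * path): the submission side is expected to do roughly
 *
 *	if (!_scsih_allow_scmd_to_device(ioc, scmd)) {
 *		scmd->result = DID_NO_CONNECT << 16;
 *		scsi_done(scmd);
 *		return 0;
 *	}
 *
 * so that, on an adapter in PCI error recovery or being removed, only
 * the cache-flush/start-stop commands permitted above reach the IOC.
 */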
f92363d1
SR
4329
4330/**
4331 * _scsih_sas_control_complete - completion routine
4332 * @ioc: per adapter object
4333 * @smid: system request message index
4334 * @msix_index: MSIX table index supplied by the OS
4335 * @reply: reply message frame(lower 32bit addr)
4336 * Context: interrupt time.
4337 *
4338 * This is the sas iounit control completion routine.
4339 * This code is part of the code to initiate the device removal
4340 * handshake protocol with controller firmware.
4341 *
4beb4867
BVA
4342 * Return: 1 meaning mf should be freed from _base_interrupt
4343 * 0 means the mf is freed from this function.
f92363d1
SR
4344 */
4345static u8
4346_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4347 u8 msix_index, u32 reply)
4348{
4349 Mpi2SasIoUnitControlReply_t *mpi_reply =
4350 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4351
4352 if (likely(mpi_reply)) {
919d8a3f
JP
4353 dewtprintk(ioc,
4354 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4355 le16_to_cpu(mpi_reply->DevHandle), smid,
4356 le16_to_cpu(mpi_reply->IOCStatus),
4357 le32_to_cpu(mpi_reply->IOCLogInfo)));
c696f7b8
SPS
4358 if (le16_to_cpu(mpi_reply->IOCStatus) ==
4359 MPI2_IOCSTATUS_SUCCESS) {
4360 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4361 ioc->device_remove_in_progress);
4362 }
f92363d1 4363 } else {
919d8a3f
JP
4364 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4365 __FILE__, __LINE__, __func__);
f92363d1 4366 }
fd0331b3 4367 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
f92363d1
SR
4368}
4369
4370/**
4371 * _scsih_tm_tr_volume_send - send target reset request for volumes
4372 * @ioc: per adapter object
4373 * @handle: device handle
4374 * Context: interrupt time.
4375 *
 4376 * This is designed to send multiple task management requests at the
 4377 * same time to the fifo. If the fifo is full, we will append the
 4378 * request, and process it in a future completion.
4379 */
4380static void
4381_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4382{
4383 Mpi2SCSITaskManagementRequest_t *mpi_request;
4384 u16 smid;
4385 struct _tr_list *delayed_tr;
4386
79eb96d6 4387 if (ioc->pci_error_recovery) {
4dc74b2e
JP
4388 dewtprintk(ioc,
4389 ioc_info(ioc, "%s: host reset in progress!\n",
4390 __func__));
f92363d1
SR
4391 return;
4392 }
4393
4394 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
4395 if (!smid) {
4396 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4397 if (!delayed_tr)
4398 return;
4399 INIT_LIST_HEAD(&delayed_tr->list);
4400 delayed_tr->handle = handle;
4401 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
919d8a3f
JP
4402 dewtprintk(ioc,
4403 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4404 handle));
f92363d1
SR
4405 return;
4406 }
4407
919d8a3f
JP
4408 dewtprintk(ioc,
4409 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4410 handle, smid, ioc->tm_tr_volume_cb_idx));
f92363d1
SR
4411 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4412 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4413 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4414 mpi_request->DevHandle = cpu_to_le16(handle);
4415 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
078a4cc1 4416 ioc->put_smid_hi_priority(ioc, smid, 0);
f92363d1
SR
4417}
4418
4419/**
4420 * _scsih_tm_volume_tr_complete - target reset completion
4421 * @ioc: per adapter object
4422 * @smid: system request message index
4423 * @msix_index: MSIX table index supplied by the OS
4424 * @reply: reply message frame(lower 32bit addr)
4425 * Context: interrupt time.
4426 *
4beb4867
BVA
4427 * Return: 1 meaning mf should be freed from _base_interrupt
4428 * 0 means the mf is freed from this function.
f92363d1
SR
4429 */
4430static u8
4431_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4432 u8 msix_index, u32 reply)
4433{
4434 u16 handle;
4435 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4436 Mpi2SCSITaskManagementReply_t *mpi_reply =
4437 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4438
79eb96d6 4439 if (ioc->shost_recovery || ioc->pci_error_recovery) {
4dc74b2e
JP
4440 dewtprintk(ioc,
4441 ioc_info(ioc, "%s: host reset in progress!\n",
4442 __func__));
f92363d1
SR
4443 return 1;
4444 }
4445 if (unlikely(!mpi_reply)) {
919d8a3f
JP
4446 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4447 __FILE__, __LINE__, __func__);
f92363d1
SR
4448 return 1;
4449 }
4450
4451 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4452 handle = le16_to_cpu(mpi_request_tm->DevHandle);
4453 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
919d8a3f
JP
4454 dewtprintk(ioc,
4455 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4456 handle, le16_to_cpu(mpi_reply->DevHandle),
4457 smid));
f92363d1
SR
4458 return 0;
4459 }
4460
919d8a3f
JP
4461 dewtprintk(ioc,
4462 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4463 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4464 le32_to_cpu(mpi_reply->IOCLogInfo),
4465 le32_to_cpu(mpi_reply->TerminationCount)));
f92363d1
SR
4466
4467 return _scsih_check_for_pending_tm(ioc, smid);
4468}
4469
fd0331b3
SS
4470/**
4471 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
4472 * @ioc: per adapter object
4473 * @smid: system request message index
4474 * @event: Event ID
4475 * @event_context: used to track events uniquely
4476 *
4477 * Context - processed in interrupt context.
4478 */
8bbb1cf6 4479static void
cf6bf971
C
4480_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
4481 U32 event_context)
fd0331b3
SS
4482{
4483 Mpi2EventAckRequest_t *ack_request;
4484 int i = smid - ioc->internal_smid;
4485 unsigned long flags;
4486
4487 /* Without releasing the smid just update the
4488 * call back index and reuse the same smid for
4489 * processing this delayed request
4490 */
4491 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4492 ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4493 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4494
919d8a3f
JP
4495 dewtprintk(ioc,
4496 ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4497 le16_to_cpu(event), smid, ioc->base_cb_idx));
fd0331b3
SS
4498 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4499 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4500 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
4501 ack_request->Event = event;
4502 ack_request->EventContext = event_context;
4503 ack_request->VF_ID = 0; /* TODO */
4504 ack_request->VP_ID = 0;
078a4cc1 4505 ioc->put_smid_default(ioc, smid);
fd0331b3
SS
4506}
4507
4508/**
4509 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4510 * sas_io_unit_ctrl messages
4511 * @ioc: per adapter object
4512 * @smid: system request message index
4513 * @handle: device handle
4514 *
4515 * Context - processed in interrupt context.
4516 */
8bbb1cf6 4517static void
fd0331b3
SS
4518_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4519 u16 smid, u16 handle)
199fd79a
BVA
4520{
4521 Mpi2SasIoUnitControlRequest_t *mpi_request;
4522 u32 ioc_state;
4523 int i = smid - ioc->internal_smid;
4524 unsigned long flags;
fd0331b3 4525
199fd79a 4526 if (ioc->remove_host) {
4dc74b2e
JP
4527 dewtprintk(ioc,
4528 ioc_info(ioc, "%s: host has been removed\n",
4529 __func__));
199fd79a
BVA
4530 return;
4531 } else if (ioc->pci_error_recovery) {
4dc74b2e
JP
4532 dewtprintk(ioc,
4533 ioc_info(ioc, "%s: host in pci error recovery\n",
4534 __func__));
fd0331b3
SS
4535 return;
4536 }
4537 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4538 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4dc74b2e
JP
4539 dewtprintk(ioc,
4540 ioc_info(ioc, "%s: host is not operational\n",
4541 __func__));
fd0331b3
SS
4542 return;
4543 }
4544
4545 /* Without releasing the smid just update the
4546 * call back index and reuse the same smid for
4547 * processing this delayed request
4548 */
4549 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4550 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4551 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4552
919d8a3f
JP
4553 dewtprintk(ioc,
4554 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4555 handle, smid, ioc->tm_sas_control_cb_idx));
fd0331b3
SS
4556 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4557 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4558 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4559 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
cf6bf971 4560 mpi_request->DevHandle = cpu_to_le16(handle);
078a4cc1 4561 ioc->put_smid_default(ioc, smid);
fd0331b3
SS
4562}
4563
4564/**
a8d548b0 4565 * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
fd0331b3
SS
4566 * @ioc: per adapter object
4567 * @smid: system request message index
4568 *
4569 * Context: Executed in interrupt context
4570 *
4571 * This will check delayed internal messages list, and process the
4572 * next request.
4573 *
4beb4867
BVA
4574 * Return: 1 meaning mf should be freed from _base_interrupt
4575 * 0 means the mf is freed from this function.
fd0331b3
SS
4576 */
4577u8
4578mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4579{
4580 struct _sc_list *delayed_sc;
4581 struct _event_ack_list *delayed_event_ack;
4582
4583 if (!list_empty(&ioc->delayed_event_ack_list)) {
4584 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4585 struct _event_ack_list, list);
4586 _scsih_issue_delayed_event_ack(ioc, smid,
4587 delayed_event_ack->Event, delayed_event_ack->EventContext);
4588 list_del(&delayed_event_ack->list);
4589 kfree(delayed_event_ack);
4590 return 0;
4591 }
4592
4593 if (!list_empty(&ioc->delayed_sc_list)) {
4594 delayed_sc = list_entry(ioc->delayed_sc_list.next,
4595 struct _sc_list, list);
4596 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4597 delayed_sc->handle);
4598 list_del(&delayed_sc->list);
4599 kfree(delayed_sc);
4600 return 0;
4601 }
4602 return 1;
4603}
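/*
 * Note: when either delayed list is non-empty, the smid of the just
 * completed internal command is reused in place (only its cb_idx is
 * updated by the _scsih_issue_delayed_* helpers), and returning 0
 * tells _base_interrupt not to free the message frame.
 */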
f92363d1
SR
4604
4605/**
4606 * _scsih_check_for_pending_tm - check for pending task management
4607 * @ioc: per adapter object
4608 * @smid: system request message index
4609 *
 4610 * This will check the delayed target reset list, and feed the
 4611 * next request.
4612 *
4beb4867
BVA
4613 * Return: 1 meaning mf should be freed from _base_interrupt
4614 * 0 means the mf is freed from this function.
f92363d1
SR
4615 */
4616static u8
4617_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4618{
4619 struct _tr_list *delayed_tr;
4620
4621 if (!list_empty(&ioc->delayed_tr_volume_list)) {
4622 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4623 struct _tr_list, list);
4624 mpt3sas_base_free_smid(ioc, smid);
4625 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4626 list_del(&delayed_tr->list);
4627 kfree(delayed_tr);
4628 return 0;
4629 }
4630
4631 if (!list_empty(&ioc->delayed_tr_list)) {
4632 delayed_tr = list_entry(ioc->delayed_tr_list.next,
4633 struct _tr_list, list);
4634 mpt3sas_base_free_smid(ioc, smid);
4635 _scsih_tm_tr_send(ioc, delayed_tr->handle);
4636 list_del(&delayed_tr->list);
4637 kfree(delayed_tr);
4638 return 0;
4639 }
4640
4641 return 1;
4642}
4643
4644/**
4645 * _scsih_check_topo_delete_events - sanity check on topo events
4646 * @ioc: per adapter object
4647 * @event_data: the event data payload
4648 *
 4649 * This routine was added to better handle cable breaks.
 4650 *
 4651 * This handles the case where the driver receives multiple expander
 4652 * add and delete events in a single shot. When there is a delete event
4653 * the routine will void any pending add events waiting in the event queue.
f92363d1
SR
4654 */
4655static void
4656_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4657 Mpi2EventDataSasTopologyChangeList_t *event_data)
4658{
4659 struct fw_event_work *fw_event;
4660 Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4661 u16 expander_handle;
4662 struct _sas_node *sas_expander;
4663 unsigned long flags;
4664 int i, reason_code;
4665 u16 handle;
4666
4667 for (i = 0 ; i < event_data->NumEntries; i++) {
4668 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4669 if (!handle)
4670 continue;
4671 reason_code = event_data->PHY[i].PhyStatus &
4672 MPI2_EVENT_SAS_TOPO_RC_MASK;
4673 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4674 _scsih_tm_tr_send(ioc, handle);
4675 }
4676
4677 expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4678 if (expander_handle < ioc->sas_hba.num_phys) {
4679 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4680 return;
4681 }
4682 if (event_data->ExpStatus ==
4683 MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4684 /* put expander attached devices into blocking state */
4685 spin_lock_irqsave(&ioc->sas_node_lock, flags);
4686 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4687 expander_handle);
4688 _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4689 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4690 do {
4691 handle = find_first_bit(ioc->blocking_handles,
4692 ioc->facts.MaxDevHandle);
4693 if (handle < ioc->facts.MaxDevHandle)
4694 _scsih_block_io_device(ioc, handle);
4695 } while (test_and_clear_bit(handle, ioc->blocking_handles));
4696 } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4697 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4698
4699 if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4700 return;
4701
4702 /* mark ignore flag for pending events */
4703 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4704 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4705 if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4706 fw_event->ignore)
4707 continue;
35b62362
JL
4708 local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4709 fw_event->event_data;
f92363d1
SR
4710 if (local_event_data->ExpStatus ==
4711 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4712 local_event_data->ExpStatus ==
4713 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4714 if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4715 expander_handle) {
919d8a3f
JP
4716 dewtprintk(ioc,
4717 ioc_info(ioc, "setting ignoring flag\n"));
f92363d1
SR
4718 fw_event->ignore = 1;
4719 }
4720 }
4721 }
4722 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4723}
4724
4318c734
SPS
4725/**
4726 * _scsih_check_pcie_topo_remove_events - sanity check on topo
4727 * events
4728 * @ioc: per adapter object
4729 * @event_data: the event data payload
4730 *
 4731 * This handles the case where the driver receives multiple switch
4732 * or device add and delete events in a single shot. When there
4733 * is a delete event the routine will void any pending add
4734 * events waiting in the event queue.
4318c734
SPS
4735 */
4736static void
4737_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4738 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4739{
4740 struct fw_event_work *fw_event;
4741 Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4742 unsigned long flags;
4743 int i, reason_code;
4744 u16 handle, switch_handle;
4745
4746 for (i = 0; i < event_data->NumEntries; i++) {
4747 handle =
4748 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4749 if (!handle)
4750 continue;
4751 reason_code = event_data->PortEntry[i].PortStatus;
4752 if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4753 _scsih_tm_tr_send(ioc, handle);
4754 }
4755
4756 switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4757 if (!switch_handle) {
4758 _scsih_block_io_to_pcie_children_attached_directly(
4759 ioc, event_data);
4760 return;
4761 }
4762 /* TODO We are not supporting cascaded PCIe Switch removal yet*/
4763 if ((event_data->SwitchStatus
4764 == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4765 (event_data->SwitchStatus ==
4766 MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4767 _scsih_block_io_to_pcie_children_attached_directly(
4768 ioc, event_data);
4769
4770 if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4771 return;
4772
4773 /* mark ignore flag for pending events */
4774 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4775 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4776 if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4777 fw_event->ignore)
4778 continue;
4779 local_event_data =
4780 (Mpi26EventDataPCIeTopologyChangeList_t *)
4781 fw_event->event_data;
4782 if (local_event_data->SwitchStatus ==
4783 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4784 local_event_data->SwitchStatus ==
4785 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4786 if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4787 switch_handle) {
919d8a3f
JP
4788 dewtprintk(ioc,
4789 ioc_info(ioc, "setting ignoring flag for switch event\n"));
4318c734
SPS
4790 fw_event->ignore = 1;
4791 }
4792 }
4793 }
4794 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4795}
4796
f92363d1
SR
4797/**
4798 * _scsih_set_volume_delete_flag - setting volume delete flag
4799 * @ioc: per adapter object
4800 * @handle: device handle
4801 *
4802 * This returns nothing.
4803 */
4804static void
4805_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4806{
4807 struct _raid_device *raid_device;
4808 struct MPT3SAS_TARGET *sas_target_priv_data;
4809 unsigned long flags;
4810
4811 spin_lock_irqsave(&ioc->raid_device_lock, flags);
c84b06a4 4812 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
f92363d1
SR
4813 if (raid_device && raid_device->starget &&
4814 raid_device->starget->hostdata) {
4815 sas_target_priv_data =
4816 raid_device->starget->hostdata;
4817 sas_target_priv_data->deleted = 1;
919d8a3f
JP
4818 dewtprintk(ioc,
4819 ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4820 handle, (u64)raid_device->wwid));
f92363d1
SR
4821 }
4822 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4823}
4824
4825/**
4826 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4827 * @handle: input handle
4828 * @a: handle for volume a
4829 * @b: handle for volume b
4830 *
 4831 * IR firmware only supports two raid volumes. The purpose of this
 4832 * routine is to set the volume handle in either a or b, when the given
 4833 * input handle is non-zero and has not already been recorded.
4834 */
4835static void
4836_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4837{
4838 if (!handle || handle == *a || handle == *b)
4839 return;
4840 if (!*a)
4841 *a = handle;
4842 else if (!*b)
4843 *b = handle;
4844}
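/*
 * Worked example: starting with a = b = 0, successive calls with
 * handles 0x11, 0x12, 0x11, 0x13 leave a = 0x11 and b = 0x12; the
 * repeated 0x11 returns early and 0x13 is dropped, since IR firmware
 * supports at most two volumes.
 */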
4845
4846/**
4847 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4848 * @ioc: per adapter object
4849 * @event_data: the event data payload
4850 * Context: interrupt time.
4851 *
 4852 * This routine will send a target reset to the volume, followed by
 4853 * target resets to the PDs. This is called when a PD has been removed,
 4854 * or a volume has been deleted or removed. When the target reset is
 4855 * sent to the volume, the PD target resets need to be queued to start
 4856 * upon completion of the volume target reset.
f92363d1
SR
4857 */
4858static void
4859_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4860 Mpi2EventDataIrConfigChangeList_t *event_data)
4861{
4862 Mpi2EventIrConfigElement_t *element;
4863 int i;
4864 u16 handle, volume_handle, a, b;
4865 struct _tr_list *delayed_tr;
4866
4867 a = 0;
4868 b = 0;
4869
7786ab6a
SR
4870 if (ioc->is_warpdrive)
4871 return;
4872
f92363d1
SR
4873 /* Volume Resets for Deleted or Removed */
4874 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4875 for (i = 0; i < event_data->NumElements; i++, element++) {
4876 if (le32_to_cpu(event_data->Flags) &
4877 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4878 continue;
4879 if (element->ReasonCode ==
4880 MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4881 element->ReasonCode ==
4882 MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4883 volume_handle = le16_to_cpu(element->VolDevHandle);
4884 _scsih_set_volume_delete_flag(ioc, volume_handle);
4885 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4886 }
4887 }
4888
4889 /* Volume Resets for UNHIDE events */
4890 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4891 for (i = 0; i < event_data->NumElements; i++, element++) {
4892 if (le32_to_cpu(event_data->Flags) &
4893 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4894 continue;
4895 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4896 volume_handle = le16_to_cpu(element->VolDevHandle);
4897 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4898 }
4899 }
4900
4901 if (a)
4902 _scsih_tm_tr_volume_send(ioc, a);
4903 if (b)
4904 _scsih_tm_tr_volume_send(ioc, b);
4905
4906 /* PD target resets */
4907 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4908 for (i = 0; i < event_data->NumElements; i++, element++) {
4909 if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4910 continue;
4911 handle = le16_to_cpu(element->PhysDiskDevHandle);
4912 volume_handle = le16_to_cpu(element->VolDevHandle);
4913 clear_bit(handle, ioc->pd_handles);
4914 if (!volume_handle)
4915 _scsih_tm_tr_send(ioc, handle);
4916 else if (volume_handle == a || volume_handle == b) {
4917 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4918 BUG_ON(!delayed_tr);
4919 INIT_LIST_HEAD(&delayed_tr->list);
4920 delayed_tr->handle = handle;
4921 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
919d8a3f
JP
4922 dewtprintk(ioc,
4923 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4924 handle));
f92363d1
SR
4925 } else
4926 _scsih_tm_tr_send(ioc, handle);
4927 }
4928}
4929
4930
4931/**
4932 * _scsih_check_volume_delete_events - set delete flag for volumes
4933 * @ioc: per adapter object
4934 * @event_data: the event data payload
4935 * Context: interrupt time.
4936 *
 4937 * This will handle the case when the cable connected to the entire volume is
4938 * pulled. We will take care of setting the deleted flag so normal IO will
4939 * not be sent.
f92363d1
SR
4940 */
4941static void
4942_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4943 Mpi2EventDataIrVolume_t *event_data)
4944{
4945 u32 state;
4946
4947 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4948 return;
4949 state = le32_to_cpu(event_data->NewValue);
4950 if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4951 MPI2_RAID_VOL_STATE_FAILED)
4952 _scsih_set_volume_delete_flag(ioc,
4953 le16_to_cpu(event_data->VolDevHandle));
4954}
4955
2d8ce8c9
SR
4956/**
4957 * _scsih_temp_threshold_events - display temperature threshold exceeded events
4958 * @ioc: per adapter object
4959 * @event_data: the temp threshold event data
4960 * Context: interrupt time.
4961 */
4962static void
4963_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4964 Mpi2EventDataTemperature_t *event_data)
4965{
fce0aa08 4966 u32 doorbell;
2d8ce8c9 4967 if (ioc->temp_sensors_count >= event_data->SensorNum) {
919d8a3f
JP
4968 ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4969 le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4970 le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4971 le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4972 le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4973 event_data->SensorNum);
4974 ioc_err(ioc, "Current Temp In Celsius: %d\n",
4975 event_data->CurrentTemperature);
fce0aa08
SR
4976 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4977 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
4978 if ((doorbell & MPI2_IOC_STATE_MASK) ==
4979 MPI2_IOC_STATE_FAULT) {
c5977718 4980 mpt3sas_print_fault_code(ioc,
fce0aa08
SR
4981 doorbell & MPI2_DOORBELL_DATA_MASK);
4982 } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
4983 MPI2_IOC_STATE_COREDUMP) {
c5977718 4984 mpt3sas_print_coredump_info(ioc,
fce0aa08
SR
4985 doorbell & MPI2_DOORBELL_DATA_MASK);
4986 }
4987 }
2d8ce8c9
SR
4988 }
4989}
4990
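/*
 * Illustrative sketch (not part of the driver): the Status word in the
 * temperature event is treated above as a bitmask with one bit per exceeded
 * threshold (bit 0 = threshold 0, bit 1 = threshold 1, and so on), which is
 * why the message tests 0x1, 0x2, 0x4 and 0x8 individually.  A minimal
 * standalone decoder of the same field:
 *
 *	#include <stdio.h>
 *
 *	static void print_exceeded_thresholds(unsigned short status)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 4; i++)
 *			if (status & (1u << i))
 *				printf("threshold %d exceeded\n", i);
 *	}
 */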
ffb58456 4991static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
7ff723ad 4992{
ffb58456
JB
4993 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4994
4995 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4996 return 0;
4997
4998 if (pending)
4999 return test_and_set_bit(0, &priv->ata_command_pending);
5000
5001 clear_bit(0, &priv->ata_command_pending);
5002 return 0;
7ff723ad
SP
5003}
5004
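/*
 * Illustrative sketch (not part of the driver): _scsih_set_satl_pending()
 * gives scsih_qcmd() a lockless way to keep only one ATA_12/ATA_16
 * pass-through command per device outstanding at a time; qcmd loops on it
 * with "do { ... } while (_scsih_set_satl_pending(scmd, true))".  A minimal
 * userspace analogue of the same claim/release idea, using C11 atomics
 * instead of the kernel's test_and_set_bit()/clear_bit():
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *
 *	static atomic_flag ata_pending = ATOMIC_FLAG_INIT;
 *
 *	static bool try_claim_satl(void)
 *	{
 *		// false means another ATA pass-through already holds the slot
 *		return !atomic_flag_test_and_set(&ata_pending);
 *	}
 *
 *	static void release_satl(void)
 *	{
 *		atomic_flag_clear(&ata_pending);
 *	}
 */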
5005/**
5006 * _scsih_flush_running_cmds - completing outstanding commands.
5007 * @ioc: per adapter object
5008 *
5009 * Flush out all pending scmd commands following a host reset;
5010 * all outstanding IO is dropped on the floor.
5011 */
5012static void
5013_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
5014{
5015 struct scsi_cmnd *scmd;
dbec4c90 5016 struct scsiio_tracker *st;
f92363d1 5017 u16 smid;
dbec4c90 5018 int count = 0;
f92363d1
SR
5019
5020 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
dbec4c90 5021 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
f92363d1
SR
5022 if (!scmd)
5023 continue;
5024 count++;
ffb58456 5025 _scsih_set_satl_pending(scmd, false);
dbec4c90
SPS
5026 st = scsi_cmd_priv(scmd);
5027 mpt3sas_base_clear_st(ioc, st);
f92363d1 5028 scsi_dma_unmap(scmd);
c666d3be 5029 if (ioc->pci_error_recovery || ioc->remove_host)
f92363d1
SR
5030 scmd->result = DID_NO_CONNECT << 16;
5031 else
5032 scmd->result = DID_RESET << 16;
b0c30079 5033 scsi_done(scmd);
f92363d1 5034 }
919d8a3f 5035 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
f92363d1
SR
5036}
5037
5038/**
5039 * _scsih_setup_eedp - setup MPI request for EEDP transfer
5040 * @ioc: per adapter object
5041 * @scmd: pointer to scsi command object
6c7abffc 5042 * @mpi_request: pointer to the SCSI_IO request message frame
5043 *
5044 * Supports protection types 1 and 3.
5045 */
5046static void
5047_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
016d5c35 5048 Mpi25SCSIIORequest_t *mpi_request)
f92363d1
SR
5049{
5050 u16 eedp_flags;
f92363d1
SR
5051 Mpi25SCSIIORequest_t *mpi_request_3v =
5052 (Mpi25SCSIIORequest_t *)mpi_request;
5053
b3e2c72a
MP
5054 switch (scsi_get_prot_op(scmd)) {
5055 case SCSI_PROT_READ_STRIP:
f92363d1 5056 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
b3e2c72a
MP
5057 break;
5058 case SCSI_PROT_WRITE_INSERT:
f92363d1 5059 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
b3e2c72a
MP
5060 break;
5061 default:
f92363d1 5062 return;
b3e2c72a 5063 }
f92363d1 5064
b3e2c72a
MP
5065 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
5066 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
f92363d1 5067
61a9f252
MP
5068 if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
5069 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
5070
5071 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
5072 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;
5073
f92363d1 5074 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
b3e2c72a 5075 cpu_to_be32(scsi_prot_ref_tag(scmd));
f92363d1
SR
5076 }
5077
b3e2c72a 5078 mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));
186a18e5
SPS
5079
5080 if (ioc->is_gen35_ioc)
5081 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
f92363d1
SR
5082 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
5083}
5084
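/*
 * Illustrative note (not part of the driver): the mapping applied above is
 *
 *	SCSI_PROT_READ_STRIP   -> MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
 *	SCSI_PROT_WRITE_INSERT -> MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
 *
 * plus the CHECK_GUARD, CHECK_REFTAG and INC_PRI_REFTAG flags depending on
 * scmd->prot_flags, with the protection interval copied into EEDPBlockSize
 * and the initial reference tag into PrimaryReferenceTag.  Any other
 * protection operation leaves the request without EEDP flags.
 */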
5085/**
5086 * _scsih_eedp_error_handling - return sense code for EEDP errors
5087 * @scmd: pointer to scsi command object
5088 * @ioc_status: ioc status
5089 */
5090static void
5091_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
5092{
5093 u8 ascq;
5094
5095 switch (ioc_status) {
5096 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5097 ascq = 0x01;
5098 break;
5099 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5100 ascq = 0x02;
5101 break;
5102 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5103 ascq = 0x03;
5104 break;
5105 default:
5106 ascq = 0x00;
5107 break;
5108 }
f2b1e9c6
HR
5109 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
5110 set_host_byte(scmd, DID_ABORT);
f92363d1
SR
5111}
5112
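/*
 * Illustrative sketch (not part of the driver): the handler above reports
 * T10 protection information failures with ASC 0x10 and an ASCQ of 0x01
 * (guard), 0x02 (application tag) or 0x03 (reference tag), i.e. the
 * standard "LOGICAL BLOCK ... CHECK FAILED" additional sense codes.  The
 * same mapping as a tiny standalone table:
 *
 *	enum pi_err { PI_GUARD, PI_APP_TAG, PI_REF_TAG };
 *
 *	static unsigned char pi_err_to_ascq(enum pi_err e)
 *	{
 *		static const unsigned char ascq[] = {
 *			[PI_GUARD]   = 0x01,
 *			[PI_APP_TAG] = 0x02,
 *			[PI_REF_TAG] = 0x03,
 *		};
 *		return ascq[e];
 *	}
 */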
f92363d1 5113/**
8a7e4c24 5114 * scsih_qcmd - main scsi request entry point
4beb4867 5115 * @shost: SCSI host pointer
f92363d1 5116 * @scmd: pointer to scsi command object
5117 *
5118 * The callback index is set inside `ioc->scsi_io_cb_idx`.
5119 *
4beb4867 5120 * Return: 0 on success. If there's a failure, return either:
5121 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
5122 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
5123 */
8bbb1cf6 5124static int
8a7e4c24 5125scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
f92363d1 5126{
d8bfbd8d 5127 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
f92363d1
SR
5128 struct MPT3SAS_DEVICE *sas_device_priv_data;
5129 struct MPT3SAS_TARGET *sas_target_priv_data;
7786ab6a 5130 struct _raid_device *raid_device;
24b3c922 5131 struct request *rq = scsi_cmd_to_rq(scmd);
307d9075 5132 int class;
016d5c35
SPS
5133 Mpi25SCSIIORequest_t *mpi_request;
5134 struct _pcie_device *pcie_device = NULL;
f92363d1
SR
5135 u32 mpi_control;
5136 u16 smid;
5137 u16 handle;
5138
f92363d1
SR
5139 if (ioc->logging_level & MPT_DEBUG_SCSI)
5140 scsi_print_command(scmd);
f92363d1 5141
f92363d1
SR
5142 sas_device_priv_data = scmd->device->hostdata;
5143 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
5144 scmd->result = DID_NO_CONNECT << 16;
b0c30079 5145 scsi_done(scmd);
f92363d1
SR
5146 return 0;
5147 }
5148
9029a725 5149 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
f92363d1 5150 scmd->result = DID_NO_CONNECT << 16;
b0c30079 5151 scsi_done(scmd);
f92363d1
SR
5152 return 0;
5153 }
5154
5155 sas_target_priv_data = sas_device_priv_data->sas_target;
5156
5157 /* invalid device handle */
5158 handle = sas_target_priv_data->handle;
5159 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
5160 scmd->result = DID_NO_CONNECT << 16;
b0c30079 5161 scsi_done(scmd);
f92363d1
SR
5162 return 0;
5163 }
5164
5165
199fd79a
BVA
5166 if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
5167 /* host recovery or link resets sent via IOCTLs */
f92363d1 5168 return SCSI_MLQUEUE_HOST_BUSY;
199fd79a
BVA
5169 } else if (sas_target_priv_data->deleted) {
5170 /* device has been deleted */
f92363d1 5171 scmd->result = DID_NO_CONNECT << 16;
b0c30079 5172 scsi_done(scmd);
f92363d1 5173 return 0;
f92363d1 5174 } else if (sas_target_priv_data->tm_busy ||
199fd79a
BVA
5175 sas_device_priv_data->block) {
5176 /* device busy with task management */
f92363d1 5177 return SCSI_MLQUEUE_DEVICE_BUSY;
199fd79a 5178 }
f92363d1 5179
f49d4aed
C
5180 /*
5181 * Bug workaround for firmware SATL handling. The loop
5182 * is based on atomic operations and ensures consistency,
5183 * since we are lockless at this point.
5184 */
5185 do {
1edc6770
SP
5186 if (test_bit(0, &sas_device_priv_data->ata_command_pending))
5187 return SCSI_MLQUEUE_DEVICE_BUSY;
f49d4aed
C
5188 } while (_scsih_set_satl_pending(scmd, true));
5189
f92363d1
SR
5190 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
5191 mpi_control = MPI2_SCSIIO_CONTROL_READ;
5192 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
5193 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
5194 else
5195 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
5196
5197 /* set tags */
609aa22f 5198 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
307d9075
AM
5199 /* NCQ Prio supported, make sure control indicated high priority */
5200 if (sas_device_priv_data->ncq_prio_enable) {
5201 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
5202 if (class == IOPRIO_CLASS_RT)
5203 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
5204 }
7786ab6a
SR
5205 /* Make sure Device is not raid volume.
5206 * We do not expose raid functionality to upper layer for warpdrive.
5207 */
cd5897ed
SPS
5208 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5209 && !scsih_is_nvme(&scmd->device->sdev_gendev))
5210 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
f92363d1
SR
5211 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
5212
5213 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5214 if (!smid) {
919d8a3f 5215 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
f49d4aed 5216 _scsih_set_satl_pending(scmd, false);
f92363d1
SR
5217 goto out;
5218 }
5219 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
016d5c35 5220 memset(mpi_request, 0, ioc->request_sz);
f92363d1
SR
5221 _scsih_setup_eedp(ioc, scmd, mpi_request);
5222
5223 if (scmd->cmd_len == 32)
5224 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5225 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5226 if (sas_device_priv_data->sas_target->flags &
5227 MPT_TARGET_FLAGS_RAID_COMPONENT)
5228 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5229 else
5230 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5231 mpi_request->DevHandle = cpu_to_le16(handle);
5232 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5233 mpi_request->Control = cpu_to_le32(mpi_control);
5234 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5235 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5236 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5237 mpi_request->SenseBufferLowAddress =
5238 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
016d5c35 5239 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
f92363d1
SR
5240 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5241 mpi_request->LUN);
5242 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5243
5244 if (mpi_request->DataLength) {
016d5c35
SPS
5245 pcie_device = sas_target_priv_data->pcie_dev;
5246 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
f92363d1 5247 mpt3sas_base_free_smid(ioc, smid);
f49d4aed 5248 _scsih_set_satl_pending(scmd, false);
f92363d1
SR
5249 goto out;
5250 }
5251 } else
5252 ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5253
7786ab6a
SR
5254 raid_device = sas_target_priv_data->raid_device;
5255 if (raid_device && raid_device->direct_io_enabled)
cd5897ed 5256 mpt3sas_setup_direct_io(ioc, scmd,
dbec4c90 5257 raid_device, mpi_request);
7786ab6a 5258
f92363d1
SR
5259 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5260 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5261 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5262 MPI25_SCSIIO_IOFLAGS_FAST_PATH);
078a4cc1 5263 ioc->put_smid_fast_path(ioc, smid, handle);
f92363d1 5264 } else
81c16f83 5265 ioc->put_smid_scsi_io(ioc, smid,
7786ab6a 5266 le16_to_cpu(mpi_request->DevHandle));
f92363d1 5267 } else
078a4cc1 5268 ioc->put_smid_default(ioc, smid);
f92363d1
SR
5269 return 0;
5270
5271 out:
5272 return SCSI_MLQUEUE_HOST_BUSY;
5273}
f92363d1
SR
5274
5275/**
5276 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5277 * @sense_buffer: sense data returned by target
5278 * @data: normalized skey/asc/ascq
f92363d1
SR
5279 */
5280static void
5281_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
5282{
5283 if ((sense_buffer[0] & 0x7F) >= 0x72) {
5284 /* descriptor format */
5285 data->skey = sense_buffer[1] & 0x0F;
5286 data->asc = sense_buffer[2];
5287 data->ascq = sense_buffer[3];
5288 } else {
5289 /* fixed format */
5290 data->skey = sense_buffer[2] & 0x0F;
5291 data->asc = sense_buffer[12];
5292 data->ascq = sense_buffer[13];
5293 }
5294}
5295
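/*
 * Illustrative sketch (not part of the driver): the normalization above
 * works because fixed-format sense data (response codes 0x70/0x71) keeps
 * the sense key in byte 2 and the ASC/ASCQ in bytes 12/13, while
 * descriptor-format sense data (0x72/0x73) moves them to bytes 1, 2 and 3.
 * A hypothetical standalone decoder doing the same thing:
 *
 *	struct skey_asc_ascq { unsigned char skey, asc, ascq; };
 *
 *	static struct skey_asc_ascq decode_sense(const unsigned char *sb)
 *	{
 *		struct skey_asc_ascq out;
 *
 *		if ((sb[0] & 0x7F) >= 0x72) {		// descriptor format
 *			out.skey = sb[1] & 0x0F;
 *			out.asc  = sb[2];
 *			out.ascq = sb[3];
 *		} else {				// fixed format
 *			out.skey = sb[2] & 0x0F;
 *			out.asc  = sb[12];
 *			out.ascq = sb[13];
 *		}
 *		return out;
 *	}
 */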
5296/**
5297 * _scsih_scsi_ioc_info - translate non-successful SCSI_IO request
5298 * @ioc: per adapter object
5299 * @scmd: pointer to scsi command object
5300 * @mpi_reply: reply mf payload returned from firmware
4beb4867 5301 * @smid: ?
5302 *
5303 * scsi_status - SCSI Status code returned from target device
5304 * scsi_state - state info associated with SCSI_IO determined by ioc
5305 * ioc_status - ioc supplied status info
5306 */
5307static void
5308_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5309 Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
5310{
5311 u32 response_info;
5312 u8 *response_bytes;
5313 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
5314 MPI2_IOCSTATUS_MASK;
5315 u8 scsi_state = mpi_reply->SCSIState;
5316 u8 scsi_status = mpi_reply->SCSIStatus;
5317 char *desc_ioc_state = NULL;
5318 char *desc_scsi_status = NULL;
5319 char *desc_scsi_state = ioc->tmp_string;
5320 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5321 struct _sas_device *sas_device = NULL;
ec051e5a 5322 struct _pcie_device *pcie_device = NULL;
f92363d1
SR
5323 struct scsi_target *starget = scmd->device->sdev_target;
5324 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
5325 char *device_str = NULL;
5326
5327 if (!priv_target)
5328 return;
7786ab6a
SR
5329 if (ioc->hide_ir_msg)
5330 device_str = "WarpDrive";
5331 else
5332 device_str = "volume";
f92363d1
SR
5333
5334 if (log_info == 0x31170000)
5335 return;
5336
5337 switch (ioc_status) {
5338 case MPI2_IOCSTATUS_SUCCESS:
5339 desc_ioc_state = "success";
5340 break;
5341 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5342 desc_ioc_state = "invalid function";
5343 break;
5344 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5345 desc_ioc_state = "scsi recovered error";
5346 break;
5347 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
5348 desc_ioc_state = "scsi invalid dev handle";
5349 break;
5350 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5351 desc_ioc_state = "scsi device not there";
5352 break;
5353 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5354 desc_ioc_state = "scsi data overrun";
5355 break;
5356 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5357 desc_ioc_state = "scsi data underrun";
5358 break;
5359 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5360 desc_ioc_state = "scsi io data error";
5361 break;
5362 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5363 desc_ioc_state = "scsi protocol error";
5364 break;
5365 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5366 desc_ioc_state = "scsi task terminated";
5367 break;
5368 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5369 desc_ioc_state = "scsi residual mismatch";
5370 break;
5371 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5372 desc_ioc_state = "scsi task mgmt failed";
5373 break;
5374 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5375 desc_ioc_state = "scsi ioc terminated";
5376 break;
5377 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5378 desc_ioc_state = "scsi ext terminated";
5379 break;
5380 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5381 desc_ioc_state = "eedp guard error";
5382 break;
5383 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5384 desc_ioc_state = "eedp ref tag error";
5385 break;
5386 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5387 desc_ioc_state = "eedp app tag error";
5388 break;
b130b0d5
SS
5389 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5390 desc_ioc_state = "insufficient power";
5391 break;
f92363d1
SR
5392 default:
5393 desc_ioc_state = "unknown";
5394 break;
5395 }
5396
5397 switch (scsi_status) {
5398 case MPI2_SCSI_STATUS_GOOD:
5399 desc_scsi_status = "good";
5400 break;
5401 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5402 desc_scsi_status = "check condition";
5403 break;
5404 case MPI2_SCSI_STATUS_CONDITION_MET:
5405 desc_scsi_status = "condition met";
5406 break;
5407 case MPI2_SCSI_STATUS_BUSY:
5408 desc_scsi_status = "busy";
5409 break;
5410 case MPI2_SCSI_STATUS_INTERMEDIATE:
5411 desc_scsi_status = "intermediate";
5412 break;
5413 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
5414 desc_scsi_status = "intermediate condmet";
5415 break;
5416 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5417 desc_scsi_status = "reservation conflict";
5418 break;
5419 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
5420 desc_scsi_status = "command terminated";
5421 break;
5422 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5423 desc_scsi_status = "task set full";
5424 break;
5425 case MPI2_SCSI_STATUS_ACA_ACTIVE:
5426 desc_scsi_status = "aca active";
5427 break;
5428 case MPI2_SCSI_STATUS_TASK_ABORTED:
5429 desc_scsi_status = "task aborted";
5430 break;
5431 default:
5432 desc_scsi_status = "unknown";
5433 break;
5434 }
5435
5436 desc_scsi_state[0] = '\0';
5437 if (!scsi_state)
5438 desc_scsi_state = " ";
5439 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5440 strcat(desc_scsi_state, "response info ");
5441 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5442 strcat(desc_scsi_state, "state terminated ");
5443 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
5444 strcat(desc_scsi_state, "no status ");
5445 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
5446 strcat(desc_scsi_state, "autosense failed ");
5447 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
5448 strcat(desc_scsi_state, "autosense valid ");
5449
5450 scsi_print_command(scmd);
5451
5452 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
919d8a3f
JP
5453 ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
5454 device_str, (u64)priv_target->sas_address);
ec051e5a
SPS
5455 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
5456 pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
5457 if (pcie_device) {
919d8a3f
JP
5458 ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
5459 (u64)pcie_device->wwid, pcie_device->port_num);
ec051e5a 5460 if (pcie_device->enclosure_handle != 0)
919d8a3f
JP
5461 ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
5462 (u64)pcie_device->enclosure_logical_id,
5463 pcie_device->slot);
ec051e5a 5464 if (pcie_device->connector_name[0])
919d8a3f
JP
5465 ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
5466 pcie_device->enclosure_level,
5467 pcie_device->connector_name);
ec051e5a
SPS
5468 pcie_device_put(pcie_device);
5469 }
f92363d1 5470 } else {
d1cb5e49 5471 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
f92363d1 5472 if (sas_device) {
919d8a3f
JP
5473 ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
5474 (u64)sas_device->sas_address, sas_device->phy);
75888956
SR
5475
5476 _scsih_display_enclosure_chassis_info(ioc, sas_device,
5477 NULL, NULL);
d1cb5e49
SR
5478
5479 sas_device_put(sas_device);
f92363d1 5480 }
f92363d1
SR
5481 }
5482
919d8a3f
JP
5483 ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
5484 le16_to_cpu(mpi_reply->DevHandle),
5485 desc_ioc_state, ioc_status, smid);
5486 ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5487 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5488 ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5489 le16_to_cpu(mpi_reply->TaskTag),
5490 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5491 ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5492 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
f92363d1
SR
5493
5494 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5495 struct sense_info data;
5496 _scsih_normalize_sense(scmd->sense_buffer, &data);
919d8a3f
JP
5497 ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5498 data.skey, data.asc, data.ascq,
5499 le32_to_cpu(mpi_reply->SenseCount));
f92363d1 5500 }
f92363d1
SR
5501 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5502 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5503 response_bytes = (u8 *)&response_info;
5504 _scsih_response_code(ioc, response_bytes[0]);
5505 }
5506}
f92363d1
SR
5507
5508/**
0f624c39 5509 * _scsih_turn_on_pfa_led - illuminate PFA LED
5510 * @ioc: per adapter object
5511 * @handle: device handle
5512 * Context: process
5513 */
5514static void
0f624c39 5515_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
f92363d1
SR
5516{
5517 Mpi2SepReply_t mpi_reply;
5518 Mpi2SepRequest_t mpi_request;
0f624c39
SR
5519 struct _sas_device *sas_device;
5520
d1cb5e49 5521 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
0f624c39
SR
5522 if (!sas_device)
5523 return;
f92363d1
SR
5524
5525 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5526 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5527 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5528 mpi_request.SlotStatus =
5529 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5530 mpi_request.DevHandle = cpu_to_le16(handle);
5531 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5532 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5533 &mpi_request)) != 0) {
919d8a3f
JP
5534 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5535 __FILE__, __LINE__, __func__);
d1cb5e49 5536 goto out;
f92363d1 5537 }
0f624c39 5538 sas_device->pfa_led_on = 1;
f92363d1
SR
5539
5540 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
919d8a3f
JP
5541 dewtprintk(ioc,
5542 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5543 le16_to_cpu(mpi_reply.IOCStatus),
5544 le32_to_cpu(mpi_reply.IOCLogInfo)));
d1cb5e49 5545 goto out;
f92363d1 5546 }
d1cb5e49
SR
5547out:
5548 sas_device_put(sas_device);
f92363d1 5549}
d1cb5e49 5550
0f624c39
SR
5551/**
5552 * _scsih_turn_off_pfa_led - turn off Fault LED
5553 * @ioc: per adapter object
5554 * @sas_device: sas device whose PFA LED has to be turned off
5555 * Context: process
5556 */
5557static void
5558_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5559 struct _sas_device *sas_device)
5560{
5561 Mpi2SepReply_t mpi_reply;
5562 Mpi2SepRequest_t mpi_request;
f92363d1 5563
0f624c39
SR
5564 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5565 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5566 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5567 mpi_request.SlotStatus = 0;
5568 mpi_request.Slot = cpu_to_le16(sas_device->slot);
5569 mpi_request.DevHandle = 0;
5570 mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5571 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5572 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5573 &mpi_request)) != 0) {
1f95a47e
JP
5574 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5575 __FILE__, __LINE__, __func__);
0f624c39
SR
5576 return;
5577 }
5578
5579 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
1f95a47e
JP
5580 dewtprintk(ioc,
5581 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5582 le16_to_cpu(mpi_reply.IOCStatus),
5583 le32_to_cpu(mpi_reply.IOCLogInfo)));
0f624c39
SR
5584 return;
5585 }
5586}
d1cb5e49 5587
f92363d1 5588/**
0f624c39 5589 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5590 * @ioc: per adapter object
5591 * @handle: device handle
5592 * Context: interrupt.
5593 */
5594static void
0f624c39 5595_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
f92363d1
SR
5596{
5597 struct fw_event_work *fw_event;
5598
146b16c8 5599 fw_event = alloc_fw_event_work(0);
f92363d1
SR
5600 if (!fw_event)
5601 return;
0f624c39 5602 fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
f92363d1
SR
5603 fw_event->device_handle = handle;
5604 fw_event->ioc = ioc;
5605 _scsih_fw_event_add(ioc, fw_event);
146b16c8 5606 fw_event_work_put(fw_event);
f92363d1
SR
5607}
5608
5609/**
5610 * _scsih_smart_predicted_fault - process smart errors
5611 * @ioc: per adapter object
5612 * @handle: device handle
5613 * Context: interrupt.
5614 */
5615static void
5616_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5617{
5618 struct scsi_target *starget;
5619 struct MPT3SAS_TARGET *sas_target_priv_data;
5620 Mpi2EventNotificationReply_t *event_reply;
5621 Mpi2EventDataSasDeviceStatusChange_t *event_data;
5622 struct _sas_device *sas_device;
5623 ssize_t sz;
5624 unsigned long flags;
5625
5626 /* only handle non-raid devices */
5627 spin_lock_irqsave(&ioc->sas_device_lock, flags);
d1cb5e49
SR
5628 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5629 if (!sas_device)
5630 goto out_unlock;
5631
f92363d1
SR
5632 starget = sas_device->starget;
5633 sas_target_priv_data = starget->hostdata;
5634
5635 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
d1cb5e49
SR
5636 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5637 goto out_unlock;
5638
75888956
SR
5639 _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5640
f92363d1
SR
5641 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5642
5643 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
0f624c39 5644 _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
f92363d1
SR
5645
5646 /* insert into event log */
5647 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5648 sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5bb2f743 5649 event_reply = kzalloc(sz, GFP_ATOMIC);
f92363d1 5650 if (!event_reply) {
919d8a3f
JP
5651 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5652 __FILE__, __LINE__, __func__);
d1cb5e49 5653 goto out;
f92363d1
SR
5654 }
5655
5656 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5657 event_reply->Event =
5658 cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5659 event_reply->MsgLength = sz/4;
5660 event_reply->EventDataLength =
5661 cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5662 event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5663 event_reply->EventData;
5664 event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5665 event_data->ASC = 0x5D;
5666 event_data->DevHandle = cpu_to_le16(handle);
5667 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5668 mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5669 kfree(event_reply);
d1cb5e49
SR
5670out:
5671 if (sas_device)
5672 sas_device_put(sas_device);
5673 return;
5674
5675out_unlock:
5676 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5677 goto out;
f92363d1
SR
5678}
5679
5680/**
5681 * _scsih_io_done - scsi request callback
5682 * @ioc: per adapter object
5683 * @smid: system request message index
5684 * @msix_index: MSIX table index supplied by the OS
5685 * @reply: reply message frame(lower 32bit addr)
5686 *
5687 * Callback handler when using _scsih_qcmd.
5688 *
5689 * Return: 1 meaning mf should be freed from _base_interrupt
5690 * 0 means the mf is freed from this function.
5691 */
5692static u8
5693_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5694{
016d5c35 5695 Mpi25SCSIIORequest_t *mpi_request;
f92363d1
SR
5696 Mpi2SCSIIOReply_t *mpi_reply;
5697 struct scsi_cmnd *scmd;
dbec4c90 5698 struct scsiio_tracker *st;
f92363d1
SR
5699 u16 ioc_status;
5700 u32 xfer_cnt;
5701 u8 scsi_state;
5702 u8 scsi_status;
5703 u32 log_info;
5704 struct MPT3SAS_DEVICE *sas_device_priv_data;
5705 u32 response_code = 0;
5706
5707 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
459325c4 5708
dbec4c90 5709 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
f92363d1
SR
5710 if (scmd == NULL)
5711 return 1;
5712
ffb58456 5713 _scsih_set_satl_pending(scmd, false);
18f6084a 5714
f92363d1
SR
5715 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5716
5717 if (mpi_reply == NULL) {
5718 scmd->result = DID_OK << 16;
5719 goto out;
5720 }
5721
5722 sas_device_priv_data = scmd->device->hostdata;
5723 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5724 sas_device_priv_data->sas_target->deleted) {
5725 scmd->result = DID_NO_CONNECT << 16;
5726 goto out;
5727 }
5728 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5729
7786ab6a
SR
5730 /*
5731 * WARPDRIVE: If direct_io is set then it is directIO,
5732 * the failed direct I/O should be redirected to volume
5733 */
dbec4c90
SPS
5734 st = scsi_cmd_priv(scmd);
5735 if (st->direct_io &&
7786ab6a
SR
5736 ((ioc_status & MPI2_IOCSTATUS_MASK)
5737 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
dbec4c90 5738 st->direct_io = 0;
998c3001 5739 st->scmd = scmd;
7786ab6a
SR
5740 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5741 mpi_request->DevHandle =
5742 cpu_to_le16(sas_device_priv_data->sas_target->handle);
81c16f83 5743 ioc->put_smid_scsi_io(ioc, smid,
7786ab6a
SR
5744 sas_device_priv_data->sas_target->handle);
5745 return 0;
5746 }
f92363d1
SR
5747 /* turning off TLR */
5748 scsi_state = mpi_reply->SCSIState;
5749 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5750 response_code =
5751 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5752 if (!sas_device_priv_data->tlr_snoop_check) {
5753 sas_device_priv_data->tlr_snoop_check++;
cd5897ed 5754 if ((!ioc->is_warpdrive &&
7786ab6a 5755 !scsih_is_raid(&scmd->device->sdev_gendev) &&
cd5897ed
SPS
5756 !scsih_is_nvme(&scmd->device->sdev_gendev))
5757 && sas_is_tlr_enabled(scmd->device) &&
c84b06a4
SR
5758 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5759 sas_disable_tlr(scmd->device);
5760 sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5761 }
f92363d1
SR
5762 }
5763
5764 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5765 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5766 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5767 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5768 else
5769 log_info = 0;
5770 ioc_status &= MPI2_IOCSTATUS_MASK;
5771 scsi_status = mpi_reply->SCSIStatus;
5772
5773 if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5774 (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5775 scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5776 scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5777 ioc_status = MPI2_IOCSTATUS_SUCCESS;
5778 }
5779
5780 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5781 struct sense_info data;
5782 const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5783 smid);
5784 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5785 le32_to_cpu(mpi_reply->SenseCount));
5786 memcpy(scmd->sense_buffer, sense_data, sz);
5787 _scsih_normalize_sense(scmd->sense_buffer, &data);
5788 /* failure prediction threshold exceeded */
5789 if (data.asc == 0x5D)
5790 _scsih_smart_predicted_fault(ioc,
5791 le16_to_cpu(mpi_reply->DevHandle));
5792 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
f92363d1 5793
0d667f72 5794 if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
e6d45e3e
SR
5795 ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5796 (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5797 (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5798 _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
e6d45e3e 5799 }
f92363d1
SR
5800 switch (ioc_status) {
5801 case MPI2_IOCSTATUS_BUSY:
5802 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5803 scmd->result = SAM_STAT_BUSY;
5804 break;
5805
5806 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5807 scmd->result = DID_NO_CONNECT << 16;
5808 break;
5809
5810 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5811 if (sas_device_priv_data->block) {
5812 scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5813 goto out;
5814 }
5815 if (log_info == 0x31110630) {
5816 if (scmd->retries > 2) {
5817 scmd->result = DID_NO_CONNECT << 16;
5818 scsi_device_set_state(scmd->device,
5819 SDEV_OFFLINE);
5820 } else {
5821 scmd->result = DID_SOFT_ERROR << 16;
5822 scmd->device->expecting_cc_ua = 1;
5823 }
5824 break;
3898f08e
SR
5825 } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5826 scmd->result = DID_RESET << 16;
5827 break;
2ce9a364
SR
5828 } else if ((scmd->device->channel == RAID_CHANNEL) &&
5829 (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5830 MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5831 scmd->result = DID_RESET << 16;
5832 break;
f92363d1
SR
5833 }
5834 scmd->result = DID_SOFT_ERROR << 16;
5835 break;
5836 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5837 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5838 scmd->result = DID_RESET << 16;
5839 break;
5840
5841 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5842 if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5843 scmd->result = DID_SOFT_ERROR << 16;
5844 else
5845 scmd->result = (DID_OK << 16) | scsi_status;
5846 break;
5847
5848 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5849 scmd->result = (DID_OK << 16) | scsi_status;
5850
5851 if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5852 break;
5853
5854 if (xfer_cnt < scmd->underflow) {
5855 if (scsi_status == SAM_STAT_BUSY)
5856 scmd->result = SAM_STAT_BUSY;
5857 else
5858 scmd->result = DID_SOFT_ERROR << 16;
5859 } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5860 MPI2_SCSI_STATE_NO_SCSI_STATUS))
5861 scmd->result = DID_SOFT_ERROR << 16;
5862 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5863 scmd->result = DID_RESET << 16;
5864 else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5865 mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5866 mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
f2b1e9c6
HR
5867 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
5868 0x20, 0);
f92363d1
SR
5869 }
5870 break;
5871
5872 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5873 scsi_set_resid(scmd, 0);
df561f66 5874 fallthrough;
f92363d1
SR
5875 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5876 case MPI2_IOCSTATUS_SUCCESS:
5877 scmd->result = (DID_OK << 16) | scsi_status;
5878 if (response_code ==
5879 MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5880 (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5881 MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5882 scmd->result = DID_SOFT_ERROR << 16;
5883 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5884 scmd->result = DID_RESET << 16;
5885 break;
5886
5887 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5888 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5889 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5890 _scsih_eedp_error_handling(scmd, ioc_status);
5891 break;
5892
5893 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5894 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5895 case MPI2_IOCSTATUS_INVALID_SGL:
5896 case MPI2_IOCSTATUS_INTERNAL_ERROR:
5897 case MPI2_IOCSTATUS_INVALID_FIELD:
5898 case MPI2_IOCSTATUS_INVALID_STATE:
5899 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5900 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
b130b0d5 5901 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
f92363d1
SR
5902 default:
5903 scmd->result = DID_SOFT_ERROR << 16;
5904 break;
5905
5906 }
5907
f92363d1
SR
5908 if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5909 _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
f92363d1
SR
5910
5911 out:
5912
5913 scsi_dma_unmap(scmd);
dbec4c90 5914 mpt3sas_base_free_smid(ioc, smid);
b0c30079 5915 scsi_done(scmd);
dbec4c90 5916 return 0;
f92363d1
SR
5917}
5918
ffa381d6
SR
5919/**
5920 * _scsih_update_vphys_after_reset - update the Port's
5921 * vphys_list after reset
5922 * @ioc: per adapter object
5923 *
5924 * Returns nothing.
5925 */
5926static void
5927_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
5928{
5929 u16 sz, ioc_status;
5930 int i;
5931 Mpi2ConfigReply_t mpi_reply;
5932 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5933 u16 attached_handle;
5934 u64 attached_sas_addr;
5935 u8 found = 0, port_id;
5936 Mpi2SasPhyPage0_t phy_pg0;
5937 struct hba_port *port, *port_next, *mport;
5938 struct virtual_phy *vphy, *vphy_next;
5939 struct _sas_device *sas_device;
5940
5941 /*
5942 * Mark all the vphys objects as dirty.
5943 */
5944 list_for_each_entry_safe(port, port_next,
5945 &ioc->port_table_list, list) {
5946 if (!port->vphys_mask)
5947 continue;
5948 list_for_each_entry_safe(vphy, vphy_next,
5949 &port->vphys_list, list) {
5950 vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
5951 }
5952 }
5953
5954 /*
5955 * Read SASIOUnitPage0 to get each HBA Phy's data.
5956 */
5957 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
5958 (ioc->sas_hba.num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
5959 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5960 if (!sas_iounit_pg0) {
5961 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5962 __FILE__, __LINE__, __func__);
5963 return;
5964 }
5965 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5966 sas_iounit_pg0, sz)) != 0)
5967 goto out;
5968 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5969 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5970 goto out;
5971 /*
5972 * Loop over each HBA Phy.
5973 */
5974 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
5975 /*
5976 * Skip the Phy if its Negotiated Link Rate is below 1.5G.
5977 */
5978 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
5979 MPI2_SAS_NEG_LINK_RATE_1_5)
5980 continue;
5981 /*
5982 * Check whether the Phy is connected to a SEP device or not.
5983 * If it is a SEP device, read the Phy's SASPHYPage0 data to
5984 * determine whether the Phy is a virtual Phy or not. If it is a
5985 * virtual phy, then it is confirmed that the attached remote
5986 * device is the HBA's vSES device.
5987 */
5988 if (!(le32_to_cpu(
5989 sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
5990 MPI2_SAS_DEVICE_INFO_SEP))
5991 continue;
5992
5993 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5994 i))) {
5995 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5996 __FILE__, __LINE__, __func__);
5997 continue;
5998 }
5999
6000 if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6001 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6002 continue;
6003 /*
6004 * Get the vSES device's SAS Address.
6005 */
6006 attached_handle = le16_to_cpu(
6007 sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6008 if (_scsih_get_sas_address(ioc, attached_handle,
6009 &attached_sas_addr) != 0) {
6010 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6011 __FILE__, __LINE__, __func__);
6012 continue;
6013 }
6014
6015 found = 0;
6016 port = port_next = NULL;
6017 /*
6018 * Loop over each virtual_phy object from
6019 * each port's vphys_list.
6020 */
6021 list_for_each_entry_safe(port,
6022 port_next, &ioc->port_table_list, list) {
6023 if (!port->vphys_mask)
6024 continue;
6025 list_for_each_entry_safe(vphy, vphy_next,
6026 &port->vphys_list, list) {
6027 /*
6028 * Continue with next virtual_phy object
6029 * if the object is not marked as dirty.
6030 */
6031 if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
6032 continue;
6033
6034 /*
6035 * Continue with next virtual_phy object
6036 * if the object's SAS Address is not equal
6037 * to the current Phy's vSES device SAS Address.
6038 */
6039 if (vphy->sas_address != attached_sas_addr)
6040 continue;
6041 /*
6042 * Enable current Phy number bit in object's
6043 * phy_mask field.
6044 */
6045 if (!(vphy->phy_mask & (1 << i)))
6046 vphy->phy_mask = (1 << i);
6047 /*
6048 * Get hba_port object from hba_port table
6049 * corresponding to current phy's Port ID.
6050 * if there is no hba_port object corresponding
6051 * to Phy's Port ID then create a new hba_port
6052 * object & add to hba_port table.
6053 */
6054 port_id = sas_iounit_pg0->PhyData[i].Port;
6055 mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
6056 if (!mport) {
6057 mport = kzalloc(
6058 sizeof(struct hba_port), GFP_KERNEL);
6059 if (!mport)
6060 break;
6061 mport->port_id = port_id;
6062 ioc_info(ioc,
6063 "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
6064 __func__, mport, mport->port_id);
6065 list_add_tail(&mport->list,
6066 &ioc->port_table_list);
6067 }
6068 /*
6069 * If mport & port pointers are not pointing to
6070 * same hba_port object then it means that vSES
6071 * device's Port ID got changed after reset and
6072 * hence move current virtual_phy object from
6073 * port's vphys_list to mport's vphys_list.
6074 */
6075 if (port != mport) {
6076 if (!mport->vphys_mask)
6077 INIT_LIST_HEAD(
6078 &mport->vphys_list);
6079 mport->vphys_mask |= (1 << i);
6080 port->vphys_mask &= ~(1 << i);
6081 list_move(&vphy->list,
6082 &mport->vphys_list);
6083 sas_device = mpt3sas_get_sdev_by_addr(
6084 ioc, attached_sas_addr, port);
6085 if (sas_device)
6086 sas_device->port = mport;
6087 }
6088 /*
6089 * Earlier, while updating the hba_port table,
6090 * it was determined that there is no other
6091 * directly attached device with mport's Port ID,
6092 * hence mport was marked as dirty. Only the vSES
6093 * device has this Port ID, so unmark the mport
6094 * as dirty.
6095 */
6096 if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
6097 mport->sas_address = 0;
6098 mport->phy_mask = 0;
6099 mport->flags &=
6100 ~HBA_PORT_FLAG_DIRTY_PORT;
6101 }
6102 /*
6103 * Unmark current virtual_phy object as dirty.
6104 */
6105 vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
6106 found = 1;
6107 break;
6108 }
6109 if (found)
6110 break;
6111 }
6112 }
6113out:
6114 kfree(sas_iounit_pg0);
6115}
6116
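/*
 * Illustrative note (not part of the driver): the overall flow above is
 *
 *	1. mark every virtual_phy object on every hba_port as dirty,
 *	2. re-read SASIOUnitPage0 and, for each HBA phy attached to a vSES
 *	   device, look up the dirty virtual_phy with the same SAS address,
 *	3. move that virtual_phy to the hba_port matching the phy's new
 *	   Port ID (creating the hba_port if needed) and clear its dirty flag.
 *
 * Any virtual_phy still marked dirty afterwards is stale and is removed
 * later by _scsih_del_dirty_vphy().
 */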
a5e99fda
SR
6117/**
6118 * _scsih_get_port_table_after_reset - Construct temporary port table
6119 * @ioc: per adapter object
6120 * @port_table: address where port table needs to be constructed
6121 *
6122 * return number of HBA port entries available after reset.
6123 */
6124static int
6125_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6126 struct hba_port *port_table)
6127{
6128 u16 sz, ioc_status;
6129 int i, j;
6130 Mpi2ConfigReply_t mpi_reply;
6131 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6132 u16 attached_handle;
6133 u64 attached_sas_addr;
6134 u8 found = 0, port_count = 0, port_id;
6135
6136 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6137 * sizeof(Mpi2SasIOUnit0PhyData_t));
6138 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6139 if (!sas_iounit_pg0) {
6140 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6141 __FILE__, __LINE__, __func__);
6142 return port_count;
6143 }
6144
6145 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6146 sas_iounit_pg0, sz)) != 0)
6147 goto out;
6148 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6149 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6150 goto out;
6151 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
6152 found = 0;
6153 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6154 MPI2_SAS_NEG_LINK_RATE_1_5)
6155 continue;
6156 attached_handle =
6157 le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6158 if (_scsih_get_sas_address(
6159 ioc, attached_handle, &attached_sas_addr) != 0) {
6160 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6161 __FILE__, __LINE__, __func__);
6162 continue;
6163 }
6164
6165 for (j = 0; j < port_count; j++) {
6166 port_id = sas_iounit_pg0->PhyData[i].Port;
6167 if (port_table[j].port_id == port_id &&
6168 port_table[j].sas_address == attached_sas_addr) {
6169 port_table[j].phy_mask |= (1 << i);
6170 found = 1;
6171 break;
6172 }
6173 }
6174
6175 if (found)
6176 continue;
6177
6178 port_id = sas_iounit_pg0->PhyData[i].Port;
6179 port_table[port_count].port_id = port_id;
6180 port_table[port_count].phy_mask = (1 << i);
6181 port_table[port_count].sas_address = attached_sas_addr;
6182 port_count++;
6183 }
6184out:
6185 kfree(sas_iounit_pg0);
6186 return port_count;
6187}
6188
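/*
 * Illustrative note (not part of the driver): the temporary table built
 * above keys each entry on the (Port ID, attached SAS address) pair and
 * accumulates a bit in phy_mask for every HBA phy belonging to that pair,
 * skipping phys that are down (negotiated link rate below 1.5G) or whose
 * attached SAS address cannot be resolved.
 */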
6189enum hba_port_matched_codes {
6190 NOT_MATCHED = 0,
6191 MATCHED_WITH_ADDR_AND_PHYMASK,
6192 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,
6193 MATCHED_WITH_ADDR_AND_SUBPHYMASK,
6194 MATCHED_WITH_ADDR,
6195};
6196
6197/**
6198 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
6199 * from HBA port table
6200 * @ioc: per adapter object
a8d548b0 6201 * @port_entry: hba port entry from temporary port table which needs to be
a5e99fda 6202 * searched for matched entry in the HBA port table
6203 * @matched_port_entry: save matched hba port entry here
6204 * @count: count of matched entries
6205 *
6206 * return type of matched entry found.
6207 */
6208static enum hba_port_matched_codes
6209_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
6210 struct hba_port *port_entry,
6211 struct hba_port **matched_port_entry, int *count)
6212{
6213 struct hba_port *port_table_entry, *matched_port = NULL;
6214 enum hba_port_matched_codes matched_code = NOT_MATCHED;
6215 int lcount = 0;
6216 *matched_port_entry = NULL;
6217
6218 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6219 if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
6220 continue;
6221
6222 if ((port_table_entry->sas_address == port_entry->sas_address)
6223 && (port_table_entry->phy_mask == port_entry->phy_mask)) {
6224 matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
6225 matched_port = port_table_entry;
6226 break;
6227 }
6228
6229 if ((port_table_entry->sas_address == port_entry->sas_address)
6230 && (port_table_entry->phy_mask & port_entry->phy_mask)
6231 && (port_table_entry->port_id == port_entry->port_id)) {
6232 matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
6233 matched_port = port_table_entry;
6234 continue;
6235 }
6236
6237 if ((port_table_entry->sas_address == port_entry->sas_address)
6238 && (port_table_entry->phy_mask & port_entry->phy_mask)) {
6239 if (matched_code ==
6240 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6241 continue;
6242 matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
6243 matched_port = port_table_entry;
6244 continue;
6245 }
6246
6247 if (port_table_entry->sas_address == port_entry->sas_address) {
6248 if (matched_code ==
6249 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6250 continue;
6251 if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
6252 continue;
6253 matched_code = MATCHED_WITH_ADDR;
6254 matched_port = port_table_entry;
6255 lcount++;
6256 }
6257 }
6258
6259 *matched_port_entry = matched_port;
6260 if (matched_code == MATCHED_WITH_ADDR)
6261 *count = lcount;
6262 return matched_code;
6263}
6264
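/*
 * Illustrative note (not part of the driver): the loop above ranks the
 * candidates, strongest match first:
 *
 *	1. MATCHED_WITH_ADDR_AND_PHYMASK         - same address, identical phy mask
 *	2. MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT - same address, overlapping mask, same port id
 *	3. MATCHED_WITH_ADDR_AND_SUBPHYMASK      - same address, overlapping mask
 *	4. MATCHED_WITH_ADDR                     - same address only (*count reports
 *	                                           how many such entries were seen)
 *
 * Only an exact address-and-phymask match ends the search early; the weaker
 * matches keep scanning in case a stronger candidate appears later.
 */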
6265/**
6266 * _scsih_del_phy_part_of_anther_port - remove phy if it
6267 * is a part of another port
6268 *@ioc: per adapter object
6269 *@port_table: port table after reset
6270 *@index: hba port entry index
6271 *@port_count: number of ports available after host reset
6272 *@offset: HBA phy bit offset
6273 *
6274 */
6275static void
6276_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6277 struct hba_port *port_table,
6278 int index, u8 port_count, int offset)
6279{
6280 struct _sas_node *sas_node = &ioc->sas_hba;
6281 u32 i, found = 0;
6282
6283 for (i = 0; i < port_count; i++) {
6284 if (i == index)
6285 continue;
6286
6287 if (port_table[i].phy_mask & (1 << offset)) {
6288 mpt3sas_transport_del_phy_from_an_existing_port(
6289 ioc, sas_node, &sas_node->phy[offset]);
6290 found = 1;
6291 break;
6292 }
6293 }
6294 if (!found)
6295 port_table[index].phy_mask |= (1 << offset);
6296}
6297
6298/**
6299 * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
6300 * right port
6301 *@ioc: per adapter object
6302 *@hba_port_entry: hba port table entry
6303 *@port_table: temporary port table
6304 *@index: hba port entry index
6305 *@port_count: number of ports available after host reset
6306 *
6307 */
6308static void
6309_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6310 struct hba_port *hba_port_entry, struct hba_port *port_table,
6311 int index, int port_count)
6312{
6313 u32 phy_mask, offset = 0;
6314 struct _sas_node *sas_node = &ioc->sas_hba;
6315
6316 phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6317
6318 for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6319 if (phy_mask & (1 << offset)) {
6320 if (!(port_table[index].phy_mask & (1 << offset))) {
6321 _scsih_del_phy_part_of_anther_port(
6322 ioc, port_table, index, port_count,
6323 offset);
6324 continue;
6325 }
6326 if (sas_node->phy[offset].phy_belongs_to_port)
6327 mpt3sas_transport_del_phy_from_an_existing_port(
6328 ioc, sas_node, &sas_node->phy[offset]);
6329 mpt3sas_transport_add_phy_to_an_existing_port(
6330 ioc, sas_node, &sas_node->phy[offset],
6331 hba_port_entry->sas_address,
6332 hba_port_entry);
6333 }
6334 }
6335}
6336
ffa381d6
SR
6337/**
6338 * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
6339 * @ioc: per adapter object
6340 *
6341 * Returns nothing.
6342 */
6343static void
6344_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
6345{
6346 struct hba_port *port, *port_next;
6347 struct virtual_phy *vphy, *vphy_next;
6348
6349 list_for_each_entry_safe(port, port_next,
6350 &ioc->port_table_list, list) {
6351 if (!port->vphys_mask)
6352 continue;
6353 list_for_each_entry_safe(vphy, vphy_next,
6354 &port->vphys_list, list) {
6355 if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
6356 drsprintk(ioc, ioc_info(ioc,
6357 "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
6358 vphy, port->port_id,
6359 vphy->phy_mask));
6360 port->vphys_mask &= ~vphy->phy_mask;
6361 list_del(&vphy->list);
6362 kfree(vphy);
6363 }
6364 }
6365 if (!port->vphys_mask && !port->sas_address)
6366 port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6367 }
6368}
6369
a5e99fda
SR
6370/**
6371 * _scsih_del_dirty_port_entries - delete dirty port entries from port list
6372 * after host reset
6373 *@ioc: per adapter object
6374 *
6375 */
6376static void
6377_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
6378{
6379 struct hba_port *port, *port_next;
6380
6381 list_for_each_entry_safe(port, port_next,
6382 &ioc->port_table_list, list) {
6383 if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
6384 port->flags & HBA_PORT_FLAG_NEW_PORT)
6385 continue;
6386
6387 drsprintk(ioc, ioc_info(ioc,
6388 "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
6389 port, port->port_id, port->phy_mask));
6390 list_del(&port->list);
6391 kfree(port);
6392 }
6393}
6394
6395/**
6396 * _scsih_sas_port_refresh - Update HBA port table after host reset
6397 * @ioc: per adapter object
6398 */
6399static void
6400_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6401{
6402 u32 port_count = 0;
6403 struct hba_port *port_table;
6404 struct hba_port *port_table_entry;
6405 struct hba_port *port_entry = NULL;
6406 int i, j, count = 0, lcount = 0;
6407 int ret;
6408 u64 sas_addr;
91202a01 6409 u8 num_phys;
a5e99fda
SR
6410
6411 drsprintk(ioc, ioc_info(ioc,
6412 "updating ports for sas_host(0x%016llx)\n",
6413 (unsigned long long)ioc->sas_hba.sas_address));
6414
91202a01
SR
6415 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6416 if (!num_phys) {
6417 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6418 __FILE__, __LINE__, __func__);
6419 return;
6420 }
6421
6422 if (num_phys > ioc->sas_hba.nr_phys_allocated) {
6423 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6424 __FILE__, __LINE__, __func__);
6425 return;
6426 }
6427 ioc->sas_hba.num_phys = num_phys;
6428
a5e99fda
SR
6429 port_table = kcalloc(ioc->sas_hba.num_phys,
6430 sizeof(struct hba_port), GFP_KERNEL);
6431 if (!port_table)
6432 return;
6433
6434 port_count = _scsih_get_port_table_after_reset(ioc, port_table);
6435 if (!port_count)
6436 return;
6437
6438 drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6439 for (j = 0; j < port_count; j++)
6440 drsprintk(ioc, ioc_info(ioc,
6441 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6442 port_table[j].port_id,
6443 port_table[j].phy_mask, port_table[j].sas_address));
6444
6445 list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6446 port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6447
6448 drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6449 port_table_entry = NULL;
6450 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6451 drsprintk(ioc, ioc_info(ioc,
6452 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6453 port_table_entry->port_id,
6454 port_table_entry->phy_mask,
6455 port_table_entry->sas_address));
6456 }
6457
6458 for (j = 0; j < port_count; j++) {
6459 ret = _scsih_look_and_get_matched_port_entry(ioc,
6460 &port_table[j], &port_entry, &count);
6461 if (!port_entry) {
6462 drsprintk(ioc, ioc_info(ioc,
6463 "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6464 port_table[j].sas_address,
6465 port_table[j].port_id));
6466 continue;
6467 }
6468
6469 switch (ret) {
6470 case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6471 case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6472 _scsih_add_or_del_phys_from_existing_port(ioc,
6473 port_entry, port_table, j, port_count);
6474 break;
6475 case MATCHED_WITH_ADDR:
6476 sas_addr = port_table[j].sas_address;
6477 for (i = 0; i < port_count; i++) {
6478 if (port_table[i].sas_address == sas_addr)
6479 lcount++;
6480 }
6481
6482 if (count > 1 || lcount > 1)
6483 port_entry = NULL;
6484 else
6485 _scsih_add_or_del_phys_from_existing_port(ioc,
6486 port_entry, port_table, j, port_count);
6487 }
6488
6489 if (!port_entry)
6490 continue;
6491
6492 if (port_entry->port_id != port_table[j].port_id)
6493 port_entry->port_id = port_table[j].port_id;
6494 port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6495 port_entry->phy_mask = port_table[j].phy_mask;
6496 }
6497
6498 port_table_entry = NULL;
6499}
6500
ccc59923
SR
6501/**
6502 * _scsih_alloc_vphy - allocate virtual_phy object
6503 * @ioc: per adapter object
6504 * @port_id: Port ID number
6505 * @phy_num: HBA Phy number
6506 *
6507 * Returns allocated virtual_phy object.
6508 */
6509static struct virtual_phy *
6510_scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6511{
6512 struct virtual_phy *vphy;
6513 struct hba_port *port;
6514
34b0a785 6515 port = mpt3sas_get_port_by_id(ioc, port_id, 0);
ccc59923
SR
6516 if (!port)
6517 return NULL;
6518
6519 vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
6520 if (!vphy) {
6521 vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
6522 if (!vphy)
6523 return NULL;
6524
4c51f956
SR
6525 if (!port->vphys_mask)
6526 INIT_LIST_HEAD(&port->vphys_list);
6527
ccc59923
SR
6528 /*
6529 * Enable bit corresponding to HBA phy number on its
6530 * parent hba_port object's vphys_mask field.
6531 */
6532 port->vphys_mask |= (1 << phy_num);
6533 vphy->phy_mask |= (1 << phy_num);
6534
ccc59923
SR
6535 list_add_tail(&vphy->list, &port->vphys_list);
6536
6537 ioc_info(ioc,
6538 "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6539 vphy, port->port_id, phy_num);
6540 }
6541 return vphy;
6542}
6543
f92363d1
SR
6544/**
6545 * _scsih_sas_host_refresh - refreshing sas host object contents
6546 * @ioc: per adapter object
6547 * Context: user
6548 *
6549 * During port enable, the firmware sends topology events for every device.
6550 * It is possible that the handles have changed from the previous setting,
6551 * so this code keeps the handles updated if they have changed.
6552 */
6553static void
6554_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
6555{
6556 u16 sz;
6557 u16 ioc_status;
6558 int i;
6559 Mpi2ConfigReply_t mpi_reply;
6560 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6561 u16 attached_handle;
e238e71b
SR
6562 u8 link_rate, port_id;
6563 struct hba_port *port;
ccc59923 6564 Mpi2SasPhyPage0_t phy_pg0;
f92363d1 6565
919d8a3f
JP
6566 dtmprintk(ioc,
6567 ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
6568 (u64)ioc->sas_hba.sas_address));
f92363d1
SR
6569
6570 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
6571 * sizeof(Mpi2SasIOUnit0PhyData_t));
6572 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6573 if (!sas_iounit_pg0) {
919d8a3f
JP
6574 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6575 __FILE__, __LINE__, __func__);
f92363d1
SR
6576 return;
6577 }
6578
6579 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6580 sas_iounit_pg0, sz)) != 0)
6581 goto out;
6582 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6583 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6584 goto out;
6585 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6586 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
6587 if (i == 0)
e238e71b
SR
6588 ioc->sas_hba.handle = le16_to_cpu(
6589 sas_iounit_pg0->PhyData[0].ControllerDevHandle);
6590 port_id = sas_iounit_pg0->PhyData[i].Port;
34b0a785 6591 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
e238e71b
SR
6592 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6593 if (!port)
6594 goto out;
6595
6596 port->port_id = port_id;
6597 ioc_info(ioc,
6598 "hba_port entry: %p, port: %d is added to hba_port list\n",
6599 port, port->port_id);
6600 if (ioc->shost_recovery)
6601 port->flags = HBA_PORT_FLAG_NEW_PORT;
6602 list_add_tail(&port->list, &ioc->port_table_list);
6603 }
ccc59923
SR
6604 /*
6605 * Check whether current Phy belongs to HBA vSES device or not.
6606 */
6607 if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
6608 MPI2_SAS_DEVICE_INFO_SEP &&
6609 (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
6610 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
6611 &phy_pg0, i))) {
6612 ioc_err(ioc,
6613 "failure at %s:%d/%s()!\n",
6614 __FILE__, __LINE__, __func__);
6615 goto out;
6616 }
6617 if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6618 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6619 continue;
6620 /*
6621 * Allocate a virtual_phy object for vSES device, if
6622 * this vSES device is hot added.
6623 */
6624 if (!_scsih_alloc_vphy(ioc, port_id, i))
6625 goto out;
6626 ioc->sas_hba.phy[i].hba_vphy = 1;
6627 }
6628
91202a01
SR
6629 /*
6630 * Add new HBA phys to STL if these new phys got added as part
6631 * of HBA Firmware upgrade/downgrade operation.
6632 */
6633 if (!ioc->sas_hba.phy[i].phy) {
6634 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
6635 &phy_pg0, i))) {
6636 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6637 __FILE__, __LINE__, __func__);
6638 continue;
6639 }
6640 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6641 MPI2_IOCSTATUS_MASK;
6642 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6643 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6644 __FILE__, __LINE__, __func__);
6645 continue;
6646 }
6647 ioc->sas_hba.phy[i].phy_id = i;
6648 mpt3sas_transport_add_host_phy(ioc,
6649 &ioc->sas_hba.phy[i], phy_pg0,
6650 ioc->sas_hba.parent_dev);
6651 continue;
6652 }
f92363d1
SR
6653 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6654 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
6655 AttachedDevHandle);
6656 if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6657 link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
34b0a785
SR
6658 ioc->sas_hba.phy[i].port =
6659 mpt3sas_get_port_by_id(ioc, port_id, 0);
f92363d1 6660 mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
e2f0cdf7
SR
6661 attached_handle, i, link_rate,
6662 ioc->sas_hba.phy[i].port);
f92363d1 6663 }
91202a01
SR
6664 /*
6665 * Clear the phy details if this phy got disabled as part of
6666 * HBA Firmware upgrade/downgrade operation.
6667 */
6668 for (i = ioc->sas_hba.num_phys;
6669 i < ioc->sas_hba.nr_phys_allocated; i++) {
6670 if (ioc->sas_hba.phy[i].phy &&
6671 ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
6672 SAS_LINK_RATE_1_5_GBPS)
6673 mpt3sas_transport_update_links(ioc,
6674 ioc->sas_hba.sas_address, 0, i,
6675 MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
6676 }
f92363d1
SR
6677 out:
6678 kfree(sas_iounit_pg0);
6679}
6680
6681/**
6682 * _scsih_sas_host_add - create sas host object
6683 * @ioc: per adapter object
6684 *
6685 * Creating host side data object, stored in ioc->sas_hba
f92363d1
SR
6686 */
6687static void
6688_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
6689{
6690 int i;
6691 Mpi2ConfigReply_t mpi_reply;
6692 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6693 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
6694 Mpi2SasPhyPage0_t phy_pg0;
6695 Mpi2SasDevicePage0_t sas_device_pg0;
6696 Mpi2SasEnclosurePage0_t enclosure_pg0;
6697 u16 ioc_status;
6698 u16 sz;
6699 u8 device_missing_delay;
e238e71b
SR
6700 u8 num_phys, port_id;
6701 struct hba_port *port;
f92363d1 6702
87aa95d4
JL
6703 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6704 if (!num_phys) {
919d8a3f
JP
6705 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6706 __FILE__, __LINE__, __func__);
f92363d1
SR
6707 return;
6708 }
91202a01
SR
6709
6710 ioc->sas_hba.nr_phys_allocated = max_t(u8,
6711 MPT_MAX_HBA_NUM_PHYS, num_phys);
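	/*
	 * Note: allocating max(MPT_MAX_HBA_NUM_PHYS, num_phys) entries leaves
	 * headroom for phys that may only appear after an HBA firmware
	 * upgrade, so _scsih_sas_host_refresh() can add them later without
	 * reallocating this array.
	 */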
6712 ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated,
87aa95d4
JL
6713 sizeof(struct _sas_phy), GFP_KERNEL);
6714 if (!ioc->sas_hba.phy) {
919d8a3f
JP
6715 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6716 __FILE__, __LINE__, __func__);
87aa95d4
JL
6717 goto out;
6718 }
6719 ioc->sas_hba.num_phys = num_phys;
f92363d1
SR
6720
6721 /* sas_iounit page 0 */
6722 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
6723 sizeof(Mpi2SasIOUnit0PhyData_t));
6724 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6725 if (!sas_iounit_pg0) {
919d8a3f
JP
6726 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6727 __FILE__, __LINE__, __func__);
f92363d1
SR
6728 return;
6729 }
6730 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6731 sas_iounit_pg0, sz))) {
919d8a3f
JP
6732 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6733 __FILE__, __LINE__, __func__);
f92363d1
SR
6734 goto out;
6735 }
6736 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6737 MPI2_IOCSTATUS_MASK;
6738 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
6739 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6740 __FILE__, __LINE__, __func__);
f92363d1
SR
6741 goto out;
6742 }
6743
6744 /* sas_iounit page 1 */
6745 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
6746 sizeof(Mpi2SasIOUnit1PhyData_t));
6747 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
6748 if (!sas_iounit_pg1) {
919d8a3f
JP
6749 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6750 __FILE__, __LINE__, __func__);
f92363d1
SR
6751 goto out;
6752 }
6753 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
6754 sas_iounit_pg1, sz))) {
919d8a3f
JP
6755 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6756 __FILE__, __LINE__, __func__);
f92363d1
SR
6757 goto out;
6758 }
6759 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6760 MPI2_IOCSTATUS_MASK;
6761 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
6762 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6763 __FILE__, __LINE__, __func__);
f92363d1
SR
6764 goto out;
6765 }
6766
6767 ioc->io_missing_delay =
6768 sas_iounit_pg1->IODeviceMissingDelay;
6769 device_missing_delay =
6770 sas_iounit_pg1->ReportDeviceMissingDelay;
6771 if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
6772 ioc->device_missing_delay = (device_missing_delay &
6773 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
6774 else
6775 ioc->device_missing_delay = device_missing_delay &
6776 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
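	/*
	 * Illustrative example (assuming the usual MPI definitions of a 0x80
	 * unit-16 flag and a 7-bit timeout field): a raw value of 0x85
	 * decodes to 5 * 16 = 80 seconds, while 0x05 decodes to 5 seconds.
	 */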
6777
6778 ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
f92363d1
SR
6779 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6780 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
6781 i))) {
919d8a3f
JP
6782 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6783 __FILE__, __LINE__, __func__);
f92363d1
SR
6784 goto out;
6785 }
6786 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6787 MPI2_IOCSTATUS_MASK;
6788 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
6789 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6790 __FILE__, __LINE__, __func__);
f92363d1
SR
6791 goto out;
6792 }
6793
6794 if (i == 0)
6795 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
6796 PhyData[0].ControllerDevHandle);
e238e71b
SR
6797
6798 port_id = sas_iounit_pg0->PhyData[i].Port;
34b0a785 6799 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
e238e71b
SR
6800 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6801 if (!port)
6802 goto out;
6803
6804 port->port_id = port_id;
6805 ioc_info(ioc,
6806 "hba_port entry: %p, port: %d is added to hba_port list\n",
6807 port, port->port_id);
6808 list_add_tail(&port->list,
6809 &ioc->port_table_list);
6810 }
6811
ccc59923
SR
6812 /*
6813 * Check whether current Phy belongs to HBA vSES device or not.
6814 */
6815 if ((le32_to_cpu(phy_pg0.PhyInfo) &
6816 MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
6817 (phy_pg0.NegotiatedLinkRate >> 4) >=
6818 MPI2_SAS_NEG_LINK_RATE_1_5) {
6819 /*
6820 * Allocate a virtual_phy object for vSES device.
6821 */
6822 if (!_scsih_alloc_vphy(ioc, port_id, i))
6823 goto out;
6824 ioc->sas_hba.phy[i].hba_vphy = 1;
6825 }
6826
f92363d1
SR
6827 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6828 ioc->sas_hba.phy[i].phy_id = i;
34b0a785
SR
6829 ioc->sas_hba.phy[i].port =
6830 mpt3sas_get_port_by_id(ioc, port_id, 0);
f92363d1
SR
6831 mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
6832 phy_pg0, ioc->sas_hba.parent_dev);
6833 }
6834 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6835 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
919d8a3f
JP
6836 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6837 __FILE__, __LINE__, __func__);
f92363d1
SR
6838 goto out;
6839 }
6840 ioc->sas_hba.enclosure_handle =
6841 le16_to_cpu(sas_device_pg0.EnclosureHandle);
6842 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
919d8a3f
JP
6843 ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6844 ioc->sas_hba.handle,
6845 (u64)ioc->sas_hba.sas_address,
6846 ioc->sas_hba.num_phys);
f92363d1
SR
6847
6848 if (ioc->sas_hba.enclosure_handle) {
6849 if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
6850 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
6851 ioc->sas_hba.enclosure_handle)))
6852 ioc->sas_hba.enclosure_logical_id =
6853 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
6854 }
6855
6856 out:
6857 kfree(sas_iounit_pg1);
6858 kfree(sas_iounit_pg0);
6859}
6860
6861/**
6862 * _scsih_expander_add - creating expander object
6863 * @ioc: per adapter object
6864 * @handle: expander handle
6865 *
6866 * Creating expander object, stored in ioc->sas_expander_list.
6867 *
4beb4867 6868 * Return: 0 for success, else error.
f92363d1
SR
6869 */
6870static int
6871_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6872{
6873 struct _sas_node *sas_expander;
22a923c3 6874 struct _enclosure_node *enclosure_dev;
f92363d1
SR
6875 Mpi2ConfigReply_t mpi_reply;
6876 Mpi2ExpanderPage0_t expander_pg0;
6877 Mpi2ExpanderPage1_t expander_pg1;
f92363d1
SR
6878 u32 ioc_status;
6879 u16 parent_handle;
6880 u64 sas_address, sas_address_parent = 0;
6881 int i;
6882 unsigned long flags;
6883 struct _sas_port *mpt3sas_port = NULL;
e2f0cdf7 6884 u8 port_id;
f92363d1
SR
6885
6886 int rc = 0;
6887
6888 if (!handle)
6889 return -1;
6890
6891 if (ioc->shost_recovery || ioc->pci_error_recovery)
6892 return -1;
6893
6894 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
6895 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
919d8a3f
JP
6896 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6897 __FILE__, __LINE__, __func__);
f92363d1
SR
6898 return -1;
6899 }
6900
6901 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6902 MPI2_IOCSTATUS_MASK;
6903 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
6904 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6905 __FILE__, __LINE__, __func__);
f92363d1
SR
6906 return -1;
6907 }
6908
6909 /* handle out of order topology events */
6910 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
6911 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
6912 != 0) {
919d8a3f
JP
6913 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6914 __FILE__, __LINE__, __func__);
f92363d1
SR
6915 return -1;
6916 }
e2f0cdf7
SR
6917
6918 port_id = expander_pg0.PhysicalPort;
f92363d1
SR
6919 if (sas_address_parent != ioc->sas_hba.sas_address) {
6920 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6921 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
34b0a785
SR
6922 sas_address_parent,
6923 mpt3sas_get_port_by_id(ioc, port_id, 0));
f92363d1
SR
6924 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6925 if (!sas_expander) {
6926 rc = _scsih_expander_add(ioc, parent_handle);
6927 if (rc != 0)
6928 return rc;
6929 }
6930 }
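	/*
	 * Note: the recursive _scsih_expander_add(ioc, parent_handle) call
	 * above is what handles out-of-order topology events: if the parent
	 * expander is not known yet, it is added first so this expander can
	 * be attached beneath it.
	 */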
6931
6932 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6933 sas_address = le64_to_cpu(expander_pg0.SASAddress);
6934 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
34b0a785 6935 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
f92363d1
SR
6936 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6937
6938 if (sas_expander)
6939 return 0;
6940
6941 sas_expander = kzalloc(sizeof(struct _sas_node),
6942 GFP_KERNEL);
6943 if (!sas_expander) {
919d8a3f
JP
6944 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6945 __FILE__, __LINE__, __func__);
f92363d1
SR
6946 return -1;
6947 }
6948
6949 sas_expander->handle = handle;
6950 sas_expander->num_phys = expander_pg0.NumPhys;
6951 sas_expander->sas_address_parent = sas_address_parent;
6952 sas_expander->sas_address = sas_address;
34b0a785 6953 sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
e2f0cdf7
SR
6954 if (!sas_expander->port) {
6955 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6956 __FILE__, __LINE__, __func__);
6957 rc = -1;
6958 goto out_fail;
6959 }
f92363d1 6960
919d8a3f
JP
6961 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6962 handle, parent_handle,
6963 (u64)sas_expander->sas_address, sas_expander->num_phys);
f92363d1 6964
d6c2ce43
ZL
6965 if (!sas_expander->num_phys) {
6966 rc = -1;
f92363d1 6967 goto out_fail;
d6c2ce43 6968 }
f92363d1
SR
6969 sas_expander->phy = kcalloc(sas_expander->num_phys,
6970 sizeof(struct _sas_phy), GFP_KERNEL);
6971 if (!sas_expander->phy) {
919d8a3f
JP
6972 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6973 __FILE__, __LINE__, __func__);
f92363d1
SR
6974 rc = -1;
6975 goto out_fail;
6976 }
6977
6978 INIT_LIST_HEAD(&sas_expander->sas_port_list);
6979 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
e2f0cdf7 6980 sas_address_parent, sas_expander->port);
f92363d1 6981 if (!mpt3sas_port) {
919d8a3f
JP
6982 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6983 __FILE__, __LINE__, __func__);
f92363d1
SR
6984 rc = -1;
6985 goto out_fail;
6986 }
6987 sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
9d0348a9 6988 sas_expander->rphy = mpt3sas_port->rphy;
f92363d1
SR
6989
6990 for (i = 0 ; i < sas_expander->num_phys ; i++) {
6991 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
6992 &expander_pg1, i, handle))) {
919d8a3f
JP
6993 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6994 __FILE__, __LINE__, __func__);
f92363d1
SR
6995 rc = -1;
6996 goto out_fail;
6997 }
6998 sas_expander->phy[i].handle = handle;
6999 sas_expander->phy[i].phy_id = i;
34b0a785
SR
7000 sas_expander->phy[i].port =
7001 mpt3sas_get_port_by_id(ioc, port_id, 0);
f92363d1
SR
7002
7003 if ((mpt3sas_transport_add_expander_phy(ioc,
7004 &sas_expander->phy[i], expander_pg1,
7005 sas_expander->parent_dev))) {
919d8a3f
JP
7006 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7007 __FILE__, __LINE__, __func__);
f92363d1
SR
7008 rc = -1;
7009 goto out_fail;
7010 }
7011 }
7012
7013 if (sas_expander->enclosure_handle) {
22a923c3
C
7014 enclosure_dev =
7015 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7016 sas_expander->enclosure_handle);
7017 if (enclosure_dev)
f92363d1 7018 sas_expander->enclosure_logical_id =
22a923c3 7019 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
f92363d1
SR
7020 }
7021
7022 _scsih_expander_node_add(ioc, sas_expander);
199fd79a 7023 return 0;
f92363d1
SR
7024
7025 out_fail:
7026
7027 if (mpt3sas_port)
7028 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
e2f0cdf7 7029 sas_address_parent, sas_expander->port);
f92363d1
SR
7030 kfree(sas_expander);
7031 return rc;
7032}
7033
7034/**
7035 * mpt3sas_expander_remove - removing expander object
7036 * @ioc: per adapter object
7037 * @sas_address: expander sas_address
a8d548b0 7038 * @port: hba port entry
f92363d1
SR
7039 */
7040void
7d310f24
SR
7041mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7042 struct hba_port *port)
f92363d1
SR
7043{
7044 struct _sas_node *sas_expander;
7045 unsigned long flags;
7046
7047 if (ioc->shost_recovery)
7048 return;
7049
7d310f24
SR
7050 if (!port)
7051 return;
7052
f92363d1
SR
7053 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7054 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
7d310f24 7055 sas_address, port);
f92363d1
SR
7056 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7057 if (sas_expander)
7058 _scsih_expander_node_remove(ioc, sas_expander);
7059}
7060
7061/**
7062 * _scsih_done - internal SCSI_IO callback handler.
7063 * @ioc: per adapter object
7064 * @smid: system request message index
7065 * @msix_index: MSIX table index supplied by the OS
7066 * @reply: reply message frame (lower 32-bit addr)
7067 *
7068 * Callback handler when sending internally generated SCSI_IO.
7069 * The callback index passed is `ioc->scsih_cb_idx`.
7070 *
4beb4867
BVA
7071 * Return: 1 meaning mf should be freed from _base_interrupt
7072 * 0 means the mf is freed from this function.
f92363d1
SR
7073 */
7074static u8
7075_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
7076{
7077 MPI2DefaultReply_t *mpi_reply;
7078
7079 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
7080 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
7081 return 1;
7082 if (ioc->scsih_cmds.smid != smid)
7083 return 1;
7084 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
7085 if (mpi_reply) {
7086 memcpy(ioc->scsih_cmds.reply, mpi_reply,
7087 mpi_reply->MsgLength*4);
7088 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
7089 }
7090 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
7091 complete(&ioc->scsih_cmds.done);
7092 return 1;
7093}
7094
7095
7096
7097
7098#define MPT3_MAX_LUNS (255)
7099
7100
7101/**
7102 * _scsih_check_access_status - check access flags
7103 * @ioc: per adapter object
7104 * @sas_address: sas address
7105 * @handle: sas device handle
4beb4867 7106 * @access_status: errors returned during discovery of the device
f92363d1 7107 *
4beb4867 7108 * Return: 0 for success, else failure
f92363d1
SR
7109 */
7110static u8
7111_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7112 u16 handle, u8 access_status)
7113{
7114 u8 rc = 1;
7115 char *desc = NULL;
7116
7117 switch (access_status) {
7118 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
7119 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
7120 rc = 0;
7121 break;
7122 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
7123 desc = "sata capability failed";
7124 break;
7125 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
7126 desc = "sata affiliation conflict";
7127 break;
7128 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
7129 desc = "route not addressable";
7130 break;
7131 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
7132 desc = "smp error not addressable";
7133 break;
7134 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
7135 desc = "device blocked";
7136 break;
7137 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
7138 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
7139 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
7140 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
7141 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
7142 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
7143 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
7144 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
7145 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
7146 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
7147 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
7148 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
7149 desc = "sata initialization failed";
7150 break;
7151 default:
7152 desc = "unknown";
7153 break;
7154 }
7155
7156 if (!rc)
7157 return 0;
7158
919d8a3f
JP
7159 ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
7160 desc, (u64)sas_address, handle);
f92363d1
SR
7161 return rc;
7162}
7163
7164/**
7165 * _scsih_check_device - checking device responsiveness
7166 * @ioc: per adapter object
7167 * @parent_sas_address: sas address of parent expander or sas host
7168 * @handle: attached device handle
4beb4867 7169 * @phy_number: phy number
f92363d1 7170 * @link_rate: new link rate
f92363d1
SR
7171 */
7172static void
7173_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
7174 u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
7175{
7176 Mpi2ConfigReply_t mpi_reply;
7177 Mpi2SasDevicePage0_t sas_device_pg0;
7d310f24 7178 struct _sas_device *sas_device = NULL;
22a923c3 7179 struct _enclosure_node *enclosure_dev = NULL;
f92363d1
SR
7180 u32 ioc_status;
7181 unsigned long flags;
7182 u64 sas_address;
7183 struct scsi_target *starget;
7184 struct MPT3SAS_TARGET *sas_target_priv_data;
7185 u32 device_info;
7d310f24 7186 struct hba_port *port;
f92363d1 7187
f92363d1
SR
7188 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7189 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
7190 return;
7191
7192 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7193 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7194 return;
7195
7196 /* wide port handling: we only need to handle the device once, for the
7197 * phy that matches the one reported in sas device page zero
7198 */
7199 if (phy_number != sas_device_pg0.PhyNum)
7200 return;
7201
7202 /* check if this is end device */
7203 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7204 if (!(_scsih_is_end_device(device_info)))
7205 return;
7206
7207 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7208 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
34b0a785 7209 port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
7d310f24
SR
7210 if (!port)
7211 goto out_unlock;
d1cb5e49 7212 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7d310f24 7213 sas_address, port);
f92363d1 7214
d1cb5e49
SR
7215 if (!sas_device)
7216 goto out_unlock;
f92363d1
SR
7217
7218 if (unlikely(sas_device->handle != handle)) {
7219 starget = sas_device->starget;
7220 sas_target_priv_data = starget->hostdata;
7221 starget_printk(KERN_INFO, starget,
7222 "handle changed from(0x%04x) to (0x%04x)!!!\n",
7223 sas_device->handle, handle);
7224 sas_target_priv_data->handle = handle;
7225 sas_device->handle = handle;
aa53bb89 7226 if (le16_to_cpu(sas_device_pg0.Flags) &
e6d45e3e
SR
7227 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7228 sas_device->enclosure_level =
aa53bb89 7229 sas_device_pg0.EnclosureLevel;
310c8e40
CO
7230 memcpy(sas_device->connector_name,
7231 sas_device_pg0.ConnectorName, 4);
7232 sas_device->connector_name[4] = '\0';
e6d45e3e
SR
7233 } else {
7234 sas_device->enclosure_level = 0;
7235 sas_device->connector_name[0] = '\0';
7236 }
aba5a85c 7237
22a923c3
C
7238 sas_device->enclosure_handle =
7239 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7240 sas_device->is_chassis_slot_valid = 0;
7241 enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
7242 sas_device->enclosure_handle);
7243 if (enclosure_dev) {
7244 sas_device->enclosure_logical_id =
7245 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7246 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7247 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7248 sas_device->is_chassis_slot_valid = 1;
7249 sas_device->chassis_slot =
7250 enclosure_dev->pg0.ChassisSlot;
7251 }
7252 }
f92363d1
SR
7253 }
7254
7255 /* check if device is present */
7256 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7257 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
919d8a3f
JP
7258 ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
7259 handle);
d1cb5e49 7260 goto out_unlock;
f92363d1
SR
7261 }
7262
7263 /* check if there were any issues with discovery */
7264 if (_scsih_check_access_status(ioc, sas_address, handle,
d1cb5e49
SR
7265 sas_device_pg0.AccessStatus))
7266 goto out_unlock;
f92363d1
SR
7267
7268 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7d310f24 7269 _scsih_ublock_io_device(ioc, sas_address, port);
f92363d1 7270
d1cb5e49
SR
7271 if (sas_device)
7272 sas_device_put(sas_device);
7273 return;
7274
7275out_unlock:
7276 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7277 if (sas_device)
7278 sas_device_put(sas_device);
f92363d1
SR
7279}
7280
7281/**
7282 * _scsih_add_device - creating sas device object
7283 * @ioc: per adapter object
7284 * @handle: sas device handle
7285 * @phy_num: phy number the end device is attached to
7286 * @is_pd: is this a hidden raid component
7287 *
7288 * Creating end device object, stored in ioc->sas_device_list.
7289 *
4beb4867 7290 * Return: 0 for success, non-zero for failure.
f92363d1
SR
7291 */
7292static int
7293_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
7294 u8 is_pd)
7295{
7296 Mpi2ConfigReply_t mpi_reply;
7297 Mpi2SasDevicePage0_t sas_device_pg0;
f92363d1 7298 struct _sas_device *sas_device;
22a923c3 7299 struct _enclosure_node *enclosure_dev = NULL;
f92363d1
SR
7300 u32 ioc_status;
7301 u64 sas_address;
7302 u32 device_info;
e2f0cdf7 7303 u8 port_id;
f92363d1
SR
7304
7305 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7306 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
919d8a3f
JP
7307 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7308 __FILE__, __LINE__, __func__);
f92363d1
SR
7309 return -1;
7310 }
7311
7312 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7313 MPI2_IOCSTATUS_MASK;
7314 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
7315 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7316 __FILE__, __LINE__, __func__);
f92363d1
SR
7317 return -1;
7318 }
7319
7320 /* check if this is end device */
7321 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7322 if (!(_scsih_is_end_device(device_info)))
7323 return -1;
c696f7b8 7324 set_bit(handle, ioc->pend_os_device_add);
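	/*
	 * Note: pend_os_device_add tracks handles whose exposure to the OS is
	 * still pending; the bit is cleared below if the device already
	 * exists, and is checked again by the topology change handler before
	 * falling through to the "target add" path.
	 */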
f92363d1
SR
7325 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7326
7327 /* check if device is present */
7328 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7329 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
919d8a3f
JP
7330 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
7331 handle);
f92363d1
SR
7332 return -1;
7333 }
7334
7335 /* check if there were any issues with discovery */
7336 if (_scsih_check_access_status(ioc, sas_address, handle,
7337 sas_device_pg0.AccessStatus))
7338 return -1;
7339
e2f0cdf7 7340 port_id = sas_device_pg0.PhysicalPort;
d1cb5e49 7341 sas_device = mpt3sas_get_sdev_by_addr(ioc,
34b0a785 7342 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
d1cb5e49 7343 if (sas_device) {
c696f7b8 7344 clear_bit(handle, ioc->pend_os_device_add);
d1cb5e49 7345 sas_device_put(sas_device);
f92363d1 7346 return -1;
d1cb5e49 7347 }
f92363d1 7348
75888956 7349 if (sas_device_pg0.EnclosureHandle) {
22a923c3
C
7350 enclosure_dev =
7351 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7352 le16_to_cpu(sas_device_pg0.EnclosureHandle));
7353 if (enclosure_dev == NULL)
919d8a3f
JP
7354 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
7355 sas_device_pg0.EnclosureHandle);
75888956
SR
7356 }
7357
f92363d1
SR
7358 sas_device = kzalloc(sizeof(struct _sas_device),
7359 GFP_KERNEL);
7360 if (!sas_device) {
919d8a3f
JP
7361 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7362 __FILE__, __LINE__, __func__);
f92363d1
SR
7363 return 0;
7364 }
7365
d1cb5e49 7366 kref_init(&sas_device->refcount);
f92363d1
SR
7367 sas_device->handle = handle;
7368 if (_scsih_get_sas_address(ioc,
7369 le16_to_cpu(sas_device_pg0.ParentDevHandle),
7370 &sas_device->sas_address_parent) != 0)
919d8a3f
JP
7371 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7372 __FILE__, __LINE__, __func__);
f92363d1
SR
7373 sas_device->enclosure_handle =
7374 le16_to_cpu(sas_device_pg0.EnclosureHandle);
e6d45e3e
SR
7375 if (sas_device->enclosure_handle != 0)
7376 sas_device->slot =
7377 le16_to_cpu(sas_device_pg0.Slot);
f92363d1
SR
7378 sas_device->device_info = device_info;
7379 sas_device->sas_address = sas_address;
7380 sas_device->phy = sas_device_pg0.PhyNum;
7381 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
7382 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
34b0a785 7383 sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
e2f0cdf7
SR
7384 if (!sas_device->port) {
7385 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7386 __FILE__, __LINE__, __func__);
7387 goto out;
7388 }
f92363d1 7389
aa53bb89
SPS
7390 if (le16_to_cpu(sas_device_pg0.Flags)
7391 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
e6d45e3e 7392 sas_device->enclosure_level =
aa53bb89 7393 sas_device_pg0.EnclosureLevel;
310c8e40
CO
7394 memcpy(sas_device->connector_name,
7395 sas_device_pg0.ConnectorName, 4);
7396 sas_device->connector_name[4] = '\0';
e6d45e3e
SR
7397 } else {
7398 sas_device->enclosure_level = 0;
7399 sas_device->connector_name[0] = '\0';
7400 }
22a923c3 7401 /* get enclosure_logical_id & chassis_slot */
75888956 7402 sas_device->is_chassis_slot_valid = 0;
22a923c3 7403 if (enclosure_dev) {
f92363d1 7404 sas_device->enclosure_logical_id =
22a923c3
C
7405 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7406 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
75888956
SR
7407 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7408 sas_device->is_chassis_slot_valid = 1;
7409 sas_device->chassis_slot =
22a923c3 7410 enclosure_dev->pg0.ChassisSlot;
75888956
SR
7411 }
7412 }
7413
f92363d1
SR
7414 /* get device name */
7415 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
787f2448
SP
7416 sas_device->port_type = sas_device_pg0.MaxPortConnections;
7417 ioc_info(ioc,
7418 "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
7419 handle, sas_device->sas_address, sas_device->port_type);
f92363d1
SR
7420
7421 if (ioc->wait_for_discovery_to_complete)
7422 _scsih_sas_device_init_add(ioc, sas_device);
7423 else
7424 _scsih_sas_device_add(ioc, sas_device);
7425
e2f0cdf7 7426out:
d1cb5e49 7427 sas_device_put(sas_device);
f92363d1
SR
7428 return 0;
7429}
7430
7431/**
7432 * _scsih_remove_device - removing sas device object
7433 * @ioc: per adapter object
4beb4867 7434 * @sas_device: the sas_device object
f92363d1
SR
7435 */
7436static void
7437_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
7438 struct _sas_device *sas_device)
7439{
7440 struct MPT3SAS_TARGET *sas_target_priv_data;
7441
0f624c39
SR
7442 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
7443 (sas_device->pfa_led_on)) {
7444 _scsih_turn_off_pfa_led(ioc, sas_device);
7445 sas_device->pfa_led_on = 0;
7446 }
75888956 7447
919d8a3f
JP
7448 dewtprintk(ioc,
7449 ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
7450 __func__,
7451 sas_device->handle, (u64)sas_device->sas_address));
75888956
SR
7452
7453 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7454 NULL, NULL));
f92363d1
SR
7455
7456 if (sas_device->starget && sas_device->starget->hostdata) {
7457 sas_target_priv_data = sas_device->starget->hostdata;
7458 sas_target_priv_data->deleted = 1;
7d310f24
SR
7459 _scsih_ublock_io_device(ioc, sas_device->sas_address,
7460 sas_device->port);
f92363d1
SR
7461 sas_target_priv_data->handle =
7462 MPT3SAS_INVALID_DEVICE_HANDLE;
7463 }
7786ab6a
SR
7464
7465 if (!ioc->hide_drives)
7466 mpt3sas_transport_port_remove(ioc,
f92363d1 7467 sas_device->sas_address,
e2f0cdf7
SR
7468 sas_device->sas_address_parent,
7469 sas_device->port);
f92363d1 7470
919d8a3f
JP
7471 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
7472 sas_device->handle, (u64)sas_device->sas_address);
75888956
SR
7473
7474 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
f92363d1 7475
919d8a3f
JP
7476 dewtprintk(ioc,
7477 ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
7478 __func__,
7479 sas_device->handle, (u64)sas_device->sas_address));
75888956
SR
7480 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7481 NULL, NULL));
f92363d1
SR
7482}
7483
f92363d1
SR
7484/**
7485 * _scsih_sas_topology_change_event_debug - debug for topology event
7486 * @ioc: per adapter object
7487 * @event_data: event data payload
7488 * Context: user.
7489 */
7490static void
7491_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7492 Mpi2EventDataSasTopologyChangeList_t *event_data)
7493{
7494 int i;
7495 u16 handle;
7496 u16 reason_code;
7497 u8 phy_number;
7498 char *status_str = NULL;
7499 u8 link_rate, prev_link_rate;
7500
7501 switch (event_data->ExpStatus) {
7502 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7503 status_str = "add";
7504 break;
7505 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7506 status_str = "remove";
7507 break;
7508 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7509 case 0:
7510 status_str = "responding";
7511 break;
7512 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7513 status_str = "remove delay";
7514 break;
7515 default:
7516 status_str = "unknown status";
7517 break;
7518 }
919d8a3f 7519 ioc_info(ioc, "sas topology change: (%s)\n", status_str);
f92363d1
SR
7520 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
7521 "start_phy(%02d), count(%d)\n",
7522 le16_to_cpu(event_data->ExpanderDevHandle),
7523 le16_to_cpu(event_data->EnclosureHandle),
7524 event_data->StartPhyNum, event_data->NumEntries);
7525 for (i = 0; i < event_data->NumEntries; i++) {
7526 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7527 if (!handle)
7528 continue;
7529 phy_number = event_data->StartPhyNum + i;
7530 reason_code = event_data->PHY[i].PhyStatus &
7531 MPI2_EVENT_SAS_TOPO_RC_MASK;
7532 switch (reason_code) {
7533 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7534 status_str = "target add";
7535 break;
7536 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7537 status_str = "target remove";
7538 break;
7539 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7540 status_str = "delay target remove";
7541 break;
7542 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7543 status_str = "link rate change";
7544 break;
7545 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7546 status_str = "target responding";
7547 break;
7548 default:
7549 status_str = "unknown";
7550 break;
7551 }
7552 link_rate = event_data->PHY[i].LinkRate >> 4;
7553 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
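		/*
		 * The event's LinkRate byte packs two nibbles: the upper
		 * nibble is the new (current) link rate, the lower nibble
		 * the previous link rate, as decoded above.
		 */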
7554 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
7555 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
7556 handle, status_str, link_rate, prev_link_rate);
7557
7558 }
7559}
f92363d1
SR
7560
7561/**
7562 * _scsih_sas_topology_change_event - handle topology changes
7563 * @ioc: per adapter object
7564 * @fw_event: The fw_event_work object
7565 * Context: user.
7566 *
7567 */
7568static int
7569_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7570 struct fw_event_work *fw_event)
7571{
7572 int i;
7573 u16 parent_handle, handle;
7574 u16 reason_code;
7575 u8 phy_number, max_phys;
7576 struct _sas_node *sas_expander;
7577 u64 sas_address;
7578 unsigned long flags;
7579 u8 link_rate, prev_link_rate;
e2f0cdf7 7580 struct hba_port *port;
35b62362
JL
7581 Mpi2EventDataSasTopologyChangeList_t *event_data =
7582 (Mpi2EventDataSasTopologyChangeList_t *)
7583 fw_event->event_data;
f92363d1 7584
f92363d1
SR
7585 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7586 _scsih_sas_topology_change_event_debug(ioc, event_data);
f92363d1
SR
7587
7588 if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
7589 return 0;
7590
7591 if (!ioc->sas_hba.num_phys)
7592 _scsih_sas_host_add(ioc);
7593 else
7594 _scsih_sas_host_refresh(ioc);
7595
7596 if (fw_event->ignore) {
919d8a3f 7597 dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
f92363d1
SR
7598 return 0;
7599 }
7600
7601 parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
34b0a785 7602 port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
f92363d1
SR
7603
7604 /* handle expander add */
7605 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
7606 if (_scsih_expander_add(ioc, parent_handle) != 0)
7607 return 0;
7608
7609 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7610 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
7611 parent_handle);
7612 if (sas_expander) {
7613 sas_address = sas_expander->sas_address;
7614 max_phys = sas_expander->num_phys;
e2f0cdf7 7615 port = sas_expander->port;
f92363d1
SR
7616 } else if (parent_handle < ioc->sas_hba.num_phys) {
7617 sas_address = ioc->sas_hba.sas_address;
7618 max_phys = ioc->sas_hba.num_phys;
7619 } else {
7620 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7621 return 0;
7622 }
7623 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7624
7625 /* handle siblings events */
7626 for (i = 0; i < event_data->NumEntries; i++) {
7627 if (fw_event->ignore) {
919d8a3f
JP
7628 dewtprintk(ioc,
7629 ioc_info(ioc, "ignoring expander event\n"));
f92363d1
SR
7630 return 0;
7631 }
7632 if (ioc->remove_host || ioc->pci_error_recovery)
7633 return 0;
7634 phy_number = event_data->StartPhyNum + i;
7635 if (phy_number >= max_phys)
7636 continue;
7637 reason_code = event_data->PHY[i].PhyStatus &
7638 MPI2_EVENT_SAS_TOPO_RC_MASK;
7639 if ((event_data->PHY[i].PhyStatus &
7640 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
7641 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
7642 continue;
7643 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7644 if (!handle)
7645 continue;
7646 link_rate = event_data->PHY[i].LinkRate >> 4;
7647 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7648 switch (reason_code) {
7649 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7650
7651 if (ioc->shost_recovery)
7652 break;
7653
7654 if (link_rate == prev_link_rate)
7655 break;
7656
7657 mpt3sas_transport_update_links(ioc, sas_address,
e2f0cdf7 7658 handle, phy_number, link_rate, port);
f92363d1
SR
7659
7660 if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
7661 break;
7662
7663 _scsih_check_device(ioc, sas_address, handle,
7664 phy_number, link_rate);
7665
c696f7b8
SPS
7666 if (!test_bit(handle, ioc->pend_os_device_add))
7667 break;
7668
df561f66 7669 fallthrough;
f92363d1
SR
7670
7671 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7672
7673 if (ioc->shost_recovery)
7674 break;
7675
7676 mpt3sas_transport_update_links(ioc, sas_address,
e2f0cdf7 7677 handle, phy_number, link_rate, port);
f92363d1
SR
7678
7679 _scsih_add_device(ioc, handle, phy_number, 0);
7680
7681 break;
7682 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7683
7684 _scsih_device_remove_by_handle(ioc, handle);
7685 break;
7686 }
7687 }
7688
7689 /* handle expander removal */
7690 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
7691 sas_expander)
7d310f24 7692 mpt3sas_expander_remove(ioc, sas_address, port);
f92363d1
SR
7693
7694 return 0;
7695}
7696
f92363d1
SR
7697/**
7698 * _scsih_sas_device_status_change_event_debug - debug for device event
4beb4867 7699 * @ioc: ?
f92363d1
SR
7700 * @event_data: event data payload
7701 * Context: user.
f92363d1
SR
7702 */
7703static void
7704_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7705 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7706{
7707 char *reason_str = NULL;
7708
7709 switch (event_data->ReasonCode) {
7710 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7711 reason_str = "smart data";
7712 break;
7713 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7714 reason_str = "unsupported device discovered";
7715 break;
7716 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7717 reason_str = "internal device reset";
7718 break;
7719 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7720 reason_str = "internal task abort";
7721 break;
7722 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7723 reason_str = "internal task abort set";
7724 break;
7725 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7726 reason_str = "internal clear task set";
7727 break;
7728 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7729 reason_str = "internal query task";
7730 break;
7731 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
7732 reason_str = "sata init failure";
7733 break;
7734 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7735 reason_str = "internal device reset complete";
7736 break;
7737 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7738 reason_str = "internal task abort complete";
7739 break;
7740 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7741 reason_str = "internal async notification";
7742 break;
7743 case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
7744 reason_str = "expander reduced functionality";
7745 break;
7746 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
7747 reason_str = "expander reduced functionality complete";
7748 break;
7749 default:
7750 reason_str = "unknown reason";
7751 break;
7752 }
919d8a3f
JP
7753 ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
7754 reason_str, le16_to_cpu(event_data->DevHandle),
7755 (u64)le64_to_cpu(event_data->SASAddress),
7756 le16_to_cpu(event_data->TaskTag));
f92363d1 7757 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
919d8a3f
JP
7758 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7759 event_data->ASC, event_data->ASCQ);
7760 pr_cont("\n");
f92363d1 7761}
f92363d1
SR
7762
7763/**
7764 * _scsih_sas_device_status_change_event - handle device status change
7765 * @ioc: per adapter object
54d74e6b 7766 * @event_data: The fw event
f92363d1 7767 * Context: user.
f92363d1
SR
7768 */
7769static void
7770_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
54d74e6b 7771 Mpi2EventDataSasDeviceStatusChange_t *event_data)
f92363d1
SR
7772{
7773 struct MPT3SAS_TARGET *target_priv_data;
7774 struct _sas_device *sas_device;
7775 u64 sas_address;
7776 unsigned long flags;
f92363d1
SR
7777
7778 /* In MPI Revision K (0xC), the internal device reset complete was
7779 * implemented, so avoid setting tm_busy flag for older firmware.
7780 */
7781 if ((ioc->facts.HeaderVersion >> 8) < 0xC)
7782 return;
7783
7784 if (event_data->ReasonCode !=
7785 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7786 event_data->ReasonCode !=
7787 MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7788 return;
7789
7790 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7791 sas_address = le64_to_cpu(event_data->SASAddress);
d1cb5e49 7792 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
34b0a785
SR
7793 sas_address,
7794 mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
f92363d1 7795
d1cb5e49
SR
7796 if (!sas_device || !sas_device->starget)
7797 goto out;
f92363d1
SR
7798
7799 target_priv_data = sas_device->starget->hostdata;
d1cb5e49
SR
7800 if (!target_priv_data)
7801 goto out;
f92363d1
SR
7802
7803 if (event_data->ReasonCode ==
7804 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
7805 target_priv_data->tm_busy = 1;
7806 else
7807 target_priv_data->tm_busy = 0;
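	/*
	 * Note: tm_busy is set for the duration of an internal device reset;
	 * the driver presumably uses it to hold off new I/O to this target
	 * until the reset-complete event clears the flag.
	 */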
d1cb5e49 7808
54d74e6b
SP
7809 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7810 ioc_info(ioc,
7811 "%s tm_busy flag for handle(0x%04x)\n",
7812 (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
7813 target_priv_data->handle);
7814
d1cb5e49
SR
7815out:
7816 if (sas_device)
7817 sas_device_put(sas_device);
7818
f92363d1
SR
7819 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7820}
7821
4318c734 7822
c102e00c
SPS
7823/**
7824 * _scsih_check_pcie_access_status - check access flags
7825 * @ioc: per adapter object
7826 * @wwid: wwid
7827 * @handle: sas device handle
4beb4867 7828 * @access_status: errors returned during discovery of the device
c102e00c 7829 *
4beb4867 7830 * Return: 0 for success, else failure
c102e00c
SPS
7831 */
7832static u8
7833_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
7834 u16 handle, u8 access_status)
7835{
7836 u8 rc = 1;
7837 char *desc = NULL;
7838
7839 switch (access_status) {
7840 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
7841 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
7842 rc = 0;
7843 break;
7844 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
7845 desc = "PCIe device capability failed";
7846 break;
7847 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
7848 desc = "PCIe device blocked";
3c090ce3
SP
7849 ioc_info(ioc,
7850 "Device with Access Status (%s): wwid(0x%016llx), "
7851 "handle(0x%04x)\n ll only be added to the internal list",
7852 desc, (u64)wwid, handle);
7853 rc = 0;
c102e00c
SPS
7854 break;
7855 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
7856 desc = "PCIe device mem space access failed";
7857 break;
7858 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
7859 desc = "PCIe device unsupported";
7860 break;
7861 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
7862 desc = "PCIe device MSIx Required";
7863 break;
7864 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
7865 desc = "PCIe device init fail max";
7866 break;
7867 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
7868 desc = "PCIe device status unknown";
7869 break;
7870 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
7871 desc = "nvme ready timeout";
7872 break;
7873 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
7874 desc = "nvme device configuration unsupported";
7875 break;
7876 case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
7877 desc = "nvme identify failed";
7878 break;
7879 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
7880 desc = "nvme qconfig failed";
7881 break;
7882 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
7883 desc = "nvme qcreation failed";
7884 break;
7885 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
7886 desc = "nvme eventcfg failed";
7887 break;
7888 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
7889 desc = "nvme get feature stat failed";
7890 break;
7891 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
7892 desc = "nvme idle timeout";
7893 break;
7894 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
7895 desc = "nvme failure status";
7896 break;
7897 default:
919d8a3f
JP
7898 ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
7899 access_status, (u64)wwid, handle);
c102e00c
SPS
7900 return rc;
7901 }
7902
7903 if (!rc)
7904 return rc;
7905
919d8a3f
JP
7906 ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
7907 desc, (u64)wwid, handle);
c102e00c
SPS
7908 return rc;
7909}
3075ac49
SPS
7910
7911/**
7912 * _scsih_pcie_device_remove_from_sml - removing pcie device
7913 * from SML and free up associated memory
7914 * @ioc: per adapter object
7915 * @pcie_device: the pcie_device object
3075ac49
SPS
7916 */
7917static void
7918_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
7919 struct _pcie_device *pcie_device)
7920{
7921 struct MPT3SAS_TARGET *sas_target_priv_data;
7922
919d8a3f
JP
7923 dewtprintk(ioc,
7924 ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
7925 __func__,
7926 pcie_device->handle, (u64)pcie_device->wwid));
3075ac49 7927 if (pcie_device->enclosure_handle != 0)
919d8a3f
JP
7928 dewtprintk(ioc,
7929 ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
7930 __func__,
7931 (u64)pcie_device->enclosure_logical_id,
7932 pcie_device->slot));
3075ac49 7933 if (pcie_device->connector_name[0] != '\0')
919d8a3f
JP
7934 dewtprintk(ioc,
7935 ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
7936 __func__,
7937 pcie_device->enclosure_level,
7938 pcie_device->connector_name));
3075ac49
SPS
7939
7940 if (pcie_device->starget && pcie_device->starget->hostdata) {
7941 sas_target_priv_data = pcie_device->starget->hostdata;
7942 sas_target_priv_data->deleted = 1;
7d310f24 7943 _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
3075ac49
SPS
7944 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
7945 }
7946
919d8a3f
JP
7947 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7948 pcie_device->handle, (u64)pcie_device->wwid);
3075ac49 7949 if (pcie_device->enclosure_handle != 0)
919d8a3f
JP
7950 ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
7951 (u64)pcie_device->enclosure_logical_id,
7952 pcie_device->slot);
3075ac49 7953 if (pcie_device->connector_name[0] != '\0')
919d8a3f
JP
7954 ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
7955 pcie_device->enclosure_level,
7956 pcie_device->connector_name);
3075ac49 7957
3c090ce3
SP
7958 if (pcie_device->starget && (pcie_device->access_status !=
7959 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
3075ac49 7960 scsi_remove_target(&pcie_device->starget->dev);
919d8a3f
JP
7961 dewtprintk(ioc,
7962 ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
7963 __func__,
7964 pcie_device->handle, (u64)pcie_device->wwid));
3075ac49 7965 if (pcie_device->enclosure_handle != 0)
919d8a3f
JP
7966 dewtprintk(ioc,
7967 ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
7968 __func__,
7969 (u64)pcie_device->enclosure_logical_id,
7970 pcie_device->slot));
3075ac49 7971 if (pcie_device->connector_name[0] != '\0')
919d8a3f
JP
7972 dewtprintk(ioc,
7973 ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
7974 __func__,
7975 pcie_device->enclosure_level,
7976 pcie_device->connector_name));
3075ac49
SPS
7977
7978 kfree(pcie_device->serial_number);
7979}
7980
7981
c102e00c
SPS
7982/**
7983 * _scsih_pcie_check_device - checking device responsiveness
7984 * @ioc: per adapter object
7985 * @handle: attached device handle
c102e00c
SPS
7986 */
7987static void
7988_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7989{
7990 Mpi2ConfigReply_t mpi_reply;
7991 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7992 u32 ioc_status;
7993 struct _pcie_device *pcie_device;
7994 u64 wwid;
7995 unsigned long flags;
7996 struct scsi_target *starget;
7997 struct MPT3SAS_TARGET *sas_target_priv_data;
7998 u32 device_info;
7999
8000 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8001 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
8002 return;
8003
8004 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
8005 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8006 return;
8007
8008 /* check if this is end device */
8009 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
5bb309db 8010 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
c102e00c
SPS
8011 return;
8012
8013 wwid = le64_to_cpu(pcie_device_pg0.WWID);
8014 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8015 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8016
8017 if (!pcie_device) {
8018 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8019 return;
8020 }
8021
8022 if (unlikely(pcie_device->handle != handle)) {
8023 starget = pcie_device->starget;
8024 sas_target_priv_data = starget->hostdata;
3c090ce3 8025 pcie_device->access_status = pcie_device_pg0.AccessStatus;
c102e00c
SPS
8026 starget_printk(KERN_INFO, starget,
8027 "handle changed from(0x%04x) to (0x%04x)!!!\n",
8028 pcie_device->handle, handle);
8029 sas_target_priv_data->handle = handle;
8030 pcie_device->handle = handle;
8031
8032 if (le32_to_cpu(pcie_device_pg0.Flags) &
8033 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8034 pcie_device->enclosure_level =
8035 pcie_device_pg0.EnclosureLevel;
8036 memcpy(&pcie_device->connector_name[0],
8037 &pcie_device_pg0.ConnectorName[0], 4);
8038 } else {
8039 pcie_device->enclosure_level = 0;
8040 pcie_device->connector_name[0] = '\0';
8041 }
8042 }
8043
8044 /* check if device is present */
8045 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8046 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
919d8a3f
JP
8047 ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
8048 handle);
c102e00c
SPS
8049 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8050 pcie_device_put(pcie_device);
8051 return;
8052 }
8053
8054 /* check if there were any issues with discovery */
8055 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8056 pcie_device_pg0.AccessStatus)) {
8057 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8058 pcie_device_put(pcie_device);
8059 return;
8060 }
8061
8062 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8063 pcie_device_put(pcie_device);
8064
7d310f24 8065 _scsih_ublock_io_device(ioc, wwid, NULL);
c102e00c
SPS
8066
8067 return;
8068}
8069
8070/**
8071 * _scsih_pcie_add_device - creating pcie device object
8072 * @ioc: per adapter object
8073 * @handle: pcie device handle
8074 *
8075 * Creating end device object, stored in ioc->pcie_device_list.
8076 *
4beb4867 8077 * Return: 1 means queue the event later, 0 means complete the event
c102e00c
SPS
8078 */
8079static int
8080_scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8081{
8082 Mpi26PCIeDevicePage0_t pcie_device_pg0;
8083 Mpi26PCIeDevicePage2_t pcie_device_pg2;
8084 Mpi2ConfigReply_t mpi_reply;
c102e00c 8085 struct _pcie_device *pcie_device;
22a923c3 8086 struct _enclosure_node *enclosure_dev;
c102e00c
SPS
8087 u32 ioc_status;
8088 u64 wwid;
8089
8090 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8091 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
919d8a3f
JP
8092 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8093 __FILE__, __LINE__, __func__);
c102e00c
SPS
8094 return 0;
8095 }
8096 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8097 MPI2_IOCSTATUS_MASK;
8098 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
8099 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8100 __FILE__, __LINE__, __func__);
c102e00c
SPS
8101 return 0;
8102 }
8103
8104 set_bit(handle, ioc->pend_os_device_add);
8105 wwid = le64_to_cpu(pcie_device_pg0.WWID);
8106
8107 /* check if device is present */
8108 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8109 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
919d8a3f
JP
8110 ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
8111 handle);
c102e00c
SPS
8112 return 0;
8113 }
8114
8115 /* check if there were any issues with discovery */
8116 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8117 pcie_device_pg0.AccessStatus))
8118 return 0;
8119
5bb309db
SP
8120 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
8121 (pcie_device_pg0.DeviceInfo))))
c102e00c
SPS
8122 return 0;
8123
8124 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
8125 if (pcie_device) {
8126 clear_bit(handle, ioc->pend_os_device_add);
8127 pcie_device_put(pcie_device);
8128 return 0;
8129 }
8130
5bb309db
SP
8131 /* PCIe Device Page 2 contains read-only information about a
8132 * specific NVMe device; therefore, this page is only
8133 * valid for NVMe devices and is skipped for PCIe devices of type SCSI.
8134 */
8135 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8136 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8137 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
8138 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8139 handle)) {
8140 ioc_err(ioc,
8141 "failure at %s:%d/%s()!\n", __FILE__,
8142 __LINE__, __func__);
8143 return 0;
8144 }
8145
8146 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8147 MPI2_IOCSTATUS_MASK;
8148 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8149 ioc_err(ioc,
8150 "failure at %s:%d/%s()!\n", __FILE__,
8151 __LINE__, __func__);
8152 return 0;
8153 }
8154 }
8155
c102e00c
SPS
8156 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
8157 if (!pcie_device) {
919d8a3f
JP
8158 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8159 __FILE__, __LINE__, __func__);
c102e00c
SPS
8160 return 0;
8161 }
8162
8163 kref_init(&pcie_device->refcount);
8164 pcie_device->id = ioc->pcie_target_id++;
8165 pcie_device->channel = PCIE_CHANNEL;
8166 pcie_device->handle = handle;
3c090ce3 8167 pcie_device->access_status = pcie_device_pg0.AccessStatus;
c102e00c
SPS
8168 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8169 pcie_device->wwid = wwid;
8170 pcie_device->port_num = pcie_device_pg0.PortNum;
8171 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
8172 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
c102e00c
SPS
8173
8174 pcie_device->enclosure_handle =
8175 le16_to_cpu(pcie_device_pg0.EnclosureHandle);
8176 if (pcie_device->enclosure_handle != 0)
8177 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
8178
cf6bf971 8179 if (le32_to_cpu(pcie_device_pg0.Flags) &
c102e00c
SPS
8180 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8181 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
8182 memcpy(&pcie_device->connector_name[0],
8183 &pcie_device_pg0.ConnectorName[0], 4);
8184 } else {
8185 pcie_device->enclosure_level = 0;
8186 pcie_device->connector_name[0] = '\0';
8187 }
8188
8189 /* get enclosure_logical_id */
22a923c3
C
8190 if (pcie_device->enclosure_handle) {
8191 enclosure_dev =
8192 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8193 pcie_device->enclosure_handle);
8194 if (enclosure_dev)
8195 pcie_device->enclosure_logical_id =
8196 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8197 }
c102e00c 8198 /* TODO -- Add device name once FW supports it */
5bb309db
SP
8199 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8200 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8201 pcie_device->nvme_mdts =
8202 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
d3f623ae
SR
8203 pcie_device->shutdown_latency =
8204 le16_to_cpu(pcie_device_pg2.ShutdownLatency);
8205 /*
8206 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
8207 * if drive's RTD3 Entry Latency is greater than IOC's
8208 * max_shutdown_latency.
8209 */
8210 if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
8211 ioc->max_shutdown_latency =
8212 pcie_device->shutdown_latency;
5bb309db
SP
8213 if (pcie_device_pg2.ControllerResetTO)
8214 pcie_device->reset_timeout =
8215 pcie_device_pg2.ControllerResetTO;
8216 else
8217 pcie_device->reset_timeout = 30;
8218 } else
c1a6c5ac 8219 pcie_device->reset_timeout = 30;
c102e00c
SPS
8220
8221 if (ioc->wait_for_discovery_to_complete)
8222 _scsih_pcie_device_init_add(ioc, pcie_device);
8223 else
8224 _scsih_pcie_device_add(ioc, pcie_device);
8225
8226 pcie_device_put(pcie_device);
8227 return 0;
8228}
4318c734
SPS
8229
8230/**
8231 * _scsih_pcie_topology_change_event_debug - debug for topology
8232 * event
8233 * @ioc: per adapter object
8234 * @event_data: event data payload
8235 * Context: user.
8236 */
8237static void
8238_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8239 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
8240{
8241 int i;
8242 u16 handle;
8243 u16 reason_code;
8244 u8 port_number;
8245 char *status_str = NULL;
8246 u8 link_rate, prev_link_rate;
8247
8248 switch (event_data->SwitchStatus) {
8249 case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
8250 status_str = "add";
8251 break;
8252 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
8253 status_str = "remove";
8254 break;
8255 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
8256 case 0:
8257 status_str = "responding";
8258 break;
8259 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
8260 status_str = "remove delay";
8261 break;
8262 default:
8263 status_str = "unknown status";
8264 break;
8265 }
919d8a3f 8266 ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
4318c734
SPS
8267 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x), "
8268 "start_port(%02d), count(%d)\n",
8269 le16_to_cpu(event_data->SwitchDevHandle),
8270 le16_to_cpu(event_data->EnclosureHandle),
8271 event_data->StartPortNum, event_data->NumEntries);
8272 for (i = 0; i < event_data->NumEntries; i++) {
8273 handle =
8274 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8275 if (!handle)
8276 continue;
8277 port_number = event_data->StartPortNum + i;
8278 reason_code = event_data->PortEntry[i].PortStatus;
8279 switch (reason_code) {
8280 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8281 status_str = "target add";
8282 break;
8283 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8284 status_str = "target remove";
8285 break;
8286 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
8287 status_str = "delay target remove";
8288 break;
8289 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8290 status_str = "link rate change";
8291 break;
8292 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
8293 status_str = "target responding";
8294 break;
8295 default:
8296 status_str = "unknown";
8297 break;
8298 }
8299 link_rate = event_data->PortEntry[i].CurrentPortInfo &
8300 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8301 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
8302 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8303 pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
8304 " link rate: new(0x%02x), old(0x%02x)\n", port_number,
8305 handle, status_str, link_rate, prev_link_rate);
8306 }
8307}
8308
8309/**
8310 * _scsih_pcie_topology_change_event - handle PCIe topology
8311 * changes
8312 * @ioc: per adapter object
8313 * @fw_event: The fw_event_work object
8314 * Context: user.
8315 *
8316 */
45b7aef7 8317static void
4318c734
SPS
8318_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
8319 struct fw_event_work *fw_event)
8320{
8321 int i;
8322 u16 handle;
8323 u16 reason_code;
8324 u8 link_rate, prev_link_rate;
8325 unsigned long flags;
8326 int rc;
4318c734
SPS
8327 Mpi26EventDataPCIeTopologyChangeList_t *event_data =
8328 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
8329 struct _pcie_device *pcie_device;
8330
8331 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8332 _scsih_pcie_topology_change_event_debug(ioc, event_data);
8333
8334 if (ioc->shost_recovery || ioc->remove_host ||
8335 ioc->pci_error_recovery)
45b7aef7 8336 return;
4318c734
SPS
8337
8338 if (fw_event->ignore) {
919d8a3f 8339 dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
45b7aef7 8340 return;
4318c734
SPS
8341 }
8342
8343 /* handle siblings events */
8344 for (i = 0; i < event_data->NumEntries; i++) {
8345 if (fw_event->ignore) {
919d8a3f
JP
8346 dewtprintk(ioc,
8347 ioc_info(ioc, "ignoring switch event\n"));
45b7aef7 8348 return;
4318c734
SPS
8349 }
8350 if (ioc->remove_host || ioc->pci_error_recovery)
45b7aef7 8351 return;
4318c734
SPS
8352 reason_code = event_data->PortEntry[i].PortStatus;
8353 handle =
8354 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8355 if (!handle)
8356 continue;
8357
8358 link_rate = event_data->PortEntry[i].CurrentPortInfo
8359 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8360 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
8361 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8362
8363 switch (reason_code) {
8364 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8365 if (ioc->shost_recovery)
8366 break;
8367 if (link_rate == prev_link_rate)
8368 break;
8369 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8370 break;
8371
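/* Revalidate the device on a link rate change; if the handle is no
 * longer tracked but an OS device add is still pending, the event is
 * converted into a device add below.
 */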
8372 _scsih_pcie_check_device(ioc, handle);
8373
8374 /* The code after this point handles the test case
8375 * where a device has been added but keeps returning
8376 * BUSY for some time. Then, before the Device Missing
8377 * Delay expires and the device becomes READY, the
8378 * device is removed and added back.
8379 */
8380 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8381 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
8382 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8383
8384 if (pcie_device) {
8385 pcie_device_put(pcie_device);
8386 break;
8387 }
8388
8389 if (!test_bit(handle, ioc->pend_os_device_add))
8390 break;
8391
919d8a3f
JP
8392 dewtprintk(ioc,
8393 ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
8394 handle));
4318c734
SPS
8395 event_data->PortEntry[i].PortStatus &= 0xF0;
8396 event_data->PortEntry[i].PortStatus |=
8397 MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
df561f66 8398 fallthrough;
4318c734
SPS
8399 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8400 if (ioc->shost_recovery)
8401 break;
8402 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8403 break;
8404
8405 rc = _scsih_pcie_add_device(ioc, handle);
8406 if (!rc) {
8407 /* mark entry vacant */
8408 /* TODO This needs to be reviewed and fixed:
8409 * there is no defined entry value for marking
8410 * an event entry as void (vacant).
8411 */
8412 event_data->PortEntry[i].PortStatus |=
8413 MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
8414 }
8415 break;
8416 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8417 _scsih_pcie_device_remove_by_handle(ioc, handle);
8418 break;
8419 }
8420 }
4318c734
SPS
8421}
8422
8423/**
4beb4867
BVA
8424 * _scsih_pcie_device_status_change_event_debug - debug for device event
8425 * @ioc: per adapter object
4318c734
SPS
8426 * @event_data: event data payload
8427 * Context: user.
4318c734
SPS
8428 */
8429static void
8430_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8431 Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
8432{
8433 char *reason_str = NULL;
8434
8435 switch (event_data->ReasonCode) {
8436 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
8437 reason_str = "smart data";
8438 break;
8439 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
8440 reason_str = "unsupported device discovered";
8441 break;
8442 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
8443 reason_str = "internal device reset";
8444 break;
8445 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
8446 reason_str = "internal task abort";
8447 break;
8448 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8449 reason_str = "internal task abort set";
8450 break;
8451 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8452 reason_str = "internal clear task set";
8453 break;
8454 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
8455 reason_str = "internal query task";
8456 break;
8457 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
8458 reason_str = "device init failure";
8459 break;
8460 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8461 reason_str = "internal device reset complete";
8462 break;
8463 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8464 reason_str = "internal task abort complete";
8465 break;
8466 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
8467 reason_str = "internal async notification";
8468 break;
c1a6c5ac
C
8469 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
8470 reason_str = "pcie hot reset failed";
8471 break;
4318c734
SPS
8472 default:
8473 reason_str = "unknown reason";
8474 break;
8475 }
8476
919d8a3f
JP
8477 ioc_info(ioc, "PCIE device status change: (%s)\n"
8478 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
8479 reason_str, le16_to_cpu(event_data->DevHandle),
8480 (u64)le64_to_cpu(event_data->WWID),
8481 le16_to_cpu(event_data->TaskTag));
4318c734 8482 if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
919d8a3f 8483 pr_cont(", ASC(0x%x), ASCQ(0x%x)",
4318c734 8484 event_data->ASC, event_data->ASCQ);
919d8a3f 8485 pr_cont("\n");
4318c734
SPS
8486}
8487
8488/**
8489 * _scsih_pcie_device_status_change_event - handle device status
8490 * change
8491 * @ioc: per adapter object
8492 * @fw_event: The fw_event_work object
8493 * Context: user.
4318c734
SPS
8494 */
8495static void
8496_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8497 struct fw_event_work *fw_event)
8498{
8499 struct MPT3SAS_TARGET *target_priv_data;
8500 struct _pcie_device *pcie_device;
8501 u64 wwid;
8502 unsigned long flags;
8503 Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
8504 (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
8505 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8506 _scsih_pcie_device_status_change_event_debug(ioc,
8507 event_data);
8508
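/* Only internal device reset start/complete events matter here: they
 * gate the target's tm_busy flag while the firmware resets the drive.
 */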
8509 if (event_data->ReasonCode !=
8510 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
8511 event_data->ReasonCode !=
8512 MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
8513 return;
8514
8515 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8516 wwid = le64_to_cpu(event_data->WWID);
8517 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8518
8519 if (!pcie_device || !pcie_device->starget)
8520 goto out;
8521
8522 target_priv_data = pcie_device->starget->hostdata;
8523 if (!target_priv_data)
8524 goto out;
8525
8526 if (event_data->ReasonCode ==
8527 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
8528 target_priv_data->tm_busy = 1;
8529 else
8530 target_priv_data->tm_busy = 0;
8531out:
8532 if (pcie_device)
8533 pcie_device_put(pcie_device);
8534
8535 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8536}
8537
f92363d1
SR
8538/**
8539 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
8540 * event
8541 * @ioc: per adapter object
8542 * @event_data: event data payload
8543 * Context: user.
f92363d1
SR
8544 */
8545static void
8546_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8547 Mpi2EventDataSasEnclDevStatusChange_t *event_data)
8548{
8549 char *reason_str = NULL;
8550
8551 switch (event_data->ReasonCode) {
8552 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8553 reason_str = "enclosure add";
8554 break;
8555 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8556 reason_str = "enclosure remove";
8557 break;
8558 default:
8559 reason_str = "unknown reason";
8560 break;
8561 }
8562
919d8a3f
JP
8563 ioc_info(ioc, "enclosure status change: (%s)\n"
8564 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
8565 reason_str,
8566 le16_to_cpu(event_data->EnclosureHandle),
8567 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
8568 le16_to_cpu(event_data->StartSlot));
f92363d1 8569}
f92363d1
SR
8570
8571/**
8572 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
8573 * @ioc: per adapter object
8574 * @fw_event: The fw_event_work object
8575 * Context: user.
f92363d1
SR
8576 */
8577static void
8578_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8579 struct fw_event_work *fw_event)
8580{
22a923c3
C
8581 Mpi2ConfigReply_t mpi_reply;
8582 struct _enclosure_node *enclosure_dev = NULL;
8583 Mpi2EventDataSasEnclDevStatusChange_t *event_data =
8584 (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
8585 int rc;
8586 u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
8587
f92363d1
SR
8588 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8589 _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
35b62362 8590 (Mpi2EventDataSasEnclDevStatusChange_t *)
f92363d1 8591 fw_event->event_data);
22a923c3
C
8592 if (ioc->shost_recovery)
8593 return;
8594
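/* Look up any cached enclosure object for this handle; the reason code
 * below decides whether a new entry is added to ioc->enclosure_list or
 * an existing one is dropped.
 */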
8595 if (enclosure_handle)
8596 enclosure_dev =
8597 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8598 enclosure_handle);
8599 switch (event_data->ReasonCode) {
8600 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8601 if (!enclosure_dev) {
8602 enclosure_dev =
8603 kzalloc(sizeof(struct _enclosure_node),
8604 GFP_KERNEL);
8605 if (!enclosure_dev) {
919d8a3f
JP
8606 ioc_info(ioc, "failure at %s:%d/%s()!\n",
8607 __FILE__, __LINE__, __func__);
22a923c3
C
8608 return;
8609 }
8610 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8611 &enclosure_dev->pg0,
8612 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
8613 enclosure_handle);
8614
8615 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8616 MPI2_IOCSTATUS_MASK)) {
8617 kfree(enclosure_dev);
8618 return;
8619 }
8620
8621 list_add_tail(&enclosure_dev->list,
8622 &ioc->enclosure_list);
8623 }
8624 break;
8625 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8626 if (enclosure_dev) {
8627 list_del(&enclosure_dev->list);
8628 kfree(enclosure_dev);
8629 }
8630 break;
8631 default:
8632 break;
8633 }
f92363d1
SR
8634}
8635
8636/**
8637 * _scsih_sas_broadcast_primitive_event - handle broadcast events
8638 * @ioc: per adapter object
8639 * @fw_event: The fw_event_work object
8640 * Context: user.
f92363d1
SR
8641 */
8642static void
8643_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
8644 struct fw_event_work *fw_event)
8645{
8646 struct scsi_cmnd *scmd;
8647 struct scsi_device *sdev;
6da999fe 8648 struct scsiio_tracker *st;
f92363d1
SR
8649 u16 smid, handle;
8650 u32 lun;
8651 struct MPT3SAS_DEVICE *sas_device_priv_data;
8652 u32 termination_count;
8653 u32 query_count;
8654 Mpi2SCSITaskManagementReply_t *mpi_reply;
35b62362
JL
8655 Mpi2EventDataSasBroadcastPrimitive_t *event_data =
8656 (Mpi2EventDataSasBroadcastPrimitive_t *)
8657 fw_event->event_data;
f92363d1
SR
8658 u16 ioc_status;
8659 unsigned long flags;
8660 int r;
8661 u8 max_retries = 0;
8662 u8 task_abort_retries;
8663
8664 mutex_lock(&ioc->tm_cmds.mutex);
919d8a3f
JP
8665 ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
8666 __func__, event_data->PhyNum, event_data->PortWidth);
f92363d1
SR
8667
8668 _scsih_block_io_all_device(ioc);
8669
8670 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8671 mpi_reply = ioc->tm_cmds.reply;
8672 broadcast_aen_retry:
8673
8674 /* sanity checks for retrying this loop */
8675 if (max_retries++ == 5) {
919d8a3f 8676 dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
f92363d1
SR
8677 goto out;
8678 } else if (max_retries > 1)
919d8a3f
JP
8679 dewtprintk(ioc,
8680 ioc_info(ioc, "%s: %d retry\n",
8681 __func__, max_retries - 1));
f92363d1
SR
8682
8683 termination_count = 0;
8684 query_count = 0;
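/* Walk every outstanding SCSI I/O: issue QUERY_TASK to learn whether
 * the IOC/target still owns the command, and ABORT_TASK anything that
 * is no longer claimed; the whole scan is retried if further broadcast
 * AENs arrive in the meantime.
 */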
8685 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
8686 if (ioc->shost_recovery)
8687 goto out;
dbec4c90 8688 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
f92363d1
SR
8689 if (!scmd)
8690 continue;
dbec4c90 8691 st = scsi_cmd_priv(scmd);
f92363d1
SR
8692 sdev = scmd->device;
8693 sas_device_priv_data = sdev->hostdata;
8694 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
8695 continue;
8696 /* skip hidden raid components */
8697 if (sas_device_priv_data->sas_target->flags &
8698 MPT_TARGET_FLAGS_RAID_COMPONENT)
8699 continue;
8700 /* skip volumes */
8701 if (sas_device_priv_data->sas_target->flags &
8702 MPT_TARGET_FLAGS_VOLUME)
8703 continue;
999c8513
C
8704 /* skip PCIe devices */
8705 if (sas_device_priv_data->sas_target->flags &
8706 MPT_TARGET_FLAGS_PCIE_DEVICE)
8707 continue;
f92363d1
SR
8708
8709 handle = sas_device_priv_data->sas_target->handle;
8710 lun = sas_device_priv_data->lun;
8711 query_count++;
8712
8713 if (ioc->shost_recovery)
8714 goto out;
8715
8716 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
521e9c0b 8717 r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
dbec4c90 8718 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
c1a6c5ac 8719 st->msix_io, 30, 0);
f92363d1
SR
8720 if (r == FAILED) {
8721 sdev_printk(KERN_WARNING, sdev,
8722 "mpt3sas_scsih_issue_tm: FAILED when sending "
8723 "QUERY_TASK: scmd(%p)\n", scmd);
8724 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8725 goto broadcast_aen_retry;
8726 }
8727 ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
8728 & MPI2_IOCSTATUS_MASK;
8729 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8730 sdev_printk(KERN_WARNING, sdev,
8731 "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
8732 ioc_status, scmd);
8733 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8734 goto broadcast_aen_retry;
8735 }
8736
8737 /* see if IO is still owned by IOC and target */
8738 if (mpi_reply->ResponseCode ==
8739 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
8740 mpi_reply->ResponseCode ==
8741 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
8742 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8743 continue;
8744 }
8745 task_abort_retries = 0;
8746 tm_retry:
8747 if (task_abort_retries++ == 60) {
919d8a3f
JP
8748 dewtprintk(ioc,
8749 ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
8750 __func__));
f92363d1
SR
8751 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8752 goto broadcast_aen_retry;
8753 }
8754
8755 if (ioc->shost_recovery)
8756 goto out_no_lock;
8757
521e9c0b
SP
8758 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
8759 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
8760 st->smid, st->msix_io, 30, 0);
dbec4c90 8761 if (r == FAILED || st->cb_idx != 0xFF) {
f92363d1
SR
8762 sdev_printk(KERN_WARNING, sdev,
8763 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
8764 "scmd(%p)\n", scmd);
8765 goto tm_retry;
8766 }
8767
8768 if (task_abort_retries > 1)
8769 sdev_printk(KERN_WARNING, sdev,
8770 "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
8771 " scmd(%p)\n",
8772 task_abort_retries - 1, scmd);
8773
8774 termination_count += le32_to_cpu(mpi_reply->TerminationCount);
8775 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8776 }
8777
8778 if (ioc->broadcast_aen_pending) {
919d8a3f
JP
8779 dewtprintk(ioc,
8780 ioc_info(ioc,
8781 "%s: loop back due to pending AEN\n",
8782 __func__));
f92363d1
SR
8783 ioc->broadcast_aen_pending = 0;
8784 goto broadcast_aen_retry;
8785 }
8786
8787 out:
8788 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8789 out_no_lock:
8790
919d8a3f
JP
8791 dewtprintk(ioc,
8792 ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
8793 __func__, query_count, termination_count));
f92363d1
SR
8794
8795 ioc->broadcast_aen_busy = 0;
8796 if (!ioc->shost_recovery)
8797 _scsih_ublock_io_all_device(ioc);
8798 mutex_unlock(&ioc->tm_cmds.mutex);
8799}
8800
8801/**
8802 * _scsih_sas_discovery_event - handle discovery events
8803 * @ioc: per adapter object
8804 * @fw_event: The fw_event_work object
8805 * Context: user.
f92363d1
SR
8806 */
8807static void
8808_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
8809 struct fw_event_work *fw_event)
8810{
35b62362
JL
8811 Mpi2EventDataSasDiscovery_t *event_data =
8812 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
f92363d1 8813
f92363d1 8814 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
919d8a3f
JP
8815 ioc_info(ioc, "discovery event: (%s)",
8816 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
8817 "start" : "stop");
af009411 8818 if (event_data->DiscoveryStatus)
919d8a3f
JP
8819 pr_cont("discovery_status(0x%08x)",
8820 le32_to_cpu(event_data->DiscoveryStatus));
8821 pr_cont("\n");
f92363d1 8822 }
f92363d1
SR
8823
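/* A discovery-start event while no host phys are known yet means the
 * SAS host has not been set up; add it now, waiting out an in-progress
 * reset when discovery is disabled.
 */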
8824 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
8825 !ioc->sas_hba.num_phys) {
8826 if (disable_discovery > 0 && ioc->shost_recovery) {
8827 /* Wait for the reset to complete */
8828 while (ioc->shost_recovery)
8829 ssleep(1);
8830 }
8831 _scsih_sas_host_add(ioc);
8832 }
8833}
8834
95540b8e
C
8835/**
8836 * _scsih_sas_device_discovery_error_event - display SAS device discovery error
8837 * events
8838 * @ioc: per adapter object
8839 * @fw_event: The fw_event_work object
8840 * Context: user.
95540b8e
C
8841 */
8842static void
8843_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
8844 struct fw_event_work *fw_event)
8845{
8846 Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
8847 (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
8848
8849 switch (event_data->ReasonCode) {
8850 case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
919d8a3f
JP
8851 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
8852 le16_to_cpu(event_data->DevHandle),
8853 (u64)le64_to_cpu(event_data->SASAddress),
8854 event_data->PhysicalPort);
95540b8e
C
8855 break;
8856 case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
919d8a3f
JP
8857 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
8858 le16_to_cpu(event_data->DevHandle),
8859 (u64)le64_to_cpu(event_data->SASAddress),
8860 event_data->PhysicalPort);
95540b8e
C
8861 break;
8862 default:
8863 break;
8864 }
8865}
8866
4318c734
SPS
8867/**
8868 * _scsih_pcie_enumeration_event - handle enumeration events
8869 * @ioc: per adapter object
8870 * @fw_event: The fw_event_work object
8871 * Context: user.
4318c734
SPS
8872 */
8873static void
8874_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
8875 struct fw_event_work *fw_event)
8876{
8877 Mpi26EventDataPCIeEnumeration_t *event_data =
8878 (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
8879
6767aced
DC
8880 if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
8881 return;
8882
919d8a3f
JP
8883 ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
8884 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
8885 "started" : "completed",
8886 event_data->Flags);
4318c734 8887 if (event_data->EnumerationStatus)
6767aced
DC
8888 pr_cont("enumeration_status(0x%08x)",
8889 le32_to_cpu(event_data->EnumerationStatus));
8890 pr_cont("\n");
4318c734
SPS
8891}
8892
f92363d1
SR
8893/**
8894 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
8895 * @ioc: per adapter object
8896 * @handle: device handle for physical disk
8897 * @phys_disk_num: physical disk number
8898 *
4beb4867 8899 * Return: 0 for success, else failure.
f92363d1
SR
8900 */
8901static int
8902_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
8903{
8904 Mpi2RaidActionRequest_t *mpi_request;
8905 Mpi2RaidActionReply_t *mpi_reply;
8906 u16 smid;
8907 u8 issue_reset = 0;
8908 int rc = 0;
8909 u16 ioc_status;
8910 u32 log_info;
8911
c84b06a4
SR
8912 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
8913 return rc;
8914
f92363d1
SR
8915 mutex_lock(&ioc->scsih_cmds.mutex);
8916
8917 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
919d8a3f 8918 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
f92363d1
SR
8919 rc = -EAGAIN;
8920 goto out;
8921 }
8922 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
8923
8924 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
8925 if (!smid) {
919d8a3f 8926 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
f92363d1
SR
8927 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8928 rc = -EAGAIN;
8929 goto out;
8930 }
8931
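/* Build a RAID_ACTION (PHYSDISK_HIDDEN) request asking the firmware to
 * turn on the fast path for this hidden physical disk, then wait up to
 * 10 seconds for the reply.
 */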
8932 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
8933 ioc->scsih_cmds.smid = smid;
8934 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
8935
8936 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
8937 mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
8938 mpi_request->PhysDiskNum = phys_disk_num;
8939
919d8a3f
JP
8940 dewtprintk(ioc,
8941 ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
8942 handle, phys_disk_num));
f92363d1
SR
8943
8944 init_completion(&ioc->scsih_cmds.done);
078a4cc1 8945 ioc->put_smid_default(ioc, smid);
f92363d1
SR
8946 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
8947
8948 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
c6bdb6a1
SR
8949 mpt3sas_check_cmd_timeout(ioc,
8950 ioc->scsih_cmds.status, mpi_request,
8951 sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
f92363d1
SR
8952 rc = -EFAULT;
8953 goto out;
8954 }
8955
8956 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
8957
8958 mpi_reply = ioc->scsih_cmds.reply;
8959 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
8960 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
8961 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
8962 else
8963 log_info = 0;
8964 ioc_status &= MPI2_IOCSTATUS_MASK;
8965 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
8966 dewtprintk(ioc,
8967 ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
8968 ioc_status, log_info));
f92363d1
SR
8969 rc = -EFAULT;
8970 } else
919d8a3f
JP
8971 dewtprintk(ioc,
8972 ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
f92363d1
SR
8973 }
8974
8975 out:
8976 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8977 mutex_unlock(&ioc->scsih_cmds.mutex);
8978
8979 if (issue_reset)
98c56ad3 8980 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
f92363d1
SR
8981 return rc;
8982}
8983
8984/**
8985 * _scsih_reprobe_lun - reprobing lun
8986 * @sdev: scsi device struct
8987 * @no_uld_attach: sdev->no_uld_attach flag setting
8988 *
8989 **/
8990static void
8991_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
8992{
f92363d1
SR
8993 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
8994 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
6c7abffc 8995 sdev->no_uld_attach ? "hiding" : "exposing");
8bbb1cf6 8996 WARN_ON(scsi_device_reprobe(sdev));
f92363d1
SR
8997}
8998
8999/**
9000 * _scsih_sas_volume_add - add new volume
9001 * @ioc: per adapter object
9002 * @element: IR config element data
9003 * Context: user.
f92363d1
SR
9004 */
9005static void
9006_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
9007 Mpi2EventIrConfigElement_t *element)
9008{
9009 struct _raid_device *raid_device;
9010 unsigned long flags;
9011 u64 wwid;
9012 u16 handle = le16_to_cpu(element->VolDevHandle);
9013 int rc;
9014
9015 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9016 if (!wwid) {
919d8a3f
JP
9017 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9018 __FILE__, __LINE__, __func__);
f92363d1
SR
9019 return;
9020 }
9021
9022 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9023 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
9024 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9025
9026 if (raid_device)
9027 return;
9028
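/* Allocate and track the new volume; it is exposed to the SCSI midlayer
 * right away unless initial discovery is still running, in which case
 * it is only considered for boot device selection here.
 */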
9029 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9030 if (!raid_device) {
919d8a3f
JP
9031 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9032 __FILE__, __LINE__, __func__);
f92363d1
SR
9033 return;
9034 }
9035
9036 raid_device->id = ioc->sas_id++;
9037 raid_device->channel = RAID_CHANNEL;
9038 raid_device->handle = handle;
9039 raid_device->wwid = wwid;
9040 _scsih_raid_device_add(ioc, raid_device);
9041 if (!ioc->wait_for_discovery_to_complete) {
9042 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9043 raid_device->id, 0);
9044 if (rc)
9045 _scsih_raid_device_remove(ioc, raid_device);
9046 } else {
9047 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9048 _scsih_determine_boot_device(ioc, raid_device, 1);
9049 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9050 }
9051}
9052
9053/**
9054 * _scsih_sas_volume_delete - delete volume
9055 * @ioc: per adapter object
9056 * @handle: volume device handle
9057 * Context: user.
f92363d1
SR
9058 */
9059static void
9060_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
9061{
9062 struct _raid_device *raid_device;
9063 unsigned long flags;
9064 struct MPT3SAS_TARGET *sas_target_priv_data;
9065 struct scsi_target *starget = NULL;
9066
9067 spin_lock_irqsave(&ioc->raid_device_lock, flags);
c84b06a4 9068 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
f92363d1
SR
9069 if (raid_device) {
9070 if (raid_device->starget) {
9071 starget = raid_device->starget;
9072 sas_target_priv_data = starget->hostdata;
9073 sas_target_priv_data->deleted = 1;
9074 }
919d8a3f
JP
9075 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
9076 raid_device->handle, (u64)raid_device->wwid);
f92363d1
SR
9077 list_del(&raid_device->list);
9078 kfree(raid_device);
9079 }
9080 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9081 if (starget)
9082 scsi_remove_target(&starget->dev);
9083}
9084
9085/**
9086 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
9087 * @ioc: per adapter object
9088 * @element: IR config element data
9089 * Context: user.
f92363d1
SR
9090 */
9091static void
9092_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
9093 Mpi2EventIrConfigElement_t *element)
9094{
9095 struct _sas_device *sas_device;
9096 struct scsi_target *starget = NULL;
9097 struct MPT3SAS_TARGET *sas_target_priv_data;
9098 unsigned long flags;
9099 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9100
9101 spin_lock_irqsave(&ioc->sas_device_lock, flags);
d1cb5e49 9102 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
f92363d1
SR
9103 if (sas_device) {
9104 sas_device->volume_handle = 0;
9105 sas_device->volume_wwid = 0;
9106 clear_bit(handle, ioc->pd_handles);
9107 if (sas_device->starget && sas_device->starget->hostdata) {
9108 starget = sas_device->starget;
9109 sas_target_priv_data = starget->hostdata;
9110 sas_target_priv_data->flags &=
9111 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
9112 }
9113 }
9114 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9115 if (!sas_device)
9116 return;
9117
9118 /* exposing raid component */
9119 if (starget)
9120 starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
d1cb5e49
SR
9121
9122 sas_device_put(sas_device);
f92363d1
SR
9123}
9124
9125/**
9126 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
9127 * @ioc: per adapter object
9128 * @element: IR config element data
9129 * Context: user.
f92363d1
SR
9130 */
9131static void
9132_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
9133 Mpi2EventIrConfigElement_t *element)
9134{
9135 struct _sas_device *sas_device;
9136 struct scsi_target *starget = NULL;
9137 struct MPT3SAS_TARGET *sas_target_priv_data;
9138 unsigned long flags;
9139 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9140 u16 volume_handle = 0;
9141 u64 volume_wwid = 0;
9142
9143 mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
9144 if (volume_handle)
9145 mpt3sas_config_get_volume_wwid(ioc, volume_handle,
9146 &volume_wwid);
9147
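/* Record the owning volume on the sas_device and flag the target as a
 * RAID component so that upper-level drivers detach from it when the
 * LUN is reprobed below.
 */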
9148 spin_lock_irqsave(&ioc->sas_device_lock, flags);
d1cb5e49 9149 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
f92363d1
SR
9150 if (sas_device) {
9151 set_bit(handle, ioc->pd_handles);
9152 if (sas_device->starget && sas_device->starget->hostdata) {
9153 starget = sas_device->starget;
9154 sas_target_priv_data = starget->hostdata;
9155 sas_target_priv_data->flags |=
9156 MPT_TARGET_FLAGS_RAID_COMPONENT;
9157 sas_device->volume_handle = volume_handle;
9158 sas_device->volume_wwid = volume_wwid;
9159 }
9160 }
9161 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9162 if (!sas_device)
9163 return;
9164
9165 /* hiding raid component */
9166 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
c84b06a4 9167
f92363d1
SR
9168 if (starget)
9169 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
d1cb5e49
SR
9170
9171 sas_device_put(sas_device);
f92363d1
SR
9172}
9173
9174/**
9175 * _scsih_sas_pd_delete - delete pd component
9176 * @ioc: per adapter object
9177 * @element: IR config element data
9178 * Context: user.
f92363d1
SR
9179 */
9180static void
9181_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
9182 Mpi2EventIrConfigElement_t *element)
9183{
9184 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9185
9186 _scsih_device_remove_by_handle(ioc, handle);
9187}
9188
9189/**
9190 * _scsih_sas_pd_add - add pd component
9191 * @ioc: per adapter object
9192 * @element: IR config element data
9193 * Context: user.
f92363d1
SR
9194 */
9195static void
9196_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
9197 Mpi2EventIrConfigElement_t *element)
9198{
9199 struct _sas_device *sas_device;
f92363d1
SR
9200 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9201 Mpi2ConfigReply_t mpi_reply;
9202 Mpi2SasDevicePage0_t sas_device_pg0;
9203 u32 ioc_status;
9204 u64 sas_address;
9205 u16 parent_handle;
9206
9207 set_bit(handle, ioc->pd_handles);
9208
d1cb5e49 9209 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
f92363d1
SR
9210 if (sas_device) {
9211 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
d1cb5e49 9212 sas_device_put(sas_device);
f92363d1
SR
9213 return;
9214 }
9215
9216 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
9217 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
919d8a3f
JP
9218 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9219 __FILE__, __LINE__, __func__);
f92363d1
SR
9220 return;
9221 }
9222
9223 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9224 MPI2_IOCSTATUS_MASK;
9225 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
9226 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9227 __FILE__, __LINE__, __func__);
f92363d1
SR
9228 return;
9229 }
9230
9231 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9232 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9233 mpt3sas_transport_update_links(ioc, sas_address, handle,
e2f0cdf7 9234 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
34b0a785
SR
9235 mpt3sas_get_port_by_id(ioc,
9236 sas_device_pg0.PhysicalPort, 0));
f92363d1
SR
9237
9238 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9239 _scsih_add_device(ioc, handle, 0, 1);
9240}
9241
f92363d1
SR
9242/**
9243 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
9244 * @ioc: per adapter object
9245 * @event_data: event data payload
9246 * Context: user.
f92363d1
SR
9247 */
9248static void
9249_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9250 Mpi2EventDataIrConfigChangeList_t *event_data)
9251{
9252 Mpi2EventIrConfigElement_t *element;
9253 u8 element_type;
9254 int i;
9255 char *reason_str = NULL, *element_str = NULL;
9256
9257 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9258
919d8a3f
JP
9259 ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
9260 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
9261 "foreign" : "native",
9262 event_data->NumElements);
f92363d1
SR
9263 for (i = 0; i < event_data->NumElements; i++, element++) {
9264 switch (element->ReasonCode) {
9265 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9266 reason_str = "add";
9267 break;
9268 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9269 reason_str = "remove";
9270 break;
9271 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
9272 reason_str = "no change";
9273 break;
9274 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9275 reason_str = "hide";
9276 break;
9277 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9278 reason_str = "unhide";
9279 break;
9280 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9281 reason_str = "volume_created";
9282 break;
9283 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9284 reason_str = "volume_deleted";
9285 break;
9286 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9287 reason_str = "pd_created";
9288 break;
9289 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9290 reason_str = "pd_deleted";
9291 break;
9292 default:
9293 reason_str = "unknown reason";
9294 break;
9295 }
9296 element_type = le16_to_cpu(element->ElementFlags) &
9297 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
9298 switch (element_type) {
9299 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
9300 element_str = "volume";
9301 break;
9302 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
9303 element_str = "phys disk";
9304 break;
9305 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
9306 element_str = "hot spare";
9307 break;
9308 default:
9309 element_str = "unknown element";
9310 break;
9311 }
9312 pr_info("\t(%s:%s), vol handle(0x%04x), " \
9313 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
9314 reason_str, le16_to_cpu(element->VolDevHandle),
9315 le16_to_cpu(element->PhysDiskDevHandle),
9316 element->PhysDiskNum);
9317 }
9318}
f92363d1
SR
9319
9320/**
9321 * _scsih_sas_ir_config_change_event - handle ir configuration change events
9322 * @ioc: per adapter object
9323 * @fw_event: The fw_event_work object
9324 * Context: user.
f92363d1
SR
9325 */
9326static void
9327_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
9328 struct fw_event_work *fw_event)
9329{
9330 Mpi2EventIrConfigElement_t *element;
9331 int i;
9332 u8 foreign_config;
35b62362
JL
9333 Mpi2EventDataIrConfigChangeList_t *event_data =
9334 (Mpi2EventDataIrConfigChangeList_t *)
9335 fw_event->event_data;
f92363d1 9336
7786ab6a
SR
9337 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9338 (!ioc->hide_ir_msg))
f92363d1
SR
9339 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
9340
f92363d1
SR
9341 foreign_config = (le32_to_cpu(event_data->Flags) &
9342 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
9343
9344 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
c84b06a4
SR
9345 if (ioc->shost_recovery &&
9346 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
f92363d1
SR
9347 for (i = 0; i < event_data->NumElements; i++, element++) {
9348 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
9349 _scsih_ir_fastpath(ioc,
9350 le16_to_cpu(element->PhysDiskDevHandle),
9351 element->PhysDiskNum);
9352 }
9353 return;
9354 }
7786ab6a 9355
f92363d1
SR
9356 for (i = 0; i < event_data->NumElements; i++, element++) {
9357
9358 switch (element->ReasonCode) {
9359 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9360 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9361 if (!foreign_config)
9362 _scsih_sas_volume_add(ioc, element);
9363 break;
9364 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9365 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9366 if (!foreign_config)
9367 _scsih_sas_volume_delete(ioc,
9368 le16_to_cpu(element->VolDevHandle));
9369 break;
9370 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7786ab6a
SR
9371 if (!ioc->is_warpdrive)
9372 _scsih_sas_pd_hide(ioc, element);
f92363d1
SR
9373 break;
9374 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7786ab6a
SR
9375 if (!ioc->is_warpdrive)
9376 _scsih_sas_pd_expose(ioc, element);
f92363d1
SR
9377 break;
9378 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7786ab6a
SR
9379 if (!ioc->is_warpdrive)
9380 _scsih_sas_pd_add(ioc, element);
f92363d1
SR
9381 break;
9382 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7786ab6a
SR
9383 if (!ioc->is_warpdrive)
9384 _scsih_sas_pd_delete(ioc, element);
f92363d1
SR
9385 break;
9386 }
9387 }
9388}
9389
9390/**
9391 * _scsih_sas_ir_volume_event - IR volume event
9392 * @ioc: per adapter object
9393 * @fw_event: The fw_event_work object
9394 * Context: user.
f92363d1
SR
9395 */
9396static void
9397_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
9398 struct fw_event_work *fw_event)
9399{
9400 u64 wwid;
9401 unsigned long flags;
9402 struct _raid_device *raid_device;
9403 u16 handle;
9404 u32 state;
9405 int rc;
35b62362
JL
9406 Mpi2EventDataIrVolume_t *event_data =
9407 (Mpi2EventDataIrVolume_t *) fw_event->event_data;
f92363d1
SR
9408
9409 if (ioc->shost_recovery)
9410 return;
9411
9412 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
9413 return;
9414
9415 handle = le16_to_cpu(event_data->VolDevHandle);
9416 state = le32_to_cpu(event_data->NewValue);
7786ab6a 9417 if (!ioc->hide_ir_msg)
919d8a3f
JP
9418 dewtprintk(ioc,
9419 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9420 __func__, handle,
9421 le32_to_cpu(event_data->PreviousValue),
9422 state));
f92363d1
SR
9423 switch (state) {
9424 case MPI2_RAID_VOL_STATE_MISSING:
9425 case MPI2_RAID_VOL_STATE_FAILED:
9426 _scsih_sas_volume_delete(ioc, handle);
9427 break;
9428
9429 case MPI2_RAID_VOL_STATE_ONLINE:
9430 case MPI2_RAID_VOL_STATE_DEGRADED:
9431 case MPI2_RAID_VOL_STATE_OPTIMAL:
9432
9433 spin_lock_irqsave(&ioc->raid_device_lock, flags);
c84b06a4 9434 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
f92363d1
SR
9435 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9436
9437 if (raid_device)
9438 break;
9439
9440 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9441 if (!wwid) {
919d8a3f
JP
9442 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9443 __FILE__, __LINE__, __func__);
f92363d1
SR
9444 break;
9445 }
9446
9447 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9448 if (!raid_device) {
919d8a3f
JP
9449 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9450 __FILE__, __LINE__, __func__);
f92363d1
SR
9451 break;
9452 }
9453
9454 raid_device->id = ioc->sas_id++;
9455 raid_device->channel = RAID_CHANNEL;
9456 raid_device->handle = handle;
9457 raid_device->wwid = wwid;
9458 _scsih_raid_device_add(ioc, raid_device);
9459 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9460 raid_device->id, 0);
9461 if (rc)
9462 _scsih_raid_device_remove(ioc, raid_device);
9463 break;
9464
9465 case MPI2_RAID_VOL_STATE_INITIALIZING:
9466 default:
9467 break;
9468 }
9469}
9470
9471/**
9472 * _scsih_sas_ir_physical_disk_event - PD event
9473 * @ioc: per adapter object
9474 * @fw_event: The fw_event_work object
9475 * Context: user.
f92363d1
SR
9476 */
9477static void
9478_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
9479 struct fw_event_work *fw_event)
9480{
9481 u16 handle, parent_handle;
9482 u32 state;
9483 struct _sas_device *sas_device;
f92363d1
SR
9484 Mpi2ConfigReply_t mpi_reply;
9485 Mpi2SasDevicePage0_t sas_device_pg0;
9486 u32 ioc_status;
35b62362
JL
9487 Mpi2EventDataIrPhysicalDisk_t *event_data =
9488 (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
f92363d1
SR
9489 u64 sas_address;
9490
9491 if (ioc->shost_recovery)
9492 return;
9493
9494 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
9495 return;
9496
9497 handle = le16_to_cpu(event_data->PhysDiskDevHandle);
9498 state = le32_to_cpu(event_data->NewValue);
9499
7786ab6a 9500 if (!ioc->hide_ir_msg)
919d8a3f
JP
9501 dewtprintk(ioc,
9502 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9503 __func__, handle,
9504 le32_to_cpu(event_data->PreviousValue),
9505 state));
7786ab6a 9506
f92363d1
SR
9507 switch (state) {
9508 case MPI2_RAID_PD_STATE_ONLINE:
9509 case MPI2_RAID_PD_STATE_DEGRADED:
9510 case MPI2_RAID_PD_STATE_REBUILDING:
9511 case MPI2_RAID_PD_STATE_OPTIMAL:
9512 case MPI2_RAID_PD_STATE_HOT_SPARE:
9513
7786ab6a
SR
9514 if (!ioc->is_warpdrive)
9515 set_bit(handle, ioc->pd_handles);
f92363d1 9516
d1cb5e49
SR
9517 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9518 if (sas_device) {
9519 sas_device_put(sas_device);
f92363d1 9520 return;
d1cb5e49 9521 }
f92363d1
SR
9522
9523 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9524 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9525 handle))) {
919d8a3f
JP
9526 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9527 __FILE__, __LINE__, __func__);
f92363d1
SR
9528 return;
9529 }
9530
9531 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9532 MPI2_IOCSTATUS_MASK;
9533 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
9534 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9535 __FILE__, __LINE__, __func__);
f92363d1
SR
9536 return;
9537 }
9538
9539 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9540 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9541 mpt3sas_transport_update_links(ioc, sas_address, handle,
e2f0cdf7
SR
9542 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9543 mpt3sas_get_port_by_id(ioc,
34b0a785 9544 sas_device_pg0.PhysicalPort, 0));
f92363d1
SR
9545
9546 _scsih_add_device(ioc, handle, 0, 1);
9547
9548 break;
9549
9550 case MPI2_RAID_PD_STATE_OFFLINE:
9551 case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
9552 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
9553 default:
9554 break;
9555 }
9556}
9557
f92363d1
SR
9558/**
9559 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
9560 * @ioc: per adapter object
9561 * @event_data: event data payload
9562 * Context: user.
f92363d1
SR
9563 */
9564static void
9565_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
9566 Mpi2EventDataIrOperationStatus_t *event_data)
9567{
9568 char *reason_str = NULL;
9569
9570 switch (event_data->RAIDOperation) {
9571 case MPI2_EVENT_IR_RAIDOP_RESYNC:
9572 reason_str = "resync";
9573 break;
9574 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
9575 reason_str = "online capacity expansion";
9576 break;
9577 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
9578 reason_str = "consistency check";
9579 break;
9580 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
9581 reason_str = "background init";
9582 break;
9583 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
9584 reason_str = "make data consistent";
9585 break;
9586 }
9587
9588 if (!reason_str)
9589 return;
9590
919d8a3f
JP
9591 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
9592 reason_str,
9593 le16_to_cpu(event_data->VolDevHandle),
9594 event_data->PercentComplete);
f92363d1 9595}
f92363d1
SR
9596
9597/**
9598 * _scsih_sas_ir_operation_status_event - handle RAID operation events
9599 * @ioc: per adapter object
9600 * @fw_event: The fw_event_work object
9601 * Context: user.
f92363d1
SR
9602 */
9603static void
9604_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
9605 struct fw_event_work *fw_event)
9606{
35b62362
JL
9607 Mpi2EventDataIrOperationStatus_t *event_data =
9608 (Mpi2EventDataIrOperationStatus_t *)
9609 fw_event->event_data;
f92363d1
SR
9610 static struct _raid_device *raid_device;
9611 unsigned long flags;
9612 u16 handle;
9613
7786ab6a
SR
9614 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9615 (!ioc->hide_ir_msg))
f92363d1
SR
9616 _scsih_sas_ir_operation_status_event_debug(ioc,
9617 event_data);
f92363d1
SR
9618
9619 /* code added for raid transport support */
9620 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
9621
9622 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9623 handle = le16_to_cpu(event_data->VolDevHandle);
c84b06a4 9624 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
f92363d1
SR
9625 if (raid_device)
9626 raid_device->percent_complete =
9627 event_data->PercentComplete;
9628 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9629 }
9630}
9631
9632/**
9633 * _scsih_prep_device_scan - initialize parameters prior to device scan
9634 * @ioc: per adapter object
9635 *
9636 * Set the deleted flag prior to device scan. If the device is found during
9637 * the scan, then we clear the deleted flag.
9638 */
9639static void
9640_scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
9641{
9642 struct MPT3SAS_DEVICE *sas_device_priv_data;
9643 struct scsi_device *sdev;
9644
9645 shost_for_each_device(sdev, ioc->shost) {
9646 sas_device_priv_data = sdev->hostdata;
9647 if (sas_device_priv_data && sas_device_priv_data->sas_target)
9648 sas_device_priv_data->sas_target->deleted = 1;
9649 }
9650}
9651
787f2448
SP
9652/**
9653 * _scsih_update_device_qdepth - Update QD during Reset.
9654 * @ioc: per adapter object
9655 *
9656 */
9657static void
9658_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
9659{
9660 struct MPT3SAS_DEVICE *sas_device_priv_data;
9661 struct MPT3SAS_TARGET *sas_target_priv_data;
9662 struct _sas_device *sas_device;
9663 struct scsi_device *sdev;
9664 u16 qdepth;
9665
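/* Pick the queue depth per device class (NVMe, wide/narrow SSP, SATA)
 * from the limits reported by the IOC and reapply it to every sdev.
 */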
9666 ioc_info(ioc, "Update devices with firmware reported queue depth\n");
9667 shost_for_each_device(sdev, ioc->shost) {
9668 sas_device_priv_data = sdev->hostdata;
9669 if (sas_device_priv_data && sas_device_priv_data->sas_target) {
9670 sas_target_priv_data = sas_device_priv_data->sas_target;
9671 sas_device = sas_device_priv_data->sas_target->sas_dev;
9672 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
9673 qdepth = ioc->max_nvme_qd;
9674 else if (sas_device &&
9675 sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
9676 qdepth = (sas_device->port_type > 1) ?
9677 ioc->max_wideport_qd : ioc->max_narrowport_qd;
9678 else if (sas_device &&
9679 sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
9680 qdepth = ioc->max_sata_qd;
9681 else
9682 continue;
9683 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
9684 }
9685 }
9686}
9687
f92363d1
SR
9688/**
9689 * _scsih_mark_responding_sas_device - mark a sas_device as responding
9690 * @ioc: per adapter object
e6d45e3e 9691 * @sas_device_pg0: SAS Device page 0
f92363d1
SR
9692 *
9693 * After host reset, find out whether devices are still responding.
9694 * Used in _scsih_remove_unresponding_devices.
f92363d1
SR
9695 */
9696static void
e6d45e3e
SR
9697_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
9698Mpi2SasDevicePage0_t *sas_device_pg0)
f92363d1
SR
9699{
9700 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9701 struct scsi_target *starget;
aba5a85c 9702 struct _sas_device *sas_device = NULL;
22a923c3 9703 struct _enclosure_node *enclosure_dev = NULL;
f92363d1 9704 unsigned long flags;
7d310f24 9705 struct hba_port *port = mpt3sas_get_port_by_id(
34b0a785 9706 ioc, sas_device_pg0->PhysicalPort, 0);
f92363d1 9707
22a923c3
C
9708 if (sas_device_pg0->EnclosureHandle) {
9709 enclosure_dev =
9710 mpt3sas_scsih_enclosure_find_by_handle(ioc,
9711 le16_to_cpu(sas_device_pg0->EnclosureHandle));
9712 if (enclosure_dev == NULL)
919d8a3f
JP
9713 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
9714 sas_device_pg0->EnclosureHandle);
22a923c3 9715 }
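/* Walk the cached sas_device list; a match on SAS address, slot and
 * port marks the device responding, refreshes its enclosure data and
 * picks up any new handle assigned by the firmware.
 */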
f92363d1
SR
9716 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9717 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
78ca7003
SR
9718 if (sas_device->sas_address != le64_to_cpu(
9719 sas_device_pg0->SASAddress))
9720 continue;
9721 if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
9722 continue;
7d310f24
SR
9723 if (sas_device->port != port)
9724 continue;
78ca7003
SR
9725 sas_device->responding = 1;
9726 starget = sas_device->starget;
9727 if (starget && starget->hostdata) {
9728 sas_target_priv_data = starget->hostdata;
9729 sas_target_priv_data->tm_busy = 0;
9730 sas_target_priv_data->deleted = 0;
9731 } else
9732 sas_target_priv_data = NULL;
9733 if (starget) {
9734 starget_printk(KERN_INFO, starget,
9735 "handle(0x%04x), sas_addr(0x%016llx)\n",
9736 le16_to_cpu(sas_device_pg0->DevHandle),
9737 (unsigned long long)
9738 sas_device->sas_address);
9739
9740 if (sas_device->enclosure_handle != 0)
f92363d1 9741 starget_printk(KERN_INFO, starget,
78ca7003
SR
9742 "enclosure logical id(0x%016llx), slot(%d)\n",
9743 (unsigned long long)
9744 sas_device->enclosure_logical_id,
9745 sas_device->slot);
9746 }
9747 if (le16_to_cpu(sas_device_pg0->Flags) &
9748 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
9749 sas_device->enclosure_level =
9750 sas_device_pg0->EnclosureLevel;
9751 memcpy(&sas_device->connector_name[0],
9752 &sas_device_pg0->ConnectorName[0], 4);
9753 } else {
9754 sas_device->enclosure_level = 0;
9755 sas_device->connector_name[0] = '\0';
9756 }
e6d45e3e 9757
78ca7003
SR
9758 sas_device->enclosure_handle =
9759 le16_to_cpu(sas_device_pg0->EnclosureHandle);
9760 sas_device->is_chassis_slot_valid = 0;
9761 if (enclosure_dev) {
9762 sas_device->enclosure_logical_id = le64_to_cpu(
9763 enclosure_dev->pg0.EnclosureLogicalID);
9764 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
9765 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
9766 sas_device->is_chassis_slot_valid = 1;
9767 sas_device->chassis_slot =
9768 enclosure_dev->pg0.ChassisSlot;
22a923c3 9769 }
78ca7003 9770 }
75888956 9771
78ca7003
SR
9772 if (sas_device->handle == le16_to_cpu(
9773 sas_device_pg0->DevHandle))
f92363d1 9774 goto out;
78ca7003
SR
9775 pr_info("\thandle changed from(0x%04x)!!!\n",
9776 sas_device->handle);
9777 sas_device->handle = le16_to_cpu(
9778 sas_device_pg0->DevHandle);
9779 if (sas_target_priv_data)
9780 sas_target_priv_data->handle =
9781 le16_to_cpu(sas_device_pg0->DevHandle);
9782 goto out;
f92363d1
SR
9783 }
9784 out:
9785 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9786}
9787
22a923c3
C
9788/**
9789 * _scsih_create_enclosure_list_after_reset - Free the existing enclosure
9790 * list and rebuild it by scanning all Enclosure Page(0)s
9791 * @ioc: per adapter object
22a923c3
C
9792 */
9793static void
9794_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
9795{
9796 struct _enclosure_node *enclosure_dev;
9797 Mpi2ConfigReply_t mpi_reply;
9798 u16 enclosure_handle;
9799 int rc;
9800
9801 /* Free existing enclosure list */
9802 mpt3sas_free_enclosure_list(ioc);
9803
9804 /* Reconstruct the enclosure list after reset */
9805 enclosure_handle = 0xFFFF;
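/* Iterate Enclosure Page 0 with the GET_NEXT_HANDLE form, starting from
 * 0xFFFF, caching each page until the firmware returns an error or a
 * non-SUCCESS IOCStatus.
 */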
9806 do {
9807 enclosure_dev =
9808 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
9809 if (!enclosure_dev) {
919d8a3f 9810 ioc_err(ioc, "failure at %s:%d/%s()!\n",
22a923c3
C
9811 __FILE__, __LINE__, __func__);
9812 return;
9813 }
9814 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
9815 &enclosure_dev->pg0,
9816 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
9817 enclosure_handle);
9818
9819 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
9820 MPI2_IOCSTATUS_MASK)) {
9821 kfree(enclosure_dev);
9822 return;
9823 }
9824 list_add_tail(&enclosure_dev->list,
9825 &ioc->enclosure_list);
9826 enclosure_handle =
9827 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
9828 } while (1);
9829}
9830
f92363d1
SR
9831/**
9832 * _scsih_search_responding_sas_devices - search for responding SAS end-devices
9833 * @ioc: per adapter object
9834 *
9835 * After host reset, find out whether devices are still responding.
9836 * If not, remove them.
f92363d1
SR
9837 */
9838static void
9839_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
9840{
9841 Mpi2SasDevicePage0_t sas_device_pg0;
9842 Mpi2ConfigReply_t mpi_reply;
9843 u16 ioc_status;
9844 u16 handle;
9845 u32 device_info;
9846
919d8a3f 9847 ioc_info(ioc, "search for end-devices: start\n");
f92363d1
SR
9848
9849 if (list_empty(&ioc->sas_device_list))
9850 goto out;
9851
9852 handle = 0xFFFF;
9853 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9854 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9855 handle))) {
9856 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9857 MPI2_IOCSTATUS_MASK;
14be49ac 9858 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
f92363d1 9859 break;
cf6bf971 9860 handle = le16_to_cpu(sas_device_pg0.DevHandle);
f92363d1
SR
9861 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
9862 if (!(_scsih_is_end_device(device_info)))
9863 continue;
e6d45e3e 9864 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
f92363d1
SR
9865 }
9866
9867 out:
919d8a3f 9868 ioc_info(ioc, "search for end-devices: complete\n");
f92363d1
SR
9869}
9870
ec051e5a
SPS
9871/**
9872 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
9873 * @ioc: per adapter object
9874 * @pcie_device_pg0: PCIe Device page 0
9875 *
9876 * After host reset, find out whether devices are still responding.
9877 * Used in _scsih_remove_unresponding_devices.
ec051e5a
SPS
9878 */
9879static void
9880_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
9881 Mpi26PCIeDevicePage0_t *pcie_device_pg0)
9882{
9883 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9884 struct scsi_target *starget;
9885 struct _pcie_device *pcie_device;
9886 unsigned long flags;
9887
9888 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9889 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
cf6bf971
C
9890 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
9891 && (pcie_device->slot == le16_to_cpu(
9892 pcie_device_pg0->Slot))) {
3c090ce3
SP
9893 pcie_device->access_status =
9894 pcie_device_pg0->AccessStatus;
ec051e5a
SPS
9895 pcie_device->responding = 1;
9896 starget = pcie_device->starget;
9897 if (starget && starget->hostdata) {
9898 sas_target_priv_data = starget->hostdata;
9899 sas_target_priv_data->tm_busy = 0;
9900 sas_target_priv_data->deleted = 0;
9901 } else
9902 sas_target_priv_data = NULL;
9903 if (starget) {
9904 starget_printk(KERN_INFO, starget,
9905 "handle(0x%04x), wwid(0x%016llx) ",
9906 pcie_device->handle,
9907 (unsigned long long)pcie_device->wwid);
9908 if (pcie_device->enclosure_handle != 0)
9909 starget_printk(KERN_INFO, starget,
9910 "enclosure logical id(0x%016llx), "
9911 "slot(%d)\n",
9912 (unsigned long long)
9913 pcie_device->enclosure_logical_id,
9914 pcie_device->slot);
9915 }
9916
9917 if (((le32_to_cpu(pcie_device_pg0->Flags)) &
9918 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
9919 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
9920 pcie_device->enclosure_level =
9921 pcie_device_pg0->EnclosureLevel;
9922 memcpy(&pcie_device->connector_name[0],
9923 &pcie_device_pg0->ConnectorName[0], 4);
9924 } else {
9925 pcie_device->enclosure_level = 0;
9926 pcie_device->connector_name[0] = '\0';
9927 }
9928
cf6bf971
C
9929 if (pcie_device->handle == le16_to_cpu(
9930 pcie_device_pg0->DevHandle))
ec051e5a 9931 goto out;
268eb498 9932 pr_info("\thandle changed from(0x%04x)!!!\n",
ec051e5a 9933 pcie_device->handle);
cf6bf971
C
9934 pcie_device->handle = le16_to_cpu(
9935 pcie_device_pg0->DevHandle);
ec051e5a
SPS
9936 if (sas_target_priv_data)
9937 sas_target_priv_data->handle =
cf6bf971 9938 le16_to_cpu(pcie_device_pg0->DevHandle);
ec051e5a
SPS
9939 goto out;
9940 }
9941 }
9942
9943 out:
9944 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9945}
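/*
 * A minimal sketch of the reconcile step performed above: a device is matched
 * by its stable identity (WWID plus slot here) and only the volatile firmware
 * handle is refreshed when a reset has renumbered it.  toy_pcie_dev and
 * toy_reconcile_handle are illustrative stand-ins, not part of the driver.
 */
struct toy_pcie_dev {
	u64 wwid;
	u16 slot;
	u16 handle;
	int responding;
};

static void toy_reconcile_handle(struct toy_pcie_dev *dev,
		u64 pg0_wwid, u16 pg0_slot, u16 pg0_handle)
{
	if (dev->wwid != pg0_wwid || dev->slot != pg0_slot)
		return;				/* different physical device */
	dev->responding = 1;			/* survived the host reset */
	if (dev->handle != pg0_handle)
		dev->handle = pg0_handle;	/* handle changed across reset */
}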
9946
9947/**
 9948 * _scsih_search_responding_pcie_devices - search for responding PCIe devices
9949 * @ioc: per adapter object
9950 *
9951 * After host reset, find out whether devices are still responding.
 9952 * If not, remove them.
ec051e5a
SPS
9953 */
9954static void
9955_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
9956{
9957 Mpi26PCIeDevicePage0_t pcie_device_pg0;
9958 Mpi2ConfigReply_t mpi_reply;
9959 u16 ioc_status;
9960 u16 handle;
9961 u32 device_info;
9962
919d8a3f 9963 ioc_info(ioc, "search for PCIe end-devices: start\n");
ec051e5a
SPS
9964
9965 if (list_empty(&ioc->pcie_device_list))
9966 goto out;
9967
9968 handle = 0xFFFF;
9969 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9970 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9971 handle))) {
9972 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9973 MPI2_IOCSTATUS_MASK;
9974 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
9975 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
9976 __func__, ioc_status,
9977 le32_to_cpu(mpi_reply.IOCLogInfo));
ec051e5a
SPS
9978 break;
9979 }
9980 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9981 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
5bb309db 9982 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
ec051e5a 9983 continue;
ec051e5a
SPS
9984 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
9985 }
9986out:
919d8a3f 9987 ioc_info(ioc, "search for PCIe end-devices: complete\n");
ec051e5a
SPS
9988}
9989
f92363d1
SR
9990/**
9991 * _scsih_mark_responding_raid_device - mark a raid_device as responding
9992 * @ioc: per adapter object
9993 * @wwid: world wide identifier for raid volume
9994 * @handle: device handle
9995 *
9996 * After host reset, find out whether devices are still responding.
 9997 * Used in _scsih_remove_unresponding_devices.
f92363d1
SR
9998 */
9999static void
10000_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
10001 u16 handle)
10002{
7786ab6a 10003 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
f92363d1
SR
10004 struct scsi_target *starget;
10005 struct _raid_device *raid_device;
10006 unsigned long flags;
10007
10008 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10009 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
10010 if (raid_device->wwid == wwid && raid_device->starget) {
10011 starget = raid_device->starget;
10012 if (starget && starget->hostdata) {
10013 sas_target_priv_data = starget->hostdata;
10014 sas_target_priv_data->deleted = 0;
10015 } else
10016 sas_target_priv_data = NULL;
10017 raid_device->responding = 1;
10018 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10019 starget_printk(KERN_INFO, raid_device->starget,
10020 "handle(0x%04x), wwid(0x%016llx)\n", handle,
10021 (unsigned long long)raid_device->wwid);
7786ab6a 10022
7786ab6a
SR
10023 /*
10024 * WARPDRIVE: The handles of the PDs might have changed
10025 * across the host reset so re-initialize the
10026 * required data for Direct IO
10027 */
c84b06a4 10028 mpt3sas_init_warpdrive_properties(ioc, raid_device);
f92363d1
SR
10029 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10030 if (raid_device->handle == handle) {
10031 spin_unlock_irqrestore(&ioc->raid_device_lock,
10032 flags);
10033 return;
10034 }
10035 pr_info("\thandle changed from(0x%04x)!!!\n",
10036 raid_device->handle);
10037 raid_device->handle = handle;
10038 if (sas_target_priv_data)
10039 sas_target_priv_data->handle = handle;
10040 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10041 return;
10042 }
10043 }
10044 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10045}
10046
10047/**
 10048 * _scsih_search_responding_raid_devices - search for responding RAID volumes
10049 * @ioc: per adapter object
10050 *
10051 * After host reset, find out whether devices are still responding.
 10052 * If not, remove them.
f92363d1
SR
10053 */
10054static void
10055_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
10056{
10057 Mpi2RaidVolPage1_t volume_pg1;
10058 Mpi2RaidVolPage0_t volume_pg0;
10059 Mpi2RaidPhysDiskPage0_t pd_pg0;
10060 Mpi2ConfigReply_t mpi_reply;
10061 u16 ioc_status;
10062 u16 handle;
10063 u8 phys_disk_num;
10064
10065 if (!ioc->ir_firmware)
10066 return;
10067
919d8a3f 10068 ioc_info(ioc, "search for raid volumes: start\n");
f92363d1
SR
10069
10070 if (list_empty(&ioc->raid_device_list))
10071 goto out;
10072
10073 handle = 0xFFFF;
10074 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10075 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10076 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10077 MPI2_IOCSTATUS_MASK;
14be49ac 10078 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
f92363d1
SR
10079 break;
10080 handle = le16_to_cpu(volume_pg1.DevHandle);
10081
10082 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10083 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10084 sizeof(Mpi2RaidVolPage0_t)))
10085 continue;
10086
10087 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10088 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10089 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
10090 _scsih_mark_responding_raid_device(ioc,
10091 le64_to_cpu(volume_pg1.WWID), handle);
10092 }
10093
10094 /* refresh the pd_handles */
7786ab6a 10095 if (!ioc->is_warpdrive) {
f92363d1
SR
10096 phys_disk_num = 0xFF;
10097 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
10098 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10099 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10100 phys_disk_num))) {
10101 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10102 MPI2_IOCSTATUS_MASK;
14be49ac 10103 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
f92363d1
SR
10104 break;
10105 phys_disk_num = pd_pg0.PhysDiskNum;
10106 handle = le16_to_cpu(pd_pg0.DevHandle);
10107 set_bit(handle, ioc->pd_handles);
10108 }
7786ab6a 10109 }
f92363d1 10110 out:
919d8a3f 10111 ioc_info(ioc, "search for responding raid volumes: complete\n");
f92363d1
SR
10112}
10113
10114/**
 10115 * _scsih_mark_responding_expander - mark an expander as responding
10116 * @ioc: per adapter object
aba5a85c 10117 * @expander_pg0: SAS Expander Config Page0
f92363d1
SR
10118 *
10119 * After host reset, find out whether devices are still responding.
 10120 * Used in _scsih_remove_unresponding_devices.
f92363d1
SR
10121 */
10122static void
aba5a85c
SR
10123_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
10124 Mpi2ExpanderPage0_t *expander_pg0)
f92363d1 10125{
aba5a85c 10126 struct _sas_node *sas_expander = NULL;
f92363d1 10127 unsigned long flags;
22a923c3
C
10128 int i;
10129 struct _enclosure_node *enclosure_dev = NULL;
aba5a85c 10130 u16 handle = le16_to_cpu(expander_pg0->DevHandle);
22a923c3 10131 u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
aba5a85c 10132 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
7d310f24 10133 struct hba_port *port = mpt3sas_get_port_by_id(
34b0a785 10134 ioc, expander_pg0->PhysicalPort, 0);
aba5a85c 10135
22a923c3
C
10136 if (enclosure_handle)
10137 enclosure_dev =
10138 mpt3sas_scsih_enclosure_find_by_handle(ioc,
10139 enclosure_handle);
f92363d1
SR
10140
10141 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10142 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
10143 if (sas_expander->sas_address != sas_address)
10144 continue;
7d310f24
SR
10145 if (sas_expander->port != port)
10146 continue;
f92363d1 10147 sas_expander->responding = 1;
aba5a85c 10148
22a923c3 10149 if (enclosure_dev) {
aba5a85c 10150 sas_expander->enclosure_logical_id =
22a923c3
C
10151 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
10152 sas_expander->enclosure_handle =
10153 le16_to_cpu(expander_pg0->EnclosureHandle);
10154 }
aba5a85c 10155
f92363d1
SR
10156 if (sas_expander->handle == handle)
10157 goto out;
10158 pr_info("\texpander(0x%016llx): handle changed" \
10159 " from(0x%04x) to (0x%04x)!!!\n",
10160 (unsigned long long)sas_expander->sas_address,
10161 sas_expander->handle, handle);
10162 sas_expander->handle = handle;
10163 for (i = 0 ; i < sas_expander->num_phys ; i++)
10164 sas_expander->phy[i].handle = handle;
10165 goto out;
10166 }
10167 out:
10168 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10169}
10170
10171/**
 10172 * _scsih_search_responding_expanders - search for responding expanders
10173 * @ioc: per adapter object
10174 *
10175 * After host reset, find out whether devices are still responding.
 10176 * If not, remove them.
f92363d1
SR
10177 */
10178static void
10179_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
10180{
10181 Mpi2ExpanderPage0_t expander_pg0;
10182 Mpi2ConfigReply_t mpi_reply;
10183 u16 ioc_status;
10184 u64 sas_address;
10185 u16 handle;
324c122f 10186 u8 port;
f92363d1 10187
919d8a3f 10188 ioc_info(ioc, "search for expanders: start\n");
f92363d1
SR
10189
10190 if (list_empty(&ioc->sas_expander_list))
10191 goto out;
10192
10193 handle = 0xFFFF;
10194 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10195 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10196
10197 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10198 MPI2_IOCSTATUS_MASK;
14be49ac 10199 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
f92363d1
SR
10200 break;
10201
10202 handle = le16_to_cpu(expander_pg0.DevHandle);
10203 sas_address = le64_to_cpu(expander_pg0.SASAddress);
324c122f 10204 port = expander_pg0.PhysicalPort;
7d310f24
SR
10205 pr_info(
10206 "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10207 handle, (unsigned long long)sas_address,
324c122f
SR
10208 (ioc->multipath_on_hba ?
10209 port : MULTIPATH_DISABLED_PORT_ID));
aba5a85c 10210 _scsih_mark_responding_expander(ioc, &expander_pg0);
f92363d1
SR
10211 }
10212
10213 out:
919d8a3f 10214 ioc_info(ioc, "search for expanders: complete\n");
f92363d1
SR
10215}
10216
10217/**
3075ac49 10218 * _scsih_remove_unresponding_devices - removing unresponding devices
f92363d1 10219 * @ioc: per adapter object
f92363d1
SR
10220 */
10221static void
3075ac49 10222_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
f92363d1
SR
10223{
10224 struct _sas_device *sas_device, *sas_device_next;
10225 struct _sas_node *sas_expander, *sas_expander_next;
10226 struct _raid_device *raid_device, *raid_device_next;
3075ac49 10227 struct _pcie_device *pcie_device, *pcie_device_next;
f92363d1
SR
10228 struct list_head tmp_list;
10229 unsigned long flags;
d1cb5e49 10230 LIST_HEAD(head);
f92363d1 10231
919d8a3f 10232 ioc_info(ioc, "removing unresponding devices: start\n");
f92363d1
SR
10233
10234 /* removing unresponding end devices */
919d8a3f 10235 ioc_info(ioc, "removing unresponding devices: end-devices\n");
d1cb5e49
SR
10236 /*
10237 * Iterate, pulling off devices marked as non-responding. We become the
10238 * owner for the reference the list had on any object we prune.
10239 */
10240 spin_lock_irqsave(&ioc->sas_device_lock, flags);
a0815c45
SP
10241
10242 /*
 10243 * Clean up the sas_device_init_list as the driver
 10244 * goes for a fresh scan as part of diag reset.
10245 */
10246 list_for_each_entry_safe(sas_device, sas_device_next,
10247 &ioc->sas_device_init_list, list) {
10248 list_del_init(&sas_device->list);
10249 sas_device_put(sas_device);
10250 }
10251
f92363d1
SR
10252 list_for_each_entry_safe(sas_device, sas_device_next,
10253 &ioc->sas_device_list, list) {
10254 if (!sas_device->responding)
d1cb5e49 10255 list_move_tail(&sas_device->list, &head);
f92363d1
SR
10256 else
10257 sas_device->responding = 0;
10258 }
d1cb5e49
SR
10259 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10260
10261 /*
10262 * Now, uninitialize and remove the unresponding devices we pruned.
10263 */
10264 list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
10265 _scsih_remove_device(ioc, sas_device);
10266 list_del_init(&sas_device->list);
10267 sas_device_put(sas_device);
10268 }
f92363d1 10269
919d8a3f 10270 ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
3075ac49
SPS
10271 INIT_LIST_HEAD(&head);
10272 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
a0815c45
SP
10273 /*
 10274 * Clean up the pcie_device_init_list as the driver
 10275 * goes for a fresh scan as part of diag reset.
10276 */
10277 list_for_each_entry_safe(pcie_device, pcie_device_next,
10278 &ioc->pcie_device_init_list, list) {
10279 list_del_init(&pcie_device->list);
10280 pcie_device_put(pcie_device);
10281 }
10282
3075ac49
SPS
10283 list_for_each_entry_safe(pcie_device, pcie_device_next,
10284 &ioc->pcie_device_list, list) {
10285 if (!pcie_device->responding)
10286 list_move_tail(&pcie_device->list, &head);
10287 else
10288 pcie_device->responding = 0;
10289 }
10290 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10291
10292 list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
10293 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
10294 list_del_init(&pcie_device->list);
10295 pcie_device_put(pcie_device);
10296 }
10297
f92363d1
SR
10298 /* removing unresponding volumes */
10299 if (ioc->ir_firmware) {
919d8a3f 10300 ioc_info(ioc, "removing unresponding devices: volumes\n");
f92363d1
SR
10301 list_for_each_entry_safe(raid_device, raid_device_next,
10302 &ioc->raid_device_list, list) {
10303 if (!raid_device->responding)
10304 _scsih_sas_volume_delete(ioc,
10305 raid_device->handle);
10306 else
10307 raid_device->responding = 0;
10308 }
10309 }
10310
10311 /* removing unresponding expanders */
919d8a3f 10312 ioc_info(ioc, "removing unresponding devices: expanders\n");
f92363d1
SR
10313 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10314 INIT_LIST_HEAD(&tmp_list);
10315 list_for_each_entry_safe(sas_expander, sas_expander_next,
10316 &ioc->sas_expander_list, list) {
10317 if (!sas_expander->responding)
10318 list_move_tail(&sas_expander->list, &tmp_list);
10319 else
10320 sas_expander->responding = 0;
10321 }
10322 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10323 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
10324 list) {
f92363d1
SR
10325 _scsih_expander_node_remove(ioc, sas_expander);
10326 }
10327
919d8a3f 10328 ioc_info(ioc, "removing unresponding devices: complete\n");
f92363d1
SR
10329
10330 /* unblock devices */
10331 _scsih_ublock_io_all_device(ioc);
10332}
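/*
 * A minimal sketch of the mark-and-sweep scheme used above, with a plain
 * array standing in for the driver's device lists: the rescan marks every
 * device it still finds as responding, the sweep then drops whatever stayed
 * unmarked and clears the flag on the survivors so the next host reset
 * starts from a clean slate.  All toy_* names are illustrative only.
 */
struct toy_device {
	unsigned short handle;
	int responding;
	int present;
};

static void toy_mark_responding(struct toy_device *devs, unsigned int n,
		unsigned short handle)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (devs[i].present && devs[i].handle == handle)
			devs[i].responding = 1;
}

static void toy_sweep_unresponding(struct toy_device *devs, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!devs[i].present)
			continue;
		if (!devs[i].responding)
			devs[i].present = 0;	/* prune: no longer responding */
		else
			devs[i].responding = 0;	/* rearm for the next reset */
	}
}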
10333
10334static void
10335_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
10336 struct _sas_node *sas_expander, u16 handle)
10337{
10338 Mpi2ExpanderPage1_t expander_pg1;
10339 Mpi2ConfigReply_t mpi_reply;
10340 int i;
10341
10342 for (i = 0 ; i < sas_expander->num_phys ; i++) {
10343 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
10344 &expander_pg1, i, handle))) {
919d8a3f
JP
10345 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10346 __FILE__, __LINE__, __func__);
f92363d1
SR
10347 return;
10348 }
10349
10350 mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
10351 le16_to_cpu(expander_pg1.AttachedDevHandle), i,
e2f0cdf7
SR
10352 expander_pg1.NegotiatedLinkRate >> 4,
10353 sas_expander->port);
f92363d1
SR
10354 }
10355}
10356
10357/**
10358 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
10359 * @ioc: per adapter object
f92363d1
SR
10360 */
10361static void
10362_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
10363{
10364 Mpi2ExpanderPage0_t expander_pg0;
10365 Mpi2SasDevicePage0_t sas_device_pg0;
ec051e5a 10366 Mpi26PCIeDevicePage0_t pcie_device_pg0;
2111ba87
LJ
10367 Mpi2RaidVolPage1_t *volume_pg1;
10368 Mpi2RaidVolPage0_t *volume_pg0;
f92363d1
SR
10369 Mpi2RaidPhysDiskPage0_t pd_pg0;
10370 Mpi2EventIrConfigElement_t element;
10371 Mpi2ConfigReply_t mpi_reply;
e2f0cdf7 10372 u8 phys_disk_num, port_id;
f92363d1
SR
10373 u16 ioc_status;
10374 u16 handle, parent_handle;
10375 u64 sas_address;
10376 struct _sas_device *sas_device;
ec051e5a 10377 struct _pcie_device *pcie_device;
f92363d1
SR
10378 struct _sas_node *expander_device;
10379 static struct _raid_device *raid_device;
10380 u8 retry_count;
10381 unsigned long flags;
10382
2111ba87
LJ
10383 volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL);
10384 if (!volume_pg0)
10385 return;
10386
10387 volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL);
10388 if (!volume_pg1) {
10389 kfree(volume_pg0);
10390 return;
10391 }
10392
919d8a3f 10393 ioc_info(ioc, "scan devices: start\n");
f92363d1
SR
10394
10395 _scsih_sas_host_refresh(ioc);
10396
919d8a3f 10397 ioc_info(ioc, "\tscan devices: expanders start\n");
f92363d1
SR
10398
10399 /* expanders */
10400 handle = 0xFFFF;
10401 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10402 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10403 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10404 MPI2_IOCSTATUS_MASK;
f92363d1 10405 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
10406 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10407 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
f92363d1
SR
10408 break;
10409 }
10410 handle = le16_to_cpu(expander_pg0.DevHandle);
10411 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7d310f24 10412 port_id = expander_pg0.PhysicalPort;
f92363d1 10413 expander_device = mpt3sas_scsih_expander_find_by_sas_address(
7d310f24 10414 ioc, le64_to_cpu(expander_pg0.SASAddress),
34b0a785 10415 mpt3sas_get_port_by_id(ioc, port_id, 0));
f92363d1
SR
10416 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10417 if (expander_device)
10418 _scsih_refresh_expander_links(ioc, expander_device,
10419 handle);
10420 else {
919d8a3f
JP
10421 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10422 handle,
10423 (u64)le64_to_cpu(expander_pg0.SASAddress));
f92363d1 10424 _scsih_expander_add(ioc, handle);
919d8a3f
JP
10425 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10426 handle,
10427 (u64)le64_to_cpu(expander_pg0.SASAddress));
f92363d1
SR
10428 }
10429 }
10430
919d8a3f 10431 ioc_info(ioc, "\tscan devices: expanders complete\n");
f92363d1
SR
10432
10433 if (!ioc->ir_firmware)
10434 goto skip_to_sas;
10435
919d8a3f 10436 ioc_info(ioc, "\tscan devices: phys disk start\n");
f92363d1
SR
10437
10438 /* phys disk */
10439 phys_disk_num = 0xFF;
10440 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10441 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10442 phys_disk_num))) {
10443 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10444 MPI2_IOCSTATUS_MASK;
f92363d1 10445 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
10446 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10447 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
f92363d1
SR
10448 break;
10449 }
10450 phys_disk_num = pd_pg0.PhysDiskNum;
10451 handle = le16_to_cpu(pd_pg0.DevHandle);
d1cb5e49
SR
10452 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
10453 if (sas_device) {
10454 sas_device_put(sas_device);
f92363d1 10455 continue;
d1cb5e49 10456 }
f92363d1
SR
10457 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10458 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
10459 handle) != 0)
10460 continue;
10461 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10462 MPI2_IOCSTATUS_MASK;
10463 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
10464 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
10465 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
f92363d1
SR
10466 break;
10467 }
10468 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10469 if (!_scsih_get_sas_address(ioc, parent_handle,
10470 &sas_address)) {
919d8a3f
JP
10471 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10472 handle,
10473 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
e2f0cdf7 10474 port_id = sas_device_pg0.PhysicalPort;
f92363d1
SR
10475 mpt3sas_transport_update_links(ioc, sas_address,
10476 handle, sas_device_pg0.PhyNum,
e2f0cdf7 10477 MPI2_SAS_NEG_LINK_RATE_1_5,
34b0a785 10478 mpt3sas_get_port_by_id(ioc, port_id, 0));
f92363d1
SR
10479 set_bit(handle, ioc->pd_handles);
10480 retry_count = 0;
10481 /* This will retry adding the end device.
10482 * _scsih_add_device() will decide on retries and
10483 * return "1" when it should be retried
10484 */
10485 while (_scsih_add_device(ioc, handle, retry_count++,
10486 1)) {
10487 ssleep(1);
10488 }
919d8a3f
JP
10489 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10490 handle,
10491 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
f92363d1
SR
10492 }
10493 }
10494
919d8a3f 10495 ioc_info(ioc, "\tscan devices: phys disk complete\n");
f92363d1 10496
919d8a3f 10497 ioc_info(ioc, "\tscan devices: volumes start\n");
f92363d1
SR
10498
10499 /* volumes */
10500 handle = 0xFFFF;
10501 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
2111ba87 10502 volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
f92363d1
SR
10503 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10504 MPI2_IOCSTATUS_MASK;
f92363d1 10505 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
10506 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10507 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
f92363d1
SR
10508 break;
10509 }
2111ba87 10510 handle = le16_to_cpu(volume_pg1->DevHandle);
f92363d1
SR
10511 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10512 raid_device = _scsih_raid_device_find_by_wwid(ioc,
2111ba87 10513 le64_to_cpu(volume_pg1->WWID));
f92363d1
SR
10514 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10515 if (raid_device)
10516 continue;
10517 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
2111ba87 10518 volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
f92363d1
SR
10519 sizeof(Mpi2RaidVolPage0_t)))
10520 continue;
10521 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10522 MPI2_IOCSTATUS_MASK;
10523 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
10524 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10525 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
f92363d1
SR
10526 break;
10527 }
2111ba87
LJ
10528 if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10529 volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10530 volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
f92363d1
SR
10531 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
10532 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
2111ba87 10533 element.VolDevHandle = volume_pg1->DevHandle;
919d8a3f 10534 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
2111ba87 10535 volume_pg1->DevHandle);
f92363d1 10536 _scsih_sas_volume_add(ioc, &element);
919d8a3f 10537 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
2111ba87 10538 volume_pg1->DevHandle);
f92363d1
SR
10539 }
10540 }
10541
919d8a3f 10542 ioc_info(ioc, "\tscan devices: volumes complete\n");
f92363d1
SR
10543
10544 skip_to_sas:
10545
919d8a3f 10546 ioc_info(ioc, "\tscan devices: end devices start\n");
f92363d1
SR
10547
10548 /* sas devices */
10549 handle = 0xFFFF;
10550 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10551 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10552 handle))) {
10553 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10554 MPI2_IOCSTATUS_MASK;
f92363d1 10555 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
10556 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10557 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
f92363d1
SR
10558 break;
10559 }
10560 handle = le16_to_cpu(sas_device_pg0.DevHandle);
10561 if (!(_scsih_is_end_device(
10562 le32_to_cpu(sas_device_pg0.DeviceInfo))))
10563 continue;
e2f0cdf7 10564 port_id = sas_device_pg0.PhysicalPort;
d1cb5e49 10565 sas_device = mpt3sas_get_sdev_by_addr(ioc,
7d310f24 10566 le64_to_cpu(sas_device_pg0.SASAddress),
34b0a785 10567 mpt3sas_get_port_by_id(ioc, port_id, 0));
d1cb5e49
SR
10568 if (sas_device) {
10569 sas_device_put(sas_device);
f92363d1 10570 continue;
d1cb5e49 10571 }
f92363d1
SR
10572 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10573 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
919d8a3f
JP
10574 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10575 handle,
10576 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
f92363d1 10577 mpt3sas_transport_update_links(ioc, sas_address, handle,
e2f0cdf7 10578 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
34b0a785 10579 mpt3sas_get_port_by_id(ioc, port_id, 0));
f92363d1
SR
10580 retry_count = 0;
10581 /* This will retry adding the end device.
10582 * _scsih_add_device() will decide on retries and
10583 * return "1" when it should be retried
10584 */
10585 while (_scsih_add_device(ioc, handle, retry_count++,
10586 0)) {
10587 ssleep(1);
10588 }
919d8a3f
JP
10589 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10590 handle,
10591 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
f92363d1
SR
10592 }
10593 }
919d8a3f
JP
10594 ioc_info(ioc, "\tscan devices: end devices complete\n");
10595 ioc_info(ioc, "\tscan devices: pcie end devices start\n");
ec051e5a
SPS
10596
10597 /* pcie devices */
10598 handle = 0xFFFF;
10599 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
10600 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10601 handle))) {
10602 ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
10603 & MPI2_IOCSTATUS_MASK;
10604 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
10605 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10606 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
ec051e5a
SPS
10607 break;
10608 }
10609 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
5bb309db 10610 if (!(_scsih_is_nvme_pciescsi_device(
ec051e5a
SPS
10611 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
10612 continue;
10613 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
10614 le64_to_cpu(pcie_device_pg0.WWID));
10615 if (pcie_device) {
10616 pcie_device_put(pcie_device);
10617 continue;
10618 }
10619 retry_count = 0;
10620 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
10621 _scsih_pcie_add_device(ioc, handle);
f92363d1 10622
919d8a3f
JP
10623 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
10624 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
ec051e5a 10625 }
2111ba87
LJ
10626
10627 kfree(volume_pg0);
10628 kfree(volume_pg1);
10629
919d8a3f
JP
 10630 ioc_info(ioc, "\tscan devices: pcie end devices complete\n");
10631 ioc_info(ioc, "scan devices: complete\n");
f92363d1 10632}
c7a35705 10633
f92363d1 10634/**
a8d548b0 10635 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
f92363d1 10636 * @ioc: per adapter object
f92363d1
SR
10637 *
10638 * The handler for doing any required cleanup or initialization.
c7a35705
BVA
10639 */
10640void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
10641{
919d8a3f 10642 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
c7a35705
BVA
10643}
10644
10645/**
36c6c7f7
SR
10646 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
10647 * scsi & tm cmds.
c7a35705 10648 * @ioc: per adapter object
f92363d1 10649 *
c7a35705 10650 * The handler for clearing outstanding SCSI and task management commands during reset.
f92363d1
SR
10651 */
10652void
36c6c7f7 10653mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
f92363d1 10654{
36c6c7f7
SR
10655 dtmprintk(ioc,
10656 ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
c7a35705
BVA
10657 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
10658 ioc->scsih_cmds.status |= MPT3_CMD_RESET;
10659 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
10660 complete(&ioc->scsih_cmds.done);
10661 }
10662 if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
10663 ioc->tm_cmds.status |= MPT3_CMD_RESET;
10664 mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
10665 complete(&ioc->tm_cmds.done);
10666 }
f92363d1 10667
c7a35705
BVA
10668 memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
10669 memset(ioc->device_remove_in_progress, 0,
10670 ioc->device_remove_in_progress_sz);
10671 _scsih_fw_event_cleanup_queue(ioc);
10672 _scsih_flush_running_cmds(ioc);
10673}
10674
10675/**
a8d548b0 10676 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
c7a35705
BVA
10677 * @ioc: per adapter object
10678 *
10679 * The handler for doing any required cleanup or initialization.
10680 */
10681void
10682mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
10683{
919d8a3f 10684 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
a0815c45 10685 if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
324c122f
SR
10686 if (ioc->multipath_on_hba) {
10687 _scsih_sas_port_refresh(ioc);
10688 _scsih_update_vphys_after_reset(ioc);
10689 }
c7a35705
BVA
10690 _scsih_prep_device_scan(ioc);
10691 _scsih_create_enclosure_list_after_reset(ioc);
10692 _scsih_search_responding_sas_devices(ioc);
10693 _scsih_search_responding_pcie_devices(ioc);
10694 _scsih_search_responding_raid_devices(ioc);
10695 _scsih_search_responding_expanders(ioc);
10696 _scsih_error_recovery_delete_devices(ioc);
f92363d1
SR
10697 }
10698}
10699
10700/**
10701 * _mpt3sas_fw_work - delayed task for processing firmware events
10702 * @ioc: per adapter object
10703 * @fw_event: The fw_event_work object
10704 * Context: user.
f92363d1
SR
10705 */
10706static void
10707_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
10708{
9e73ed2e 10709 ioc->current_event = fw_event;
146b16c8
SR
10710 _scsih_fw_event_del_from_list(ioc, fw_event);
10711
f92363d1 10712 /* the queue is being flushed so ignore this event */
146b16c8
SR
10713 if (ioc->remove_host || ioc->pci_error_recovery) {
10714 fw_event_work_put(fw_event);
9e73ed2e 10715 ioc->current_event = NULL;
f92363d1
SR
10716 return;
10717 }
10718
10719 switch (fw_event->event) {
10720 case MPT3SAS_PROCESS_TRIGGER_DIAG:
35b62362
JL
10721 mpt3sas_process_trigger_data(ioc,
10722 (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
10723 fw_event->event_data);
f92363d1
SR
10724 break;
10725 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
146b16c8
SR
10726 while (scsi_host_in_recovery(ioc->shost) ||
10727 ioc->shost_recovery) {
10728 /*
9e73ed2e
SP
10729 * If we're unloading or cancelling the work, bail.
10730 * Otherwise, this can become an infinite loop.
146b16c8 10731 */
9e73ed2e 10732 if (ioc->remove_host || ioc->fw_events_cleanup)
146b16c8 10733 goto out;
f92363d1 10734 ssleep(1);
146b16c8 10735 }
3075ac49 10736 _scsih_remove_unresponding_devices(ioc);
ffa381d6 10737 _scsih_del_dirty_vphy(ioc);
a5e99fda 10738 _scsih_del_dirty_port_entries(ioc);
787f2448
SP
10739 if (ioc->is_gen35_ioc)
10740 _scsih_update_device_qdepth(ioc);
f92363d1 10741 _scsih_scan_for_devices_after_reset(ioc);
a0815c45
SP
10742 /*
10743 * If diag reset has occurred during the driver load
 10744 * then the driver has to complete the driver load operation
 10745 * by executing the following items:
 10746 * - register the devices from sas_device_init_list to SML,
 10747 * - clear the is_driver_loading flag,
 10748 * - start the watchdog thread.
 10749 * In the normal driver load path, the above is taken care of when the
 10750 * driver executes scsih_scan_finished().
10751 */
10752 if (ioc->is_driver_loading)
10753 _scsih_complete_devices_scanning(ioc);
d3f623ae 10754 _scsih_set_nvme_max_shutdown_latency(ioc);
f92363d1
SR
10755 break;
10756 case MPT3SAS_PORT_ENABLE_COMPLETE:
10757 ioc->start_scan = 0;
199fd79a 10758 if (missing_delay[0] != -1 && missing_delay[1] != -1)
f92363d1
SR
10759 mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
10760 missing_delay[1]);
919d8a3f
JP
10761 dewtprintk(ioc,
10762 ioc_info(ioc, "port enable: complete from worker thread\n"));
f92363d1 10763 break;
0f624c39
SR
10764 case MPT3SAS_TURN_ON_PFA_LED:
10765 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
f92363d1
SR
10766 break;
10767 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10768 _scsih_sas_topology_change_event(ioc, fw_event);
10769 break;
10770 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
54d74e6b
SP
10771 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
10772 _scsih_sas_device_status_change_event_debug(ioc,
10773 (Mpi2EventDataSasDeviceStatusChange_t *)
10774 fw_event->event_data);
f92363d1
SR
10775 break;
10776 case MPI2_EVENT_SAS_DISCOVERY:
10777 _scsih_sas_discovery_event(ioc, fw_event);
10778 break;
95540b8e
C
10779 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10780 _scsih_sas_device_discovery_error_event(ioc, fw_event);
10781 break;
f92363d1
SR
10782 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10783 _scsih_sas_broadcast_primitive_event(ioc, fw_event);
10784 break;
10785 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10786 _scsih_sas_enclosure_dev_status_change_event(ioc,
10787 fw_event);
10788 break;
10789 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10790 _scsih_sas_ir_config_change_event(ioc, fw_event);
10791 break;
10792 case MPI2_EVENT_IR_VOLUME:
10793 _scsih_sas_ir_volume_event(ioc, fw_event);
10794 break;
10795 case MPI2_EVENT_IR_PHYSICAL_DISK:
10796 _scsih_sas_ir_physical_disk_event(ioc, fw_event);
10797 break;
10798 case MPI2_EVENT_IR_OPERATION_STATUS:
10799 _scsih_sas_ir_operation_status_event(ioc, fw_event);
10800 break;
4318c734
SPS
10801 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10802 _scsih_pcie_device_status_change_event(ioc, fw_event);
10803 break;
10804 case MPI2_EVENT_PCIE_ENUMERATION:
10805 _scsih_pcie_enumeration_event(ioc, fw_event);
10806 break;
10807 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10808 _scsih_pcie_topology_change_event(ioc, fw_event);
9e73ed2e 10809 ioc->current_event = NULL;
04c260bd 10810 return;
f92363d1 10811 }
146b16c8
SR
10812out:
10813 fw_event_work_put(fw_event);
9e73ed2e 10814 ioc->current_event = NULL;
f92363d1
SR
10815}
10816
10817/**
 10818 * _firmware_event_work - work queue callback for firmware events
f92363d1
SR
10819 * @work: The fw_event_work object
10820 * Context: user.
10821 *
 10822 * Wrapper for the work thread handling firmware events.
f92363d1
SR
10823 */
10824
10825static void
10826_firmware_event_work(struct work_struct *work)
10827{
10828 struct fw_event_work *fw_event = container_of(work,
10829 struct fw_event_work, work);
10830
10831 _mpt3sas_fw_work(fw_event->ioc, fw_event);
10832}
10833
10834/**
10835 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
10836 * @ioc: per adapter object
10837 * @msix_index: MSIX table index supplied by the OS
10838 * @reply: reply message frame(lower 32bit addr)
10839 * Context: interrupt.
10840 *
10841 * This function merely adds a new work task into ioc->firmware_event_thread.
10842 * The tasks are worked from _firmware_event_work in user context.
10843 *
4beb4867
BVA
10844 * Return: 1 meaning mf should be freed from _base_interrupt
10845 * 0 means the mf is freed from this function.
f92363d1
SR
10846 */
10847u8
10848mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
10849 u32 reply)
10850{
10851 struct fw_event_work *fw_event;
10852 Mpi2EventNotificationReply_t *mpi_reply;
10853 u16 event;
10854 u16 sz;
a470a51c 10855 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
f92363d1 10856
79eb96d6
C
10857 /* events turned off due to host reset */
10858 if (ioc->pci_error_recovery)
f92363d1
SR
10859 return 1;
10860
10861 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
10862
10863 if (unlikely(!mpi_reply)) {
919d8a3f
JP
10864 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
10865 __FILE__, __LINE__, __func__);
f92363d1
SR
10866 return 1;
10867 }
10868
10869 event = le16_to_cpu(mpi_reply->Event);
10870
10871 if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
10872 mpt3sas_trigger_event(ioc, event, 0);
10873
10874 switch (event) {
10875 /* handle these */
10876 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10877 {
10878 Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
10879 (Mpi2EventDataSasBroadcastPrimitive_t *)
10880 mpi_reply->EventData;
10881
10882 if (baen_data->Primitive !=
10883 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
10884 return 1;
10885
10886 if (ioc->broadcast_aen_busy) {
10887 ioc->broadcast_aen_pending++;
10888 return 1;
10889 } else
10890 ioc->broadcast_aen_busy = 1;
10891 break;
10892 }
10893
10894 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10895 _scsih_check_topo_delete_events(ioc,
10896 (Mpi2EventDataSasTopologyChangeList_t *)
10897 mpi_reply->EventData);
a0815c45
SP
10898 /*
 10899 * No need to add the topology change list
 10900 * event to the fw event work queue while a
 10901 * diag reset is going on, since during diag
 10902 * reset the driver scans the devices by
 10903 * reading SAS device page 0, not by
 10904 * processing the events.
10905 */
10906 if (ioc->shost_recovery)
10907 return 1;
f92363d1 10908 break;
4318c734
SPS
10909 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10910 _scsih_check_pcie_topo_remove_events(ioc,
10911 (Mpi26EventDataPCIeTopologyChangeList_t *)
10912 mpi_reply->EventData);
a0815c45
SP
10913 if (ioc->shost_recovery)
10914 return 1;
4318c734 10915 break;
f92363d1
SR
10916 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10917 _scsih_check_ir_config_unhide_events(ioc,
10918 (Mpi2EventDataIrConfigChangeList_t *)
10919 mpi_reply->EventData);
10920 break;
10921 case MPI2_EVENT_IR_VOLUME:
10922 _scsih_check_volume_delete_events(ioc,
10923 (Mpi2EventDataIrVolume_t *)
10924 mpi_reply->EventData);
10925 break;
7786ab6a
SR
10926 case MPI2_EVENT_LOG_ENTRY_ADDED:
10927 {
10928 Mpi2EventDataLogEntryAdded_t *log_entry;
10929 u32 *log_code;
10930
10931 if (!ioc->is_warpdrive)
10932 break;
10933
10934 log_entry = (Mpi2EventDataLogEntryAdded_t *)
10935 mpi_reply->EventData;
10936 log_code = (u32 *)log_entry->LogData;
f92363d1 10937
7786ab6a
SR
10938 if (le16_to_cpu(log_entry->LogEntryQualifier)
10939 != MPT2_WARPDRIVE_LOGENTRY)
10940 break;
10941
10942 switch (le32_to_cpu(*log_code)) {
10943 case MPT2_WARPDRIVE_LC_SSDT:
919d8a3f 10944 ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
7786ab6a
SR
10945 break;
10946 case MPT2_WARPDRIVE_LC_SSDLW:
919d8a3f 10947 ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
7786ab6a
SR
10948 break;
10949 case MPT2_WARPDRIVE_LC_SSDLF:
919d8a3f 10950 ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
7786ab6a
SR
10951 break;
10952 case MPT2_WARPDRIVE_LC_BRMF:
919d8a3f 10953 ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
7786ab6a
SR
10954 break;
10955 }
10956
10957 break;
10958 }
f92363d1 10959 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
54d74e6b
SP
10960 _scsih_sas_device_status_change_event(ioc,
10961 (Mpi2EventDataSasDeviceStatusChange_t *)
10962 mpi_reply->EventData);
10963 break;
f92363d1
SR
10964 case MPI2_EVENT_IR_OPERATION_STATUS:
10965 case MPI2_EVENT_SAS_DISCOVERY:
95540b8e 10966 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
f92363d1
SR
10967 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10968 case MPI2_EVENT_IR_PHYSICAL_DISK:
4318c734
SPS
10969 case MPI2_EVENT_PCIE_ENUMERATION:
10970 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
f92363d1
SR
10971 break;
10972
2d8ce8c9
SR
10973 case MPI2_EVENT_TEMP_THRESHOLD:
10974 _scsih_temp_threshold_events(ioc,
10975 (Mpi2EventDataTemperature_t *)
10976 mpi_reply->EventData);
10977 break;
a470a51c
C
10978 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
10979 ActiveCableEventData =
10980 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
6c44c0fe
C
10981 switch (ActiveCableEventData->ReasonCode) {
10982 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
919d8a3f
JP
10983 ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
10984 ActiveCableEventData->ReceptacleID);
b99b1993
SR
10985 pr_notice("cannot be powered and devices connected\n");
10986 pr_notice("to this active cable will not be seen\n");
10987 pr_notice("This active cable requires %d mW of power\n",
c0629d70
SR
10988 le32_to_cpu(
10989 ActiveCableEventData->ActiveCablePowerRequirement));
6c44c0fe
C
10990 break;
10991
10992 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
919d8a3f
JP
10993 ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
10994 ActiveCableEventData->ReceptacleID);
b99b1993
SR
10995 pr_notice(
10996 "is not running at optimal speed(12 Gb/s rate)\n");
6c44c0fe 10997 break;
7ebd67e0 10998 }
6c44c0fe 10999
a470a51c 11000 break;
2d8ce8c9 11001
f92363d1
SR
11002 default: /* ignore the rest */
11003 return 1;
11004 }
11005
f92363d1 11006 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
146b16c8 11007 fw_event = alloc_fw_event_work(sz);
35b62362 11008 if (!fw_event) {
919d8a3f
JP
11009 ioc_err(ioc, "failure at %s:%d/%s()!\n",
11010 __FILE__, __LINE__, __func__);
f92363d1
SR
11011 return 1;
11012 }
11013
11014 memcpy(fw_event->event_data, mpi_reply->EventData, sz);
11015 fw_event->ioc = ioc;
11016 fw_event->VF_ID = mpi_reply->VF_ID;
11017 fw_event->VP_ID = mpi_reply->VP_ID;
11018 fw_event->event = event;
11019 _scsih_fw_event_add(ioc, fw_event);
146b16c8 11020 fw_event_work_put(fw_event);
f92363d1
SR
11021 return 1;
11022}
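/*
 * A minimal sketch of the defer-to-process-context pattern used by the event
 * callback above: the interrupt-time handler only copies the event payload
 * and queues a work item, and everything that may sleep runs later in the
 * work handler.  struct toy_event, toy_event_worker and toy_queue_event are
 * illustrative stand-ins, not driver APIs.
 */
struct toy_event {
	struct work_struct work;
	u16 event;
	u8 payload[64];
};

static void toy_event_worker(struct work_struct *work)
{
	struct toy_event *evt = container_of(work, struct toy_event, work);

	/* process context: sleeping, config page reads, etc. are fine here */
	kfree(evt);
}

static int toy_queue_event(struct workqueue_struct *wq, u16 event,
		const void *data, size_t len)
{
	struct toy_event *evt = kzalloc(sizeof(*evt), GFP_ATOMIC);

	if (!evt)
		return -ENOMEM;		/* interrupt context: atomic alloc only */
	evt->event = event;
	memcpy(evt->payload, data, min(len, sizeof(evt->payload)));
	INIT_WORK(&evt->work, toy_event_worker);
	queue_work(wq, &evt->work);
	return 0;
}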
11023
f92363d1
SR
11024/**
11025 * _scsih_expander_node_remove - removing expander device from list.
11026 * @ioc: per adapter object
11027 * @sas_expander: the sas_device object
f92363d1
SR
11028 *
11029 * Removing object and freeing associated memory from the
11030 * ioc->sas_expander_list.
f92363d1
SR
11031 */
11032static void
11033_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
11034 struct _sas_node *sas_expander)
11035{
11036 struct _sas_port *mpt3sas_port, *next;
bbe3def3 11037 unsigned long flags;
f92363d1
SR
11038
11039 /* remove sibling ports attached to this expander */
11040 list_for_each_entry_safe(mpt3sas_port, next,
11041 &sas_expander->sas_port_list, port_list) {
11042 if (ioc->shost_recovery)
11043 return;
11044 if (mpt3sas_port->remote_identify.device_type ==
11045 SAS_END_DEVICE)
11046 mpt3sas_device_remove_by_sas_address(ioc,
7d310f24
SR
11047 mpt3sas_port->remote_identify.sas_address,
11048 mpt3sas_port->hba_port);
f92363d1
SR
11049 else if (mpt3sas_port->remote_identify.device_type ==
11050 SAS_EDGE_EXPANDER_DEVICE ||
11051 mpt3sas_port->remote_identify.device_type ==
11052 SAS_FANOUT_EXPANDER_DEVICE)
11053 mpt3sas_expander_remove(ioc,
7d310f24
SR
11054 mpt3sas_port->remote_identify.sas_address,
11055 mpt3sas_port->hba_port);
f92363d1
SR
11056 }
11057
11058 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
e2f0cdf7 11059 sas_expander->sas_address_parent, sas_expander->port);
f92363d1 11060
7d310f24
SR
11061 ioc_info(ioc,
11062 "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
11063 sas_expander->handle, (unsigned long long)
11064 sas_expander->sas_address,
11065 sas_expander->port->port_id);
f92363d1 11066
bbe3def3
SR
11067 spin_lock_irqsave(&ioc->sas_node_lock, flags);
11068 list_del(&sas_expander->list);
11069 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
11070
f92363d1
SR
11071 kfree(sas_expander->phy);
11072 kfree(sas_expander);
11073}
11074
d3f623ae
SR
11075/**
11076 * _scsih_nvme_shutdown - NVMe shutdown notification
11077 * @ioc: per adapter object
11078 *
11079 * Sending IoUnitControl request with shutdown operation code to alert IOC that
11080 * the host system is shutting down so that IOC can issue NVMe shutdown to
11081 * NVMe drives attached to it.
11082 */
11083static void
11084_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
11085{
11086 Mpi26IoUnitControlRequest_t *mpi_request;
11087 Mpi26IoUnitControlReply_t *mpi_reply;
11088 u16 smid;
11089
11090 /* are there any NVMe devices ? */
11091 if (list_empty(&ioc->pcie_device_list))
11092 return;
11093
11094 mutex_lock(&ioc->scsih_cmds.mutex);
11095
11096 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
11097 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
11098 goto out;
11099 }
11100
11101 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11102
11103 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11104 if (!smid) {
11105 ioc_err(ioc,
11106 "%s: failed obtaining a smid\n", __func__);
11107 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11108 goto out;
11109 }
11110
11111 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11112 ioc->scsih_cmds.smid = smid;
11113 memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
11114 mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
11115 mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
11116
11117 init_completion(&ioc->scsih_cmds.done);
11118 ioc->put_smid_default(ioc, smid);
11119 /* Wait for max_shutdown_latency seconds */
11120 ioc_info(ioc,
11121 "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
11122 ioc->max_shutdown_latency);
11123 wait_for_completion_timeout(&ioc->scsih_cmds.done,
11124 ioc->max_shutdown_latency*HZ);
11125
11126 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11127 ioc_err(ioc, "%s: timeout\n", __func__);
11128 goto out;
11129 }
11130
11131 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11132 mpi_reply = ioc->scsih_cmds.reply;
11133 ioc_info(ioc, "Io Unit Control shutdown (complete):"
11134 "ioc_status(0x%04x), loginfo(0x%08x)\n",
11135 le16_to_cpu(mpi_reply->IOCStatus),
11136 le32_to_cpu(mpi_reply->IOCLogInfo));
11137 }
11138 out:
11139 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11140 mutex_unlock(&ioc->scsih_cmds.mutex);
11141}
11142
11143
f92363d1
SR
11144/**
11145 * _scsih_ir_shutdown - IR shutdown notification
11146 * @ioc: per adapter object
11147 *
11148 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
11149 * the host system is shutting down.
f92363d1
SR
11150 */
11151static void
11152_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
11153{
11154 Mpi2RaidActionRequest_t *mpi_request;
11155 Mpi2RaidActionReply_t *mpi_reply;
11156 u16 smid;
11157
11158 /* is IR firmware build loaded ? */
11159 if (!ioc->ir_firmware)
11160 return;
11161
11162 /* are there any volumes ? */
11163 if (list_empty(&ioc->raid_device_list))
11164 return;
11165
11166 mutex_lock(&ioc->scsih_cmds.mutex);
11167
11168 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
919d8a3f 11169 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
f92363d1
SR
11170 goto out;
11171 }
11172 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11173
11174 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11175 if (!smid) {
919d8a3f 11176 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
f92363d1
SR
11177 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11178 goto out;
11179 }
11180
11181 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11182 ioc->scsih_cmds.smid = smid;
11183 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
11184
11185 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
11186 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
11187
7786ab6a 11188 if (!ioc->hide_ir_msg)
919d8a3f 11189 ioc_info(ioc, "IR shutdown (sending)\n");
f92363d1 11190 init_completion(&ioc->scsih_cmds.done);
078a4cc1 11191 ioc->put_smid_default(ioc, smid);
f92363d1
SR
11192 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
11193
11194 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
919d8a3f 11195 ioc_err(ioc, "%s: timeout\n", __func__);
f92363d1
SR
11196 goto out;
11197 }
11198
11199 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11200 mpi_reply = ioc->scsih_cmds.reply;
7786ab6a 11201 if (!ioc->hide_ir_msg)
919d8a3f
JP
11202 ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
11203 le16_to_cpu(mpi_reply->IOCStatus),
11204 le32_to_cpu(mpi_reply->IOCLogInfo));
f92363d1
SR
11205 }
11206
11207 out:
11208 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11209 mutex_unlock(&ioc->scsih_cmds.mutex);
11210}
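/*
 * A minimal sketch of the single-slot internal command pattern shared by
 * _scsih_nvme_shutdown() and _scsih_ir_shutdown() above: a mutex serialises
 * users of the slot, a status word tracks the request life cycle, and a
 * completion lets the reply handler wake the sleeping caller.
 * toy_internal_cmd and toy_issue_internal_cmd are illustrative stand-ins,
 * not driver APIs.
 */
struct toy_internal_cmd {
	struct mutex mutex;
	struct completion done;
	u16 status;		/* MPT3_CMD_NOT_USED/PENDING/COMPLETE */
};

static int toy_issue_internal_cmd(struct toy_internal_cmd *cmd,
		void (*fire)(void *arg), void *arg, unsigned long timeout_s)
{
	int rc = 0;

	mutex_lock(&cmd->mutex);
	if (cmd->status != MPT3_CMD_NOT_USED) {
		rc = -EAGAIN;			/* slot already in use */
		goto out;
	}
	cmd->status = MPT3_CMD_PENDING;
	init_completion(&cmd->done);
	fire(arg);				/* e.g. post the message frame */
	wait_for_completion_timeout(&cmd->done, timeout_s * HZ);
	if (!(cmd->status & MPT3_CMD_COMPLETE))
		rc = -ETIMEDOUT;		/* reply never arrived */
	cmd->status = MPT3_CMD_NOT_USED;
out:
	mutex_unlock(&cmd->mutex);
	return rc;
}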
11211
f38c43a0
SR
11212/**
11213 * _scsih_get_shost_and_ioc - get shost and ioc
11214 * and verify whether they are NULL or not
11215 * @pdev: PCI device struct
11216 * @shost: address of scsi host pointer
11217 * @ioc: address of HBA adapter pointer
11218 *
 11219 * Return zero if *shost and *ioc are not NULL, otherwise return an error number.
11220 */
11221static int
11222_scsih_get_shost_and_ioc(struct pci_dev *pdev,
11223 struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
11224{
11225 *shost = pci_get_drvdata(pdev);
11226 if (*shost == NULL) {
11227 dev_err(&pdev->dev, "pdev's driver data is null\n");
11228 return -ENXIO;
11229 }
11230
11231 *ioc = shost_priv(*shost);
11232 if (*ioc == NULL) {
11233 dev_err(&pdev->dev, "shost's private data is null\n");
11234 return -ENXIO;
11235 }
11236
11237 return 0;
11238}
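/*
 * A minimal sketch of how a PCI callback is expected to use the helper above:
 * resolve both pointers first and bail out if either is gone, so the callback
 * never touches a half-torn-down host.  toy_pci_handler is an illustrative
 * stand-in for callbacks such as scsih_remove().
 */
static void toy_pci_handler(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;		/* driver data already gone, nothing to do */

	/* safe to dereference shost and ioc from here on */
}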
11239
f92363d1 11240/**
8a7e4c24 11241 * scsih_remove - detach and remove add host
f92363d1
SR
11242 * @pdev: PCI device struct
11243 *
11244 * Routine called when unloading the driver.
f92363d1 11245 */
8bbb1cf6 11246static void scsih_remove(struct pci_dev *pdev)
f92363d1 11247{
f38c43a0
SR
11248 struct Scsi_Host *shost;
11249 struct MPT3SAS_ADAPTER *ioc;
f92363d1
SR
11250 struct _sas_port *mpt3sas_port, *next_port;
11251 struct _raid_device *raid_device, *next;
11252 struct MPT3SAS_TARGET *sas_target_priv_data;
3075ac49 11253 struct _pcie_device *pcie_device, *pcienext;
f92363d1
SR
11254 struct workqueue_struct *wq;
11255 unsigned long flags;
2426f209 11256 Mpi2ConfigReply_t mpi_reply;
e238e71b 11257 struct hba_port *port, *port_next;
f92363d1 11258
f38c43a0
SR
11259 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11260 return;
11261
f92363d1 11262 ioc->remove_host = 1;
c666d3be 11263
432bc7ca
SR
11264 if (!pci_device_is_present(pdev)) {
11265 mpt3sas_base_pause_mq_polling(ioc);
cc41f11a 11266 _scsih_flush_running_cmds(ioc);
432bc7ca 11267 }
c666d3be 11268
f92363d1
SR
11269 _scsih_fw_event_cleanup_queue(ioc);
11270
11271 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11272 wq = ioc->firmware_event_thread;
11273 ioc->firmware_event_thread = NULL;
11274 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11275 if (wq)
11276 destroy_workqueue(wq);
2426f209
SP
11277 /*
 11278 * Copy back the unmodified ioc page1 so that, on the next driver load,
 11279 * the current modifications to ioc page1 won't take effect.
11280 */
11281 if (ioc->is_aero_ioc)
11282 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11283 &ioc->ioc_pg1_copy);
f92363d1
SR
11284 /* release all the volumes */
11285 _scsih_ir_shutdown(ioc);
2b01b293 11286 mpt3sas_destroy_debugfs(ioc);
dc730212 11287 sas_remove_host(shost);
f92363d1
SR
11288 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
11289 list) {
11290 if (raid_device->starget) {
11291 sas_target_priv_data =
11292 raid_device->starget->hostdata;
11293 sas_target_priv_data->deleted = 1;
11294 scsi_remove_target(&raid_device->starget->dev);
11295 }
919d8a3f
JP
11296 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
11297 raid_device->handle, (u64)raid_device->wwid);
f92363d1
SR
11298 _scsih_raid_device_remove(ioc, raid_device);
11299 }
3075ac49
SPS
11300 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
11301 list) {
11302 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
11303 list_del_init(&pcie_device->list);
11304 pcie_device_put(pcie_device);
11305 }
f92363d1
SR
11306
11307 /* free ports attached to the sas_host */
11308 list_for_each_entry_safe(mpt3sas_port, next_port,
11309 &ioc->sas_hba.sas_port_list, port_list) {
11310 if (mpt3sas_port->remote_identify.device_type ==
11311 SAS_END_DEVICE)
11312 mpt3sas_device_remove_by_sas_address(ioc,
7d310f24
SR
11313 mpt3sas_port->remote_identify.sas_address,
11314 mpt3sas_port->hba_port);
f92363d1
SR
11315 else if (mpt3sas_port->remote_identify.device_type ==
11316 SAS_EDGE_EXPANDER_DEVICE ||
11317 mpt3sas_port->remote_identify.device_type ==
11318 SAS_FANOUT_EXPANDER_DEVICE)
11319 mpt3sas_expander_remove(ioc,
7d310f24
SR
11320 mpt3sas_port->remote_identify.sas_address,
11321 mpt3sas_port->hba_port);
f92363d1
SR
11322 }
11323
e238e71b
SR
11324 list_for_each_entry_safe(port, port_next,
11325 &ioc->port_table_list, list) {
11326 list_del(&port->list);
11327 kfree(port);
11328 }
11329
f92363d1
SR
11330 /* free phys attached to the sas_host */
11331 if (ioc->sas_hba.num_phys) {
11332 kfree(ioc->sas_hba.phy);
11333 ioc->sas_hba.phy = NULL;
11334 ioc->sas_hba.num_phys = 0;
11335 }
11336
f92363d1 11337 mpt3sas_base_detach(ioc);
08c4d550 11338 spin_lock(&gioc_lock);
f92363d1 11339 list_del(&ioc->list);
08c4d550 11340 spin_unlock(&gioc_lock);
f92363d1
SR
11341 scsi_host_put(shost);
11342}
11343
11344/**
8a7e4c24 11345 * scsih_shutdown - routine call during system shutdown
f92363d1 11346 * @pdev: PCI device struct
f92363d1 11347 */
8bbb1cf6 11348static void
8a7e4c24 11349scsih_shutdown(struct pci_dev *pdev)
f92363d1 11350{
f38c43a0
SR
11351 struct Scsi_Host *shost;
11352 struct MPT3SAS_ADAPTER *ioc;
f92363d1
SR
11353 struct workqueue_struct *wq;
11354 unsigned long flags;
2426f209 11355 Mpi2ConfigReply_t mpi_reply;
f92363d1 11356
f38c43a0
SR
11357 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11358 return;
11359
f92363d1 11360 ioc->remove_host = 1;
c666d3be 11361
432bc7ca
SR
11362 if (!pci_device_is_present(pdev)) {
11363 mpt3sas_base_pause_mq_polling(ioc);
cc41f11a 11364 _scsih_flush_running_cmds(ioc);
432bc7ca 11365 }
c666d3be 11366
f92363d1
SR
11367 _scsih_fw_event_cleanup_queue(ioc);
11368
11369 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11370 wq = ioc->firmware_event_thread;
11371 ioc->firmware_event_thread = NULL;
11372 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11373 if (wq)
11374 destroy_workqueue(wq);
2426f209
SP
11375 /*
 11376 * Copy back the unmodified ioc page1 so that, on the next driver load,
 11377 * the current modifications to ioc page1 won't take effect.
11378 */
11379 if (ioc->is_aero_ioc)
11380 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11381 &ioc->ioc_pg1_copy);
f92363d1
SR
11382
11383 _scsih_ir_shutdown(ioc);
d3f623ae 11384 _scsih_nvme_shutdown(ioc);
fae21608
SR
11385 mpt3sas_base_mask_interrupts(ioc);
11386 ioc->shost_recovery = 1;
11387 mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
11388 ioc->shost_recovery = 0;
11389 mpt3sas_base_free_irq(ioc);
11390 mpt3sas_base_disable_msix(ioc);
f92363d1
SR
11391}
11392
11393
11394/**
11395 * _scsih_probe_boot_devices - reports 1st device
11396 * @ioc: per adapter object
11397 *
11398 * If specified in bios page 2, this routine reports the 1st
 11399 * device to scsi-ml or the sas transport for persistent boot device
11400 * purposes. Please refer to function _scsih_determine_boot_device()
11401 */
11402static void
11403_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
11404{
d88e1eab 11405 u32 channel;
f92363d1
SR
11406 void *device;
11407 struct _sas_device *sas_device;
11408 struct _raid_device *raid_device;
d88e1eab 11409 struct _pcie_device *pcie_device;
f92363d1
SR
11410 u16 handle;
11411 u64 sas_address_parent;
11412 u64 sas_address;
11413 unsigned long flags;
11414 int rc;
d88e1eab 11415 int tid;
e2f0cdf7 11416 struct hba_port *port;
f92363d1
SR
11417
11418 /* no Bios, return immediately */
11419 if (!ioc->bios_pg3.BiosVersion)
11420 return;
11421
11422 device = NULL;
f92363d1
SR
11423 if (ioc->req_boot_device.device) {
11424 device = ioc->req_boot_device.device;
d88e1eab 11425 channel = ioc->req_boot_device.channel;
f92363d1
SR
11426 } else if (ioc->req_alt_boot_device.device) {
11427 device = ioc->req_alt_boot_device.device;
d88e1eab 11428 channel = ioc->req_alt_boot_device.channel;
f92363d1
SR
11429 } else if (ioc->current_boot_device.device) {
11430 device = ioc->current_boot_device.device;
d88e1eab 11431 channel = ioc->current_boot_device.channel;
f92363d1
SR
11432 }
11433
11434 if (!device)
11435 return;
11436
d88e1eab 11437 if (channel == RAID_CHANNEL) {
f92363d1 11438 raid_device = device;
a0815c45
SP
11439 /*
11440 * If this boot vd is already registered with SML then
11441 * no need to register it again as part of device scanning
11442 * after diag reset during driver load operation.
11443 */
11444 if (raid_device->starget)
11445 return;
f92363d1
SR
11446 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11447 raid_device->id, 0);
11448 if (rc)
11449 _scsih_raid_device_remove(ioc, raid_device);
d88e1eab 11450 } else if (channel == PCIE_CHANNEL) {
d88e1eab 11451 pcie_device = device;
a0815c45
SP
11452 /*
11453 * If this boot NVMe device is already registered with SML then
11454 * no need to register it again as part of device scanning
11455 * after diag reset during driver load operation.
11456 */
11457 if (pcie_device->starget)
11458 return;
11459 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
d88e1eab
SPS
11460 tid = pcie_device->id;
11461 list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
11462 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11463 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
11464 if (rc)
11465 _scsih_pcie_device_remove(ioc, pcie_device);
f92363d1 11466 } else {
f92363d1 11467 sas_device = device;
a0815c45
SP
11468 /*
11469 * If this boot sas/sata device is already registered with SML
11470 * then no need to register it again as part of device scanning
11471 * after diag reset during driver load operation.
11472 */
11473 if (sas_device->starget)
11474 return;
11475 spin_lock_irqsave(&ioc->sas_device_lock, flags);
f92363d1
SR
11476 handle = sas_device->handle;
11477 sas_address_parent = sas_device->sas_address_parent;
11478 sas_address = sas_device->sas_address;
e2f0cdf7 11479 port = sas_device->port;
f92363d1
SR
11480 list_move_tail(&sas_device->list, &ioc->sas_device_list);
11481 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11482
7786ab6a
SR
11483 if (ioc->hide_drives)
11484 return;
e2f0cdf7
SR
11485
11486 if (!port)
11487 return;
11488
f92363d1 11489 if (!mpt3sas_transport_port_add(ioc, handle,
e2f0cdf7 11490 sas_address_parent, port)) {
f92363d1
SR
11491 _scsih_sas_device_remove(ioc, sas_device);
11492 } else if (!sas_device->starget) {
f5edbe77
SR
11493 if (!ioc->is_driver_loading) {
11494 mpt3sas_transport_port_remove(ioc,
11495 sas_address,
e2f0cdf7 11496 sas_address_parent, port);
f5edbe77
SR
11497 _scsih_sas_device_remove(ioc, sas_device);
11498 }
f92363d1
SR
11499 }
11500 }
11501}
11502
11503/**
11504 * _scsih_probe_raid - reporting raid volumes to scsi-ml
11505 * @ioc: per adapter object
11506 *
11507 * Called during initial loading of the driver.
11508 */
11509static void
11510_scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
11511{
11512 struct _raid_device *raid_device, *raid_next;
11513 int rc;
11514
11515 list_for_each_entry_safe(raid_device, raid_next,
11516 &ioc->raid_device_list, list) {
11517 if (raid_device->starget)
11518 continue;
11519 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11520 raid_device->id, 0);
11521 if (rc)
11522 _scsih_raid_device_remove(ioc, raid_device);
11523 }
11524}
11525
d1cb5e49
SR
11526static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
11527{
11528 struct _sas_device *sas_device = NULL;
11529 unsigned long flags;
11530
11531 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11532 if (!list_empty(&ioc->sas_device_init_list)) {
11533 sas_device = list_first_entry(&ioc->sas_device_init_list,
11534 struct _sas_device, list);
11535 sas_device_get(sas_device);
11536 }
11537 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11538
11539 return sas_device;
11540}
11541
11542static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11543 struct _sas_device *sas_device)
11544{
11545 unsigned long flags;
11546
11547 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11548
11549 /*
11550 * Since we dropped the lock during the call to port_add(), we need to
11551 * be careful here that somebody else didn't move or delete this item
11552 * while we were busy with other things.
11553 *
11554 * If it was on the list, we need a put() for the reference the list
11555 * had. Either way, we need a get() for the destination list.
11556 */
11557 if (!list_empty(&sas_device->list)) {
11558 list_del_init(&sas_device->list);
11559 sas_device_put(sas_device);
11560 }
11561
11562 sas_device_get(sas_device);
11563 list_add_tail(&sas_device->list, &ioc->sas_device_list);
11564
11565 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11566}
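/*
 * Reference counting contract for the two helpers above:
 * get_next_sas_device() returns the device with an extra reference that
 * the caller must drop with sas_device_put() when it is done, and
 * sas_device_make_active() takes its own reference for sas_device_list
 * (dropping the init-list reference, if the device was still on a list).
 *
 * Typical caller pattern (sketch, mirroring _scsih_probe_sas() below):
 *
 *	while ((sas_device = get_next_sas_device(ioc))) {
 *		... report the device ...
 *		sas_device_make_active(ioc, sas_device);
 *		sas_device_put(sas_device);
 *	}
 */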
11567
f92363d1
SR
11568/**
11569 * _scsih_probe_sas - reporting sas devices to sas transport
11570 * @ioc: per adapter object
11571 *
11572 * Called during initial loading of the driver.
11573 */
11574static void
11575_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
11576{
d1cb5e49 11577 struct _sas_device *sas_device;
f92363d1 11578
d1cb5e49
SR
11579 if (ioc->hide_drives)
11580 return;
7786ab6a 11581
d1cb5e49 11582 while ((sas_device = get_next_sas_device(ioc))) {
f92363d1 11583 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
e2f0cdf7 11584 sas_device->sas_address_parent, sas_device->port)) {
d1cb5e49
SR
11585 _scsih_sas_device_remove(ioc, sas_device);
11586 sas_device_put(sas_device);
f92363d1
SR
11587 continue;
11588 } else if (!sas_device->starget) {
11589 /*
11590	 * When async scanning is enabled, it's not possible to
11591 * remove devices while scanning is turned on due to an
11592 * oops in scsi_sysfs_add_sdev()->add_device()->
11593 * sysfs_addrm_start()
11594 */
f5edbe77 11595 if (!ioc->is_driver_loading) {
f92363d1
SR
11596 mpt3sas_transport_port_remove(ioc,
11597 sas_device->sas_address,
e2f0cdf7
SR
11598 sas_device->sas_address_parent,
11599 sas_device->port);
d1cb5e49
SR
11600 _scsih_sas_device_remove(ioc, sas_device);
11601 sas_device_put(sas_device);
f5edbe77
SR
11602 continue;
11603 }
f92363d1 11604 }
d1cb5e49
SR
11605 sas_device_make_active(ioc, sas_device);
11606 sas_device_put(sas_device);
f92363d1
SR
11607 }
11608}
11609
d88e1eab
SPS
11610/**
11611 * get_next_pcie_device - Get the next pcie device
11612 * @ioc: per adapter object
11613 *
11614	 * Get the next pcie device from the pcie_device_init_list.
11615	 *
4beb4867	11616	 * Return: pcie device structure if the pcie_device_init_list is not empty,
d88e1eab
SPS
11617 * otherwise returns NULL
11618 */
11619static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
11620{
11621 struct _pcie_device *pcie_device = NULL;
11622 unsigned long flags;
11623
11624 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11625 if (!list_empty(&ioc->pcie_device_init_list)) {
11626 pcie_device = list_first_entry(&ioc->pcie_device_init_list,
11627 struct _pcie_device, list);
11628 pcie_device_get(pcie_device);
11629 }
11630 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11631
11632 return pcie_device;
11633}
11634
11635/**
11636	 * pcie_device_make_active - Add pcie device to the pcie_device_list
11637 * @ioc: per adapter object
11638 * @pcie_device: pcie device object
11639 *
11640	 * Add the pcie device which has been registered with the SCSI Transport
11641	 * Layer to the pcie_device_list
11642 */
11643static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11644 struct _pcie_device *pcie_device)
11645{
11646 unsigned long flags;
11647
11648 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11649
11650 if (!list_empty(&pcie_device->list)) {
11651 list_del_init(&pcie_device->list);
11652 pcie_device_put(pcie_device);
11653 }
11654 pcie_device_get(pcie_device);
11655 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
11656
11657 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11658}
11659
11660/**
11661 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
11662 * @ioc: per adapter object
11663 *
11664 * Called during initial loading of the driver.
11665 */
11666static void
11667_scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
11668{
11669 struct _pcie_device *pcie_device;
11670 int rc;
11671
11672 /* PCIe Device List */
11673 while ((pcie_device = get_next_pcie_device(ioc))) {
11674 if (pcie_device->starget) {
11675 pcie_device_put(pcie_device);
11676 continue;
11677 }
3c090ce3
SP
11678 if (pcie_device->access_status ==
11679 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
11680 pcie_device_make_active(ioc, pcie_device);
11681 pcie_device_put(pcie_device);
11682 continue;
11683 }
d88e1eab
SPS
11684 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
11685 pcie_device->id, 0);
11686 if (rc) {
11687 _scsih_pcie_device_remove(ioc, pcie_device);
11688 pcie_device_put(pcie_device);
11689 continue;
11690 } else if (!pcie_device->starget) {
11691 /*
11692	 * When async scanning is enabled, it's not possible to
11693 * remove devices while scanning is turned on due to an
11694 * oops in scsi_sysfs_add_sdev()->add_device()->
11695 * sysfs_addrm_start()
11696 */
11697 if (!ioc->is_driver_loading) {
11698 /* TODO-- Need to find out whether this condition will
11699 * occur or not
11700 */
11701 _scsih_pcie_device_remove(ioc, pcie_device);
11702 pcie_device_put(pcie_device);
11703 continue;
11704 }
11705 }
11706 pcie_device_make_active(ioc, pcie_device);
11707 pcie_device_put(pcie_device);
11708 }
11709}
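/*
 * Note on the loop above: NVMe devices whose access status is
 * MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED are moved to the active list
 * without being reported to the SCSI midlayer, presumably so they can be
 * exposed later once the blocking condition clears.
 */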
11710
f92363d1
SR
11711/**
11712 * _scsih_probe_devices - probing for devices
11713 * @ioc: per adapter object
11714 *
11715 * Called during initial loading of the driver.
11716 */
11717static void
11718_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
11719{
11720 u16 volume_mapping_flags;
11721
11722 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
11723 return; /* return when IOC doesn't support initiator mode */
11724
11725 _scsih_probe_boot_devices(ioc);
11726
11727 if (ioc->ir_firmware) {
11728 volume_mapping_flags =
11729 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
11730 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
11731 if (volume_mapping_flags ==
11732 MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
11733 _scsih_probe_raid(ioc);
11734 _scsih_probe_sas(ioc);
11735 } else {
11736 _scsih_probe_sas(ioc);
11737 _scsih_probe_raid(ioc);
11738 }
d88e1eab 11739 } else {
f92363d1 11740 _scsih_probe_sas(ioc);
d88e1eab
SPS
11741 _scsih_probe_pcie(ioc);
11742 }
f92363d1
SR
11743}
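/*
 * Probe ordering above: with IR firmware the RAID volumes are reported
 * before or after the bare SAS devices depending on the IOC page 8
 * volume mapping mode (low volume mapping reports volumes first,
 * presumably so they claim the lowest target IDs); without IR firmware,
 * SAS devices and then PCIe/NVMe devices are reported.
 */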
11744
11745/**
8a7e4c24 11746 * scsih_scan_start - scsi lld callback for .scan_start
f92363d1
SR
11747 * @shost: SCSI host pointer
11748 *
11749 * The shost has the ability to discover targets on its own instead
11750	 * of scanning the entire bus. In our implementation, we will kick off
11751 * firmware discovery.
11752 */
8bbb1cf6 11753static void
8a7e4c24 11754scsih_scan_start(struct Scsi_Host *shost)
f92363d1
SR
11755{
11756 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11757 int rc;
11758 if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
11759 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
d04a6edf
SR
11760 else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
11761 mpt3sas_enable_diag_buffer(ioc, 1);
f92363d1
SR
11762
11763 if (disable_discovery > 0)
11764 return;
11765
11766 ioc->start_scan = 1;
11767 rc = mpt3sas_port_enable(ioc);
11768
11769 if (rc != 0)
919d8a3f 11770 ioc_info(ioc, "port enable: FAILED\n");
f92363d1
SR
11771}
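/*
 * scsih_scan_start() posts a host trace diag buffer (when requested via
 * the diag_buffer_enable module parameter or sized in manufacturing page
 * 11) before kicking off port enable; when the disable_discovery module
 * parameter is set, port enable is skipped and scsih_scan_finished()
 * ends the scan immediately.
 */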
11772
a0815c45
SP
11773/**
11774 * _scsih_complete_devices_scanning - add the devices to sml and
11775 * complete ioc initialization.
11776 * @ioc: per adapter object
11777 *
11778 * Return nothing.
11779 */
11780static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
11781{
11782
11783 if (ioc->wait_for_discovery_to_complete) {
11784 ioc->wait_for_discovery_to_complete = 0;
11785 _scsih_probe_devices(ioc);
11786 }
11787
11788 mpt3sas_base_start_watchdog(ioc);
11789 ioc->is_driver_loading = 0;
11790}
11791
f92363d1 11792/**
8a7e4c24 11793 * scsih_scan_finished - scsi lld callback for .scan_finished
f92363d1
SR
11794 * @shost: SCSI host pointer
11795 * @time: elapsed time of the scan in jiffies
11796 *
11797	 * This function will be called periodically until it returns 1 with the
11798	 * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
11799 * we wait for firmware discovery to complete, then return 1.
11800 */
8bbb1cf6 11801static int
8a7e4c24 11802scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
f92363d1
SR
11803{
11804 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
a0815c45
SP
11805 u32 ioc_state;
11806 int issue_hard_reset = 0;
f92363d1
SR
11807
11808 if (disable_discovery > 0) {
11809 ioc->is_driver_loading = 0;
11810 ioc->wait_for_discovery_to_complete = 0;
11811 return 1;
11812 }
11813
11814 if (time >= (300 * HZ)) {
e3586147 11815 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
919d8a3f 11816 ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
f92363d1
SR
11817 ioc->is_driver_loading = 0;
11818 return 1;
11819 }
11820
a0815c45
SP
11821 if (ioc->start_scan) {
11822 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
11823 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
11824 mpt3sas_print_fault_code(ioc, ioc_state &
11825 MPI2_DOORBELL_DATA_MASK);
11826 issue_hard_reset = 1;
11827 goto out;
11828 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
11829 MPI2_IOC_STATE_COREDUMP) {
11830 mpt3sas_base_coredump_info(ioc, ioc_state &
11831 MPI2_DOORBELL_DATA_MASK);
11832 mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
11833 issue_hard_reset = 1;
11834 goto out;
11835 }
f92363d1 11836 return 0;
a0815c45 11837 }
f92363d1 11838
a0815c45
SP
11839 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
11840 ioc_info(ioc,
11841 "port enable: aborted due to diag reset\n");
11842 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11843 goto out;
11844 }
f92363d1 11845 if (ioc->start_scan_failed) {
919d8a3f
JP
11846 ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
11847 ioc->start_scan_failed);
f92363d1
SR
11848 ioc->is_driver_loading = 0;
11849 ioc->wait_for_discovery_to_complete = 0;
11850 ioc->remove_host = 1;
11851 return 1;
11852 }
11853
919d8a3f 11854 ioc_info(ioc, "port enable: SUCCESS\n");
e3586147 11855 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
a0815c45 11856 _scsih_complete_devices_scanning(ioc);
f92363d1 11857
a0815c45
SP
11858out:
11859 if (issue_hard_reset) {
11860 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11861 if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
11862 ioc->is_driver_loading = 0;
f92363d1 11863 }
f92363d1
SR
11864 return 1;
11865}
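/*
 * scsih_scan_finished() is polled by the SCSI midlayer during the async
 * scan: returning 0 keeps the poll going, returning 1 ends it. The wait
 * is capped at 300 seconds, and a FAULT or COREDUMP IOC state observed
 * while waiting leads to a hard reset via the out label before the scan
 * is ended.
 */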
11866
664f0dce
SR
11867/**
11868 * scsih_map_queues - map reply queues with request queues
11869 * @shost: SCSI host pointer
11870 */
11871static int scsih_map_queues(struct Scsi_Host *shost)
11872{
11873 struct MPT3SAS_ADAPTER *ioc =
11874 (struct MPT3SAS_ADAPTER *)shost->hostdata;
432bc7ca
SR
11875 struct blk_mq_queue_map *map;
11876 int i, qoff, offset;
11877 int nr_msix_vectors = ioc->iopoll_q_start_index;
11878 int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
664f0dce 11879
432bc7ca 11880 if (shost->nr_hw_queues == 1)
664f0dce
SR
11881 return 0;
11882
432bc7ca
SR
11883 for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
11884 map = &shost->tag_set.map[i];
11885 map->nr_queues = 0;
11886 offset = 0;
11887 if (i == HCTX_TYPE_DEFAULT) {
11888 map->nr_queues =
11889 nr_msix_vectors - ioc->high_iops_queues;
11890 offset = ioc->high_iops_queues;
11891 } else if (i == HCTX_TYPE_POLL)
11892 map->nr_queues = iopoll_q_count;
11893
11894 if (!map->nr_queues)
11895 BUG_ON(i == HCTX_TYPE_DEFAULT);
11896
11897 /*
11898	 * The poll queues don't have an IRQ (and hence no IRQ
11899	 * affinity), so use the regular blk-mq cpu mapping
11900 */
11901 map->queue_offset = qoff;
11902 if (i != HCTX_TYPE_POLL)
11903 blk_mq_pci_map_queues(map, ioc->pdev, offset);
11904 else
11905 blk_mq_map_queues(map);
11906
11907 qoff += map->nr_queues;
11908 }
11909 return 0;
664f0dce
SR
11910}
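/*
 * Mapping summary for scsih_map_queues(): the HCTX_TYPE_DEFAULT map
 * covers the interrupt-driven reply queues (MSI-X vectors minus the
 * high-iops queues, which are skipped via 'offset') and derives its CPU
 * affinity from the PCI IRQ layout through blk_mq_pci_map_queues(); the
 * HCTX_TYPE_POLL map covers the iopoll queues, which have no IRQ and so
 * fall back to the generic blk_mq_map_queues() spread.
 */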
11911
c84b06a4
SR
11912/* shost template for SAS 2.0 HBA devices */
11913static struct scsi_host_template mpt2sas_driver_template = {
11914 .module = THIS_MODULE,
11915 .name = "Fusion MPT SAS Host",
11916 .proc_name = MPT2SAS_DRIVER_NAME,
11917 .queuecommand = scsih_qcmd,
11918 .target_alloc = scsih_target_alloc,
11919 .slave_alloc = scsih_slave_alloc,
11920 .slave_configure = scsih_slave_configure,
11921 .target_destroy = scsih_target_destroy,
11922 .slave_destroy = scsih_slave_destroy,
11923 .scan_finished = scsih_scan_finished,
11924 .scan_start = scsih_scan_start,
11925 .change_queue_depth = scsih_change_queue_depth,
11926 .eh_abort_handler = scsih_abort,
11927 .eh_device_reset_handler = scsih_dev_reset,
11928 .eh_target_reset_handler = scsih_target_reset,
11929 .eh_host_reset_handler = scsih_host_reset,
11930 .bios_param = scsih_bios_param,
11931 .can_queue = 1,
11932 .this_id = -1,
11933 .sg_tablesize = MPT2SAS_SG_DEPTH,
11934 .max_sectors = 32767,
11935 .cmd_per_lun = 7,
1bb3ca27
BVA
11936 .shost_groups = mpt3sas_host_groups,
11937 .sdev_groups = mpt3sas_dev_groups,
c84b06a4 11938 .track_queue_depth = 1,
dbec4c90 11939 .cmd_size = sizeof(struct scsiio_tracker),
c84b06a4
SR
11940};
11941
11942/* raid transport support for SAS 2.0 HBA devices */
11943static struct raid_function_template mpt2sas_raid_functions = {
11944 .cookie = &mpt2sas_driver_template,
11945 .is_raid = scsih_is_raid,
11946 .get_resync = scsih_get_resync,
11947 .get_state = scsih_get_state,
11948};
d357e84d 11949
c84b06a4
SR
11950/* shost template for SAS 3.0 HBA devices */
11951static struct scsi_host_template mpt3sas_driver_template = {
11952 .module = THIS_MODULE,
11953 .name = "Fusion MPT SAS Host",
11954 .proc_name = MPT3SAS_DRIVER_NAME,
11955 .queuecommand = scsih_qcmd,
11956 .target_alloc = scsih_target_alloc,
11957 .slave_alloc = scsih_slave_alloc,
11958 .slave_configure = scsih_slave_configure,
11959 .target_destroy = scsih_target_destroy,
11960 .slave_destroy = scsih_slave_destroy,
11961 .scan_finished = scsih_scan_finished,
11962 .scan_start = scsih_scan_start,
11963 .change_queue_depth = scsih_change_queue_depth,
11964 .eh_abort_handler = scsih_abort,
11965 .eh_device_reset_handler = scsih_dev_reset,
11966 .eh_target_reset_handler = scsih_target_reset,
11967 .eh_host_reset_handler = scsih_host_reset,
11968 .bios_param = scsih_bios_param,
11969 .can_queue = 1,
11970 .this_id = -1,
11971 .sg_tablesize = MPT3SAS_SG_DEPTH,
11972 .max_sectors = 32767,
ce0ad853 11973 .max_segment_size = 0xffffffff,
c84b06a4 11974 .cmd_per_lun = 7,
1bb3ca27
BVA
11975 .shost_groups = mpt3sas_host_groups,
11976 .sdev_groups = mpt3sas_dev_groups,
c84b06a4 11977 .track_queue_depth = 1,
dbec4c90 11978 .cmd_size = sizeof(struct scsiio_tracker),
664f0dce 11979 .map_queues = scsih_map_queues,
432bc7ca 11980 .mq_poll = mpt3sas_blk_mq_poll,
c84b06a4
SR
11981};
11982
11983/* raid transport support for SAS 3.0 HBA devices */
11984static struct raid_function_template mpt3sas_raid_functions = {
11985 .cookie = &mpt3sas_driver_template,
11986 .is_raid = scsih_is_raid,
11987 .get_resync = scsih_get_resync,
11988 .get_state = scsih_get_state,
11989};
11990
11991/**
11992	 * _scsih_determine_hba_mpi_version - determine which MPI version class
11993 * this device belongs to.
11994 * @pdev: PCI device struct
11995 *
11996 * return MPI2_VERSION for SAS 2.0 HBA devices,
b130b0d5
SS
11997 * MPI25_VERSION for SAS 3.0 HBA devices, and
11998	 * MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices
c84b06a4 11999 */
8bbb1cf6 12000static u16
c84b06a4
SR
12001_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
12002{
12003
12004 switch (pdev->device) {
7786ab6a 12005 case MPI2_MFGPAGE_DEVID_SSS6200:
d357e84d
SR
12006 case MPI2_MFGPAGE_DEVID_SAS2004:
12007 case MPI2_MFGPAGE_DEVID_SAS2008:
12008 case MPI2_MFGPAGE_DEVID_SAS2108_1:
12009 case MPI2_MFGPAGE_DEVID_SAS2108_2:
12010 case MPI2_MFGPAGE_DEVID_SAS2108_3:
12011 case MPI2_MFGPAGE_DEVID_SAS2116_1:
12012 case MPI2_MFGPAGE_DEVID_SAS2116_2:
12013 case MPI2_MFGPAGE_DEVID_SAS2208_1:
12014 case MPI2_MFGPAGE_DEVID_SAS2208_2:
12015 case MPI2_MFGPAGE_DEVID_SAS2208_3:
12016 case MPI2_MFGPAGE_DEVID_SAS2208_4:
12017 case MPI2_MFGPAGE_DEVID_SAS2208_5:
12018 case MPI2_MFGPAGE_DEVID_SAS2208_6:
12019 case MPI2_MFGPAGE_DEVID_SAS2308_1:
12020 case MPI2_MFGPAGE_DEVID_SAS2308_2:
12021 case MPI2_MFGPAGE_DEVID_SAS2308_3:
1244790d 12022 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
8f838450 12023 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
c84b06a4 12024 return MPI2_VERSION;
d357e84d
SR
12025 case MPI25_MFGPAGE_DEVID_SAS3004:
12026 case MPI25_MFGPAGE_DEVID_SAS3008:
12027 case MPI25_MFGPAGE_DEVID_SAS3108_1:
12028 case MPI25_MFGPAGE_DEVID_SAS3108_2:
12029 case MPI25_MFGPAGE_DEVID_SAS3108_5:
12030 case MPI25_MFGPAGE_DEVID_SAS3108_6:
c84b06a4 12031 return MPI25_VERSION;
b130b0d5
SS
12032 case MPI26_MFGPAGE_DEVID_SAS3216:
12033 case MPI26_MFGPAGE_DEVID_SAS3224:
12034 case MPI26_MFGPAGE_DEVID_SAS3316_1:
12035 case MPI26_MFGPAGE_DEVID_SAS3316_2:
12036 case MPI26_MFGPAGE_DEVID_SAS3316_3:
12037 case MPI26_MFGPAGE_DEVID_SAS3316_4:
12038 case MPI26_MFGPAGE_DEVID_SAS3324_1:
12039 case MPI26_MFGPAGE_DEVID_SAS3324_2:
12040 case MPI26_MFGPAGE_DEVID_SAS3324_3:
12041 case MPI26_MFGPAGE_DEVID_SAS3324_4:
998f26ae
SPS
12042 case MPI26_MFGPAGE_DEVID_SAS3508:
12043 case MPI26_MFGPAGE_DEVID_SAS3508_1:
12044 case MPI26_MFGPAGE_DEVID_SAS3408:
12045 case MPI26_MFGPAGE_DEVID_SAS3516:
12046 case MPI26_MFGPAGE_DEVID_SAS3516_1:
12047 case MPI26_MFGPAGE_DEVID_SAS3416:
15fd7c74 12048 case MPI26_MFGPAGE_DEVID_SAS3616:
eb9c7ce5 12049 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
6c2938f7
SP
12050 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
12051 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
12052 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
12053 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
f38c43a0
SR
12054 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
12055 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
12056 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
12057 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
b130b0d5 12058 return MPI26_VERSION;
d357e84d 12059 }
c84b06a4 12060 return 0;
d357e84d
SR
12061}
12062
f92363d1 12063/**
c84b06a4 12064 * _scsih_probe - attach and add scsi host
f92363d1
SR
12065 * @pdev: PCI device struct
12066 * @id: pci device id
12067 *
4beb4867 12068 * Return: 0 success, anything else error.
f92363d1 12069 */
8bbb1cf6 12070static int
c84b06a4 12071_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
f92363d1
SR
12072{
12073 struct MPT3SAS_ADAPTER *ioc;
c84b06a4 12074 struct Scsi_Host *shost = NULL;
b65f1d4d 12075 int rv;
c84b06a4 12076 u16 hba_mpi_version;
432bc7ca 12077 int iopoll_q_count = 0;
c84b06a4
SR
12078
12079 /* Determine in which MPI version class this pci device belongs */
12080 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
12081 if (hba_mpi_version == 0)
12082 return -ENODEV;
12083
12084 /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
12085 * for other generation HBA's return with -ENODEV
12086 */
12087 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
12088 return -ENODEV;
12089
12090 /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
12091 * for other generation HBA's return with -ENODEV
12092 */
b130b0d5
SS
12093 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
12094 || hba_mpi_version == MPI26_VERSION)))
c84b06a4
SR
12095 return -ENODEV;
12096
12097 switch (hba_mpi_version) {
12098 case MPI2_VERSION:
ffdadd68 12099 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
12100 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
c84b06a4
SR
12101 /* Use mpt2sas driver host template for SAS 2.0 HBA's */
12102 shost = scsi_host_alloc(&mpt2sas_driver_template,
12103 sizeof(struct MPT3SAS_ADAPTER));
12104 if (!shost)
12105 return -ENODEV;
12106 ioc = shost_priv(shost);
12107 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12108 ioc->hba_mpi_version_belonged = hba_mpi_version;
12109 ioc->id = mpt2_ids++;
12110 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
c520691b
SPS
12111 switch (pdev->device) {
12112 case MPI2_MFGPAGE_DEVID_SSS6200:
c84b06a4
SR
12113 ioc->is_warpdrive = 1;
12114 ioc->hide_ir_msg = 1;
c520691b 12115 break;
1244790d 12116 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
8f838450 12117 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
c520691b
SPS
12118 ioc->is_mcpu_endpoint = 1;
12119 break;
12120 default:
c84b06a4 12121 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
c520691b
SPS
12122 break;
12123 }
324c122f
SR
12124
12125 if (multipath_on_hba == -1 || multipath_on_hba == 0)
12126 ioc->multipath_on_hba = 0;
12127 else
12128 ioc->multipath_on_hba = 1;
12129
c84b06a4
SR
12130 break;
12131 case MPI25_VERSION:
b130b0d5 12132 case MPI26_VERSION:
c84b06a4
SR
12133 /* Use mpt3sas driver host template for SAS 3.0 HBA's */
12134 shost = scsi_host_alloc(&mpt3sas_driver_template,
12135 sizeof(struct MPT3SAS_ADAPTER));
12136 if (!shost)
12137 return -ENODEV;
12138 ioc = shost_priv(shost);
12139 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12140 ioc->hba_mpi_version_belonged = hba_mpi_version;
12141 ioc->id = mpt3_ids++;
12142 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
998f26ae
SPS
12143 switch (pdev->device) {
12144 case MPI26_MFGPAGE_DEVID_SAS3508:
12145 case MPI26_MFGPAGE_DEVID_SAS3508_1:
12146 case MPI26_MFGPAGE_DEVID_SAS3408:
12147 case MPI26_MFGPAGE_DEVID_SAS3516:
12148 case MPI26_MFGPAGE_DEVID_SAS3516_1:
12149 case MPI26_MFGPAGE_DEVID_SAS3416:
15fd7c74 12150 case MPI26_MFGPAGE_DEVID_SAS3616:
eb9c7ce5 12151 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
cc68e607
SP
12152 ioc->is_gen35_ioc = 1;
12153 break;
f38c43a0
SR
12154 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
12155 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
12156 dev_err(&pdev->dev,
12157 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
12158 pdev->device, pdev->subsystem_vendor,
12159 pdev->subsystem_device);
12160 return 1;
12161 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
12162 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
12163 dev_err(&pdev->dev,
12164 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
12165 pdev->device, pdev->subsystem_vendor,
12166 pdev->subsystem_device);
12167 return 1;
cc68e607
SP
12168 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
12169 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
12170 dev_info(&pdev->dev,
12171 "HBA is in Configurable Secure mode\n");
df561f66 12172 fallthrough;
6c2938f7
SP
12173 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
12174 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
cc68e607 12175 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
998f26ae
SPS
12176 break;
12177 default:
cc68e607 12178 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
998f26ae 12179 }
b130b0d5
SS
12180 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
12181 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
0bb337c9
SPS
12182 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
12183 ioc->combined_reply_queue = 1;
12184 if (ioc->is_gen35_ioc)
12185 ioc->combined_reply_index_count =
12186 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
12187 else
12188 ioc->combined_reply_index_count =
12189 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
12190 }
324c122f
SR
12191
12192 switch (ioc->is_gen35_ioc) {
12193 case 0:
12194 if (multipath_on_hba == -1 || multipath_on_hba == 0)
12195 ioc->multipath_on_hba = 0;
12196 else
12197 ioc->multipath_on_hba = 1;
12198 break;
12199 case 1:
12200 if (multipath_on_hba == -1 || multipath_on_hba > 0)
12201 ioc->multipath_on_hba = 1;
12202 else
12203 ioc->multipath_on_hba = 0;
84a84cc6 12204 break;
324c122f
SR
12205 default:
12206 break;
12207 }
12208
c84b06a4
SR
12209 break;
12210 default:
12211 return -ENODEV;
12212 }
f92363d1 12213
f92363d1 12214 INIT_LIST_HEAD(&ioc->list);
08c4d550 12215 spin_lock(&gioc_lock);
f92363d1 12216 list_add_tail(&ioc->list, &mpt3sas_ioc_list);
08c4d550 12217 spin_unlock(&gioc_lock);
f92363d1 12218 ioc->shost = shost;
f92363d1
SR
12219 ioc->pdev = pdev;
12220 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
12221 ioc->tm_cb_idx = tm_cb_idx;
12222 ioc->ctl_cb_idx = ctl_cb_idx;
12223 ioc->base_cb_idx = base_cb_idx;
12224 ioc->port_enable_cb_idx = port_enable_cb_idx;
12225 ioc->transport_cb_idx = transport_cb_idx;
12226 ioc->scsih_cb_idx = scsih_cb_idx;
12227 ioc->config_cb_idx = config_cb_idx;
12228 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
12229 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
12230 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
12231 ioc->logging_level = logging_level;
12232 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
d3f623ae
SR
12233	 /* Host waits for a minimum of six seconds */
12234 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
3ac8e47b
SP
12235 /*
12236 * Enable MEMORY MOVE support flag.
12237 */
12238 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
688c1a0a
SP
12239 /* Enable ADDITIONAL QUERY support flag. */
12240 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
8dc8d29a
SR
12241
12242 ioc->enable_sdev_max_qd = enable_sdev_max_qd;
12243
f92363d1
SR
12244 /* misc semaphores and spin locks */
12245 mutex_init(&ioc->reset_in_progress_mutex);
08c4d550
SR
12246 /* initializing pci_access_mutex lock */
12247 mutex_init(&ioc->pci_access_mutex);
f92363d1
SR
12248 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
12249 spin_lock_init(&ioc->scsi_lookup_lock);
12250 spin_lock_init(&ioc->sas_device_lock);
12251 spin_lock_init(&ioc->sas_node_lock);
12252 spin_lock_init(&ioc->fw_event_lock);
12253 spin_lock_init(&ioc->raid_device_lock);
d88e1eab 12254 spin_lock_init(&ioc->pcie_device_lock);
f92363d1
SR
12255 spin_lock_init(&ioc->diag_trigger_lock);
12256
12257 INIT_LIST_HEAD(&ioc->sas_device_list);
12258 INIT_LIST_HEAD(&ioc->sas_device_init_list);
12259 INIT_LIST_HEAD(&ioc->sas_expander_list);
22a923c3 12260 INIT_LIST_HEAD(&ioc->enclosure_list);
d88e1eab
SPS
12261 INIT_LIST_HEAD(&ioc->pcie_device_list);
12262 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
f92363d1
SR
12263 INIT_LIST_HEAD(&ioc->fw_event_list);
12264 INIT_LIST_HEAD(&ioc->raid_device_list);
12265 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
12266 INIT_LIST_HEAD(&ioc->delayed_tr_list);
fd0331b3
SS
12267 INIT_LIST_HEAD(&ioc->delayed_sc_list);
12268 INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
f92363d1 12269 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
cf9bd21a 12270 INIT_LIST_HEAD(&ioc->reply_queue_list);
e238e71b 12271 INIT_LIST_HEAD(&ioc->port_table_list);
f92363d1 12272
c84b06a4 12273 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
d357e84d 12274
f92363d1
SR
12275 /* init shost parameters */
12276 shost->max_cmd_len = 32;
12277 shost->max_lun = max_lun;
12278 shost->transportt = mpt3sas_transport_template;
12279 shost->unique_id = ioc->id;
12280
0448f019
SPS
12281 if (ioc->is_mcpu_endpoint) {
12282 /* mCPU MPI support 64K max IO */
12283 shost->max_sectors = 128;
919d8a3f
JP
12284 ioc_info(ioc, "The max_sectors value is set to %d\n",
12285 shost->max_sectors);
0448f019
SPS
12286 } else {
12287 if (max_sectors != 0xFFFF) {
12288 if (max_sectors < 64) {
12289 shost->max_sectors = 64;
919d8a3f
JP
12290 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
12291 max_sectors);
0448f019
SPS
12292 } else if (max_sectors > 32767) {
12293 shost->max_sectors = 32767;
919d8a3f
JP
12294 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
12295 max_sectors);
0448f019
SPS
12296 } else {
12297 shost->max_sectors = max_sectors & 0xFFFE;
919d8a3f
JP
12298 ioc_info(ioc, "The max_sectors value is set to %d\n",
12299 shost->max_sectors);
0448f019 12300 }
f92363d1
SR
12301 }
12302 }
f92363d1 12303 /* register EEDP capabilities with SCSI layer */
e869f8ea
SR
12304 if (prot_mask >= 0)
12305 scsi_host_set_prot(shost, (prot_mask & 0x07));
f92363d1
SR
12306 else
12307 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
12308 | SHOST_DIF_TYPE2_PROTECTION
12309 | SHOST_DIF_TYPE3_PROTECTION);
12310
12311 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
12312
12313 /* event thread */
12314 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
c84b06a4 12315 "fw_event_%s%d", ioc->driver_name, ioc->id);
bdff785e 12316 ioc->firmware_event_thread = alloc_ordered_workqueue(
864449ee 12317 ioc->firmware_event_name, 0);
f92363d1 12318 if (!ioc->firmware_event_thread) {
919d8a3f
JP
12319 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12320 __FILE__, __LINE__, __func__);
b65f1d4d 12321 rv = -ENODEV;
f92363d1
SR
12322 goto out_thread_fail;
12323 }
12324
432bc7ca
SR
12325 shost->host_tagset = 0;
12326
12327 if (ioc->is_gen35_ioc && host_tagset_enable)
12328 shost->host_tagset = 1;
12329
f92363d1
SR
12330 ioc->is_driver_loading = 1;
12331 if ((mpt3sas_base_attach(ioc))) {
919d8a3f
JP
12332 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12333 __FILE__, __LINE__, __func__);
b65f1d4d 12334 rv = -ENODEV;
f92363d1
SR
12335 goto out_attach_fail;
12336 }
7786ab6a 12337
7786ab6a
SR
12338 if (ioc->is_warpdrive) {
12339 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
12340 ioc->hide_drives = 0;
12341 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
12342 ioc->hide_drives = 1;
12343 else {
c84b06a4 12344 if (mpt3sas_get_num_volumes(ioc))
7786ab6a
SR
12345 ioc->hide_drives = 1;
12346 else
12347 ioc->hide_drives = 0;
12348 }
12349 } else
12350 ioc->hide_drives = 0;
7786ab6a 12351
664f0dce
SR
12352 shost->nr_hw_queues = 1;
12353
432bc7ca 12354 if (shost->host_tagset) {
664f0dce
SR
12355 shost->nr_hw_queues =
12356 ioc->reply_queue_count - ioc->high_iops_queues;
12357
432bc7ca
SR
12358 iopoll_q_count =
12359 ioc->reply_queue_count - ioc->iopoll_q_start_index;
12360
12361 shost->nr_maps = iopoll_q_count ? 3 : 1;
12362
664f0dce
SR
12363 dev_info(&ioc->pdev->dev,
12364 "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
12365 shost->can_queue, shost->nr_hw_queues);
12366 }
12367
b65f1d4d
SR
12368 rv = scsi_add_host(shost, &pdev->dev);
12369 if (rv) {
919d8a3f
JP
12370 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12371 __FILE__, __LINE__, __func__);
4dc06fd8
RS
12372 goto out_add_shost_fail;
12373 }
12374
f92363d1 12375 scsi_scan_host(shost);
2b01b293 12376 mpt3sas_setup_debugfs(ioc);
f92363d1 12377 return 0;
4dc06fd8
RS
12378out_add_shost_fail:
12379 mpt3sas_base_detach(ioc);
f92363d1
SR
12380 out_attach_fail:
12381 destroy_workqueue(ioc->firmware_event_thread);
12382 out_thread_fail:
08c4d550 12383 spin_lock(&gioc_lock);
f92363d1 12384 list_del(&ioc->list);
08c4d550 12385 spin_unlock(&gioc_lock);
f92363d1 12386 scsi_host_put(shost);
b65f1d4d 12387 return rv;
f92363d1
SR
12388}
12389
f92363d1 12390/**
8a7e4c24 12391 * scsih_suspend - power management suspend main entry point
17287305 12392 * @dev: Device struct
f92363d1 12393 *
4beb4867 12394 * Return: 0 success, anything else error.
f92363d1 12395 */
17287305
VG
12396static int __maybe_unused
12397scsih_suspend(struct device *dev)
f92363d1 12398{
17287305 12399 struct pci_dev *pdev = to_pci_dev(dev);
f38c43a0
SR
12400 struct Scsi_Host *shost;
12401 struct MPT3SAS_ADAPTER *ioc;
f38c43a0
SR
12402 int rc;
12403
12404 rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12405 if (rc)
12406 return rc;
f92363d1
SR
12407
12408 mpt3sas_base_stop_watchdog(ioc);
12409 flush_scheduled_work();
12410 scsi_block_requests(shost);
d3f623ae 12411 _scsih_nvme_shutdown(ioc);
17287305
VG
12412 ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
12413 pdev, pci_name(pdev));
f92363d1 12414
f92363d1 12415 mpt3sas_base_free_resources(ioc);
f92363d1
SR
12416 return 0;
12417}
12418
12419/**
8a7e4c24 12420 * scsih_resume - power management resume main entry point
17287305 12421 * @dev: Device struct
f92363d1 12422 *
4beb4867 12423 * Return: 0 success, anything else error.
f92363d1 12424 */
17287305
VG
12425static int __maybe_unused
12426scsih_resume(struct device *dev)
f92363d1 12427{
17287305 12428 struct pci_dev *pdev = to_pci_dev(dev);
f38c43a0
SR
12429 struct Scsi_Host *shost;
12430 struct MPT3SAS_ADAPTER *ioc;
f92363d1
SR
12431 pci_power_t device_state = pdev->current_state;
12432 int r;
12433
f38c43a0
SR
12434 r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12435 if (r)
12436 return r;
12437
919d8a3f
JP
12438 ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
12439 pdev, pci_name(pdev), device_state);
f92363d1 12440
f92363d1
SR
12441 ioc->pdev = pdev;
12442 r = mpt3sas_base_map_resources(ioc);
12443 if (r)
12444 return r;
5b061980 12445 ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
98c56ad3 12446 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
f92363d1
SR
12447 scsi_unblock_requests(shost);
12448 mpt3sas_base_start_watchdog(ioc);
12449 return 0;
12450}
f92363d1
SR
12451
12452/**
8a7e4c24 12453 * scsih_pci_error_detected - Called when a PCI error is detected.
f92363d1
SR
12454 * @pdev: PCI device struct
12455 * @state: PCI channel state
12456 *
12457 * Description: Called when a PCI error is detected.
12458 *
4beb4867 12459 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
f92363d1 12460 */
8bbb1cf6 12461static pci_ers_result_t
8a7e4c24 12462scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
f92363d1 12463{
f38c43a0
SR
12464 struct Scsi_Host *shost;
12465 struct MPT3SAS_ADAPTER *ioc;
12466
12467 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12468 return PCI_ERS_RESULT_DISCONNECT;
f92363d1 12469
919d8a3f 12470 ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
f92363d1
SR
12471
12472 switch (state) {
12473 case pci_channel_io_normal:
12474 return PCI_ERS_RESULT_CAN_RECOVER;
12475 case pci_channel_io_frozen:
12476 /* Fatal error, prepare for slot reset */
12477 ioc->pci_error_recovery = 1;
12478 scsi_block_requests(ioc->shost);
12479 mpt3sas_base_stop_watchdog(ioc);
12480 mpt3sas_base_free_resources(ioc);
12481 return PCI_ERS_RESULT_NEED_RESET;
12482 case pci_channel_io_perm_failure:
12483 /* Permanent error, prepare for device removal */
12484 ioc->pci_error_recovery = 1;
12485 mpt3sas_base_stop_watchdog(ioc);
432bc7ca 12486 mpt3sas_base_pause_mq_polling(ioc);
f92363d1
SR
12487 _scsih_flush_running_cmds(ioc);
12488 return PCI_ERS_RESULT_DISCONNECT;
12489 }
12490 return PCI_ERS_RESULT_NEED_RESET;
12491}
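/*
 * Error-state handling above: for a frozen channel, requests are blocked
 * and the adapter resources are released before asking for a slot reset
 * (PCI_ERS_RESULT_NEED_RESET); for a permanent failure, queue polling is
 * paused and outstanding commands are flushed back to the midlayer
 * before reporting PCI_ERS_RESULT_DISCONNECT.
 */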
12492
12493/**
8a7e4c24 12494 * scsih_pci_slot_reset - Called when PCI slot has been reset.
f92363d1
SR
12495 * @pdev: PCI device struct
12496 *
12497 * Description: This routine is called by the pci error recovery
12498 * code after the PCI slot has been reset, just before we
12499 * should resume normal operations.
12500 */
8bbb1cf6 12501static pci_ers_result_t
8a7e4c24 12502scsih_pci_slot_reset(struct pci_dev *pdev)
f92363d1 12503{
f38c43a0
SR
12504 struct Scsi_Host *shost;
12505 struct MPT3SAS_ADAPTER *ioc;
f92363d1
SR
12506 int rc;
12507
f38c43a0
SR
12508 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12509 return PCI_ERS_RESULT_DISCONNECT;
12510
919d8a3f 12511 ioc_info(ioc, "PCI error: slot reset callback!!\n");
f92363d1
SR
12512
12513 ioc->pci_error_recovery = 0;
12514 ioc->pdev = pdev;
12515 pci_restore_state(pdev);
12516 rc = mpt3sas_base_map_resources(ioc);
12517 if (rc)
12518 return PCI_ERS_RESULT_DISCONNECT;
12519
5b061980 12520 ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
98c56ad3 12521 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
f92363d1 12522
919d8a3f
JP
12523 ioc_warn(ioc, "hard reset: %s\n",
12524 (rc == 0) ? "success" : "failed");
f92363d1
SR
12525
12526 if (!rc)
12527 return PCI_ERS_RESULT_RECOVERED;
12528 else
12529 return PCI_ERS_RESULT_DISCONNECT;
12530}
12531
12532/**
8a7e4c24 12533 * scsih_pci_resume() - resume normal ops after PCI reset
f92363d1
SR
12534 * @pdev: pointer to PCI device
12535 *
12536 * Called when the error recovery driver tells us that its
12537 * OK to resume normal operation. Use completion to allow
12538 * halted scsi ops to resume.
12539 */
8bbb1cf6 12540static void
8a7e4c24 12541scsih_pci_resume(struct pci_dev *pdev)
f92363d1 12542{
f38c43a0
SR
12543 struct Scsi_Host *shost;
12544 struct MPT3SAS_ADAPTER *ioc;
12545
12546 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12547 return;
f92363d1 12548
919d8a3f 12549 ioc_info(ioc, "PCI error: resume callback!!\n");
f92363d1 12550
f92363d1
SR
12551 mpt3sas_base_start_watchdog(ioc);
12552 scsi_unblock_requests(ioc->shost);
12553}
12554
12555/**
8a7e4c24 12556 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
f92363d1
SR
12557 * @pdev: pointer to PCI device
12558 */
8bbb1cf6 12559static pci_ers_result_t
8a7e4c24 12560scsih_pci_mmio_enabled(struct pci_dev *pdev)
f92363d1 12561{
f38c43a0
SR
12562 struct Scsi_Host *shost;
12563 struct MPT3SAS_ADAPTER *ioc;
12564
12565 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12566 return PCI_ERS_RESULT_DISCONNECT;
f92363d1 12567
919d8a3f 12568 ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
f92363d1
SR
12569
12570 /* TODO - dump whatever for debugging purposes */
12571
83c3d340
KW
12572	 /* This is called only if scsih_pci_error_detected returns
12573 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
12574 * works, no need to reset slot.
12575 */
12576 return PCI_ERS_RESULT_RECOVERED;
f92363d1
SR
12577}
12578
307d9075 12579/**
a8d548b0 12580 * scsih_ncq_prio_supp - Check for NCQ command priority support
307d9075
AM
12581 * @sdev: scsi device struct
12582 *
12583 * This is called when a user indicates they would like to enable
12584 * ncq command priorities. This works only on SATA devices.
12585 */
12586bool scsih_ncq_prio_supp(struct scsi_device *sdev)
12587{
12588 unsigned char *buf;
12589 bool ncq_prio_supp = false;
12590
12591 if (!scsi_device_supports_vpd(sdev))
12592 return ncq_prio_supp;
12593
12594 buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
12595 if (!buf)
12596 return ncq_prio_supp;
12597
12598 if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
12599 ncq_prio_supp = (buf[213] >> 4) & 1;
12600
12601 kfree(buf);
12602 return ncq_prio_supp;
12603}
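/*
 * Note: buf[213] above is read from the ATA Information VPD page (0x89);
 * assuming the standard SAT layout in which the 512-byte IDENTIFY DEVICE
 * data starts at byte offset 60, bit 4 of byte 213 corresponds to
 * IDENTIFY word 76 bit 12, the SATA "NCQ priority information supported"
 * capability bit.
 */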
c84b06a4
SR
12604/*
12605 * The pci device ids are defined in mpi/mpi2_cnfg.h.
12606 */
12607static const struct pci_device_id mpt3sas_pci_table[] = {
12608 /* Spitfire ~ 2004 */
12609 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
12610 PCI_ANY_ID, PCI_ANY_ID },
12611 /* Falcon ~ 2008 */
12612 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
12613 PCI_ANY_ID, PCI_ANY_ID },
12614 /* Liberator ~ 2108 */
12615 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
12616 PCI_ANY_ID, PCI_ANY_ID },
12617 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
12618 PCI_ANY_ID, PCI_ANY_ID },
12619 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
12620 PCI_ANY_ID, PCI_ANY_ID },
12621 /* Meteor ~ 2116 */
12622 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
12623 PCI_ANY_ID, PCI_ANY_ID },
12624 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
12625 PCI_ANY_ID, PCI_ANY_ID },
12626 /* Thunderbolt ~ 2208 */
12627 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
12628 PCI_ANY_ID, PCI_ANY_ID },
12629 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
12630 PCI_ANY_ID, PCI_ANY_ID },
12631 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
12632 PCI_ANY_ID, PCI_ANY_ID },
12633 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
12634 PCI_ANY_ID, PCI_ANY_ID },
12635 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
12636 PCI_ANY_ID, PCI_ANY_ID },
12637 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
12638 PCI_ANY_ID, PCI_ANY_ID },
12639 /* Mustang ~ 2308 */
12640 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
12641 PCI_ANY_ID, PCI_ANY_ID },
12642 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
12643 PCI_ANY_ID, PCI_ANY_ID },
12644 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
12645 PCI_ANY_ID, PCI_ANY_ID },
1244790d 12646 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
c520691b 12647 PCI_ANY_ID, PCI_ANY_ID },
8f838450
SP
12648 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
12649 PCI_ANY_ID, PCI_ANY_ID },
c84b06a4
SR
12650 /* SSS6200 */
12651 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
12652 PCI_ANY_ID, PCI_ANY_ID },
12653 /* Fury ~ 3004 and 3008 */
12654 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
12655 PCI_ANY_ID, PCI_ANY_ID },
12656 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
12657 PCI_ANY_ID, PCI_ANY_ID },
12658 /* Invader ~ 3108 */
12659 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
12660 PCI_ANY_ID, PCI_ANY_ID },
12661 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
12662 PCI_ANY_ID, PCI_ANY_ID },
12663 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
12664 PCI_ANY_ID, PCI_ANY_ID },
12665 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
12666 PCI_ANY_ID, PCI_ANY_ID },
b130b0d5
SS
12667 /* Cutlass ~ 3216 and 3224 */
12668 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
12669 PCI_ANY_ID, PCI_ANY_ID },
12670 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
12671 PCI_ANY_ID, PCI_ANY_ID },
12672 /* Intruder ~ 3316 and 3324 */
12673 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
12674 PCI_ANY_ID, PCI_ANY_ID },
12675 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
12676 PCI_ANY_ID, PCI_ANY_ID },
12677 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
12678 PCI_ANY_ID, PCI_ANY_ID },
12679 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
12680 PCI_ANY_ID, PCI_ANY_ID },
12681 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
12682 PCI_ANY_ID, PCI_ANY_ID },
12683 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
12684 PCI_ANY_ID, PCI_ANY_ID },
12685 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
12686 PCI_ANY_ID, PCI_ANY_ID },
12687 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
12688 PCI_ANY_ID, PCI_ANY_ID },
998f26ae
SPS
12689 /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
12690 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
12691 PCI_ANY_ID, PCI_ANY_ID },
12692 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
12693 PCI_ANY_ID, PCI_ANY_ID },
12694 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
12695 PCI_ANY_ID, PCI_ANY_ID },
12696 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
12697 PCI_ANY_ID, PCI_ANY_ID },
12698 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
12699 PCI_ANY_ID, PCI_ANY_ID },
12700 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
12701 PCI_ANY_ID, PCI_ANY_ID },
15fd7c74
SR
12702 /* Mercator ~ 3616*/
12703 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
12704 PCI_ANY_ID, PCI_ANY_ID },
6c2938f7
SP
12705
12706 /* Aero SI 0x00E1 Configurable Secure
12707 * 0x00E2 Hard Secure
12708 */
12709 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
12710 PCI_ANY_ID, PCI_ANY_ID },
12711 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
12712 PCI_ANY_ID, PCI_ANY_ID },
12713
f38c43a0
SR
12714 /*
12715 * Aero SI –> 0x00E0 Invalid, 0x00E3 Tampered
12716 */
12717 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
12718 PCI_ANY_ID, PCI_ANY_ID },
12719 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
12720 PCI_ANY_ID, PCI_ANY_ID },
12721
eb9c7ce5
SP
12722 /* Atlas PCIe Switch Management Port */
12723 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
12724 PCI_ANY_ID, PCI_ANY_ID },
12725
6c2938f7
SP
12726 /* Sea SI 0x00E5 Configurable Secure
12727 * 0x00E6 Hard Secure
12728 */
12729 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
12730 PCI_ANY_ID, PCI_ANY_ID },
12731 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12732 PCI_ANY_ID, PCI_ANY_ID },
12733
f38c43a0
SR
12734 /*
12735 * Sea SI –> 0x00E4 Invalid, 0x00E7 Tampered
12736 */
12737 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
12738 PCI_ANY_ID, PCI_ANY_ID },
12739 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
12740 PCI_ANY_ID, PCI_ANY_ID },
12741
c84b06a4
SR
12742 {0} /* Terminating entry */
12743};
12744MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
12745
12746static struct pci_error_handlers _mpt3sas_err_handler = {
12747 .error_detected = scsih_pci_error_detected,
12748 .mmio_enabled = scsih_pci_mmio_enabled,
12749 .slot_reset = scsih_pci_slot_reset,
12750 .resume = scsih_pci_resume,
12751};
12752
17287305
VG
12753static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);
12754
c84b06a4
SR
12755static struct pci_driver mpt3sas_driver = {
12756 .name = MPT3SAS_DRIVER_NAME,
12757 .id_table = mpt3sas_pci_table,
12758 .probe = _scsih_probe,
12759 .remove = scsih_remove,
12760 .shutdown = scsih_shutdown,
12761 .err_handler = &_mpt3sas_err_handler,
17287305 12762 .driver.pm = &scsih_pm_ops,
c84b06a4
SR
12763};
12764
f92363d1 12765/**
8a7e4c24 12766 * scsih_init - main entry point for this driver.
f92363d1 12767 *
4beb4867 12768 * Return: 0 success, anything else error.
f92363d1 12769 */
8bbb1cf6 12770static int
8a7e4c24 12771scsih_init(void)
f92363d1 12772{
c84b06a4
SR
12773 mpt2_ids = 0;
12774 mpt3_ids = 0;
f92363d1 12775
f92363d1
SR
12776 mpt3sas_base_initialize_callback_handler();
12777
12778	 /* queuecommand callback handler */
12779 scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
12780
6c7abffc 12781 /* task management callback handler */
f92363d1
SR
12782 tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
12783
12784 /* base internal commands callback handler */
12785 base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
12786 port_enable_cb_idx = mpt3sas_base_register_callback_handler(
12787 mpt3sas_port_enable_done);
12788
12789 /* transport internal commands callback handler */
12790 transport_cb_idx = mpt3sas_base_register_callback_handler(
12791 mpt3sas_transport_done);
12792
12793 /* scsih internal commands callback handler */
12794 scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
12795
12796 /* configuration page API internal commands callback handler */
12797 config_cb_idx = mpt3sas_base_register_callback_handler(
12798 mpt3sas_config_done);
12799
12800 /* ctl module callback handler */
12801 ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
12802
12803 tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
12804 _scsih_tm_tr_complete);
12805
12806 tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
12807 _scsih_tm_volume_tr_complete);
12808
12809 tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
12810 _scsih_sas_control_complete);
12811
2b01b293 12812 mpt3sas_init_debugfs();
7497392a 12813 return 0;
f92363d1
SR
12814}
12815
12816/**
7497392a 12817 * scsih_exit - exit point for this driver (when it is a module).
f92363d1 12818 *
4beb4867	12819	 * Return: nothing.
f92363d1 12820 */
8bbb1cf6 12821static void
8a7e4c24 12822scsih_exit(void)
f92363d1 12823{
f92363d1
SR
12824
12825 mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
12826 mpt3sas_base_release_callback_handler(tm_cb_idx);
12827 mpt3sas_base_release_callback_handler(base_cb_idx);
12828 mpt3sas_base_release_callback_handler(port_enable_cb_idx);
12829 mpt3sas_base_release_callback_handler(transport_cb_idx);
12830 mpt3sas_base_release_callback_handler(scsih_cb_idx);
12831 mpt3sas_base_release_callback_handler(config_cb_idx);
12832 mpt3sas_base_release_callback_handler(ctl_cb_idx);
12833
12834 mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
12835 mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
12836 mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
12837
12838/* raid transport support */
c84b06a4
SR
12839 if (hbas_to_enumerate != 1)
12840 raid_class_release(mpt3sas_raid_template);
12841 if (hbas_to_enumerate != 2)
12842 raid_class_release(mpt2sas_raid_template);
f92363d1 12843 sas_release_transport(mpt3sas_transport_template);
2b01b293 12844 mpt3sas_exit_debugfs();
f92363d1 12845}
7786ab6a 12846
c84b06a4
SR
12847/**
12848 * _mpt3sas_init - main entry point for this driver.
12849 *
4beb4867 12850 * Return: 0 success, anything else error.
c84b06a4
SR
12851 */
12852static int __init
12853_mpt3sas_init(void)
12854{
12855 int error;
12856
12857 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
12858 MPT3SAS_DRIVER_VERSION);
12859
12860 mpt3sas_transport_template =
12861 sas_attach_transport(&mpt3sas_transport_functions);
12862 if (!mpt3sas_transport_template)
12863 return -ENODEV;
12864
12865	 /* No need to attach the mpt3sas raid functions template
12866	 * if the hbas_to_enumerate value is one.
12867 */
12868 if (hbas_to_enumerate != 1) {
12869 mpt3sas_raid_template =
12870 raid_class_attach(&mpt3sas_raid_functions);
12871 if (!mpt3sas_raid_template) {
12872 sas_release_transport(mpt3sas_transport_template);
12873 return -ENODEV;
12874 }
12875 }
12876
12877 /* No need to attach mpt2sas raid functions template
12878	 * if the hbas_to_enumerate value is two
12879 */
12880 if (hbas_to_enumerate != 2) {
12881 mpt2sas_raid_template =
12882 raid_class_attach(&mpt2sas_raid_functions);
12883 if (!mpt2sas_raid_template) {
12884 sas_release_transport(mpt3sas_transport_template);
12885 return -ENODEV;
12886 }
12887 }
12888
12889 error = scsih_init();
12890 if (error) {
12891 scsih_exit();
12892 return error;
12893 }
12894
12895 mpt3sas_ctl_init(hbas_to_enumerate);
12896
12897 error = pci_register_driver(&mpt3sas_driver);
12898 if (error)
12899 scsih_exit();
12900
12901 return error;
12902}
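/*
 * Module init order above: the SAS transport template is attached first,
 * then the raid templates that match the hbas_to_enumerate setting
 * (1 = SAS 2.0 HBAs only, 2 = SAS 3.0 and newer only, any other value =
 * both generations), then the callback handlers are registered via
 * scsih_init() and the PCI driver is registered; scsih_exit() releases
 * the attached templates if a later step fails.
 */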
12903
12904/**
12905 * _mpt3sas_exit - exit point for this driver (when it is a module).
12906 *
12907 */
12908static void __exit
12909_mpt3sas_exit(void)
12910{
12911 pr_info("mpt3sas version %s unloading\n",
12912 MPT3SAS_DRIVER_VERSION);
12913
c84b06a4
SR
12914 mpt3sas_ctl_exit(hbas_to_enumerate);
12915
87b3576e
C
12916 pci_unregister_driver(&mpt3sas_driver);
12917
c84b06a4
SR
12918 scsih_exit();
12919}
12920
12921module_init(_mpt3sas_init);
12922module_exit(_mpt3sas_exit);