2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5 * Copyright (C) 2012-2014 LSI Corporation
6 * Copyright (C) 2013-2014 Avago Technologies
7 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/raid_class.h>
56 #include <linux/blk-mq-pci.h>
57 #include <asm/unaligned.h>
59 #include "mpt3sas_base.h"
61 #define RAID_CHANNEL 1
63 #define PCIE_CHANNEL 2
66 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
67 struct _sas_node *sas_expander);
68 static void _firmware_event_work(struct work_struct *work);
70 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
71 struct _sas_device *sas_device);
72 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
73 u8 retry_count, u8 is_pd);
74 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
75 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
76 struct _pcie_device *pcie_device);
78 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
79 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
80 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
82 /* global parameters */
83 LIST_HEAD(mpt3sas_ioc_list);
84 /* global ioc lock for list operations */
85 DEFINE_SPINLOCK(gioc_lock);
87 MODULE_AUTHOR(MPT3SAS_AUTHOR);
88 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
91 MODULE_ALIAS("mpt2sas");
93 /* local parameters */
94 static u8 scsi_io_cb_idx = -1;
95 static u8 tm_cb_idx = -1;
96 static u8 ctl_cb_idx = -1;
97 static u8 base_cb_idx = -1;
98 static u8 port_enable_cb_idx = -1;
99 static u8 transport_cb_idx = -1;
100 static u8 scsih_cb_idx = -1;
101 static u8 config_cb_idx = -1;
105 static u8 tm_tr_cb_idx = -1 ;
106 static u8 tm_tr_volume_cb_idx = -1 ;
107 static u8 tm_sas_control_cb_idx = -1;
109 /* command line options */
110 static u32 logging_level;
111 MODULE_PARM_DESC(logging_level,
112 " bits for enabling additional logging info (default=0)");
115 static ushort max_sectors = 0xFFFF;
116 module_param(max_sectors, ushort, 0444);
117 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
120 static int missing_delay[2] = {-1, -1};
121 module_param_array(missing_delay, int, NULL, 0444);
122 MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
124 /* scsi-mid layer global parmeter is max_report_luns, which is 511 */
125 #define MPT3SAS_MAX_LUN (16895)
126 static u64 max_lun = MPT3SAS_MAX_LUN;
127 module_param(max_lun, ullong, 0444);
128 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
130 static ushort hbas_to_enumerate;
131 module_param(hbas_to_enumerate, ushort, 0444);
132 MODULE_PARM_DESC(hbas_to_enumerate,
133 " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
134 1 - enumerates only SAS 2.0 generation HBAs\n \
135 2 - enumerates only SAS 3.0 generation HBAs (default=0)");
137 /* diag_buffer_enable is bitwise
139 * bit 1 set = SNAPSHOT
140 * bit 2 set = EXTENDED
142 * Either bit can be set, or both
144 static int diag_buffer_enable = -1;
145 module_param(diag_buffer_enable, int, 0444);
146 MODULE_PARM_DESC(diag_buffer_enable,
147 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
148 static int disable_discovery = -1;
149 module_param(disable_discovery, int, 0444);
150 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
153 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
154 static int prot_mask = -1;
155 module_param(prot_mask, int, 0444);
156 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
158 static bool enable_sdev_max_qd;
159 module_param(enable_sdev_max_qd, bool, 0444);
160 MODULE_PARM_DESC(enable_sdev_max_qd,
161 "Enable sdev max qd as can_queue, def=disabled(0)");
163 static int multipath_on_hba = -1;
164 module_param(multipath_on_hba, int, 0);
165 MODULE_PARM_DESC(multipath_on_hba,
166 "Multipath support to add same target device\n\t\t"
167 "as many times as it is visible to HBA from various paths\n\t\t"
169 "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
170 "\t SAS 3.5 HBA - This will be enabled)");
172 static int host_tagset_enable = 1;
173 module_param(host_tagset_enable, int, 0444);
174 MODULE_PARM_DESC(host_tagset_enable,
175 "Shared host tagset enable/disable Default: enable(1)");
177 /* raid transport support */
178 static struct raid_template *mpt3sas_raid_template;
179 static struct raid_template *mpt2sas_raid_template;
183 * struct sense_info - common structure for obtaining sense keys
185 * @asc: additional sense code
186 * @ascq: additional sense code qualifier
194 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
195 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
196 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
197 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
198 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
200 * struct fw_event_work - firmware event struct
201 * @list: link list framework
202 * @work: work object (ioc->fault_reset_work_q)
203 * @ioc: per adapter object
204 * @device_handle: device handle
205 * @VF_ID: virtual function id
206 * @VP_ID: virtual port id
207 * @ignore: flag meaning this event has been marked to ignore
208 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
209 * @refcount: kref for this event
210 * @event_data: reply event data payload follows
212 * This object stored on ioc->fw_event_list.
214 struct fw_event_work {
215 struct list_head list;
216 struct work_struct work;
218 struct MPT3SAS_ADAPTER *ioc;
224 struct kref refcount;
225 char event_data[] __aligned(4);
228 static void fw_event_work_free(struct kref *r)
230 kfree(container_of(r, struct fw_event_work, refcount));
233 static void fw_event_work_get(struct fw_event_work *fw_work)
235 kref_get(&fw_work->refcount);
238 static void fw_event_work_put(struct fw_event_work *fw_work)
240 kref_put(&fw_work->refcount, fw_event_work_free);
243 static struct fw_event_work *alloc_fw_event_work(int len)
245 struct fw_event_work *fw_event;
247 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
251 kref_init(&fw_event->refcount);
256 * struct _scsi_io_transfer - scsi io transfer
257 * @handle: sas device handle (assigned by firmware)
258 * @is_raid: flag set for hidden raid components
259 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
260 * @data_length: data transfer length
261 * @data_dma: dma pointer to data
264 * @cdb_length: cdb length
266 * @timeout: timeout for this command
267 * @VF_ID: virtual function id
268 * @VP_ID: virtual port id
269 * @valid_reply: flag set for reply message
270 * @sense_length: sense length
271 * @ioc_status: ioc status
272 * @scsi_state: scsi state
273 * @scsi_status: scsi staus
274 * @log_info: log information
275 * @transfer_length: data length transfer when there is a reply message
277 * Used for sending internal scsi commands to devices within this module.
278 * Refer to _scsi_send_scsi_io().
280 struct _scsi_io_transfer {
283 enum dma_data_direction dir;
286 u8 sense[SCSI_SENSE_BUFFERSIZE];
294 /* the following bits are only valid when 'valid_reply = 1' */
304 * _scsih_set_debug_level - global setting of ioc->logging_level.
305 * @val: value of the parameter to be set
306 * @kp: pointer to kernel_param structure
308 * Note: The logging levels are defined in mpt3sas_debug.h.
311 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
313 int ret = param_set_int(val, kp);
314 struct MPT3SAS_ADAPTER *ioc;
319 pr_info("setting logging_level(0x%08x)\n", logging_level);
320 spin_lock(&gioc_lock);
321 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
322 ioc->logging_level = logging_level;
323 spin_unlock(&gioc_lock);
326 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
327 &logging_level, 0644);
330 * _scsih_srch_boot_sas_address - search based on sas_address
331 * @sas_address: sas address
332 * @boot_device: boot device object from bios page 2
334 * Return: 1 when there's a match, 0 means no match.
337 _scsih_srch_boot_sas_address(u64 sas_address,
338 Mpi2BootDeviceSasWwid_t *boot_device)
340 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
344 * _scsih_srch_boot_device_name - search based on device name
345 * @device_name: device name specified in INDENTIFY fram
346 * @boot_device: boot device object from bios page 2
348 * Return: 1 when there's a match, 0 means no match.
351 _scsih_srch_boot_device_name(u64 device_name,
352 Mpi2BootDeviceDeviceName_t *boot_device)
354 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
358 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
359 * @enclosure_logical_id: enclosure logical id
360 * @slot_number: slot number
361 * @boot_device: boot device object from bios page 2
363 * Return: 1 when there's a match, 0 means no match.
366 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
367 Mpi2BootDeviceEnclosureSlot_t *boot_device)
369 return (enclosure_logical_id == le64_to_cpu(boot_device->
370 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
371 SlotNumber)) ? 1 : 0;
375 * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
376 * port number from port list
377 * @ioc: per adapter object
378 * @port_id: port number
379 * @bypass_dirty_port_flag: when set look the matching hba port entry even
380 * if hba port entry is marked as dirty.
382 * Search for hba port entry corresponding to provided port number,
383 * if available return port object otherwise return NULL.
386 mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
387 u8 port_id, u8 bypass_dirty_port_flag)
389 struct hba_port *port, *port_next;
392 * When multipath_on_hba is disabled then
393 * search the hba_port entry using default
396 if (!ioc->multipath_on_hba)
397 port_id = MULTIPATH_DISABLED_PORT_ID;
399 list_for_each_entry_safe(port, port_next,
400 &ioc->port_table_list, list) {
401 if (port->port_id != port_id)
403 if (bypass_dirty_port_flag)
405 if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
411 * Allocate hba_port object for default port id (i.e. 255)
412 * when multipath_on_hba is disabled for the HBA.
413 * And add this object to port_table_list.
415 if (!ioc->multipath_on_hba) {
416 port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
420 port->port_id = port_id;
422 "hba_port entry: %p, port: %d is added to hba_port list\n",
423 port, port->port_id);
424 list_add_tail(&port->list,
425 &ioc->port_table_list);
432 * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
433 * @ioc: per adapter object
434 * @port: hba_port object
437 * Return virtual_phy object corresponding to phy number.
440 mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
441 struct hba_port *port, u32 phy)
443 struct virtual_phy *vphy, *vphy_next;
445 if (!port->vphys_mask)
448 list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
449 if (vphy->phy_mask & (1 << phy))
456 * _scsih_is_boot_device - search for matching boot device.
457 * @sas_address: sas address
458 * @device_name: device name specified in INDENTIFY fram
459 * @enclosure_logical_id: enclosure logical id
461 * @form: specifies boot device form
462 * @boot_device: boot device object from bios page 2
464 * Return: 1 when there's a match, 0 means no match.
467 _scsih_is_boot_device(u64 sas_address, u64 device_name,
468 u64 enclosure_logical_id, u16 slot, u8 form,
469 Mpi2BiosPage2BootDevice_t *boot_device)
474 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
477 rc = _scsih_srch_boot_sas_address(
478 sas_address, &boot_device->SasWwid);
480 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
481 if (!enclosure_logical_id)
483 rc = _scsih_srch_boot_encl_slot(
484 enclosure_logical_id,
485 slot, &boot_device->EnclosureSlot);
487 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
490 rc = _scsih_srch_boot_device_name(
491 device_name, &boot_device->DeviceName);
493 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
501 * _scsih_get_sas_address - set the sas_address for given device handle
503 * @handle: device handle
504 * @sas_address: sas address
506 * Return: 0 success, non-zero when failure
509 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
512 Mpi2SasDevicePage0_t sas_device_pg0;
513 Mpi2ConfigReply_t mpi_reply;
518 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
519 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
520 ioc_err(ioc, "failure at %s:%d/%s()!\n",
521 __FILE__, __LINE__, __func__);
525 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
526 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
527 /* For HBA, vSES doesn't return HBA SAS address. Instead return
528 * vSES's sas address.
530 if ((handle <= ioc->sas_hba.num_phys) &&
531 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
532 MPI2_SAS_DEVICE_INFO_SEP)))
533 *sas_address = ioc->sas_hba.sas_address;
535 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
539 /* we hit this because the given parent handle doesn't exist */
540 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
543 /* else error case */
544 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
545 handle, ioc_status, __FILE__, __LINE__, __func__);
550 * _scsih_determine_boot_device - determine boot device.
551 * @ioc: per adapter object
552 * @device: sas_device or pcie_device object
553 * @channel: SAS or PCIe channel
555 * Determines whether this device should be first reported device to
556 * to scsi-ml or sas transport, this purpose is for persistent boot device.
557 * There are primary, alternate, and current entries in bios page 2. The order
558 * priority is primary, alternate, then current. This routine saves
559 * the corresponding device object.
560 * The saved data to be used later in _scsih_probe_boot_devices().
563 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
566 struct _sas_device *sas_device;
567 struct _pcie_device *pcie_device;
568 struct _raid_device *raid_device;
571 u64 enclosure_logical_id;
574 /* only process this function when driver loads */
575 if (!ioc->is_driver_loading)
578 /* no Bios, return immediately */
579 if (!ioc->bios_pg3.BiosVersion)
582 if (channel == RAID_CHANNEL) {
583 raid_device = device;
584 sas_address = raid_device->wwid;
586 enclosure_logical_id = 0;
588 } else if (channel == PCIE_CHANNEL) {
589 pcie_device = device;
590 sas_address = pcie_device->wwid;
592 enclosure_logical_id = 0;
596 sas_address = sas_device->sas_address;
597 device_name = sas_device->device_name;
598 enclosure_logical_id = sas_device->enclosure_logical_id;
599 slot = sas_device->slot;
602 if (!ioc->req_boot_device.device) {
603 if (_scsih_is_boot_device(sas_address, device_name,
604 enclosure_logical_id, slot,
605 (ioc->bios_pg2.ReqBootDeviceForm &
606 MPI2_BIOSPAGE2_FORM_MASK),
607 &ioc->bios_pg2.RequestedBootDevice)) {
609 ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
610 __func__, (u64)sas_address));
611 ioc->req_boot_device.device = device;
612 ioc->req_boot_device.channel = channel;
616 if (!ioc->req_alt_boot_device.device) {
617 if (_scsih_is_boot_device(sas_address, device_name,
618 enclosure_logical_id, slot,
619 (ioc->bios_pg2.ReqAltBootDeviceForm &
620 MPI2_BIOSPAGE2_FORM_MASK),
621 &ioc->bios_pg2.RequestedAltBootDevice)) {
623 ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
624 __func__, (u64)sas_address));
625 ioc->req_alt_boot_device.device = device;
626 ioc->req_alt_boot_device.channel = channel;
630 if (!ioc->current_boot_device.device) {
631 if (_scsih_is_boot_device(sas_address, device_name,
632 enclosure_logical_id, slot,
633 (ioc->bios_pg2.CurrentBootDeviceForm &
634 MPI2_BIOSPAGE2_FORM_MASK),
635 &ioc->bios_pg2.CurrentBootDevice)) {
637 ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
638 __func__, (u64)sas_address));
639 ioc->current_boot_device.device = device;
640 ioc->current_boot_device.channel = channel;
645 static struct _sas_device *
646 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
647 struct MPT3SAS_TARGET *tgt_priv)
649 struct _sas_device *ret;
651 assert_spin_locked(&ioc->sas_device_lock);
653 ret = tgt_priv->sas_dev;
660 static struct _sas_device *
661 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
662 struct MPT3SAS_TARGET *tgt_priv)
664 struct _sas_device *ret;
667 spin_lock_irqsave(&ioc->sas_device_lock, flags);
668 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
669 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
674 static struct _pcie_device *
675 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
676 struct MPT3SAS_TARGET *tgt_priv)
678 struct _pcie_device *ret;
680 assert_spin_locked(&ioc->pcie_device_lock);
682 ret = tgt_priv->pcie_dev;
684 pcie_device_get(ret);
690 * mpt3sas_get_pdev_from_target - pcie device search
691 * @ioc: per adapter object
692 * @tgt_priv: starget private object
694 * Context: This function will acquire ioc->pcie_device_lock and will release
695 * before returning the pcie_device object.
697 * This searches for pcie_device from target, then return pcie_device object.
699 static struct _pcie_device *
700 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
701 struct MPT3SAS_TARGET *tgt_priv)
703 struct _pcie_device *ret;
706 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
707 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
708 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
715 * __mpt3sas_get_sdev_by_rphy - sas device search
716 * @ioc: per adapter object
717 * @rphy: sas_rphy pointer
719 * Context: This function will acquire ioc->sas_device_lock and will release
720 * before returning the sas_device object.
722 * This searches for sas_device from rphy object
723 * then return sas_device object.
726 __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
727 struct sas_rphy *rphy)
729 struct _sas_device *sas_device;
731 assert_spin_locked(&ioc->sas_device_lock);
733 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
734 if (sas_device->rphy != rphy)
736 sas_device_get(sas_device);
741 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
742 if (sas_device->rphy != rphy)
744 sas_device_get(sas_device);
752 * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
753 * sas address from sas_device_list list
754 * @ioc: per adapter object
755 * @sas_address: device sas address
758 * Search for _sas_device object corresponding to provided sas address,
759 * if available return _sas_device object address otherwise return NULL.
762 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
763 u64 sas_address, struct hba_port *port)
765 struct _sas_device *sas_device;
770 assert_spin_locked(&ioc->sas_device_lock);
772 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
773 if (sas_device->sas_address != sas_address)
775 if (sas_device->port != port)
777 sas_device_get(sas_device);
781 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
782 if (sas_device->sas_address != sas_address)
784 if (sas_device->port != port)
786 sas_device_get(sas_device);
794 * mpt3sas_get_sdev_by_addr - sas device search
795 * @ioc: per adapter object
796 * @sas_address: sas address
797 * @port: hba port entry
798 * Context: Calling function should acquire ioc->sas_device_lock
800 * This searches for sas_device based on sas_address & port number,
801 * then return sas_device object.
804 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
805 u64 sas_address, struct hba_port *port)
807 struct _sas_device *sas_device;
810 spin_lock_irqsave(&ioc->sas_device_lock, flags);
811 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
813 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
818 static struct _sas_device *
819 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
821 struct _sas_device *sas_device;
823 assert_spin_locked(&ioc->sas_device_lock);
825 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
826 if (sas_device->handle == handle)
829 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
830 if (sas_device->handle == handle)
836 sas_device_get(sas_device);
841 * mpt3sas_get_sdev_by_handle - sas device search
842 * @ioc: per adapter object
843 * @handle: sas device handle (assigned by firmware)
844 * Context: Calling function should acquire ioc->sas_device_lock
846 * This searches for sas_device based on sas_address, then return sas_device
850 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
852 struct _sas_device *sas_device;
855 spin_lock_irqsave(&ioc->sas_device_lock, flags);
856 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
857 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
863 * _scsih_display_enclosure_chassis_info - display device location info
864 * @ioc: per adapter object
865 * @sas_device: per sas device object
866 * @sdev: scsi device struct
867 * @starget: scsi target struct
870 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
871 struct _sas_device *sas_device, struct scsi_device *sdev,
872 struct scsi_target *starget)
875 if (sas_device->enclosure_handle != 0)
876 sdev_printk(KERN_INFO, sdev,
877 "enclosure logical id (0x%016llx), slot(%d) \n",
879 sas_device->enclosure_logical_id,
881 if (sas_device->connector_name[0] != '\0')
882 sdev_printk(KERN_INFO, sdev,
883 "enclosure level(0x%04x), connector name( %s)\n",
884 sas_device->enclosure_level,
885 sas_device->connector_name);
886 if (sas_device->is_chassis_slot_valid)
887 sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
888 sas_device->chassis_slot);
889 } else if (starget) {
890 if (sas_device->enclosure_handle != 0)
891 starget_printk(KERN_INFO, starget,
892 "enclosure logical id(0x%016llx), slot(%d) \n",
894 sas_device->enclosure_logical_id,
896 if (sas_device->connector_name[0] != '\0')
897 starget_printk(KERN_INFO, starget,
898 "enclosure level(0x%04x), connector name( %s)\n",
899 sas_device->enclosure_level,
900 sas_device->connector_name);
901 if (sas_device->is_chassis_slot_valid)
902 starget_printk(KERN_INFO, starget,
903 "chassis slot(0x%04x)\n",
904 sas_device->chassis_slot);
906 if (sas_device->enclosure_handle != 0)
907 ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
908 (u64)sas_device->enclosure_logical_id,
910 if (sas_device->connector_name[0] != '\0')
911 ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
912 sas_device->enclosure_level,
913 sas_device->connector_name);
914 if (sas_device->is_chassis_slot_valid)
915 ioc_info(ioc, "chassis slot(0x%04x)\n",
916 sas_device->chassis_slot);
921 * _scsih_sas_device_remove - remove sas_device from list.
922 * @ioc: per adapter object
923 * @sas_device: the sas_device object
924 * Context: This function will acquire ioc->sas_device_lock.
926 * If sas_device is on the list, remove it and decrement its reference count.
929 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
930 struct _sas_device *sas_device)
936 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
937 sas_device->handle, (u64)sas_device->sas_address);
939 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
942 * The lock serializes access to the list, but we still need to verify
943 * that nobody removed the entry while we were waiting on the lock.
945 spin_lock_irqsave(&ioc->sas_device_lock, flags);
946 if (!list_empty(&sas_device->list)) {
947 list_del_init(&sas_device->list);
948 sas_device_put(sas_device);
950 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
954 * _scsih_device_remove_by_handle - removing device object by handle
955 * @ioc: per adapter object
956 * @handle: device handle
959 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
961 struct _sas_device *sas_device;
964 if (ioc->shost_recovery)
967 spin_lock_irqsave(&ioc->sas_device_lock, flags);
968 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
970 list_del_init(&sas_device->list);
971 sas_device_put(sas_device);
973 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
975 _scsih_remove_device(ioc, sas_device);
976 sas_device_put(sas_device);
981 * mpt3sas_device_remove_by_sas_address - removing device object by
982 * sas address & port number
983 * @ioc: per adapter object
984 * @sas_address: device sas_address
985 * @port: hba port entry
990 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
991 u64 sas_address, struct hba_port *port)
993 struct _sas_device *sas_device;
996 if (ioc->shost_recovery)
999 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1000 sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
1002 list_del_init(&sas_device->list);
1003 sas_device_put(sas_device);
1005 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1007 _scsih_remove_device(ioc, sas_device);
1008 sas_device_put(sas_device);
1013 * _scsih_sas_device_add - insert sas_device to the list.
1014 * @ioc: per adapter object
1015 * @sas_device: the sas_device object
1016 * Context: This function will acquire ioc->sas_device_lock.
1018 * Adding new object to the ioc->sas_device_list.
1021 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
1022 struct _sas_device *sas_device)
1024 unsigned long flags;
1027 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1028 __func__, sas_device->handle,
1029 (u64)sas_device->sas_address));
1031 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1034 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1035 sas_device_get(sas_device);
1036 list_add_tail(&sas_device->list, &ioc->sas_device_list);
1037 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1039 if (ioc->hide_drives) {
1040 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1044 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
1045 sas_device->sas_address_parent, sas_device->port)) {
1046 _scsih_sas_device_remove(ioc, sas_device);
1047 } else if (!sas_device->starget) {
1049 * When asyn scanning is enabled, its not possible to remove
1050 * devices while scanning is turned on due to an oops in
1051 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
1053 if (!ioc->is_driver_loading) {
1054 mpt3sas_transport_port_remove(ioc,
1055 sas_device->sas_address,
1056 sas_device->sas_address_parent,
1058 _scsih_sas_device_remove(ioc, sas_device);
1061 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1065 * _scsih_sas_device_init_add - insert sas_device to the list.
1066 * @ioc: per adapter object
1067 * @sas_device: the sas_device object
1068 * Context: This function will acquire ioc->sas_device_lock.
1070 * Adding new object at driver load time to the ioc->sas_device_init_list.
1073 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1074 struct _sas_device *sas_device)
1076 unsigned long flags;
1079 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1080 __func__, sas_device->handle,
1081 (u64)sas_device->sas_address));
1083 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1086 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1087 sas_device_get(sas_device);
1088 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
1089 _scsih_determine_boot_device(ioc, sas_device, 0);
1090 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1094 static struct _pcie_device *
1095 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1097 struct _pcie_device *pcie_device;
1099 assert_spin_locked(&ioc->pcie_device_lock);
1101 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1102 if (pcie_device->wwid == wwid)
1105 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1106 if (pcie_device->wwid == wwid)
1112 pcie_device_get(pcie_device);
1118 * mpt3sas_get_pdev_by_wwid - pcie device search
1119 * @ioc: per adapter object
1122 * Context: This function will acquire ioc->pcie_device_lock and will release
1123 * before returning the pcie_device object.
1125 * This searches for pcie_device based on wwid, then return pcie_device object.
1127 static struct _pcie_device *
1128 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1130 struct _pcie_device *pcie_device;
1131 unsigned long flags;
1133 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1134 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1135 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1141 static struct _pcie_device *
1142 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1145 struct _pcie_device *pcie_device;
1147 assert_spin_locked(&ioc->pcie_device_lock);
1149 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1150 if (pcie_device->id == id && pcie_device->channel == channel)
1153 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1154 if (pcie_device->id == id && pcie_device->channel == channel)
1160 pcie_device_get(pcie_device);
1164 static struct _pcie_device *
1165 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1167 struct _pcie_device *pcie_device;
1169 assert_spin_locked(&ioc->pcie_device_lock);
1171 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1172 if (pcie_device->handle == handle)
1175 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1176 if (pcie_device->handle == handle)
1182 pcie_device_get(pcie_device);
1188 * mpt3sas_get_pdev_by_handle - pcie device search
1189 * @ioc: per adapter object
1190 * @handle: Firmware device handle
1192 * Context: This function will acquire ioc->pcie_device_lock and will release
1193 * before returning the pcie_device object.
1195 * This searches for pcie_device based on handle, then return pcie_device
1198 struct _pcie_device *
1199 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1201 struct _pcie_device *pcie_device;
1202 unsigned long flags;
/* Locked wrapper around __mpt3sas_get_pdev_by_handle(); the reference
 * taken by the __ helper is transferred to the caller. */
1204 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1205 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1206 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1212 * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1213 * @ioc: per adapter object
1214 * Context: This function will acquire ioc->pcie_device_lock
1216 * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
1217 * which has reported maximum among all available NVMe drives.
1218 * Minimum max_shutdown_latency will be six seconds.
1221 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1223 struct _pcie_device *pcie_device;
1224 unsigned long flags;
/* Floor value: shutdown_latency never drops below this IOC default. */
1225 u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1227 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
/* Take the maximum shutdown_latency across all listed PCIe devices
 * (assignment line of the running max is elided in this excerpt). */
1228 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1229 if (pcie_device->shutdown_latency) {
1230 if (shutdown_latency < pcie_device->shutdown_latency)
1232 pcie_device->shutdown_latency;
1235 ioc->max_shutdown_latency = shutdown_latency;
1236 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1240 * _scsih_pcie_device_remove - remove pcie_device from list.
1241 * @ioc: per adapter object
1242 * @pcie_device: the pcie_device object
1243 * Context: This function will acquire ioc->pcie_device_lock.
1245 * If pcie_device is on the list, remove it and decrement its reference count.
1248 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1249 struct _pcie_device *pcie_device)
1251 unsigned long flags;
1252 int was_on_pcie_device_list = 0;
/* Set (on an elided line) when this device held the IOC-wide max
 * shutdown latency, forcing a recalculation after removal. */
1253 u8 update_latency = 0;
1257 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1258 pcie_device->handle, (u64)pcie_device->wwid);
1259 if (pcie_device->enclosure_handle != 0)
1260 ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1261 (u64)pcie_device->enclosure_logical_id,
1263 if (pcie_device->connector_name[0] != '\0')
1264 ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1265 pcie_device->enclosure_level,
1266 pcie_device->connector_name);
/* Unlink under the lock; the list's reference is dropped below, after
 * the lock is released, to avoid freeing under spinlock. */
1268 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1269 if (!list_empty(&pcie_device->list)) {
1270 list_del_init(&pcie_device->list);
1271 was_on_pcie_device_list = 1;
1273 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1275 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1276 if (was_on_pcie_device_list) {
1277 kfree(pcie_device->serial_number);
1278 pcie_device_put(pcie_device);
1282 * This device's RTD3 Entry Latency matches IOC's
1283 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1284 * from the available drives as current drive is getting removed.
1287 _scsih_set_nvme_max_shutdown_latency(ioc);
1292 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1293 * @ioc: per adapter object
1294 * @handle: device handle
/* Skipped entirely while host recovery is in progress (early return on
 * an elided line after the shost_recovery check). */
1297 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1299 struct _pcie_device *pcie_device;
1300 unsigned long flags;
1301 int was_on_pcie_device_list = 0;
1302 u8 update_latency = 0;
1304 if (ioc->shost_recovery)
1307 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1308 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1310 if (!list_empty(&pcie_device->list)) {
1311 list_del_init(&pcie_device->list);
1312 was_on_pcie_device_list = 1;
/* This put drops the list's reference; the lookup reference from
 * __mpt3sas_get_pdev_by_handle() is dropped further below. */
1313 pcie_device_put(pcie_device);
1315 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1318 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1319 if (was_on_pcie_device_list) {
1320 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1321 pcie_device_put(pcie_device);
1325 * This device's RTD3 Entry Latency matches IOC's
1326 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1327 * from the available drives as current drive is getting removed.
1330 _scsih_set_nvme_max_shutdown_latency(ioc);
1334 * _scsih_pcie_device_add - add pcie_device object
1335 * @ioc: per adapter object
1336 * @pcie_device: pcie_device object
1338 * This is added to the pcie_device_list link list.
1341 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1342 struct _pcie_device *pcie_device)
1344 unsigned long flags;
/* The ioc_info calls below appear wrapped in a debug macro (trailing
 * ')' on each — macro name elided in this excerpt). */
1347 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1349 pcie_device->handle, (u64)pcie_device->wwid));
1350 if (pcie_device->enclosure_handle != 0)
1352 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1354 (u64)pcie_device->enclosure_logical_id,
1355 pcie_device->slot));
1356 if (pcie_device->connector_name[0] != '\0')
1358 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1359 __func__, pcie_device->enclosure_level,
1360 pcie_device->connector_name));
/* The list holds its own reference to the device. */
1362 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1363 pcie_device_get(pcie_device);
1364 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1365 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* A blocked device is not exposed to the SCSI midlayer; just clear its
 * pending-add bit (early return presumably follows on an elided line). */
1367 if (pcie_device->access_status ==
1368 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1369 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
/* scsi_add_device() failure undoes the list insertion above. */
1372 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1373 _scsih_pcie_device_remove(ioc, pcie_device);
1374 } else if (!pcie_device->starget) {
1375 if (!ioc->is_driver_loading) {
1376 /*TODO-- Need to find out whether this condition will occur or not*/
1377 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1380 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1384 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1385 * @ioc: per adapter object
1386 * @pcie_device: the pcie_device object
1387 * Context: This function will acquire ioc->pcie_device_lock.
1389 * Adding new object at driver load time to the ioc->pcie_device_init_list.
1392 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1393 struct _pcie_device *pcie_device)
1395 unsigned long flags;
/* Debug-wrapped info prints, same macro pattern as _scsih_pcie_device_add. */
1398 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1400 pcie_device->handle, (u64)pcie_device->wwid));
1401 if (pcie_device->enclosure_handle != 0)
1403 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1405 (u64)pcie_device->enclosure_logical_id,
1406 pcie_device->slot));
1407 if (pcie_device->connector_name[0] != '\0')
1409 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1410 __func__, pcie_device->enclosure_level,
1411 pcie_device->connector_name));
/* Init list takes a reference; non-blocked devices are also considered
 * as boot-device candidates before the lock is released. */
1413 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1414 pcie_device_get(pcie_device);
1415 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1416 if (pcie_device->access_status !=
1417 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1418 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1419 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1422 * _scsih_raid_device_find_by_id - raid device search
1423 * @ioc: per adapter object
1424 * @id: sas device target id
1425 * @channel: sas device channel
1426 * Context: Calling function should acquire ioc->raid_device_lock
1428 * This searches for raid_device based on target id, then return raid_device
1431 static struct _raid_device *
1432 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
/* r carries the match out of the loop (match/return lines elided). */
1434 struct _raid_device *raid_device, *r;
1437 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1438 if (raid_device->id == id && raid_device->channel == channel) {
1449 * mpt3sas_raid_device_find_by_handle - raid device search
1450 * @ioc: per adapter object
1451 * @handle: sas device handle (assigned by firmware)
1452 * Context: Calling function should acquire ioc->raid_device_lock
1454 * This searches for raid_device based on handle, then return raid_device
1457 struct _raid_device *
1458 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1460 struct _raid_device *raid_device, *r;
/* Skip-on-mismatch loop; the entry that survives the != test is the match. */
1463 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1464 if (raid_device->handle != handle)
1475 * _scsih_raid_device_find_by_wwid - raid device search
1476 * @ioc: per adapter object
/* @wwid: world wide identifier (param line elided in this excerpt) */
1478 * Context: Calling function should acquire ioc->raid_device_lock
1480 * This searches for raid_device based on wwid, then return raid_device
1483 static struct _raid_device *
1484 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1486 struct _raid_device *raid_device, *r;
1489 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1490 if (raid_device->wwid != wwid)
1501 * _scsih_raid_device_add - add raid_device object
1502 * @ioc: per adapter object
1503 * @raid_device: raid_device object
1505 * This is added to the raid_device_list link list.
1508 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1509 struct _raid_device *raid_device)
1511 unsigned long flags;
/* Debug-wrapped info print (trailing ')' indicates an elided macro wrapper). */
1514 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1516 raid_device->handle, (u64)raid_device->wwid));
/* raid devices are not refcounted like sas/pcie devices; list insert only. */
1518 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1519 list_add_tail(&raid_device->list, &ioc->raid_device_list);
1520 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1524 * _scsih_raid_device_remove - delete raid_device object
1525 * @ioc: per adapter object
1526 * @raid_device: raid_device object
/* Unlinks the volume under raid_device_lock; the object itself is
 * presumably freed on an elided line between list_del and unlock. */
1530 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1531 struct _raid_device *raid_device)
1533 unsigned long flags;
1535 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1536 list_del(&raid_device->list);
1538 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1542 * mpt3sas_scsih_expander_find_by_handle - expander device search
1543 * @ioc: per adapter object
1544 * @handle: expander handle (assigned by firmware)
1545 * Context: Calling function should acquire ioc->sas_device_lock
1547 * This searches for expander device based on handle, then returns the
/* Return type line elided; result is delivered via r. */
1551 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1553 struct _sas_node *sas_expander, *r;
1556 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1557 if (sas_expander->handle != handle)
1567 * mpt3sas_scsih_enclosure_find_by_handle - exclosure device search
1568 * @ioc: per adapter object
1569 * @handle: enclosure handle (assigned by firmware)
1570 * Context: Calling function should acquire ioc->sas_device_lock
1572 * This searches for enclosure device based on handle, then returns the
1575 static struct _enclosure_node *
1576 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1578 struct _enclosure_node *enclosure_dev, *r;
/* EnclosureHandle is little-endian in the firmware page; convert before
 * comparing against the CPU-order handle argument. */
1581 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1582 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1591 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1592 * @ioc: per adapter object
1593 * @sas_address: sas address
1594 * @port: hba port entry
1595 * Context: Calling function should acquire ioc->sas_node_lock.
1597 * This searches for expander device based on sas_address & port number,
1598 * then returns the sas_node object.
1601 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1602 u64 sas_address, struct hba_port *port)
1604 struct _sas_node *sas_expander, *r = NULL;
/* A NULL port presumably short-circuits to "not found" on an elided
 * line between here and the loop — TODO confirm against full source. */
1609 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1610 if (sas_expander->sas_address != sas_address)
1612 if (sas_expander->port != port)
1622 * _scsih_expander_node_add - insert expander device to the list.
1623 * @ioc: per adapter object
1624 * @sas_expander: the sas_device object
1625 * Context: This function will acquire ioc->sas_node_lock.
1627 * Adding new object to the ioc->sas_expander_list.
1630 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1631 struct _sas_node *sas_expander)
1633 unsigned long flags;
/* Simple locked tail-insert; no refcounting on sas_node objects here. */
1635 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1636 list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1637 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1641 * _scsih_is_end_device - determines if device is an end device
1642 * @device_info: bitfield providing information about the device.
1645 * Return: 1 if end device.
1648 _scsih_is_end_device(u32 device_info)
/* END_DEVICE must be set AND at least one of SSP/STP/SATA. The inner
 * bitwise '|' of the three masked terms is a deliberate any-bit-set
 * test, not a typo for '||' — each operand is already a masked bit. */
1650 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1651 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1652 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1653 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1660 * _scsih_is_nvme_pciescsi_device - determines if
1661 * device is an pcie nvme/scsi device
1662 * @device_info: bitfield providing information about the device.
1665 * Returns 1 if device is pcie device type nvme/scsi.
1668 _scsih_is_nvme_pciescsi_device(u32 device_info)
/* Mask out the device-type field and accept either NVMe or SCSI type. */
1670 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1671 == MPI26_PCIE_DEVINFO_NVME) ||
1672 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1673 == MPI26_PCIE_DEVINFO_SCSI))
1680 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1681 * @ioc: per adapter object
/* @id / @channel param lines elided in this excerpt. */
1684 * Context: This function will acquire ioc->scsi_lookup_lock.
1686 * This will search for a matching channel:id in the scsi_lookup array,
1687 * returning 1 if found.
1690 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1694 struct scsi_cmnd *scmd;
/* Walk every smid slot; any outstanding command on this channel:id
 * means the target is busy. */
1697 smid <= ioc->shost->can_queue; smid++) {
1698 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1701 if (scmd->device->id == id &&
1702 scmd->device->channel == channel)
1709 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1710 * @ioc: per adapter object
/* @id / @lun / @channel param lines elided in this excerpt. */
1714 * Context: This function will acquire ioc->scsi_lookup_lock.
1716 * This will search for a matching channel:id:lun in the scsi_lookup array,
1717 * returning 1 if found.
1720 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1721 unsigned int lun, int channel)
1724 struct scsi_cmnd *scmd;
/* Same scan as _scsih_scsi_lookup_find_by_target, narrowed to one LUN. */
1726 for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1728 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1731 if (scmd->device->id == id &&
1732 scmd->device->channel == channel &&
1733 scmd->device->lun == lun)
1740 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1741 * @ioc: per adapter object
1742 * @smid: system request message index
1744 * Return: the smid stored scmd pointer.
1745 * Then will dereference the stored scmd pointer.
1748 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1750 struct scsi_cmnd *scmd = NULL;
1751 struct scsiio_tracker *st;
1752 Mpi25SCSIIORequest_t *mpi_request;
/* Only SCSI-IO smids qualify (internal command smids are excluded).
 * The unique tag combines the per-smid queue number with the blk-mq tag. */
1756 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1758 ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
1760 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1763 * If SCSI IO request is outstanding at driver level then
1764 * DevHandle filed must be non-zero. If DevHandle is zero
1765 * then it means that this smid is free at driver level,
1768 if (!mpi_request->DevHandle)
1771 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
/* A tracker with cb_idx 0xFF or smid 0 is stale — reject it
 * (the rejection branch body is elided in this excerpt). */
1773 st = scsi_cmd_priv(scmd);
1774 if (st->cb_idx == 0xFF || st->smid == 0)
1782 * scsih_change_queue_depth - setting device queue depth
1783 * @sdev: scsi device struct
1784 * @qdepth: requested queue depth
1786 * Return: queue depth.
1789 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1791 struct Scsi_Host *shost = sdev->host;
/* max_depth declaration elided; initialized from shost->can_queue below. */
1793 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1794 struct MPT3SAS_DEVICE *sas_device_priv_data;
1795 struct MPT3SAS_TARGET *sas_target_priv_data;
1796 struct _sas_device *sas_device;
1797 unsigned long flags;
1799 max_depth = shost->can_queue;
1802 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1805 if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
/* Guard chain: bail out (to an elided label) when private data is
 * missing, and skip the SATA clamp for RAID volumes. */
1808 sas_device_priv_data = sdev->hostdata;
1809 if (!sas_device_priv_data)
1811 sas_target_priv_data = sas_device_priv_data->sas_target;
1812 if (!sas_target_priv_data)
1814 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
/* SATA end devices get a reduced queue depth. */
1817 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1818 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1820 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1821 max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1823 sas_device_put(sas_device);
1825 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1829 if (!sdev->tagged_supported)
1831 if (qdepth > max_depth)
1833 scsi_change_queue_depth(sdev, qdepth);
1834 sdev_printk(KERN_INFO, sdev,
1835 "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1836 sdev->queue_depth, sdev->tagged_supported,
1837 sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1838 return sdev->queue_depth;
1842 * mpt3sas_scsih_change_queue_depth - setting device queue depth
1843 * @sdev: scsi device struct
1844 * @qdepth: requested queue depth
1849 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1851 struct Scsi_Host *shost = sdev->host;
1852 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
/* When the user enabled max-qd mode, override the requested depth with
 * the host's full can_queue before delegating. */
1854 if (ioc->enable_sdev_max_qd)
1855 qdepth = shost->can_queue;
1857 scsih_change_queue_depth(sdev, qdepth);
1861 * scsih_target_alloc - target add routine
1862 * @starget: scsi target struct
1864 * Return: 0 if ok. Any other return is assumed to be an error and
1865 * the device is ignored.
1868 scsih_target_alloc(struct scsi_target *starget)
1870 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1871 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1872 struct MPT3SAS_TARGET *sas_target_priv_data;
1873 struct _sas_device *sas_device;
1874 struct _raid_device *raid_device;
1875 struct _pcie_device *pcie_device;
1876 unsigned long flags;
1877 struct sas_rphy *rphy;
/* Allocate per-target private data; attached to starget->hostdata and
 * freed in scsih_target_destroy(). */
1879 sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1881 if (!sas_target_priv_data)
1884 starget->hostdata = sas_target_priv_data;
1885 sas_target_priv_data->starget = starget;
1886 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
/* RAID volume target: bind to the matching raid_device entry. */
1889 if (starget->channel == RAID_CHANNEL) {
1890 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1891 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1894 sas_target_priv_data->handle = raid_device->handle;
1895 sas_target_priv_data->sas_address = raid_device->wwid;
1896 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1897 if (ioc->is_warpdrive)
1898 sas_target_priv_data->raid_device = raid_device;
1899 raid_device->starget = starget;
1901 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
/* NVMe/PCIe target: the lookup reference is kept in pcie_dev and
 * released in scsih_target_destroy(). */
1906 if (starget->channel == PCIE_CHANNEL) {
1907 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1908 pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1911 sas_target_priv_data->handle = pcie_device->handle;
1912 sas_target_priv_data->sas_address = pcie_device->wwid;
1913 sas_target_priv_data->port = NULL;
1914 sas_target_priv_data->pcie_dev = pcie_device;
1915 pcie_device->starget = starget;
1916 pcie_device->id = starget->id;
1917 pcie_device->channel = starget->channel;
1918 sas_target_priv_data->flags |=
1919 MPT_TARGET_FLAGS_PCIE_DEVICE;
1920 if (pcie_device->fast_path)
1921 sas_target_priv_data->flags |=
1922 MPT_TARGET_FASTPATH_IO;
1924 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1928 /* sas/sata devices */
1929 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1930 rphy = dev_to_rphy(starget->dev.parent);
1931 sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
/* The sas_dev reference taken here is dropped in scsih_target_destroy(). */
1934 sas_target_priv_data->handle = sas_device->handle;
1935 sas_target_priv_data->sas_address = sas_device->sas_address;
1936 sas_target_priv_data->port = sas_device->port;
1937 sas_target_priv_data->sas_dev = sas_device;
1938 sas_device->starget = starget;
1939 sas_device->id = starget->id;
1940 sas_device->channel = starget->channel;
/* pd_handles bit set means this disk is hidden behind a RAID volume. */
1941 if (test_bit(sas_device->handle, ioc->pd_handles))
1942 sas_target_priv_data->flags |=
1943 MPT_TARGET_FLAGS_RAID_COMPONENT;
1944 if (sas_device->fast_path)
1945 sas_target_priv_data->flags |=
1946 MPT_TARGET_FASTPATH_IO;
1948 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1954 * scsih_target_destroy - target destroy routine
1955 * @starget: scsi target struct
1958 scsih_target_destroy(struct scsi_target *starget)
1960 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1961 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1962 struct MPT3SAS_TARGET *sas_target_priv_data;
1963 struct _sas_device *sas_device;
1964 struct _raid_device *raid_device;
1965 struct _pcie_device *pcie_device;
1966 unsigned long flags;
1968 sas_target_priv_data = starget->hostdata;
1969 if (!sas_target_priv_data)
/* RAID volume: just sever the back-pointers; raid devices are not
 * refcounted. */
1972 if (starget->channel == RAID_CHANNEL) {
1973 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1974 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1977 raid_device->starget = NULL;
1978 raid_device->sdev = NULL;
1980 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1984 if (starget->channel == PCIE_CHANNEL) {
1985 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1986 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1987 sas_target_priv_data);
1988 if (pcie_device && (pcie_device->starget == starget) &&
1989 (pcie_device->id == starget->id) &&
1990 (pcie_device->channel == starget->channel))
1991 pcie_device->starget = NULL;
1995 * Corresponding get() is in _scsih_target_alloc()
/* Two puts: one drops the reference held in pcie_dev since target
 * alloc, the other drops this function's lookup reference. */
1997 sas_target_priv_data->pcie_dev = NULL;
1998 pcie_device_put(pcie_device);
1999 pcie_device_put(pcie_device);
2001 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2005 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2006 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
2007 if (sas_device && (sas_device->starget == starget) &&
2008 (sas_device->id == starget->id) &&
2009 (sas_device->channel == starget->channel))
2010 sas_device->starget = NULL;
2014 * Corresponding get() is in _scsih_target_alloc()
/* Same double-put pattern for the sas device references. */
2016 sas_target_priv_data->sas_dev = NULL;
2017 sas_device_put(sas_device);
2019 sas_device_put(sas_device);
2021 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* Free the private data allocated in scsih_target_alloc(). */
2024 kfree(sas_target_priv_data);
2025 starget->hostdata = NULL;
2029 * scsih_slave_alloc - device add routine
2030 * @sdev: scsi device struct
2032 * Return: 0 if ok. Any other return is assumed to be an error and
2033 * the device is ignored.
2036 scsih_slave_alloc(struct scsi_device *sdev)
2038 struct Scsi_Host *shost;
2039 struct MPT3SAS_ADAPTER *ioc;
2040 struct MPT3SAS_TARGET *sas_target_priv_data;
2041 struct MPT3SAS_DEVICE *sas_device_priv_data;
2042 struct scsi_target *starget;
2043 struct _raid_device *raid_device;
2044 struct _sas_device *sas_device;
2045 struct _pcie_device *pcie_device;
2046 unsigned long flags;
/* Per-LUN private data, freed in scsih_slave_destroy(). */
2048 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
2050 if (!sas_device_priv_data)
2053 sas_device_priv_data->lun = sdev->lun;
2054 sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
2056 starget = scsi_target(sdev);
2057 sas_target_priv_data = starget->hostdata;
2058 sas_target_priv_data->num_luns++;
2059 sas_device_priv_data->sas_target = sas_target_priv_data;
2060 sdev->hostdata = sas_device_priv_data;
/* Hidden RAID components must not be claimed by upper-level drivers. */
2061 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
2062 sdev->no_uld_attach = 1;
2064 shost = dev_to_shost(&starget->dev);
2065 ioc = shost_priv(shost);
2066 if (starget->channel == RAID_CHANNEL) {
2067 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2068 raid_device = _scsih_raid_device_find_by_id(ioc,
2069 starget->id, starget->channel);
2071 raid_device->sdev = sdev; /* raid is single lun */
2072 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2074 if (starget->channel == PCIE_CHANNEL) {
2075 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
/* NOTE(review): for PCIe targets sas_address actually holds the wwid
 * (set in scsih_target_alloc), so the wwid lookup is consistent. */
2076 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2077 sas_target_priv_data->sas_address);
2078 if (pcie_device && (pcie_device->starget == NULL)) {
2079 sdev_printk(KERN_INFO, sdev,
2080 "%s : pcie_device->starget set to starget @ %d\n",
2081 __func__, __LINE__);
2082 pcie_device->starget = starget;
/* Drop the lookup reference before releasing the lock. */
2086 pcie_device_put(pcie_device);
2087 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2089 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2090 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2091 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2092 sas_target_priv_data->sas_address,
2093 sas_target_priv_data->port);
2094 if (sas_device && (sas_device->starget == NULL)) {
2095 sdev_printk(KERN_INFO, sdev,
2096 "%s : sas_device->starget set to starget @ %d\n",
2097 __func__, __LINE__);
2098 sas_device->starget = starget;
2102 sas_device_put(sas_device);
2104 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2111 * scsih_slave_destroy - device destroy routine
2112 * @sdev: scsi device struct
2115 scsih_slave_destroy(struct scsi_device *sdev)
2117 struct MPT3SAS_TARGET *sas_target_priv_data;
2118 struct scsi_target *starget;
2119 struct Scsi_Host *shost;
2120 struct MPT3SAS_ADAPTER *ioc;
2121 struct _sas_device *sas_device;
2122 struct _pcie_device *pcie_device;
2123 unsigned long flags;
2125 if (!sdev->hostdata)
2128 starget = scsi_target(sdev);
2129 sas_target_priv_data = starget->hostdata;
2130 sas_target_priv_data->num_luns--;
2132 shost = dev_to_shost(&starget->dev);
2133 ioc = shost_priv(shost);
/* Clear the device's starget back-pointer only when the last LUN on
 * this target goes away; lookup references are dropped before unlock. */
2135 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2136 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2137 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
2138 sas_target_priv_data);
2139 if (pcie_device && !sas_target_priv_data->num_luns)
2140 pcie_device->starget = NULL;
2143 pcie_device_put(pcie_device);
2145 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2147 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2148 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2149 sas_device = __mpt3sas_get_sdev_from_target(ioc,
2150 sas_target_priv_data);
2151 if (sas_device && !sas_target_priv_data->num_luns)
2152 sas_device->starget = NULL;
2155 sas_device_put(sas_device);
2156 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* Free the per-LUN data allocated in scsih_slave_alloc(). */
2159 kfree(sdev->hostdata);
2160 sdev->hostdata = NULL;
2164 * _scsih_display_sata_capabilities - sata capabilities
2165 * @ioc: per adapter object
2166 * @handle: device handle
2167 * @sdev: scsi device struct
/* Reads SAS Device Page 0 from firmware and logs the SATA feature bits.
 * Purely informational; errors only log and (presumably) return early. */
2170 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2171 u16 handle, struct scsi_device *sdev)
2173 Mpi2ConfigReply_t mpi_reply;
2174 Mpi2SasDevicePage0_t sas_device_pg0;
2179 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2180 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2181 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2182 __FILE__, __LINE__, __func__);
2186 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2187 MPI2_IOCSTATUS_MASK;
2188 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2189 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2190 __FILE__, __LINE__, __func__);
/* Firmware page fields are little-endian; convert before testing bits. */
2194 flags = le16_to_cpu(sas_device_pg0.Flags);
2195 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2197 sdev_printk(KERN_INFO, sdev,
2198 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2199 "sw_preserve(%s)\n",
2200 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2201 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2202 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2204 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2205 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2206 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2210 * raid transport support -
2211 * Enabled for SLES11 and newer, in older kernels the driver will panic when
2212 * unloading the driver followed by a load - I believe that the subroutine
2213 * raid_class_release() is not cleaning up properly.
2217 * scsih_is_raid - return boolean indicating device is raid volume
2218 * @dev: the device struct object
2221 scsih_is_raid(struct device *dev)
2223 struct scsi_device *sdev = to_scsi_device(dev);
2224 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
/* Warpdrive controllers expose volumes differently, so they opt out
 * (return value on the elided line after this check). */
2226 if (ioc->is_warpdrive)
2228 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
/*
 * scsih_is_nvme - return boolean indicating device is on the NVMe channel
 * @dev: the device struct object
 */
2232 scsih_is_nvme(struct device *dev)
2234 struct scsi_device *sdev = to_scsi_device(dev);
2236 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2240 * scsih_get_resync - get raid volume resync percent complete
2241 * @dev: the device struct object
2244 scsih_get_resync(struct device *dev)
2246 struct scsi_device *sdev = to_scsi_device(dev);
2247 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2248 static struct _raid_device *raid_device;
2249 unsigned long flags;
2250 Mpi2RaidVolPage0_t vol_pg0;
2251 Mpi2ConfigReply_t mpi_reply;
2252 u32 volume_status_flags;
2253 u8 percent_complete;
2256 percent_complete = 0;
/* Warpdrive does not support resync reporting; bail (elided) early. */
2258 if (ioc->is_warpdrive)
2261 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2262 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2265 handle = raid_device->handle;
2266 percent_complete = raid_device->percent_complete;
2268 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
/* Confirm the resync is actually in progress from firmware page 0;
 * otherwise report 0%. */
2273 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2274 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2275 sizeof(Mpi2RaidVolPage0_t))) {
2276 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2277 __FILE__, __LINE__, __func__);
2278 percent_complete = 0;
2282 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2283 if (!(volume_status_flags &
2284 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2285 percent_complete = 0;
/* Report through whichever raid template matches the HBA generation. */
2289 switch (ioc->hba_mpi_version_belonged) {
2291 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2295 raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2301 * scsih_get_state - get raid volume level
2302 * @dev: the device struct object
2305 scsih_get_state(struct device *dev)
2307 struct scsi_device *sdev = to_scsi_device(dev);
2308 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2309 static struct _raid_device *raid_device;
2310 unsigned long flags;
2311 Mpi2RaidVolPage0_t vol_pg0;
2312 Mpi2ConfigReply_t mpi_reply;
2314 enum raid_state state = RAID_STATE_UNKNOWN;
2317 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2318 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2321 handle = raid_device->handle;
2322 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2327 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2328 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2329 sizeof(Mpi2RaidVolPage0_t))) {
2330 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2331 __FILE__, __LINE__, __func__);
/* Resync-in-progress takes priority over the steady-state mapping. */
2335 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2336 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2337 state = RAID_STATE_RESYNCING;
/* Map the firmware volume state onto the generic raid_state enum.
 * VolumeState is a single byte, hence no endian conversion. */
2341 switch (vol_pg0.VolumeState) {
2342 case MPI2_RAID_VOL_STATE_OPTIMAL:
2343 case MPI2_RAID_VOL_STATE_ONLINE:
2344 state = RAID_STATE_ACTIVE;
2346 case MPI2_RAID_VOL_STATE_DEGRADED:
2347 state = RAID_STATE_DEGRADED;
2349 case MPI2_RAID_VOL_STATE_FAILED:
2350 case MPI2_RAID_VOL_STATE_MISSING:
2351 state = RAID_STATE_OFFLINE;
2355 switch (ioc->hba_mpi_version_belonged) {
2357 raid_set_state(mpt2sas_raid_template, dev, state);
2361 raid_set_state(mpt3sas_raid_template, dev, state);
2367 * _scsih_set_level - set raid level
/* @ioc: per adapter object (param line elided in this excerpt) */
2369 * @sdev: scsi device struct
2370 * @volume_type: volume type
2373 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2374 struct scsi_device *sdev, u8 volume_type)
2376 enum raid_level level = RAID_LEVEL_UNKNOWN;
/* Translate the MPI volume type into the generic raid_level enum;
 * anything unlisted stays RAID_LEVEL_UNKNOWN. */
2378 switch (volume_type) {
2379 case MPI2_RAID_VOL_TYPE_RAID0:
2380 level = RAID_LEVEL_0;
2382 case MPI2_RAID_VOL_TYPE_RAID10:
2383 level = RAID_LEVEL_10;
2385 case MPI2_RAID_VOL_TYPE_RAID1E:
2386 level = RAID_LEVEL_1E;
2388 case MPI2_RAID_VOL_TYPE_RAID1:
2389 level = RAID_LEVEL_1;
/* Report via the raid template matching the HBA generation. */
2393 switch (ioc->hba_mpi_version_belonged) {
2395 raid_set_level(mpt2sas_raid_template,
2396 &sdev->sdev_gendev, level);
2400 raid_set_level(mpt3sas_raid_template,
2401 &sdev->sdev_gendev, level);
2408 * _scsih_get_volume_capabilities - volume capabilities
2409 * @ioc: per adapter object
2410 * @raid_device: the raid_device object
2412 * Return: 0 for success, else 1
2415 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2416 struct _raid_device *raid_device)
2418 Mpi2RaidVolPage0_t *vol_pg0;
2419 Mpi2RaidPhysDiskPage0_t pd_pg0;
2420 Mpi2SasDevicePage0_t sas_device_pg0;
2421 Mpi2ConfigReply_t mpi_reply;
/* Number of physical disks determines the variable size of vol_pg0
 * (flexible PhysDisk[] array). */
2425 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2426 &num_pds)) || !num_pds) {
2428 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2429 __FILE__, __LINE__, __func__));
2433 raid_device->num_pds = num_pds;
2434 sz = struct_size(vol_pg0, PhysDisk, num_pds);
2435 vol_pg0 = kzalloc(sz, GFP_KERNEL);
2438 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2439 __FILE__, __LINE__, __func__));
2443 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2444 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2446 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2447 __FILE__, __LINE__, __func__));
/* NOTE(review): kfree(vol_pg0) on the error/success exit paths is
 * elided from this excerpt — confirm no leak against full source. */
2452 raid_device->volume_type = vol_pg0->VolumeType;
2454 /* figure out what the underlying devices are by
2455 * obtaining the device_info bits for the 1st device
2457 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2458 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2459 vol_pg0->PhysDisk[0].PhysDiskNum))) {
2460 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2461 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2462 le16_to_cpu(pd_pg0.DevHandle)))) {
2463 raid_device->device_info =
2464 le32_to_cpu(sas_device_pg0.DeviceInfo);
2473 * _scsih_enable_tlr - setting TLR flags
2474 * @ioc: per adapter object
2475 * @sdev: scsi device struct
2477 * Enabling Transaction Layer Retries for tape devices when
2478 * vpd page 0x90 is present
/* Only tape devices, and only when the IOC advertises TLR capability;
 * both guards return early (return lines elided in this excerpt). */
2482 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2486 if (sdev->type != TYPE_TAPE)
2489 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2492 sas_enable_tlr(sdev);
/* Report the effective state: sas_enable_tlr() may decline (no VPD 0x90). */
2493 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2494 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2500 * scsih_device_configure - device configure routine.
2501 * @sdev: scsi device struct
2502 * @lim: queue limits
2504 * Return: 0 if ok. Any other return is assumed to be an error and
2505 * the device is ignored.
/*
 * Per-device setup invoked by the SCSI midlayer after slave allocation.
 * Three mutually exclusive paths, selected by target flags:
 *   - RAID volume:   query capabilities, pick queue depth by volume type,
 *                    cap max sectors, publish raid transport level;
 *   - NVMe (PCIe):   use NVMe queue depth, apply MDTS limit and the page
 *                    virt-boundary, disable request merging;
 *   - bare SAS/SATA: queue depth by port width (SSP) or SATA limit,
 *                    record parent volume handle/wwid for RAID components.
 * NOTE(review): many lines (error returns, else branches, `goto`s, the
 * dewtprintk() wrappers that own the trailing `));`) are elided in this
 * excerpt; the visible lines are preserved unmodified.
 */
2508 scsih_device_configure(struct scsi_device *sdev, struct queue_limits *lim)
2510 struct Scsi_Host *shost = sdev->host;
2511 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2512 struct MPT3SAS_DEVICE *sas_device_priv_data;
2513 struct MPT3SAS_TARGET *sas_target_priv_data;
2514 struct _sas_device *sas_device;
2515 struct _pcie_device *pcie_device;
2516 struct _raid_device *raid_device;
2517 unsigned long flags;
2522 u16 handle, volume_handle = 0;
2523 u64 volume_wwid = 0;
2526 sas_device_priv_data = sdev->hostdata;
2527 sas_device_priv_data->configured_lun = 1;
2528 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2529 sas_target_priv_data = sas_device_priv_data->sas_target;
2530 handle = sas_target_priv_data->handle;
2532 /* raid volume handling */
2533 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
/* Lookup only under the lock; raid_device is used after release —
 * presumably safe because volumes are not freed while configured
 * (TODO confirm against removal path, not visible here). */
2535 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2536 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2537 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2540 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2541 __FILE__, __LINE__, __func__));
2545 if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2547 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2548 __FILE__, __LINE__, __func__));
2553 * WARPDRIVE: Initialize the required data for Direct IO
2555 mpt3sas_init_warpdrive_properties(ioc, raid_device);
2557 /* RAID Queue Depth Support
2558 * IS volume = underlying qdepth of drive type, either
2559 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2560 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2562 if (raid_device->device_info &
2563 MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2564 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2567 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2568 if (raid_device->device_info &
2569 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
/* r_level / ds description strings are set per volume type in elided
 * lines; only the qdepth assignments remain visible here. */
2575 switch (raid_device->volume_type) {
2576 case MPI2_RAID_VOL_TYPE_RAID0:
2579 case MPI2_RAID_VOL_TYPE_RAID1E:
2580 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
/* OEM flag: display RAID1E volumes with an even disk count as RAID10. */
2581 if (ioc->manu_pg10.OEMIdentifier &&
2582 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2583 MFG10_GF0_R10_DISPLAY) &&
2584 !(raid_device->num_pds % 2))
2589 case MPI2_RAID_VOL_TYPE_RAID1:
2590 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2593 case MPI2_RAID_VOL_TYPE_RAID10:
2594 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2597 case MPI2_RAID_VOL_TYPE_UNKNOWN:
2599 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2604 if (!ioc->hide_ir_msg)
2605 sdev_printk(KERN_INFO, sdev,
2606 "%s: handle(0x%04x), wwid(0x%016llx),"
2607 " pd_count(%d), type(%s)\n",
2608 r_level, raid_device->handle,
2609 (unsigned long long)raid_device->wwid,
2610 raid_device->num_pds, ds);
/* Volumes are capped to MPT3SAS_RAID_MAX_SECTORS per transfer. */
2612 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2613 lim->max_hw_sectors = MPT3SAS_RAID_MAX_SECTORS;
2614 sdev_printk(KERN_INFO, sdev,
2615 "Set queue's max_sector to: %u\n",
2616 MPT3SAS_RAID_MAX_SECTORS);
2619 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2621 /* raid transport support */
2622 if (!ioc->is_warpdrive)
2623 _scsih_set_level(ioc, sdev, raid_device->volume_type);
2627 /* non-raid handling */
2628 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
/* Hidden RAID component: resolve the owning volume's handle and wwid
 * so they can be stored on the sas_device below. */
2629 if (mpt3sas_config_get_volume_handle(ioc, handle,
2632 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2633 __FILE__, __LINE__, __func__));
2636 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2637 volume_handle, &volume_wwid)) {
2639 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2640 __FILE__, __LINE__, __func__));
/* PCIe/NVMe path. */
2646 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2647 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2648 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2649 sas_device_priv_data->sas_target->sas_address);
2651 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2653 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2654 __FILE__, __LINE__, __func__));
2658 qdepth = ioc->max_nvme_qd;
2660 sdev_printk(KERN_INFO, sdev,
2661 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2662 ds, handle, (unsigned long long)pcie_device->wwid,
2663 pcie_device->port_num);
2664 if (pcie_device->enclosure_handle != 0)
2665 sdev_printk(KERN_INFO, sdev,
2666 "%s: enclosure logical id(0x%016llx), slot(%d)\n",
2668 (unsigned long long)pcie_device->enclosure_logical_id,
2670 if (pcie_device->connector_name[0] != '\0')
2671 sdev_printk(KERN_INFO, sdev,
2672 "%s: enclosure level(0x%04x),"
2673 "connector name( %s)\n", ds,
2674 pcie_device->enclosure_level,
2675 pcie_device->connector_name);
/* MDTS is in bytes; convert to 512-byte sectors for the block layer. */
2677 if (pcie_device->nvme_mdts)
2678 lim->max_hw_sectors = pcie_device->nvme_mdts / 512;
2680 pcie_device_put(pcie_device);
2681 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2682 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2683 /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
2684 ** merged and can eliminate holes created during merging
2687 blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2688 sdev->request_queue);
/* NVMe PRP constraint: segments must not cross a page boundary. */
2689 lim->virt_boundary_mask = ioc->page_size - 1;
/* Bare SAS/SATA path. */
2693 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2694 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2695 sas_device_priv_data->sas_target->sas_address,
2696 sas_device_priv_data->sas_target->port);
2698 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2700 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2701 __FILE__, __LINE__, __func__));
/* Stash the owning volume identity computed above (zero if none). */
2705 sas_device->volume_handle = volume_handle;
2706 sas_device->volume_wwid = volume_wwid;
2707 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
/* Wide ports (port_type > 1) get the deeper queue. */
2708 qdepth = (sas_device->port_type > 1) ?
2709 ioc->max_wideport_qd : ioc->max_narrowport_qd;
2711 if (sas_device->device_info &
2712 MPI2_SAS_DEVICE_INFO_SEP) {
/* Enclosure services device: keep it around across brief drops. */
2713 sdev_printk(KERN_WARNING, sdev,
2714 "set ignore_delay_remove for handle(0x%04x)\n",
2715 sas_device_priv_data->sas_target->handle);
2716 sas_device_priv_data->ignore_delay_remove = 1;
2721 qdepth = ioc->max_sata_qd;
2722 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2724 else if (sas_device->device_info &
2725 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2729 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2730 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2731 ds, handle, (unsigned long long)sas_device->sas_address,
2732 sas_device->phy, (unsigned long long)sas_device->device_name);
2734 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2736 sas_device_put(sas_device);
2737 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2740 _scsih_display_sata_capabilities(ioc, handle, sdev);
2743 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2746 sas_read_port_mode_page(sdev);
2747 _scsih_enable_tlr(ioc, sdev);
2754 * scsih_bios_param - fetch head, sector, cylinder info for a disk
2755 * @sdev: scsi device struct
2756 * @bdev: pointer to block device context
2757 * @capacity: device size (in 512 byte sectors)
2758 * @params: three element array to place output:
2759 * params[0] number of heads (max 255)
2760 * params[1] number of sectors (max 63)
2761 * params[2] number of cylinders
/* Classic BIOS CHS translation: derive a fake geometry from capacity.
 * NOTE(review): the initial heads/sectors assignments are elided here;
 * the >= 1 GB branch presumably switches to 255 heads / 63 sectors
 * (standard extended translation) — confirm against the full source. */
2764 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2765 sector_t capacity, int params[])
2775 dummy = heads * sectors;
2776 cylinders = capacity;
/* sector_div() is used because capacity is a 64-bit sector_t. */
2777 sector_div(cylinders, dummy);
2780 * Handle extended translation size for logical drives
/* Recompute the geometry with the extended parameters for big drives. */
2783 if ((ulong)capacity >= 0x200000) {
2786 dummy = heads * sectors;
2787 cylinders = capacity;
2788 sector_div(cylinders, dummy);
2793 params[1] = sectors;
2794 params[2] = cylinders;
2800 * _scsih_response_code - translation of device response code
2801 * @ioc: per adapter object
2802 * @response_code: response code returned by the device
/* Decode the task-management response code into a human-readable string
 * and log it; purely diagnostic, no side effects beyond the warn print. */
2805 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2809 switch (response_code) {
2810 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2811 desc = "task management request completed";
2813 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2814 desc = "invalid frame";
2816 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2817 desc = "task management request not supported";
2819 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2820 desc = "task management request failed";
2822 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2823 desc = "task management request succeeded";
2825 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2826 desc = "invalid lun";
/* The case label for the overlapped-tag code is elided in this excerpt. */
2829 desc = "overlapped tag attempted";
2831 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2832 desc = "task queued, however not sent to target";
2838 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2842 * _scsih_tm_done - tm completion routine
2843 * @ioc: per adapter object
2844 * @smid: system request message index
2845 * @msix_index: MSIX table index supplied by the OS
2846 * @reply: reply message frame(lower 32bit addr)
2849 * The callback handler when using scsih_issue_tm.
2851 * Return: 1 meaning mf should be freed from _base_interrupt
2852 * 0 means the mf is freed from this function.
/* Interrupt-context completion for the single outstanding TM command:
 * copies the reply into ioc->tm_cmds.reply, flips the status bits and
 * wakes the waiter in mpt3sas_scsih_issue_tm(). Early-return guards
 * (elided return lines) drop stale or mismatched completions. */
2855 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2857 MPI2DefaultReply_t *mpi_reply;
/* Ignore a completion when no TM is outstanding. */
2859 if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
/* Ignore a completion for a different smid than the one we issued. */
2861 if (ioc->tm_cmds.smid != smid)
2863 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2864 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
/* MsgLength is in 32-bit words, hence the *4 for the byte count. */
2866 memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2867 ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2869 ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2870 complete(&ioc->tm_cmds.done);
2875 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2876 * @ioc: per adapter object
2877 * @handle: device handle
2879 * During taskmangement request, we need to freeze the device queue.
/* Walk every sdev on the host and mark the target owning @handle as
 * tm_busy so the queuecommand path holds off new I/O; loginfo messages
 * are suppressed globally for the duration of the TM. */
2882 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2884 struct MPT3SAS_DEVICE *sas_device_priv_data;
2885 struct scsi_device *sdev;
2888 shost_for_each_device(sdev, ioc->shost) {
2891 sas_device_priv_data = sdev->hostdata;
/* Skip devices whose private data has not been set up yet. */
2892 if (!sas_device_priv_data)
2894 if (sas_device_priv_data->sas_target->handle == handle) {
2895 sas_device_priv_data->sas_target->tm_busy = 1;
2897 ioc->ignore_loginfos = 1;
2903 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2904 * @ioc: per adapter object
2905 * @handle: device handle
2907 * During taskmangement request, we need to freeze the device queue.
/* Inverse of mpt3sas_scsih_set_tm_flag(): unfreeze the target owning
 * @handle and re-enable loginfo messages once the TM has finished. */
2910 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2912 struct MPT3SAS_DEVICE *sas_device_priv_data;
2913 struct scsi_device *sdev;
2916 shost_for_each_device(sdev, ioc->shost) {
2919 sas_device_priv_data = sdev->hostdata;
/* Skip devices whose private data has not been set up yet. */
2920 if (!sas_device_priv_data)
2922 if (sas_device_priv_data->sas_target->handle == handle) {
2923 sas_device_priv_data->sas_target->tm_busy = 0;
2925 ioc->ignore_loginfos = 0;
2931 * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2932 * @ioc: per adapter object
2933 * @channel: the channel assigned by the OS
2934 * @id: the id assigned by the OS
2936 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2937 * @smid_task: smid assigned to the task
2939 * Look whether TM has aborted the timed out SCSI command, if
2940 * TM has aborted the IO then return SUCCESS else return FAILED.
/* Three classes of smid:
 *  - regular SCSI I/O (smid <= can_queue): SUCCESS iff no outstanding
 *    command remains on the target (target reset) or lun (lun reset);
 *  - internal scsih/ctl commands: SUCCESS iff the command is already
 *    complete or no longer in use.
 * Elided lines carry the `rc = SUCCESS/FAILED` assignments and breaks. */
2943 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2944 uint id, uint lun, u8 type, u16 smid_task)
2947 if (smid_task <= ioc->shost->can_queue) {
2949 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2950 if (!(_scsih_scsi_lookup_find_by_target(ioc,
2954 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2955 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2956 if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2963 } else if (smid_task == ioc->scsih_cmds.smid) {
2964 if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2965 (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2967 } else if (smid_task == ioc->ctl_cmds.smid) {
2968 if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2969 (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2977 * scsih_tm_post_processing - post processing of target & LUN reset
2978 * @ioc: per adapter object
2979 * @handle: device handle
2980 * @channel: the channel assigned by the OS
2981 * @id: the id assigned by the OS
2983 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2984 * @smid_task: smid assigned to the task
2986 * Post processing of target & LUN reset. Due to interrupt latency
2987 * issue it possible that interrupt for aborted IO might not be
2988 * received yet. So before returning failure status, poll the
2989 * reply descriptor pools for the reply of timed out SCSI command.
2990 * Return FAILED status if reply for timed out is not received
2991 * otherwise return SUCCESS.
2994 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2995 uint channel, uint id, uint lun, u8 type, u16 smid_task)
/* First check: the aborted I/O may already have completed normally
 * (the early SUCCESS return after this call is elided here). */
2999 rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3004 "Poll ReplyDescriptor queues for completion of"
3005 " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
3006 smid_task, type, handle);
3009 * Due to interrupt latency issues, driver may receive interrupt for
3010 * TM first and then for aborted SCSI IO command. So, poll all the
3011 * ReplyDescriptor pools before returning the FAILED status to SML.
/* Mask interrupts so the manual reply-queue poll does not race with
 * the regular interrupt handlers, then re-check the lookup tables. */
3013 mpt3sas_base_mask_interrupts(ioc);
3014 mpt3sas_base_sync_reply_irqs(ioc, 1);
3015 mpt3sas_base_unmask_interrupts(ioc);
3017 return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3021 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
3022 * @ioc: per adapter struct
3023 * @handle: device handle
3024 * @channel: the channel assigned by the OS
3025 * @id: the id assigned by the OS
3027 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
3028 * @smid_task: smid assigned to the task
3029 * @msix_task: MSIX table index supplied by the OS
3030 * @timeout: timeout in seconds
3031 * @tr_method: Target Reset Method
3034 * A generic API for sending task management requests to firmware.
3036 * The callback index is set inside `ioc->tm_cb_idx`.
3037 * The caller is responsible to check for outstanding commands.
3039 * Return: SUCCESS or FAILED.
/* Single-threaded via ioc->tm_cmds.mutex (asserted below). Overall flow:
 * sanity/IOC-state checks (fault or coredump escalate straight to a hard
 * reset), build the TM frame, freeze the target queue, fire on the
 * hi-priority queue, wait for _scsih_tm_done(), then per-type post
 * processing. Elided in this excerpt: several return/goto lines, the
 * `goto out` cleanup jumps, and the dewtprintk wrappers owning the
 * trailing `));` tokens. */
3042 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
3043 uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
3044 u8 timeout, u8 tr_method)
3046 Mpi2SCSITaskManagementRequest_t *mpi_request;
3047 Mpi2SCSITaskManagementReply_t *mpi_reply;
3048 Mpi25SCSIIORequest_t *request;
/* Callers must already hold the TM mutex (see issue_locked_tm wrapper). */
3054 lockdep_assert_held(&ioc->tm_cmds.mutex);
3056 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
3057 ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
3061 if (ioc->shost_recovery || ioc->remove_host ||
3062 ioc->pci_error_recovery) {
3063 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
3067 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
/* Doorbell in use, IOC fault or coredump: a TM cannot help — escalate
 * to a full controller reset and report its outcome instead. */
3068 if (ioc_state & MPI2_DOORBELL_USED) {
3069 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
3070 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3071 return (!rc) ? SUCCESS : FAILED;
3074 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3075 mpt3sas_print_fault_code(ioc, ioc_state &
3076 MPI2_DOORBELL_DATA_MASK);
3077 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3078 return (!rc) ? SUCCESS : FAILED;
3079 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3080 MPI2_IOC_STATE_COREDUMP) {
3081 mpt3sas_print_coredump_info(ioc, ioc_state &
3082 MPI2_DOORBELL_DATA_MASK);
3083 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3084 return (!rc) ? SUCCESS : FAILED;
/* TM requests travel on the reserved hi-priority smid pool. */
3087 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
3089 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
3094 ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
3095 handle, type, smid_task, timeout, tr_method));
3096 ioc->tm_cmds.status = MPT3_CMD_PENDING;
3097 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3098 ioc->tm_cmds.smid = smid;
3099 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3100 memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
3101 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3102 mpi_request->DevHandle = cpu_to_le16(handle);
3103 mpi_request->TaskType = type;
/* tr_method is only meaningful for abort/query task types. */
3104 if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
3105 type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
3106 mpi_request->MsgFlags = tr_method;
3107 mpi_request->TaskMID = cpu_to_le16(smid_task);
3108 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
/* Freeze I/O to the target for the duration of the TM. */
3109 mpt3sas_scsih_set_tm_flag(ioc, handle);
3110 init_completion(&ioc->tm_cmds.done);
3111 ioc->put_smid_hi_priority(ioc, smid, msix_task);
3112 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
3113 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
/* TM itself timed out: decide whether a hard reset is warranted. */
3114 mpt3sas_check_cmd_timeout(ioc,
3115 ioc->tm_cmds.status, mpi_request,
3116 sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
3118 rc = mpt3sas_base_hard_reset_handler(ioc,
3120 rc = (!rc) ? SUCCESS : FAILED;
3125 /* sync IRQs in case those were busy during flush. */
3126 mpt3sas_base_sync_reply_irqs(ioc, 0);
3128 if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
3129 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3130 mpi_reply = ioc->tm_cmds.reply;
3132 ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
3133 le16_to_cpu(mpi_reply->IOCStatus),
3134 le32_to_cpu(mpi_reply->IOCLogInfo),
3135 le32_to_cpu(mpi_reply->TerminationCount)));
3136 if (ioc->logging_level & MPT_DEBUG_TM) {
3137 _scsih_response_code(ioc, mpi_reply->ResponseCode);
3138 if (mpi_reply->IOCStatus)
3139 _debug_dump_mf(mpi_request,
3140 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
/* Per-task-type verification that the TM actually took effect. */
3145 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
3148 * If DevHandle filed in smid_task's entry of request pool
3149 * doesn't match with device handle on which this task abort
3150 * TM is received then it means that TM has successfully
3151 * aborted the timed out command. Since smid_task's entry in
3152 * request pool will be memset to zero once the timed out
3153 * command is returned to the SML. If the command is not
3154 * aborted then smid_task’s entry won’t be cleared and it
3155 * will have same DevHandle value on which this task abort TM
3156 * is received and driver will return the TM status as FAILED.
3158 request = mpt3sas_base_get_msg_frame(ioc, smid_task);
3159 if (le16_to_cpu(request->DevHandle) != handle)
3162 ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
3163 "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
3164 handle, timeout, tr_method, smid_task, msix_task);
3168 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3169 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3170 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
3171 rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
3174 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
/* Common exit: unfreeze the target and release the TM slot. */
3183 mpt3sas_scsih_clear_tm_flag(ioc, handle);
3184 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
/*
 * mpt3sas_scsih_issue_locked_tm - issue a TM request under tm_cmds.mutex.
 *
 * Thin wrapper that serializes callers of mpt3sas_scsih_issue_tm(), which
 * asserts the mutex is held. Parameters and return value (SUCCESS/FAILED)
 * are those of mpt3sas_scsih_issue_tm().
 */
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
3189 uint channel, uint id, u64 lun, u8 type, u16 smid_task,
3190 u16 msix_task, u8 timeout, u8 tr_method)
3194 mutex_lock(&ioc->tm_cmds.mutex);
3195 ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
3196 smid_task, msix_task, timeout, tr_method);
3197 mutex_unlock(&ioc->tm_cmds.mutex);
3203 * _scsih_tm_display_info - displays info about the device
3204 * @ioc: per adapter struct
3205 * @scmd: pointer to scsi command object
3207 * Called by task management callback handlers.
/* Diagnostic-only helper: prints the command and identifying details of
 * the device it was issued to, branching on whether the target is a RAID
 * volume, an NVMe/PCIe device, or a bare SAS device. No state changes. */
3210 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
3212 struct scsi_target *starget = scmd->device->sdev_target;
3213 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
3214 struct _sas_device *sas_device = NULL;
3215 struct _pcie_device *pcie_device = NULL;
3216 unsigned long flags;
3217 char *device_str = NULL;
/* WarpDrive controllers label their volumes differently in the log. */
3221 if (ioc->hide_ir_msg)
3222 device_str = "WarpDrive";
3224 device_str = "volume";
3226 scsi_print_command(scmd);
3227 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3228 starget_printk(KERN_INFO, starget,
3229 "%s handle(0x%04x), %s wwid(0x%016llx)\n",
3230 device_str, priv_target->handle,
3231 device_str, (unsigned long long)priv_target->sas_address);
3233 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
3234 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3235 pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
3237 starget_printk(KERN_INFO, starget,
3238 "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
3239 pcie_device->handle,
3240 (unsigned long long)pcie_device->wwid,
3241 pcie_device->port_num);
3242 if (pcie_device->enclosure_handle != 0)
3243 starget_printk(KERN_INFO, starget,
3244 "enclosure logical id(0x%016llx), slot(%d)\n",
3245 (unsigned long long)
3246 pcie_device->enclosure_logical_id,
3248 if (pcie_device->connector_name[0] != '\0')
3249 starget_printk(KERN_INFO, starget,
3250 "enclosure level(0x%04x), connector name( %s)\n",
3251 pcie_device->enclosure_level,
3252 pcie_device->connector_name);
/* Drop the reference taken by __mpt3sas_get_pdev_from_target(). */
3253 pcie_device_put(pcie_device);
3255 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3258 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3259 sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
/* Hidden RAID components are reported via their parent volume. */
3261 if (priv_target->flags &
3262 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3263 starget_printk(KERN_INFO, starget,
3264 "volume handle(0x%04x), "
3265 "volume wwid(0x%016llx)\n",
3266 sas_device->volume_handle,
3267 (unsigned long long)sas_device->volume_wwid);
3269 starget_printk(KERN_INFO, starget,
3270 "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
3272 (unsigned long long)sas_device->sas_address,
3275 _scsih_display_enclosure_chassis_info(NULL, sas_device,
3278 sas_device_put(sas_device);
3280 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3285 * scsih_abort - eh threads main abort routine
3286 * @scmd: pointer to scsi command object
3288 * Return: SUCCESS if command aborted else FAILED
/* SCSI EH abort handler. Fast-successes when the device is gone or the
 * command already completed; refuses (via the elided FAILED path) for
 * RAID volumes and hidden components where task abort is unsupported;
 * otherwise issues an ABORT_TASK TM for the command's smid. */
3291 scsih_abort(struct scsi_cmnd *scmd)
3293 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3294 struct MPT3SAS_DEVICE *sas_device_priv_data;
3295 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
3300 struct _pcie_device *pcie_device = NULL;
3301 sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
3302 "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
3303 scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
3304 (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
3305 _scsih_tm_display_info(ioc, scmd);
3307 sas_device_priv_data = scmd->device->hostdata;
/* Device already torn down: complete the command with NO_CONNECT. */
3308 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3310 sdev_printk(KERN_INFO, scmd->device,
3311 "device been deleted! scmd(0x%p)\n", scmd);
3312 scmd->result = DID_NO_CONNECT << 16;
3318 /* check for completed command */
3319 if (st == NULL || st->cb_idx == 0xFF) {
3320 sdev_printk(KERN_INFO, scmd->device, "No reference found at "
3321 "driver, assuming scmd(0x%p) might have completed\n", scmd);
3322 scmd->result = DID_RESET << 16;
3327 /* for hidden raid components and volumes this is not supported */
3328 if (sas_device_priv_data->sas_target->flags &
3329 MPT_TARGET_FLAGS_RAID_COMPONENT ||
3330 sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3331 scmd->result = DID_RESET << 16;
/* Optional debug hook: halt FW on EH entry when configured. */
3336 mpt3sas_halt_firmware(ioc);
3338 handle = sas_device_priv_data->sas_target->handle;
/* NVMe devices without custom TM handling use the NVMe abort timeout. */
3339 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3340 if (pcie_device && (!ioc->tm_custom_handling) &&
3341 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
3342 timeout = ioc->nvme_abort_timeout;
3343 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3344 scmd->device->id, scmd->device->lun,
3345 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3346 st->smid, st->msix_io, timeout, 0);
3347 /* Command must be cleared after abort */
3348 if (r == SUCCESS && st->cb_idx != 0xFF)
3351 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
3352 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
/* Drop the reference taken by mpt3sas_get_pdev_by_handle(). */
3354 pcie_device_put(pcie_device);
3359 * scsih_dev_reset - eh threads main device reset routine
3360 * @scmd: pointer to scsi command object
3362 * Return: SUCCESS if command aborted else FAILED
/* SCSI EH LUN-reset handler. Resolves the TM target handle (via the
 * parent volume for hidden RAID components), picks the NVMe protocol-
 * level reset method/timeout where applicable, then issues a
 * LOGICAL_UNIT_RESET TM. SUCCESS is downgraded (elided line) when busy
 * commands remain on the device afterwards. */
3365 scsih_dev_reset(struct scsi_cmnd *scmd)
3367 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3368 struct MPT3SAS_DEVICE *sas_device_priv_data;
3369 struct _sas_device *sas_device = NULL;
3370 struct _pcie_device *pcie_device = NULL;
3376 struct scsi_target *starget = scmd->device->sdev_target;
3377 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3379 sdev_printk(KERN_INFO, scmd->device,
3380 "attempting device reset! scmd(0x%p)\n", scmd);
3381 _scsih_tm_display_info(ioc, scmd);
3383 sas_device_priv_data = scmd->device->hostdata;
/* Device already torn down: complete with NO_CONNECT. */
3384 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3386 sdev_printk(KERN_INFO, scmd->device,
3387 "device been deleted! scmd(0x%p)\n", scmd);
3388 scmd->result = DID_NO_CONNECT << 16;
3394 /* for hidden raid components obtain the volume_handle */
3396 if (sas_device_priv_data->sas_target->flags &
3397 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3398 sas_device = mpt3sas_get_sdev_from_target(ioc,
3401 handle = sas_device->volume_handle;
3403 handle = sas_device_priv_data->sas_target->handle;
/* No usable handle: report success as a reset-equivalent completion. */
3406 scmd->result = DID_RESET << 16;
3411 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
/* NVMe devices get the PCIe protocol-level reset and their own timeout. */
3413 if (pcie_device && (!ioc->tm_custom_handling) &&
3414 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3415 tr_timeout = pcie_device->reset_timeout;
3416 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3418 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3420 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3421 scmd->device->id, scmd->device->lun,
3422 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
3423 tr_timeout, tr_method);
3424 /* Check for busy commands after reset */
3425 if (r == SUCCESS && scsi_device_busy(scmd->device))
3428 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
3429 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
/* Drop references taken during handle resolution. */
3432 sas_device_put(sas_device);
3434 pcie_device_put(pcie_device);
3440 * scsih_target_reset - eh threads main target reset routine
3441 * @scmd: pointer to scsi command object
3443 * Return: SUCCESS if command aborted else FAILED
/* SCSI EH target-reset handler. Mirrors scsih_dev_reset() but issues a
 * TARGET_RESET TM with lun 0 and checks starget->target_busy (rather
 * than per-device busy) after a successful reset. */
3446 scsih_target_reset(struct scsi_cmnd *scmd)
3448 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3449 struct MPT3SAS_DEVICE *sas_device_priv_data;
3450 struct _sas_device *sas_device = NULL;
3451 struct _pcie_device *pcie_device = NULL;
3456 struct scsi_target *starget = scmd->device->sdev_target;
3457 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3459 starget_printk(KERN_INFO, starget,
3460 "attempting target reset! scmd(0x%p)\n", scmd);
3461 _scsih_tm_display_info(ioc, scmd);
3463 sas_device_priv_data = scmd->device->hostdata;
/* Target already torn down: complete with NO_CONNECT. */
3464 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3466 starget_printk(KERN_INFO, starget,
3467 "target been deleted! scmd(0x%p)\n", scmd);
3468 scmd->result = DID_NO_CONNECT << 16;
3474 /* for hidden raid components obtain the volume_handle */
3476 if (sas_device_priv_data->sas_target->flags &
3477 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3478 sas_device = mpt3sas_get_sdev_from_target(ioc,
3481 handle = sas_device->volume_handle;
3483 handle = sas_device_priv_data->sas_target->handle;
/* No usable handle: report success as a reset-equivalent completion. */
3486 scmd->result = DID_RESET << 16;
3491 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
/* NVMe devices get the PCIe protocol-level reset and their own timeout. */
3493 if (pcie_device && (!ioc->tm_custom_handling) &&
3494 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3495 tr_timeout = pcie_device->reset_timeout;
3496 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3498 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3499 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3500 scmd->device->id, 0,
3501 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3502 tr_timeout, tr_method);
3503 /* Check for busy commands after reset */
3504 if (r == SUCCESS && atomic_read(&starget->target_busy))
3507 starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
3508 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
/* Drop references taken during handle resolution. */
3511 sas_device_put(sas_device);
3513 pcie_device_put(pcie_device);
3519 * scsih_host_reset - eh threads main host reset routine
3520 * @scmd: pointer to scsi command object
3522 * Return: SUCCESS if command aborted else FAILED
/* SCSI EH host-reset handler: refuse while the driver is loading or
 * being removed, otherwise perform a full hard reset of the IOC. */
3525 scsih_host_reset(struct scsi_cmnd *scmd)
3527 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3530 ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3531 scsi_print_command(scmd);
3533 if (ioc->is_driver_loading || ioc->remove_host) {
3534 ioc_info(ioc, "Blocking the host reset\n");
3539 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3540 r = (retval < 0) ? FAILED : SUCCESS;
3542 ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3543 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3549 * _scsih_fw_event_add - insert and queue up fw_event
3550 * @ioc: per adapter object
3551 * @fw_event: object describing the event
3552 * Context: This function will acquire ioc->fw_event_lock.
3554 * This adds the firmware event object into link list, then queues it up to
3555 * be processed from user context.
3558 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3560 unsigned long flags;
/* Event thread not created (yet, or already torn down): drop the event. */
3562 if (ioc->firmware_event_thread == NULL)
3565 spin_lock_irqsave(&ioc->fw_event_lock, flags);
/* Two refcounts are taken: one owned by the list, one by the workqueue
 * item; each is released by the corresponding removal/completion path. */
3566 fw_event_work_get(fw_event);
3567 INIT_LIST_HEAD(&fw_event->list);
3568 list_add_tail(&fw_event->list, &ioc->fw_event_list);
3569 INIT_WORK(&fw_event->work, _firmware_event_work);
3570 fw_event_work_get(fw_event);
3571 queue_work(ioc->firmware_event_thread, &fw_event->work);
3572 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3576 * _scsih_fw_event_del_from_list - delete fw_event from the list
3577 * @ioc: per adapter object
3578 * @fw_event: object describing the event
3579 * Context: This function will acquire ioc->fw_event_lock.
3581 * If the fw_event is on the fw_event_list, remove it and do a put.
3584 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3587 unsigned long flags;
3589 spin_lock_irqsave(&ioc->fw_event_lock, flags);
/* list_empty() on the event's own node tells us whether it is still
 * linked; if so, unlink and drop the list's reference (the counterpart
 * of the first get in _scsih_fw_event_add()). */
3590 if (!list_empty(&fw_event->list)) {
3591 list_del_init(&fw_event->list);
3592 fw_event_work_put(fw_event);
3594 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3599 * mpt3sas_send_trigger_data_event - send event for processing trigger data
3600 * @ioc: per adapter object
3601 * @event_data: trigger event data
3604 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3605 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3607 struct fw_event_work *fw_event;
/* Trigger processing is not meaningful while the driver is still loading. */
3610 if (ioc->is_driver_loading)
3612 sz = sizeof(*event_data);
3613 fw_event = alloc_fw_event_work(sz);
3616 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3617 fw_event->ioc = ioc;
/* Copy the trigger payload into the event's trailing data area. */
3618 memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3619 _scsih_fw_event_add(ioc, fw_event);
/* _scsih_fw_event_add took its own references; drop the allocation ref. */
3620 fw_event_work_put(fw_event);
3624 * _scsih_error_recovery_delete_devices - remove devices not responding
3625 * @ioc: per adapter object
3628 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3630 struct fw_event_work *fw_event;
/* Queue an internal (driver-generated) event; no payload needed. */
3632 fw_event = alloc_fw_event_work(0)
3635 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3636 fw_event->ioc = ioc;
3637 _scsih_fw_event_add(ioc, fw_event);
/* Drop the allocation reference; the event queue holds its own. */
3638 fw_event_work_put(fw_event);
3642 * mpt3sas_port_enable_complete - port enable completed (fake event)
3643 * @ioc: per adapter object
3646 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3648 struct fw_event_work *fw_event;
/* Queue an internal (driver-generated) event; no payload needed. */
3650 fw_event = alloc_fw_event_work(0);
3653 fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3654 fw_event->ioc = ioc;
3655 _scsih_fw_event_add(ioc, fw_event);
/* Drop the allocation reference; the event queue holds its own. */
3656 fw_event_work_put(fw_event);
/*
 * dequeue_next_fw_event - pop the oldest event off ioc->fw_event_list.
 * @ioc: per adapter object
 *
 * Returns the first queued fw_event (or NULL if the list is empty),
 * removing it from the list under ioc->fw_event_lock and dropping the
 * reference that list membership held.
 */
3659 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3661 unsigned long flags;
3662 struct fw_event_work *fw_event = NULL;
3664 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3665 if (!list_empty(&ioc->fw_event_list)) {
3666 fw_event = list_first_entry(&ioc->fw_event_list,
3667 struct fw_event_work, list);
3668 list_del_init(&fw_event->list);
/* Drop the list's reference; the work item's reference keeps it alive. */
3669 fw_event_work_put(fw_event);
3671 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3677 * _scsih_fw_event_cleanup_queue - cleanup event queue
3678 * @ioc: per adapter object
3680 * Walk the firmware event queue, either killing timers, or waiting
3681 * for outstanding events to complete
3683 * Context: task, can sleep
3686 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3688 struct fw_event_work *fw_event;
/* Nothing to clean up if no events are queued or running, or no worker. */
3690 if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
3691 !ioc->firmware_event_thread)
3694 * Set current running event as ignore, so that
3695 * current running event will exit quickly.
3696 * As diag reset has occurred it is of no use
3697 * to process remaining stale event data entries.
3699 if (ioc->shost_recovery && ioc->current_event)
3700 ioc->current_event->ignore = 1;
3702 ioc->fw_events_cleanup = 1;
/* Drain all queued events first, then deal with the in-flight one. */
3703 while ((fw_event = dequeue_next_fw_event(ioc)) ||
3704 (fw_event = ioc->current_event)) {
3707 * Don't call cancel_work_sync() for current_event
3708 * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3709 * otherwise we may observe deadlock if current
3710 * hard reset issued as part of processing the current_event.
3712 * Orginal logic of cleaning the current_event is added
3713 * for handling the back to back host reset issued by the user.
3714 * i.e. during back to back host reset, driver use to process
3715 * the two instances of MPT3SAS_REMOVE_UNRESPONDING_DEVICES
3716 * event back to back and this made the drives to unregister
3717 * the devices from SML.
3720 if (fw_event == ioc->current_event &&
3721 ioc->current_event->event !=
3722 MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
3723 ioc->current_event = NULL;
3728 * Driver has to clear ioc->start_scan flag when
3729 * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
3730 * otherwise scsi_scan_host() API waits for the
3731 * 5 minute timer to expire. If we exit from
3732 * scsi_scan_host() early then we can issue the
3733 * new port enable request as part of current diag reset.
3735 if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
3736 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
3737 ioc->start_scan = 0;
3741 * Wait on the fw_event to complete. If this returns 1, then
3742 * the event was never executed, and we need a put for the
3743 * reference the work had on the fw_event.
3745 * If it did execute, we wait for it to finish, and the put will
3746 * happen from _firmware_event_work()
3748 if (cancel_work_sync(&fw_event->work))
3749 fw_event_work_put(fw_event);
3752 ioc->fw_events_cleanup = 0;
3756 * _scsih_internal_device_block - block the sdev device
3757 * @sdev: per device object
3758 * @sas_device_priv_data : per device driver private data
3760 * make sure device is blocked without error, if not
3764 _scsih_internal_device_block(struct scsi_device *sdev,
3765 struct MPT3SAS_DEVICE *sas_device_priv_data)
3769 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3770 sas_device_priv_data->sas_target->handle);
/* Record the blocked state before asking the midlayer to block the queue. */
3771 sas_device_priv_data->block = 1;
3773 r = scsi_internal_device_block_nowait(sdev);
/* Blocking failed; log it so the state mismatch is visible. */
3775 sdev_printk(KERN_WARNING, sdev,
3776 "device_block failed with return(%d) for handle(0x%04x)\n",
3777 r, sas_device_priv_data->sas_target->handle);
3781 * _scsih_internal_device_unblock - unblock the sdev device
3782 * @sdev: per device object
3783 * @sas_device_priv_data : per device driver private data
3784 * make sure device is unblocked without error, if not retry
3785 * by blocking and then unblocking
3789 _scsih_internal_device_unblock(struct scsi_device *sdev,
3790 struct MPT3SAS_DEVICE *sas_device_priv_data)
3794 sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3795 "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
/* Clear the driver-side flag before transitioning to SDEV_RUNNING. */
3796 sas_device_priv_data->block = 0;
3797 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3799 /* The device has been set to SDEV_RUNNING by SD layer during
3800 * device addition but the request queue is still stopped by
3801 * our earlier block call. We need to perform a block again
3802 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3804 sdev_printk(KERN_WARNING, sdev,
3805 "device_unblock failed with return(%d) for handle(0x%04x) "
3806 "performing a block followed by an unblock\n",
3807 r, sas_device_priv_data->sas_target->handle);
/* Retry: force a real SDEV_BLOCK transition first... */
3808 sas_device_priv_data->block = 1;
3809 r = scsi_internal_device_block_nowait(sdev);
3811 sdev_printk(KERN_WARNING, sdev, "retried device_block "
3812 "failed with return(%d) for handle(0x%04x)\n",
3813 r, sas_device_priv_data->sas_target->handle);
/* ...then unblock again to reach SDEV_RUNNING. */
3815 sas_device_priv_data->block = 0;
3816 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3818 sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3819 " failed with return(%d) for handle(0x%04x)\n",
3820 r, sas_device_priv_data->sas_target->handle);
3825 * _scsih_ublock_io_all_device - unblock every device
3826 * @ioc: per adapter object
3828 * change the device state from block to running
3831 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3833 struct MPT3SAS_DEVICE *sas_device_priv_data;
3834 struct scsi_device *sdev;
3836 shost_for_each_device(sdev, ioc->shost) {
3837 sas_device_priv_data = sdev->hostdata;
3838 if (!sas_device_priv_data)
/* Only devices this driver previously blocked need unblocking. */
3840 if (!sas_device_priv_data->block)
3843 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3844 "device_running, handle(0x%04x)\n",
3845 sas_device_priv_data->sas_target->handle));
3846 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
3852 * _scsih_ublock_io_device - prepare device to be deleted
3853 * @ioc: per adapter object
3854 * @sas_address: sas address
3855 * @port: hba port entry
3857 * unblock then put device in offline state
3860 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3861 u64 sas_address, struct hba_port *port)
3863 struct MPT3SAS_DEVICE *sas_device_priv_data;
3864 struct scsi_device *sdev;
/* Match devices by both SAS address and HBA port before unblocking. */
3866 shost_for_each_device(sdev, ioc->shost) {
3867 sas_device_priv_data = sdev->hostdata;
3868 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
3870 if (sas_device_priv_data->sas_target->sas_address
3873 if (sas_device_priv_data->sas_target->port != port)
3875 if (sas_device_priv_data->block)
3876 _scsih_internal_device_unblock(sdev,
3877 sas_device_priv_data);
3882 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3883 * @ioc: per adapter object
3885 * During device pull we need to appropriately set the sdev state.
3888 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3890 struct MPT3SAS_DEVICE *sas_device_priv_data;
3891 struct scsi_device *sdev;
3893 shost_for_each_device(sdev, ioc->shost) {
3894 sas_device_priv_data = sdev->hostdata;
3895 if (!sas_device_priv_data)
/* Already blocked: nothing to do for this device. */
3897 if (sas_device_priv_data->block)
/* Enclosure service devices opt out of blocking via this flag. */
3899 if (sas_device_priv_data->ignore_delay_remove) {
3900 sdev_printk(KERN_INFO, sdev,
3901 "%s skip device_block for SES handle(0x%04x)\n",
3902 __func__, sas_device_priv_data->sas_target->handle);
3905 _scsih_internal_device_block(sdev, sas_device_priv_data);
3910 * _scsih_block_io_device - set the device state to SDEV_BLOCK
3911 * @ioc: per adapter object
3912 * @handle: device handle
3914 * During device pull we need to appropriately set the sdev state.
3917 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3919 struct MPT3SAS_DEVICE *sas_device_priv_data;
3920 struct scsi_device *sdev;
3921 struct _sas_device *sas_device;
/* Takes a reference on sas_device; released via sas_device_put() below. */
3923 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3925 shost_for_each_device(sdev, ioc->shost) {
3926 sas_device_priv_data = sdev->hostdata;
3927 if (!sas_device_priv_data)
3929 if (sas_device_priv_data->sas_target->handle != handle)
3931 if (sas_device_priv_data->block)
/* NOTE(review): pend_sas_rphy_add devices are treated specially here;
 * the branch body is not visible in this view. */
3933 if (sas_device && sas_device->pend_sas_rphy_add)
3935 if (sas_device_priv_data->ignore_delay_remove) {
3936 sdev_printk(KERN_INFO, sdev,
3937 "%s skip device_block for SES handle(0x%04x)\n",
3938 __func__, sas_device_priv_data->sas_target->handle);
3941 _scsih_internal_device_block(sdev, sas_device_priv_data);
3945 sas_device_put(sas_device);
3949 * _scsih_block_io_to_children_attached_to_ex
3950 * @ioc: per adapter object
3951 * @sas_expander: the sas_device object
3953 * This routine set sdev state to SDEV_BLOCK for all devices
3954 * attached to this expander. This function called when expander is
3958 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3959 struct _sas_node *sas_expander)
3961 struct _sas_port *mpt3sas_port;
3962 struct _sas_device *sas_device;
3963 struct _sas_node *expander_sibling;
3964 unsigned long flags;
/* First pass: mark end devices hanging off this expander for blocking. */
3969 list_for_each_entry(mpt3sas_port,
3970 &sas_expander->sas_port_list, port_list) {
3971 if (mpt3sas_port->remote_identify.device_type ==
3973 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3974 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3975 mpt3sas_port->remote_identify.sas_address,
3976 mpt3sas_port->hba_port);
/* Record the handle; the actual blocking is driven off blocking_handles. */
3978 set_bit(sas_device->handle,
3979 ioc->blocking_handles);
3980 sas_device_put(sas_device);
3982 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* Second pass: recurse into child expanders (edge or fanout). */
3986 list_for_each_entry(mpt3sas_port,
3987 &sas_expander->sas_port_list, port_list) {
3989 if (mpt3sas_port->remote_identify.device_type ==
3990 SAS_EDGE_EXPANDER_DEVICE ||
3991 mpt3sas_port->remote_identify.device_type ==
3992 SAS_FANOUT_EXPANDER_DEVICE) {
3994 mpt3sas_scsih_expander_find_by_sas_address(
3995 ioc, mpt3sas_port->remote_identify.sas_address,
3996 mpt3sas_port->hba_port);
3997 _scsih_block_io_to_children_attached_to_ex(ioc,
4004 * _scsih_block_io_to_children_attached_directly
4005 * @ioc: per adapter object
4006 * @event_data: topology change event data
4008 * This routine set sdev state to SDEV_BLOCK for all devices
4009 * direct attached during device pull.
4012 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4013 Mpi2EventDataSasTopologyChangeList_t *event_data)
/* Block each PHY entry whose reason code says "delay, not responding". */
4019 for (i = 0; i < event_data->NumEntries; i++) {
4020 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4023 reason_code = event_data->PHY[i].PhyStatus &
4024 MPI2_EVENT_SAS_TOPO_RC_MASK;
4025 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
4026 _scsih_block_io_device(ioc, handle);
4031 * _scsih_block_io_to_pcie_children_attached_directly
4032 * @ioc: per adapter object
4033 * @event_data: topology change event data
4035 * This routine set sdev state to SDEV_BLOCK for all devices
4036 * direct attached during device pull/reconnect.
4039 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4040 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
/* PCIe analogue of the SAS version: block ports flagged delay-not-responding. */
4046 for (i = 0; i < event_data->NumEntries; i++) {
4048 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4051 reason_code = event_data->PortEntry[i].PortStatus;
4053 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4054 _scsih_block_io_device(ioc, handle);
4058 * _scsih_tm_tr_send - send task management request
4059 * @ioc: per adapter object
4060 * @handle: device handle
4061 * Context: interrupt time.
4063 * This code is to initiate the device removal handshake protocol
4064 * with controller firmware. This function will issue target reset
4065 * using high priority request queue. It will send a sas iounit
4066 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
4068 * This is designed to send muliple task management request at the same
4069 * time to the fifo. If the fifo is full, we will append the request,
4070 * and process it in a future completion.
4073 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4075 Mpi2SCSITaskManagementRequest_t *mpi_request;
4077 struct _sas_device *sas_device = NULL;
4078 struct _pcie_device *pcie_device = NULL;
4079 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
4080 u64 sas_address = 0;
4081 unsigned long flags;
4082 struct _tr_list *delayed_tr;
4085 struct hba_port *port = NULL;
/* Bail out early: no handshake possible during PCI error recovery. */
4087 if (ioc->pci_error_recovery) {
4089 ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
4093 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4094 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4096 ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
4101 /* if PD, then return */
4102 if (test_bit(handle, ioc->pd_handles))
4105 clear_bit(handle, ioc->pend_os_device_add);
/* Look up the target as a SAS device first... */
4107 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4108 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
4109 if (sas_device && sas_device->starget &&
4110 sas_device->starget->hostdata) {
4111 sas_target_priv_data = sas_device->starget->hostdata;
4112 sas_target_priv_data->deleted = 1;
4113 sas_address = sas_device->sas_address;
4114 port = sas_device->port;
4116 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* ...then as a PCIe (NVMe) device under its own lock. */
4118 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
4119 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
4120 if (pcie_device && pcie_device->starget &&
4121 pcie_device->starget->hostdata) {
4122 sas_target_priv_data = pcie_device->starget->hostdata;
4123 sas_target_priv_data->deleted = 1;
4124 sas_address = pcie_device->wwid;
4126 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* NVMe-class PCIe devices get a protocol-level reset instead of link reset. */
4127 if (pcie_device && (!ioc->tm_custom_handling) &&
4128 (!(mpt3sas_scsih_is_pcie_scsi_device(
4129 pcie_device->device_info))))
4131 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
4133 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
4135 if (sas_target_priv_data) {
4137 ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
4138 handle, (u64)sas_address));
4140 if (sas_device->enclosure_handle != 0)
4142 ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
4143 (u64)sas_device->enclosure_logical_id,
4145 if (sas_device->connector_name[0] != '\0')
4147 ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4148 sas_device->enclosure_level,
4149 sas_device->connector_name));
4150 } else if (pcie_device) {
4151 if (pcie_device->enclosure_handle != 0)
4153 ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
4154 (u64)pcie_device->enclosure_logical_id,
4155 pcie_device->slot));
4156 if (pcie_device->connector_name[0] != '\0')
4158 ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
4159 pcie_device->enclosure_level,
4160 pcie_device->connector_name));
/* Unblock any pending I/O so it can fail fast, then invalidate the handle. */
4162 _scsih_ublock_io_device(ioc, sas_address, port);
4163 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
4166 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
/* High-priority fifo full: queue the request for a later completion.
 * GFP_ATOMIC because this runs in interrupt context. */
4168 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4171 INIT_LIST_HEAD(&delayed_tr->list);
4172 delayed_tr->handle = handle;
4173 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4175 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4181 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4182 handle, smid, ioc->tm_tr_cb_idx));
4183 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4184 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4185 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4186 mpi_request->DevHandle = cpu_to_le16(handle);
4187 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4188 mpi_request->MsgFlags = tr_method;
4189 set_bit(handle, ioc->device_remove_in_progress);
4190 ioc->put_smid_hi_priority(ioc, smid, 0);
4191 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
/* Release the lookup references taken above. */
4195 sas_device_put(sas_device);
4197 pcie_device_put(pcie_device);
4201 * _scsih_tm_tr_complete -
4202 * @ioc: per adapter object
4203 * @smid: system request message index
4204 * @msix_index: MSIX table index supplied by the OS
4205 * @reply: reply message frame(lower 32bit addr)
4206 * Context: interrupt time.
4208 * This is the target reset completion routine.
4209 * This code is part of the code to initiate the device removal
4210 * handshake protocol with controller firmware.
4211 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
4213 * Return: 1 meaning mf should be freed from _base_interrupt
4214 * 0 means the mf is freed from this function.
4217 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4221 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4222 Mpi2SCSITaskManagementReply_t *mpi_reply =
4223 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4224 Mpi2SasIoUnitControlRequest_t *mpi_request;
4227 struct _sc_list *delayed_sc;
4229 if (ioc->pci_error_recovery) {
4231 ioc_info(ioc, "%s: host in pci error recovery\n",
4235 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4236 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4238 ioc_info(ioc, "%s: host is not operational\n",
4242 if (unlikely(!mpi_reply)) {
4243 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4244 __FILE__, __LINE__, __func__);
4247 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4248 handle = le16_to_cpu(mpi_request_tm->DevHandle);
/* Sanity check: the reply must be for the handle we reset. */
4249 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4251 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4253 le16_to_cpu(mpi_reply->DevHandle), smid));
4257 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
4259 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4260 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4261 le32_to_cpu(mpi_reply->IOCLogInfo),
4262 le32_to_cpu(mpi_reply->TerminationCount)));
/* Next handshake step needs its own smid; defer if none are free.
 * GFP_ATOMIC because this runs in interrupt context. */
4264 smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
4265 if (!smid_sas_ctrl) {
4266 delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
4268 return _scsih_check_for_pending_tm(ioc, smid);
4269 INIT_LIST_HEAD(&delayed_sc->list);
4270 delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
4271 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
4273 ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
4275 return _scsih_check_for_pending_tm(ioc, smid);
4279 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4280 handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
/* Issue the SAS IO unit control REMOVE_DEVICE to finish the handshake. */
4281 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
4282 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4283 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4284 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4285 mpi_request->DevHandle = mpi_request_tm->DevHandle;
4286 ioc->put_smid_default(ioc, smid_sas_ctrl);
4288 return _scsih_check_for_pending_tm(ioc, smid);
4291 /** _scsih_allow_scmd_to_device - check whether scmd needs to
4292 * issue to IOC or not.
4293 * @ioc: per adapter object
4294 * @scmd: pointer to scsi command object
4296 * Returns true if scmd can be issued to IOC otherwise returns false.
4298 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4299 struct scsi_cmnd *scmd)
/* Never issue commands while in PCI error recovery. */
4302 if (ioc->pci_error_recovery)
/* MPI2 (SAS2) generation: host removal blocks everything. */
4305 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4306 if (ioc->remove_host)
/* Later generations: during removal, select opcodes (e.g. SYNCHRONIZE_CACHE)
 * are presumably still permitted — remaining switch cases elided here. */
4312 if (ioc->remove_host) {
4314 switch (scmd->cmnd[0]) {
4315 case SYNCHRONIZE_CACHE:
4327 * _scsih_sas_control_complete - completion routine
4328 * @ioc: per adapter object
4329 * @smid: system request message index
4330 * @msix_index: MSIX table index supplied by the OS
4331 * @reply: reply message frame(lower 32bit addr)
4332 * Context: interrupt time.
4334 * This is the sas iounit control completion routine.
4335 * This code is part of the code to initiate the device removal
4336 * handshake protocol with controller firmware.
4338 * Return: 1 meaning mf should be freed from _base_interrupt
4339 * 0 means the mf is freed from this function.
4342 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4343 u8 msix_index, u32 reply)
4345 Mpi2SasIoUnitControlReply_t *mpi_reply =
4346 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4348 if (likely(mpi_reply)) {
4350 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4351 le16_to_cpu(mpi_reply->DevHandle), smid,
4352 le16_to_cpu(mpi_reply->IOCStatus),
4353 le32_to_cpu(mpi_reply->IOCLogInfo)));
/* On success the removal handshake is done for this handle. */
4354 if (le16_to_cpu(mpi_reply->IOCStatus) ==
4355 MPI2_IOCSTATUS_SUCCESS) {
4356 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4357 ioc->device_remove_in_progress);
4360 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4361 __FILE__, __LINE__, __func__);
/* Reuse (or free) the smid for any queued delayed internal message. */
4363 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4367 * _scsih_tm_tr_volume_send - send target reset request for volumes
4368 * @ioc: per adapter object
4369 * @handle: device handle
4370 * Context: interrupt time.
4372 * This is designed to send muliple task management request at the same
4373 * time to the fifo. If the fifo is full, we will append the request,
4374 * and process it in a future completion.
4377 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4379 Mpi2SCSITaskManagementRequest_t *mpi_request;
4381 struct _tr_list *delayed_tr;
4383 if (ioc->pci_error_recovery) {
4385 ioc_info(ioc, "%s: host reset in progress!\n",
4390 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
/* High-priority fifo full: defer onto the volume-specific delayed list.
 * GFP_ATOMIC because this runs in interrupt context. */
4392 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4395 INIT_LIST_HEAD(&delayed_tr->list);
4396 delayed_tr->handle = handle;
4397 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
4399 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4405 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4406 handle, smid, ioc->tm_tr_volume_cb_idx));
4407 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4408 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4409 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4410 mpi_request->DevHandle = cpu_to_le16(handle);
4411 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4412 ioc->put_smid_hi_priority(ioc, smid, 0);
4416 * _scsih_tm_volume_tr_complete - target reset completion
4417 * @ioc: per adapter object
4418 * @smid: system request message index
4419 * @msix_index: MSIX table index supplied by the OS
4420 * @reply: reply message frame(lower 32bit addr)
4421 * Context: interrupt time.
4423 * Return: 1 meaning mf should be freed from _base_interrupt
4424 * 0 means the mf is freed from this function.
4427 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4428 u8 msix_index, u32 reply)
4431 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4432 Mpi2SCSITaskManagementReply_t *mpi_reply =
4433 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4435 if (ioc->shost_recovery || ioc->pci_error_recovery) {
4437 ioc_info(ioc, "%s: host reset in progress!\n",
4441 if (unlikely(!mpi_reply)) {
4442 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4443 __FILE__, __LINE__, __func__);
4447 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4448 handle = le16_to_cpu(mpi_request_tm->DevHandle);
/* Sanity check: the reply must be for the handle we reset. */
4449 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4451 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4452 handle, le16_to_cpu(mpi_reply->DevHandle),
4458 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4459 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4460 le32_to_cpu(mpi_reply->IOCLogInfo),
4461 le32_to_cpu(mpi_reply->TerminationCount)));
/* Reuse (or free) the smid for any queued delayed target resets. */
4463 return _scsih_check_for_pending_tm(ioc, smid);
4467 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
4468 * @ioc: per adapter object
4469 * @smid: system request message index
4471 * @event_context: used to track events uniquely
4473 * Context - processed in interrupt context.
4476 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
4479 Mpi2EventAckRequest_t *ack_request;
/* Internal smids are contiguous; index into internal_lookup by offset. */
4480 int i = smid - ioc->internal_smid;
4481 unsigned long flags;
4483 /* Without releasing the smid just update the
4484 * call back index and reuse the same smid for
4485 * processing this delayed request
4487 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4488 ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4489 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4492 ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4493 le16_to_cpu(event), smid, ioc->base_cb_idx));
4494 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4495 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4496 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
/* event/event_context are already in wire (little-endian) byte order. */
4497 ack_request->Event = event;
4498 ack_request->EventContext = event_context;
4499 ack_request->VF_ID = 0; /* TODO */
4500 ack_request->VP_ID = 0;
4501 ioc->put_smid_default(ioc, smid);
4505 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4506 * sas_io_unit_ctrl messages
4507 * @ioc: per adapter object
4508 * @smid: system request message index
4509 * @handle: device handle
4511 * Context - processed in interrupt context.
4514 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4515 u16 smid, u16 handle)
4517 Mpi2SasIoUnitControlRequest_t *mpi_request;
/* Internal smids are contiguous; index into internal_lookup by offset. */
4519 int i = smid - ioc->internal_smid;
4520 unsigned long flags;
4522 if (ioc->remove_host) {
4524 ioc_info(ioc, "%s: host has been removed\n",
4527 } else if (ioc->pci_error_recovery) {
4529 ioc_info(ioc, "%s: host in pci error recovery\n",
4533 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4534 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4536 ioc_info(ioc, "%s: host is not operational\n",
4541 /* Without releasing the smid just update the
4542 * call back index and reuse the same smid for
4543 * processing this delayed request
4545 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4546 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4547 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4550 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4551 handle, smid, ioc->tm_sas_control_cb_idx));
4552 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4553 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4554 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4555 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4556 mpi_request->DevHandle = cpu_to_le16(handle);
4557 ioc->put_smid_default(ioc, smid);
4561 * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4562 * @ioc: per adapter object
4563 * @smid: system request message index
4565 * Context: Executed in interrupt context
4567 * This will check delayed internal messages list, and process the
4570 * Return: 1 meaning mf should be freed from _base_interrupt
4571 * 0 means the mf is freed from this function.
4574 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4576 struct _sc_list *delayed_sc;
4577 struct _event_ack_list *delayed_event_ack;
/* Delayed event ACKs take priority over delayed sas_io_unit controls. */
4579 if (!list_empty(&ioc->delayed_event_ack_list)) {
4580 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4581 struct _event_ack_list, list);
4582 _scsih_issue_delayed_event_ack(ioc, smid,
4583 delayed_event_ack->Event, delayed_event_ack->EventContext);
4584 list_del(&delayed_event_ack->list);
4585 kfree(delayed_event_ack);
4589 if (!list_empty(&ioc->delayed_sc_list)) {
4590 delayed_sc = list_entry(ioc->delayed_sc_list.next,
4591 struct _sc_list, list);
4592 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4593 delayed_sc->handle);
4594 list_del(&delayed_sc->list);
4602 * _scsih_check_for_pending_tm - check for pending task management
4603 * @ioc: per adapter object
4604 * @smid: system request message index
4606 * This will check delayed target reset list, and feed the
4609 * Return: 1 meaning mf should be freed from _base_interrupt
4610 * 0 means the mf is freed from this function.
4613 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4615 struct _tr_list *delayed_tr;
/* Volume target resets are serviced before device target resets. */
4617 if (!list_empty(&ioc->delayed_tr_volume_list)) {
4618 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4619 struct _tr_list, list);
/* Free this smid first; the send path acquires a fresh hpr smid. */
4620 mpt3sas_base_free_smid(ioc, smid);
4621 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4622 list_del(&delayed_tr->list);
4627 if (!list_empty(&ioc->delayed_tr_list)) {
4628 delayed_tr = list_entry(ioc->delayed_tr_list.next,
4629 struct _tr_list, list);
4630 mpt3sas_base_free_smid(ioc, smid);
4631 _scsih_tm_tr_send(ioc, delayed_tr->handle);
4632 list_del(&delayed_tr->list);
4641 * _scsih_check_topo_delete_events - sanity check on topo events
4642 * @ioc: per adapter object
4643 * @event_data: the event data payload
4645 * This routine added to better handle cable breaker.
4647 * This handles the case where driver receives multiple expander
4648 * add and delete events in a single shot. When there is a delete event
4649 * the routine will void any pending add events waiting in the event queue.
4652 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4653 Mpi2EventDataSasTopologyChangeList_t *event_data)
4655 struct fw_event_work *fw_event;
4656 Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4657 u16 expander_handle;
4658 struct _sas_node *sas_expander;
4659 unsigned long flags;
/* Kick off the removal handshake for every PHY reporting "not responding". */
4663 for (i = 0 ; i < event_data->NumEntries; i++) {
4664 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4667 reason_code = event_data->PHY[i].PhyStatus &
4668 MPI2_EVENT_SAS_TOPO_RC_MASK;
4669 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4670 _scsih_tm_tr_send(ioc, handle);
4673 expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
/* A handle below num_phys means the devices are direct-attached to the HBA. */
4674 if (expander_handle < ioc->sas_hba.num_phys) {
4675 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4678 if (event_data->ExpStatus ==
4679 MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4680 /* put expander attached devices into blocking state */
4681 spin_lock_irqsave(&ioc->sas_node_lock, flags);
4682 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4684 _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4685 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
/* Drain blocking_handles, blocking each recorded device in turn. */
4687 handle = find_first_bit(ioc->blocking_handles,
4688 ioc->facts.MaxDevHandle);
4689 if (handle < ioc->facts.MaxDevHandle)
4690 _scsih_block_io_device(ioc, handle);
4691 } while (test_and_clear_bit(handle, ioc->blocking_handles));
4692 } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4693 _scsih_block_io_to_children_attached_directly(ioc, event_data);
/* Only a not-responding expander voids pending add events below. */
4695 if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4698 /* mark ignore flag for pending events */
4699 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4700 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4701 if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4704 local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4705 fw_event->event_data;
4706 if (local_event_data->ExpStatus ==
4707 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4708 local_event_data->ExpStatus ==
4709 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4710 if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4713 ioc_info(ioc, "setting ignoring flag\n"));
4714 fw_event->ignore = 1;
4718 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
/*
 * NOTE(review): line-numbered excerpt with elided statements; comments
 * below cover only the visible lines.
 */
4722 * _scsih_check_pcie_topo_remove_events - sanity check on topo
4724 * @ioc: per adapter object
4725 * @event_data: the event data payload
4727 * This handles the case where driver receives multiple switch
4728 * or device add and delete events in a single shot. When there
4729 * is a delete event the routine will void any pending add
4730 * events waiting in the event queue.
4733 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4734 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4736 struct fw_event_work *fw_event;
4737 Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4738 unsigned long flags;
4740 u16 handle, switch_handle;
/* Target-reset each port entry whose status reports the attached
 * device is no longer responding. */
4742 for (i = 0; i < event_data->NumEntries; i++) {
4744 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4747 reason_code = event_data->PortEntry[i].PortStatus;
4748 if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4749 _scsih_tm_tr_send(ioc, handle);
/* A zero SwitchDevHandle means the devices are attached directly to
 * the controller (no intervening PCIe switch). */
4752 switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4753 if (!switch_handle) {
4754 _scsih_block_io_to_pcie_children_attached_directly(
4758 /* TODO We are not supporting cascaded PCIe Switch removal yet*/
4759 if ((event_data->SwitchStatus
4760 == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4761 (event_data->SwitchStatus ==
4762 MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4763 _scsih_block_io_to_pcie_children_attached_directly(
/* NOTE(review): SwitchStatus is compared against MPI2 *SAS* topology
 * constants here and below, not MPI26_EVENT_PCIE_TOPO_SS_*; verify the
 * numeric values match in the MPI headers before "fixing" this. */
4766 if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4769 /* mark ignore flag for pending events */
4770 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4771 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4772 if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4776 (Mpi26EventDataPCIeTopologyChangeList_t *)
4777 fw_event->event_data;
4778 if (local_event_data->SwitchStatus ==
4779 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4780 local_event_data->SwitchStatus ==
4781 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
/* Void any queued add event for the same switch once a delete arrived. */
4782 if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4785 ioc_info(ioc, "setting ignoring flag for switch event\n"));
4786 fw_event->ignore = 1;
4790 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
/*
 * NOTE(review): line-numbered excerpt; some original lines are elided.
 */
4794 * _scsih_set_volume_delete_flag - setting volume delete flag
4795 * @ioc: per adapter object
4796 * @handle: device handle
4798 * This returns nothing.
4801 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4803 struct _raid_device *raid_device;
4804 struct MPT3SAS_TARGET *sas_target_priv_data;
4805 unsigned long flags;
/* Look up the RAID volume under raid_device_lock and, if it has a SCSI
 * target with private data, mark the target deleted so new I/O is
 * rejected (scsih_qcmd checks sas_target_priv_data->deleted). */
4807 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4808 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4809 if (raid_device && raid_device->starget &&
4810 raid_device->starget->hostdata) {
4811 sas_target_priv_data =
4812 raid_device->starget->hostdata;
4813 sas_target_priv_data->deleted = 1;
4815 ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4816 handle, (u64)raid_device->wwid));
4818 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
/*
 * NOTE(review): only the guard clause of the body is visible here; the
 * assignment branches that actually store into *a / *b are elided.
 */
4822 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4823 * @handle: input handle
4824 * @a: handle for volume a
4825 * @b: handle for volume b
4827 * IR firmware only supports two raid volumes. The purpose of this
4828 * routine is to set the volume handle in either a or b. When the given
4829 * input handle is non-zero, or when a and b have not been set before.
4832 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
/* Early out for a zero handle or one already recorded in either slot. */
4834 if (!handle || handle == *a || handle == *b)
/*
 * NOTE(review): line-numbered excerpt; continue/return lines and some
 * declarations are elided (embedded numbering skips).
 */
4843 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4844 * @ioc: per adapter object
4845 * @event_data: the event data payload
4846 * Context: interrupt time.
4848 * This routine will send target reset to volume, followed by target
4849 * resets to the PDs. This is called when a PD has been removed, or
4850 * volume has been deleted or removed. When the target reset is sent
4851 * to volume, the PD target resets need to be queued to start upon
4852 * completion of the volume target reset.
4855 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4856 Mpi2EventDataIrConfigChangeList_t *event_data)
4858 Mpi2EventIrConfigElement_t *element;
4860 u16 handle, volume_handle, a, b;
4861 struct _tr_list *delayed_tr;
/* WarpDrive hides its RAID internals from the OS; skip entirely. */
4866 if (ioc->is_warpdrive)
4869 /* Volume Resets for Deleted or Removed */
4870 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4871 for (i = 0; i < event_data->NumElements; i++, element++) {
/* Foreign-config changes are not ours to act on. */
4872 if (le32_to_cpu(event_data->Flags) &
4873 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4875 if (element->ReasonCode ==
4876 MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4877 element->ReasonCode ==
4878 MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4879 volume_handle = le16_to_cpu(element->VolDevHandle);
4880 _scsih_set_volume_delete_flag(ioc, volume_handle);
4881 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4885 /* Volume Resets for UNHIDE events */
4886 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4887 for (i = 0; i < event_data->NumElements; i++, element++) {
4888 if (le32_to_cpu(event_data->Flags) &
4889 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4891 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4892 volume_handle = le16_to_cpu(element->VolDevHandle);
4893 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
/* Send the (at most two) volume target resets collected above. */
4898 _scsih_tm_tr_volume_send(ioc, a);
4900 _scsih_tm_tr_volume_send(ioc, b);
4902 /* PD target resets */
4903 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4904 for (i = 0; i < event_data->NumElements; i++, element++) {
4905 if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4907 handle = le16_to_cpu(element->PhysDiskDevHandle);
4908 volume_handle = le16_to_cpu(element->VolDevHandle);
/* PD is no longer hidden behind the volume; drop it from pd_handles. */
4909 clear_bit(handle, ioc->pd_handles);
4911 _scsih_tm_tr_send(ioc, handle);
4912 else if (volume_handle == a || volume_handle == b) {
/* PD reset must wait for its volume's reset to complete; queue it.
 * NOTE(review): BUG_ON on a GFP_ATOMIC kzalloc failure panics the
 * machine on allocation failure — harsh, but matches driver style. */
4913 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4914 BUG_ON(!delayed_tr);
4915 INIT_LIST_HEAD(&delayed_tr->list);
4916 delayed_tr->handle = handle;
4917 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4919 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4922 _scsih_tm_tr_send(ioc, handle);
/*
 * NOTE(review): line-numbered excerpt; the return statements and the
 * declaration of "state" are elided.
 */
4928 * _scsih_check_volume_delete_events - set delete flag for volumes
4929 * @ioc: per adapter object
4930 * @event_data: the event data payload
4931 * Context: interrupt time.
4933 * This will handle the case when the cable connected to entire volume is
4934 * pulled. We will take care of setting the deleted flag so normal IO will
4938 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4939 Mpi2EventDataIrVolume_t *event_data)
/* Only state-change notifications carry a NewValue we care about. */
4943 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4945 state = le32_to_cpu(event_data->NewValue);
/* MISSING or FAILED means the volume is effectively gone: mark its
 * target deleted so queued I/O is failed fast instead of timing out. */
4946 if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4947 MPI2_RAID_VOL_STATE_FAILED)
4948 _scsih_set_volume_delete_flag(ioc,
4949 le16_to_cpu(event_data->VolDevHandle));
/*
 * NOTE(review): line-numbered excerpt; the declaration of "doorbell" and
 * closing braces are elided.
 */
4953 * _scsih_temp_threshold_events - display temperature threshold exceeded events
4954 * @ioc: per adapter object
4955 * @event_data: the temp threshold event data
4956 * Context: interrupt time.
4959 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4960 Mpi2EventDataTemperature_t *event_data)
/* Ignore events from sensor numbers beyond what the IOC reported. */
4963 if (ioc->temp_sensors_count >= event_data->SensorNum) {
/* Status bits 0-3 map to threshold indices 0-3; each set bit prints
 * its index, each clear bit prints a blank placeholder. */
4964 ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4965 le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4966 le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4967 le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4968 le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4969 event_data->SensorNum);
4970 ioc_err(ioc, "Current Temp In Celsius: %d\n",
4971 event_data->CurrentTemperature);
/* On Gen3+ parts, also check whether the IOC already went into FAULT
 * or COREDUMP state because of the thermal condition, and dump it. */
4972 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4973 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
4974 if ((doorbell & MPI2_IOC_STATE_MASK) ==
4975 MPI2_IOC_STATE_FAULT) {
4976 mpt3sas_print_fault_code(ioc,
4977 doorbell & MPI2_DOORBELL_DATA_MASK);
4978 } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
4979 MPI2_IOC_STATE_COREDUMP) {
4980 mpt3sas_print_coredump_info(ioc,
4981 doorbell & MPI2_DOORBELL_DATA_MASK);
/*
 * _scsih_set_satl_pending - track an in-flight ATA passthrough command
 * @scmd: SCSI command
 * @pending: true to claim the per-device ATA slot, false to release it
 *
 * Only ATA_12/ATA_16 passthrough commands are tracked (firmware SATL
 * can only handle one at a time per device).  When @pending is true the
 * return value of test_and_set_bit is nonzero if another ATA command
 * already holds the slot, letting the caller retry later.
 * NOTE(review): line-numbered excerpt — the `if (pending)`/return lines
 * between the visible statements are elided.
 */
4987 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4989 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4991 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4995 return test_and_set_bit(0, &priv->ata_command_pending);
4997 clear_bit(0, &priv->ata_command_pending);
/*
 * NOTE(review): line-numbered excerpt; the "count" declaration, the
 * NULL-scmd continue, the else branch line and scsi_done() call appear
 * to be among the elided lines.
 */
5002 * _scsih_flush_running_cmds - completing outstanding commands.
5003 * @ioc: per adapter object
5005 * The flushing out of all pending scmd commands following host reset,
5006 * where all IO is dropped to the floor.
5009 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
5011 struct scsi_cmnd *scmd;
5012 struct scsiio_tracker *st;
/* Walk every possible SCSI I/O smid and complete whatever is active. */
5016 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
5017 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
/* Release the per-device SATL slot before completing the command. */
5021 _scsih_set_satl_pending(scmd, false);
5022 st = scsi_cmd_priv(scmd);
5023 mpt3sas_base_clear_st(ioc, st);
5024 scsi_dma_unmap(scmd);
/* Device truly gone (PCI error / host removal) vs. transient reset. */
5025 if (ioc->pci_error_recovery || ioc->remove_host)
5026 scmd->result = DID_NO_CONNECT << 16;
5028 scmd->result = DID_RESET << 16;
5031 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
/*
 * NOTE(review): line-numbered excerpt; the eedp_flags declaration,
 * break statements and default case of the switch are elided.
 */
5035 * _scsih_setup_eedp - setup MPI request for EEDP transfer
5036 * @ioc: per adapter object
5037 * @scmd: pointer to scsi command object
5038 * @mpi_request: pointer to the SCSI_IO request message frame
5040 * Supporting protection 1 and 3.
5043 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5044 Mpi25SCSIIORequest_t *mpi_request)
5047 Mpi25SCSIIORequest_t *mpi_request_3v =
5048 (Mpi25SCSIIORequest_t *)mpi_request;
/* Map the block-layer protection operation onto MPI EEDP op flags:
 * READ_STRIP = check+remove PI on read, WRITE_INSERT = add PI on write. */
5050 switch (scsi_get_prot_op(scmd)) {
5051 case SCSI_PROT_READ_STRIP:
5052 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
5054 case SCSI_PROT_WRITE_INSERT:
5055 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
/* Translate the individual protection check flags requested by the
 * midlayer into the corresponding EEDP check bits. */
5061 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
5062 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
5064 if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
5065 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
5067 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
5068 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;
/* The PI reference tag is carried big-endian on the wire (T10 PI),
 * hence cpu_to_be32 rather than the le conversions used elsewhere. */
5070 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
5071 cpu_to_be32(scsi_prot_ref_tag(scmd));
5074 mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));
5076 if (ioc->is_gen35_ioc)
5077 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
5078 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
/*
 * NOTE(review): line-numbered excerpt; the ascq declaration and the
 * per-case ascq assignments/breaks are elided.
 */
5082 * _scsih_eedp_error_handling - return sense code for EEDP errors
5083 * @scmd: pointer to scsi command object
5084 * @ioc_status: ioc status
5087 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
/* Select the ASCQ matching the EEDP failure class (guard / app tag /
 * ref tag); the assignments themselves are in the elided lines. */
5091 switch (ioc_status) {
5092 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5095 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5098 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
/* ILLEGAL_REQUEST / ASC 0x10 is the standard T10 PI error sense. */
5105 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
5106 set_host_byte(scmd, DID_ABORT);
/*
 * NOTE(review): line-numbered excerpt; numerous lines (declarations of
 * mpi_control/smid/handle/class, returns, braces) are elided where the
 * embedded numbering skips.  Comments added only for visible logic.
 */
5110 * scsih_qcmd - main scsi request entry point
5111 * @shost: SCSI host pointer
5112 * @scmd: pointer to scsi command object
5114 * The callback index is set inside `ioc->scsi_io_cb_idx`.
5116 * Return: 0 on success. If there's a failure, return either:
5117 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
5118 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
5121 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5123 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
5124 struct MPT3SAS_DEVICE *sas_device_priv_data;
5125 struct MPT3SAS_TARGET *sas_target_priv_data;
5126 struct _raid_device *raid_device;
5127 struct request *rq = scsi_cmd_to_rq(scmd);
5129 Mpi25SCSIIORequest_t *mpi_request;
5130 struct _pcie_device *pcie_device = NULL;
5135 if (ioc->logging_level & MPT_DEBUG_SCSI)
5136 scsi_print_command(scmd);
/* No per-device private data means the device was never set up or is
 * being torn down: fail the command immediately. */
5138 sas_device_priv_data = scmd->device->hostdata;
5139 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
5140 scmd->result = DID_NO_CONNECT << 16;
5145 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
5146 scmd->result = DID_NO_CONNECT << 16;
5151 sas_target_priv_data = sas_device_priv_data->sas_target;
5153 /* invalid device handle */
5154 handle = sas_target_priv_data->handle;
5157 * Avoid error handling escallation when device is disconnected
5159 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) {
/* During host recovery answer TEST UNIT READY with a unit-attention
 * (0x29/0x07) instead of stalling the EH escalation. */
5160 if (scmd->device->host->shost_state == SHOST_RECOVERY &&
5161 scmd->cmnd[0] == TEST_UNIT_READY) {
5162 scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
5168 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
5169 scmd->result = DID_NO_CONNECT << 16;
5175 if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
5176 /* host recovery or link resets sent via IOCTLs */
5177 return SCSI_MLQUEUE_HOST_BUSY;
5178 } else if (sas_target_priv_data->deleted) {
5179 /* device has been deleted */
5180 scmd->result = DID_NO_CONNECT << 16;
5183 } else if (sas_target_priv_data->tm_busy ||
5184 sas_device_priv_data->block) {
5185 /* device busy with task management */
5186 return SCSI_MLQUEUE_DEVICE_BUSY;
5190 * Bug work around for firmware SATL handling. The loop
5191 * is based on atomic operations and ensures consistency
5192 * since we're lockless at this point
5195 if (test_bit(0, &sas_device_priv_data->ata_command_pending))
5196 return SCSI_MLQUEUE_DEVICE_BUSY;
5197 } while (_scsih_set_satl_pending(scmd, true));
/* Translate data direction into the MPI control field. */
5199 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
5200 mpi_control = MPI2_SCSIIO_CONTROL_READ;
5201 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
5202 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
5204 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
5207 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
5208 /* NCQ Prio supported, make sure control indicated high priority */
5209 if (sas_device_priv_data->ncq_prio_enable) {
5210 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
5211 if (class == IOPRIO_CLASS_RT)
5212 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
5214 /* Make sure Device is not raid volume.
5215 * We do not expose raid functionality to upper layer for warpdrive.
5217 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5218 && !scsih_is_nvme(&scmd->device->sdev_gendev))
5219 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5220 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
/* Allocate a request slot (smid); on failure release the SATL slot we
 * may have claimed above before reporting busy. */
5222 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5224 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5225 _scsih_set_satl_pending(scmd, false);
5228 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5229 memset(mpi_request, 0, ioc->request_sz);
5230 _scsih_setup_eedp(ioc, scmd, mpi_request);
/* 32-byte CDBs need the additional-CDB-length field set (units of 4). */
5232 if (scmd->cmd_len == 32)
5233 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5234 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
/* RAID component members must go through the passthrough function. */
5235 if (sas_device_priv_data->sas_target->flags &
5236 MPT_TARGET_FLAGS_RAID_COMPONENT)
5237 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5239 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5240 mpi_request->DevHandle = cpu_to_le16(handle);
5241 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5242 mpi_request->Control = cpu_to_le32(mpi_control);
5243 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5244 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5245 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5246 mpi_request->SenseBufferLowAddress =
5247 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5248 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5249 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5251 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
/* Build the scatter/gather list; a zero-length transfer still needs a
 * zero-length SGE.  On SG build failure, undo smid + SATL claims. */
5253 if (mpi_request->DataLength) {
5254 pcie_device = sas_target_priv_data->pcie_dev;
5255 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5256 mpt3sas_base_free_smid(ioc, smid);
5257 _scsih_set_satl_pending(scmd, false);
5261 ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5263 raid_device = sas_target_priv_data->raid_device;
5264 if (raid_device && raid_device->direct_io_enabled)
5265 mpt3sas_setup_direct_io(ioc, scmd,
5266 raid_device, mpi_request);
/* Post the request using the fast-path doorbell when the target
 * supports it, otherwise the normal SCSI I/O or default path. */
5268 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5269 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5270 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5271 MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5272 ioc->put_smid_fast_path(ioc, smid, handle);
5274 ioc->put_smid_scsi_io(ioc, smid,
5275 le16_to_cpu(mpi_request->DevHandle));
5277 ioc->put_smid_default(ioc, smid);
5281 return SCSI_MLQUEUE_HOST_BUSY;
/*
 * NOTE(review): line-numbered excerpt; the opening brace and the else
 * line between the two formats are elided.
 */
5285 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5286 * @sense_buffer: sense data returned by target
5287 * @data: normalized skey/asc/ascq
5290 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
/* Response codes 0x72/0x73 indicate descriptor-format sense; anything
 * lower is fixed format with skey/asc/ascq at bytes 2/12/13. */
5292 if ((sense_buffer[0] & 0x7F) >= 0x72) {
5293 /* descriptor format */
5294 data->skey = sense_buffer[1] & 0x0F;
5295 data->asc = sense_buffer[2];
5296 data->ascq = sense_buffer[3];
5299 data->skey = sense_buffer[2] & 0x0F;
5300 data->asc = sense_buffer[12];
5301 data->ascq = sense_buffer[13];
/*
 * NOTE(review): line-numbered excerpt; break statements, some case
 * labels, and several declarations (response_info, response_bytes) are
 * elided where the embedded numbering skips.
 */
5306 * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request
5307 * @ioc: per adapter object
5308 * @scmd: pointer to scsi command object
5309 * @mpi_reply: reply mf payload returned from firmware
5312 * scsi_status - SCSI Status code returned from target device
5313 * scsi_state - state info associated with SCSI_IO determined by ioc
5314 * ioc_status - ioc supplied status info
5317 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5318 Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
5322 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
5323 MPI2_IOCSTATUS_MASK;
5324 u8 scsi_state = mpi_reply->SCSIState;
5325 u8 scsi_status = mpi_reply->SCSIStatus;
5326 char *desc_ioc_state = NULL;
5327 char *desc_scsi_status = NULL;
/* scsi_state description is assembled by strcat into the shared
 * ioc->tmp_string scratch buffer below. */
5328 char *desc_scsi_state = ioc->tmp_string;
5329 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5330 struct _sas_device *sas_device = NULL;
5331 struct _pcie_device *pcie_device = NULL;
5332 struct scsi_target *starget = scmd->device->sdev_target;
5333 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
5334 char *device_str = NULL;
5338 if (ioc->hide_ir_msg)
5339 device_str = "WarpDrive";
5341 device_str = "volume";
/* 0x31170000: special log info value — the branch it guards is elided;
 * presumably an early return, TODO confirm against full source. */
5343 if (log_info == 0x31170000)
/* Map IOC status to a human-readable string. */
5346 switch (ioc_status) {
5347 case MPI2_IOCSTATUS_SUCCESS:
5348 desc_ioc_state = "success";
5350 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5351 desc_ioc_state = "invalid function";
5353 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5354 desc_ioc_state = "scsi recovered error";
5356 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
5357 desc_ioc_state = "scsi invalid dev handle";
5359 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5360 desc_ioc_state = "scsi device not there";
5362 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5363 desc_ioc_state = "scsi data overrun";
5365 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5366 desc_ioc_state = "scsi data underrun";
5368 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5369 desc_ioc_state = "scsi io data error";
5371 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5372 desc_ioc_state = "scsi protocol error";
5374 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5375 desc_ioc_state = "scsi task terminated";
5377 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5378 desc_ioc_state = "scsi residual mismatch";
5380 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5381 desc_ioc_state = "scsi task mgmt failed";
5383 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5384 desc_ioc_state = "scsi ioc terminated";
5386 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5387 desc_ioc_state = "scsi ext terminated";
5389 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5390 desc_ioc_state = "eedp guard error";
5392 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5393 desc_ioc_state = "eedp ref tag error";
5395 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5396 desc_ioc_state = "eedp app tag error";
5398 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5399 desc_ioc_state = "insufficient power";
5402 desc_ioc_state = "unknown";
/* Map the target-supplied SCSI status byte to a readable string. */
5406 switch (scsi_status) {
5407 case MPI2_SCSI_STATUS_GOOD:
5408 desc_scsi_status = "good";
5410 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5411 desc_scsi_status = "check condition";
5413 case MPI2_SCSI_STATUS_CONDITION_MET:
5414 desc_scsi_status = "condition met";
5416 case MPI2_SCSI_STATUS_BUSY:
5417 desc_scsi_status = "busy";
5419 case MPI2_SCSI_STATUS_INTERMEDIATE:
5420 desc_scsi_status = "intermediate";
5422 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
5423 desc_scsi_status = "intermediate condmet";
5425 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5426 desc_scsi_status = "reservation conflict";
5428 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
5429 desc_scsi_status = "command terminated";
5431 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5432 desc_scsi_status = "task set full";
5434 case MPI2_SCSI_STATUS_ACA_ACTIVE:
5435 desc_scsi_status = "aca active";
5437 case MPI2_SCSI_STATUS_TASK_ABORTED:
5438 desc_scsi_status = "task aborted";
5441 desc_scsi_status = "unknown";
/* Build a space-separated list of the state bits that are set. */
5445 desc_scsi_state[0] = '\0';
5447 desc_scsi_state = " ";
5448 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5449 strcat(desc_scsi_state, "response info ");
5450 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5451 strcat(desc_scsi_state, "state terminated ");
5452 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
5453 strcat(desc_scsi_state, "no status ");
5454 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
5455 strcat(desc_scsi_state, "autosense failed ");
5456 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
5457 strcat(desc_scsi_state, "autosense valid ");
5459 scsi_print_command(scmd);
/* Identify the device (volume / NVMe / SAS) and print its address. */
5461 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
5462 ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
5463 device_str, (u64)priv_target->sas_address);
5464 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
5465 pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
5467 ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
5468 (u64)pcie_device->wwid, pcie_device->port_num);
5469 if (pcie_device->enclosure_handle != 0)
5470 ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
5471 (u64)pcie_device->enclosure_logical_id,
5473 if (pcie_device->connector_name[0])
5474 ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
5475 pcie_device->enclosure_level,
5476 pcie_device->connector_name);
5477 pcie_device_put(pcie_device);
5480 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
5482 ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
5483 (u64)sas_device->sas_address, sas_device->phy);
5485 _scsih_display_enclosure_chassis_info(ioc, sas_device,
5488 sas_device_put(sas_device);
/* Dump the decoded reply fields. */
5492 ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
5493 le16_to_cpu(mpi_reply->DevHandle),
5494 desc_ioc_state, ioc_status, smid);
5495 ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5496 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5497 ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5498 le16_to_cpu(mpi_reply->TaskTag),
5499 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5500 ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5501 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
5503 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5504 struct sense_info data;
5505 _scsih_normalize_sense(scmd->sense_buffer, &data);
5506 ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5507 data.skey, data.asc, data.ascq,
5508 le32_to_cpu(mpi_reply->SenseCount));
5510 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5511 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5512 response_bytes = (u8 *)&response_info;
5513 _scsih_response_code(ioc, response_bytes[0]);
/*
 * NOTE(review): line-numbered excerpt; the NULL-check early return and
 * goto-out lines are elided.
 */
5518 * _scsih_turn_on_pfa_led - illuminate PFA LED
5519 * @ioc: per adapter object
5520 * @handle: device handle
5524 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5526 Mpi2SepReply_t mpi_reply;
5527 Mpi2SepRequest_t mpi_request;
5528 struct _sas_device *sas_device;
/* Takes a reference on the sas_device; released at the bottom. */
5530 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
/* Ask the SCSI Enclosure Processor to set the predicted-fault slot
 * status bit, addressing the slot via the device handle. */
5534 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5535 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5536 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5537 mpi_request.SlotStatus =
5538 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5539 mpi_request.DevHandle = cpu_to_le16(handle);
5540 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5541 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5542 &mpi_request)) != 0) {
5543 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5544 __FILE__, __LINE__, __func__);
/* Remember LED state so _scsih_turn_off_pfa_led can undo it later. */
5547 sas_device->pfa_led_on = 1;
5549 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5551 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5552 le16_to_cpu(mpi_reply.IOCStatus),
5553 le32_to_cpu(mpi_reply.IOCLogInfo)));
5557 sas_device_put(sas_device);
/*
 * NOTE(review): line-numbered excerpt; the return after the error log
 * is elided.
 */
5561 * _scsih_turn_off_pfa_led - turn off Fault LED
5562 * @ioc: per adapter object
5563 * @sas_device: sas device whose PFA LED has to turned off
5567 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5568 struct _sas_device *sas_device)
5570 Mpi2SepReply_t mpi_reply;
5571 Mpi2SepRequest_t mpi_request;
/* Clear the slot status (SlotStatus = 0).  Unlike the "on" path this
 * addresses the slot by enclosure handle + slot number, since the
 * device handle may already be gone by the time the LED is cleared. */
5573 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5574 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5575 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5576 mpi_request.SlotStatus = 0;
5577 mpi_request.Slot = cpu_to_le16(sas_device->slot);
5578 mpi_request.DevHandle = 0;
5579 mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5580 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5581 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5582 &mpi_request)) != 0) {
5583 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5584 __FILE__, __LINE__, __func__);
5588 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5590 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5591 le16_to_cpu(mpi_reply.IOCStatus),
5592 le32_to_cpu(mpi_reply.IOCLogInfo)));
/*
 * NOTE(review): line-numbered excerpt; the allocation-failure return is
 * elided.
 */
5598 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5599 * @ioc: per adapter object
5600 * @handle: device handle
5601 * Context: interrupt.
5604 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5606 struct fw_event_work *fw_event;
/* We are in interrupt context, so the SEP request (which blocks) is
 * deferred to the fw_event worker via a driver-internal event. */
5608 fw_event = alloc_fw_event_work(0);
5611 fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5612 fw_event->device_handle = handle;
5613 fw_event->ioc = ioc;
5614 _scsih_fw_event_add(ioc, fw_event);
/* _scsih_fw_event_add takes its own reference; drop ours. */
5615 fw_event_work_put(fw_event);
/*
 * NOTE(review): line-numbered excerpt; the "sz" declaration, goto
 * labels and several early-exit lines are elided (the out_unlock path
 * at the bottom is visible).
 */
5619 * _scsih_smart_predicted_fault - process smart errors
5620 * @ioc: per adapter object
5621 * @handle: device handle
5622 * Context: interrupt.
5625 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5627 struct scsi_target *starget;
5628 struct MPT3SAS_TARGET *sas_target_priv_data;
5629 Mpi2EventNotificationReply_t *event_reply;
5630 Mpi2EventDataSasDeviceStatusChange_t *event_data;
5631 struct _sas_device *sas_device;
5633 unsigned long flags;
5635 /* only handle non-raid devices */
5636 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5637 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5641 starget = sas_device->starget;
5642 sas_target_priv_data = starget->hostdata;
5644 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5645 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5648 _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5650 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* IBM-branded adapters use the enclosure fault LED to surface SMART
 * predicted-fault; schedule the LED-on via the fw_event worker. */
5652 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5653 _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5655 /* insert into event log */
5656 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5657 sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5658 event_reply = kzalloc(sz, GFP_ATOMIC);
5660 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5661 __FILE__, __LINE__, __func__);
/* Synthesize a SAS device-status-change event (SMART data, ASC 0x5D)
 * so user space sees it through the ctl event log. */
5665 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5666 event_reply->Event =
5667 cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5668 event_reply->MsgLength = sz/4;
5669 event_reply->EventDataLength =
5670 cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5671 event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5672 event_reply->EventData;
5673 event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5674 event_data->ASC = 0x5D;
5675 event_data->DevHandle = cpu_to_le16(handle);
5676 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5677 mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5681 sas_device_put(sas_device);
5685 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5690 * _scsih_io_done - scsi request callback
5691 * @ioc: per adapter object
5692 * @smid: system request message index
5693 * @msix_index: MSIX table index supplied by the OS
5694 * @reply: reply message frame(lower 32bit addr)
5696 * Callback handler when using _scsih_qcmd.
5698 * Return: 1 meaning mf should be freed from _base_interrupt
5699 * 0 means the mf is freed from this function.
/*
 * _scsih_io_done - SCSI IO request completion handler.
 * Translates the firmware reply (IOCStatus/SCSIState/SCSIStatus) for a
 * completed SCSI IO into a mid-layer scmd->result, then unmaps DMA and
 * frees the smid.
 * NOTE(review): this excerpt elides interior lines (the embedded original
 * line numbering skips); read alongside the full driver source.
 */
5702 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5704 Mpi25SCSIIORequest_t *mpi_request;
5705 Mpi2SCSIIOReply_t *mpi_reply;
5706 struct scsi_cmnd *scmd;
5707 struct scsiio_tracker *st;
5713 struct MPT3SAS_DEVICE *sas_device_priv_data;
5714 u32 response_code = 0;
5716 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5718 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5722 _scsih_set_satl_pending(scmd, false);
5724 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
/* No reply frame at all: treat the IO as completed successfully. */
5726 if (mpi_reply == NULL) {
5727 scmd->result = DID_OK << 16;
/* Target gone (or never attached): fail the command with no-connect. */
5731 sas_device_priv_data = scmd->device->hostdata;
5732 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5733 sas_device_priv_data->sas_target->deleted) {
5734 scmd->result = DID_NO_CONNECT << 16;
5737 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5740 * WARPDRIVE: If direct_io is set then it is directIO,
5741 * the failed direct I/O should be redirected to volume
5743 st = scsi_cmd_priv(scmd);
5744 if (st->direct_io &&
5745 ((ioc_status & MPI2_IOCSTATUS_MASK)
5746 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
/* Resubmit the same smid to the volume handle instead of the disk. */
5749 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5750 mpi_request->DevHandle =
5751 cpu_to_le16(sas_device_priv_data->sas_target->handle);
5752 ioc->put_smid_scsi_io(ioc, smid,
5753 sas_device_priv_data->sas_target->handle);
5756 /* turning off TLR */
5757 scsi_state = mpi_reply->SCSIState;
5758 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5760 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
/* One-shot snoop: disable Transport Layer Retries if the device NAKed them. */
5761 if (!sas_device_priv_data->tlr_snoop_check) {
5762 sas_device_priv_data->tlr_snoop_check++;
5763 if ((!ioc->is_warpdrive &&
5764 !scsih_is_raid(&scmd->device->sdev_gendev) &&
5765 !scsih_is_nvme(&scmd->device->sdev_gendev))
5766 && sas_is_tlr_enabled(scmd->device) &&
5767 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5768 sas_disable_tlr(scmd->device);
5769 sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5773 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5774 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5775 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5776 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5779 ioc_status &= MPI2_IOCSTATUS_MASK;
5780 scsi_status = mpi_reply->SCSIStatus;
/*
 * A zero-byte underrun with BUSY/RESERVATION CONFLICT/TASK SET FULL is
 * really a device-status completion, not a transfer problem.
 */
5782 if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5783 (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5784 scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5785 scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5786 ioc_status = MPI2_IOCSTATUS_SUCCESS;
/* Copy autosense data and check for SMART trip (ASC 0x5D). */
5789 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5790 struct sense_info data;
5791 const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5793 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5794 le32_to_cpu(mpi_reply->SenseCount));
5795 memcpy(scmd->sense_buffer, sense_data, sz);
5796 _scsih_normalize_sense(scmd->sense_buffer, &data);
5797 /* failure prediction threshold exceeded */
5798 if (data.asc == 0x5D)
5799 _scsih_smart_predicted_fault(ioc,
5800 le16_to_cpu(mpi_reply->DevHandle));
5801 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5803 if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5804 ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5805 (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5806 (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5807 _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
/* Map firmware IOCStatus onto a mid-layer host byte / SAM status. */
5809 switch (ioc_status) {
5810 case MPI2_IOCSTATUS_BUSY:
5811 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5812 scmd->result = SAM_STAT_BUSY;
5815 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5816 scmd->result = DID_NO_CONNECT << 16;
5819 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5820 if (sas_device_priv_data->block) {
5821 scmd->result = DID_TRANSPORT_DISRUPTED << 16;
/* 0x31110630: magic loginfo; give up after 2 retries, else soft-retry. */
5824 if (log_info == 0x31110630) {
5825 if (scmd->retries > 2) {
5826 scmd->result = DID_NO_CONNECT << 16;
5827 scsi_device_set_state(scmd->device,
5830 scmd->result = DID_SOFT_ERROR << 16;
5831 scmd->device->expecting_cc_ua = 1;
5834 } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5835 scmd->result = DID_RESET << 16;
5837 } else if ((scmd->device->channel == RAID_CHANNEL) &&
5838 (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5839 MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5840 scmd->result = DID_RESET << 16;
5843 scmd->result = DID_SOFT_ERROR << 16;
5845 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5846 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5847 scmd->result = DID_RESET << 16;
5850 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5851 if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5852 scmd->result = DID_SOFT_ERROR << 16;
5854 scmd->result = (DID_OK << 16) | scsi_status;
5857 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5858 scmd->result = (DID_OK << 16) | scsi_status;
5860 if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5863 if (xfer_cnt < scmd->underflow) {
5864 if (scsi_status == SAM_STAT_BUSY)
5865 scmd->result = SAM_STAT_BUSY;
5867 scmd->result = DID_SOFT_ERROR << 16;
5868 } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5869 MPI2_SCSI_STATE_NO_SCSI_STATUS))
5870 scmd->result = DID_SOFT_ERROR << 16;
5871 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5872 scmd->result = DID_RESET << 16;
/* Zero-byte REPORT LUNS: synthesize a CHECK CONDITION / ILLEGAL REQUEST. */
5873 else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5874 mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5875 mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5876 scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
5881 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5882 scsi_set_resid(scmd, 0);
/* fallthrough into the success handling below (original code order) */
5884 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5885 case MPI2_IOCSTATUS_SUCCESS:
5886 scmd->result = (DID_OK << 16) | scsi_status;
5887 if (response_code ==
5888 MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5889 (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5890 MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5891 scmd->result = DID_SOFT_ERROR << 16;
5892 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5893 scmd->result = DID_RESET << 16;
5896 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5897 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5898 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5899 _scsih_eedp_error_handling(scmd, ioc_status);
5902 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5903 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5904 case MPI2_IOCSTATUS_INVALID_SGL:
5905 case MPI2_IOCSTATUS_INTERNAL_ERROR:
5906 case MPI2_IOCSTATUS_INVALID_FIELD:
5907 case MPI2_IOCSTATUS_INVALID_STATE:
5908 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5909 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5910 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5912 scmd->result = DID_SOFT_ERROR << 16;
5917 if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5918 _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
/* Always release DMA mappings and the message frame. */
5922 scsi_dma_unmap(scmd);
5923 mpt3sas_base_free_smid(ioc, smid);
5929 * _scsih_update_vphys_after_reset - update the Port's
5930 * vphys_list after reset
5931 * @ioc: per adapter object
/*
 * _scsih_update_vphys_after_reset - re-associate virtual phys (vSES) with
 * their hba_port objects after a host reset, since Port IDs may change.
 * Marks all vphys dirty, re-reads SASIOUnitPage0, and for each virtual phy
 * found, moves/unmarks the matching vphy object.
 * NOTE(review): interior lines are elided in this excerpt (original line
 * numbering skips).
 */
5936 _scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
5940 Mpi2ConfigReply_t mpi_reply;
5941 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5942 u16 attached_handle;
5943 u64 attached_sas_addr;
5944 u8 found = 0, port_id;
5945 Mpi2SasPhyPage0_t phy_pg0;
5946 struct hba_port *port, *port_next, *mport;
5947 struct virtual_phy *vphy, *vphy_next;
5948 struct _sas_device *sas_device;
5951 * Mark all the vphys objects as dirty.
5953 list_for_each_entry_safe(port, port_next,
5954 &ioc->port_table_list, list) {
5955 if (!port->vphys_mask)
5957 list_for_each_entry_safe(vphy, vphy_next,
5958 &port->vphys_list, list) {
5959 vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
5964 * Read SASIOUnitPage0 to get each HBA Phy's data.
5966 sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
5967 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5968 if (!sas_iounit_pg0) {
5969 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5970 __FILE__, __LINE__, __func__);
5973 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5974 sas_iounit_pg0, sz)) != 0)
5976 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5977 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5980 * Loop over each HBA Phy.
5982 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
5984 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
5986 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
5987 MPI2_SAS_NEG_LINK_RATE_1_5)
5990 * Check whether Phy is connected to SEP device or not,
5991 * if it is SEP device then read the Phy's SASPHYPage0 data to
5992 * determine whether Phy is a virtual Phy or not. if it is
5993 * virtual phy then it is confirmed that the attached remote
5994 * device is a HBA's vSES device.
5997 sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
5998 MPI2_SAS_DEVICE_INFO_SEP))
6001 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
6003 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6004 __FILE__, __LINE__, __func__);
6008 if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6009 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6012 * Get the vSES device's SAS Address.
6014 attached_handle = le16_to_cpu(
6015 sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6016 if (_scsih_get_sas_address(ioc, attached_handle,
6017 &attached_sas_addr) != 0) {
6018 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6019 __FILE__, __LINE__, __func__);
6024 port = port_next = NULL;
6026 * Loop over each virtual_phy object from
6027 * each port's vphys_list.
6029 list_for_each_entry_safe(port,
6030 port_next, &ioc->port_table_list, list) {
6031 if (!port->vphys_mask)
6033 list_for_each_entry_safe(vphy, vphy_next,
6034 &port->vphys_list, list) {
6036 * Continue with next virtual_phy object
6037 * if the object is not marked as dirty.
6039 if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
6043 * Continue with next virtual_phy object
6044 * if the object's SAS Address does not equal
6045 * the current Phy's vSES device SAS Address.
6047 if (vphy->sas_address != attached_sas_addr)
6050 * Enable current Phy number bit in object's
6053 if (!(vphy->phy_mask & (1 << i)))
6054 vphy->phy_mask = (1 << i);
6056 * Get hba_port object from hba_port table
6057 * corresponding to current phy's Port ID.
6058 * if there is no hba_port object corresponding
6059 * to Phy's Port ID then create a new hba_port
6060 * object & add to hba_port table.
6062 port_id = sas_iounit_pg0->PhyData[i].Port;
6063 mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
6066 sizeof(struct hba_port), GFP_KERNEL);
6069 mport->port_id = port_id;
6071 "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
6072 __func__, mport, mport->port_id);
6073 list_add_tail(&mport->list,
6074 &ioc->port_table_list);
6077 * If mport & port pointers are not pointing to
6078 * same hba_port object then it means that vSES
6079 * device's Port ID got changed after reset and
6080 * hence move current virtual_phy object from
6081 * port's vphys_list to mport's vphys_list.
6083 if (port != mport) {
6084 if (!mport->vphys_mask)
6086 &mport->vphys_list);
6087 mport->vphys_mask |= (1 << i);
6088 port->vphys_mask &= ~(1 << i);
6089 list_move(&vphy->list,
6090 &mport->vphys_list);
6091 sas_device = mpt3sas_get_sdev_by_addr(
6092 ioc, attached_sas_addr, port);
6094 sas_device->port = mport;
6097 * Earlier while updating the hba_port table,
6098 * it is determined that there is no other
6099 * direct attached device with mport's Port ID,
6100 * Hence mport was marked as dirty. Only vSES
6101 * device has this Port ID, so unmark the mport
6104 if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
6105 mport->sas_address = 0;
6106 mport->phy_mask = 0;
6108 ~HBA_PORT_FLAG_DIRTY_PORT;
6111 * Unmark current virtual_phy object as dirty.
6113 vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
6122 kfree(sas_iounit_pg0);
6126 * _scsih_get_port_table_after_reset - Construct temporary port table
6127 * @ioc: per adapter object
6128 * @port_table: address where port table needs to be constructed
6130 * return number of HBA port entries available after reset.
/*
 * _scsih_get_port_table_after_reset - build a temporary port table from
 * SASIOUnitPage0 after host reset: one entry per (port_id, attached SAS
 * address) pair, with a phy_mask accumulating the member phys.
 * Returns the number of entries (port_count) per the kernel-doc above.
 * NOTE(review): interior lines are elided in this excerpt (original line
 * numbering skips).
 */
6133 _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6134 struct hba_port *port_table)
6138 Mpi2ConfigReply_t mpi_reply;
6139 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6140 u16 attached_handle;
6141 u64 attached_sas_addr;
6142 u8 found = 0, port_count = 0, port_id;
6144 sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
6145 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6146 if (!sas_iounit_pg0) {
6147 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6148 __FILE__, __LINE__, __func__);
6152 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6153 sas_iounit_pg0, sz)) != 0)
6155 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6156 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6158 for (i = 0; i < ioc->sas_hba.num_phys; i++) {
/* Skip phys that did not negotiate at least 1.5G (not linked up). */
6160 if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6161 MPI2_SAS_NEG_LINK_RATE_1_5)
6164 le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6165 if (_scsih_get_sas_address(
6166 ioc, attached_handle, &attached_sas_addr) != 0) {
6167 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6168 __FILE__, __LINE__, __func__);
/* Fold this phy into an existing entry with the same port/address. */
6172 for (j = 0; j < port_count; j++) {
6173 port_id = sas_iounit_pg0->PhyData[i].Port;
6174 if (port_table[j].port_id == port_id &&
6175 port_table[j].sas_address == attached_sas_addr) {
6176 port_table[j].phy_mask |= (1 << i);
/* No match above: start a new port table entry for this phy. */
6185 port_id = sas_iounit_pg0->PhyData[i].Port;
6186 port_table[port_count].port_id = port_id;
6187 port_table[port_count].phy_mask = (1 << i);
6188 port_table[port_count].sas_address = attached_sas_addr;
6192 kfree(sas_iounit_pg0);
/*
 * hba_port_matched_codes - result codes from
 * _scsih_look_and_get_matched_port_entry(), ordered from strongest match
 * (address + exact phymask) to weaker (subset phymask, with/without port id).
 * NOTE(review): trailing enumerators are elided in this excerpt (original
 * line numbering skips after 6200).
 */
6196 enum hba_port_matched_codes {
6198 MATCHED_WITH_ADDR_AND_PHYMASK,
6199 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,
6200 MATCHED_WITH_ADDR_AND_SUBPHYMASK,
6205 * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
6206 * from HBA port table
6207 * @ioc: per adapter object
6208 * @port_entry: hba port entry from temporary port table which needs to be
6209 * searched for matched entry in the HBA port table
6210 * @matched_port_entry: save matched hba port entry here
6211 * @count: count of matched entries
6213 * return type of matched entry found.
/*
 * _scsih_look_and_get_matched_port_entry - find the best-matching dirty
 * hba_port entry for a post-reset temporary port entry (see kernel-doc
 * above). Match quality is tried strongest-first: exact address+phymask,
 * then address+subset-phymask+port id, then address+subset-phymask, then
 * address alone.
 * NOTE(review): interior lines are elided in this excerpt.
 */
6215 static enum hba_port_matched_codes
6216 _scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
6217 struct hba_port *port_entry,
6218 struct hba_port **matched_port_entry, int *count)
6220 struct hba_port *port_table_entry, *matched_port = NULL;
6221 enum hba_port_matched_codes matched_code = NOT_MATCHED;
6223 *matched_port_entry = NULL;
/* Only dirty entries (not yet re-claimed after reset) are candidates. */
6225 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6226 if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
6229 if ((port_table_entry->sas_address == port_entry->sas_address)
6230 && (port_table_entry->phy_mask == port_entry->phy_mask)) {
6231 matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
6232 matched_port = port_table_entry;
6236 if ((port_table_entry->sas_address == port_entry->sas_address)
6237 && (port_table_entry->phy_mask & port_entry->phy_mask)
6238 && (port_table_entry->port_id == port_entry->port_id)) {
6239 matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
6240 matched_port = port_table_entry;
/* Weaker matches must not overwrite an already-found stronger one. */
6244 if ((port_table_entry->sas_address == port_entry->sas_address)
6245 && (port_table_entry->phy_mask & port_entry->phy_mask)) {
6247 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6249 matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
6250 matched_port = port_table_entry;
6254 if (port_table_entry->sas_address == port_entry->sas_address) {
6256 MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6258 if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
6260 matched_code = MATCHED_WITH_ADDR;
6261 matched_port = port_table_entry;
6266 *matched_port_entry = matched_port;
6267 if (matched_code == MATCHED_WITH_ADDR)
6269 return matched_code;
6273 * _scsih_del_phy_part_of_anther_port - remove phy if it
6274 * is a part of another port
6275 *@ioc: per adapter object
6276 *@port_table: port table after reset
6277 *@index: hba port entry index
6278 *@port_count: number of ports available after host reset
6279 *@offset: HBA phy bit offset
/*
 * _scsih_del_phy_part_of_anther_port - if the phy at 'offset' belongs to a
 * different port entry in the post-reset table, detach it from its current
 * transport port; otherwise claim it into port_table[index] (see kernel-doc
 * above). NOTE(review): interior lines are elided in this excerpt.
 */
6283 _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6284 struct hba_port *port_table,
6285 int index, u8 port_count, int offset)
6287 struct _sas_node *sas_node = &ioc->sas_hba;
6290 for (i = 0; i < port_count; i++) {
6294 if (port_table[i].phy_mask & (1 << offset)) {
6295 mpt3sas_transport_del_phy_from_an_existing_port(
6296 ioc, sas_node, &sas_node->phy[offset]);
/* Not owned by any other entry: absorb the phy into this entry. */
6302 port_table[index].phy_mask |= (1 << offset);
6306 * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
6308 *@ioc: per adapter object
6309 *@hba_port_entry: hba port table entry
6310 *@port_table: temporary port table
6311 *@index: hba port entry index
6312 *@port_count: number of ports available after host reset
/*
 * _scsih_add_or_del_phys_from_existing_port - reconcile the phy membership
 * of an existing hba_port entry with the post-reset table entry: the XOR of
 * the two phy masks gives the phys that changed sides; each is either
 * removed from, or (re)added to, the transport port.
 * NOTE(review): interior lines are elided in this excerpt.
 */
6316 _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6317 struct hba_port *hba_port_entry, struct hba_port *port_table,
6318 int index, int port_count)
6320 u32 phy_mask, offset = 0;
6321 struct _sas_node *sas_node = &ioc->sas_hba;
/* Bits set here are phys present in exactly one of the two masks. */
6323 phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6325 for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6326 if (phy_mask & (1 << offset)) {
6327 if (!(port_table[index].phy_mask & (1 << offset))) {
6328 _scsih_del_phy_part_of_anther_port(
6329 ioc, port_table, index, port_count,
/* Phy newly present in this port: move it over if attached elsewhere. */
6333 if (sas_node->phy[offset].phy_belongs_to_port)
6334 mpt3sas_transport_del_phy_from_an_existing_port(
6335 ioc, sas_node, &sas_node->phy[offset]);
6336 mpt3sas_transport_add_phy_to_an_existing_port(
6337 ioc, sas_node, &sas_node->phy[offset],
6338 hba_port_entry->sas_address,
6345 * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
6346 * @ioc: per adapter object
/*
 * _scsih_del_dirty_vphy - remove virtual_phy objects still flagged
 * MPT_VPHY_FLAG_DIRTY_PHY (i.e. not re-discovered after reset) from each
 * port's vphys_list; ports left with no vphys and no address are then
 * marked dirty themselves. NOTE(review): interior lines are elided in this
 * excerpt (the kfree of the vphy falls in an elided span).
 */
6351 _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
6353 struct hba_port *port, *port_next;
6354 struct virtual_phy *vphy, *vphy_next;
6356 list_for_each_entry_safe(port, port_next,
6357 &ioc->port_table_list, list) {
6358 if (!port->vphys_mask)
6360 list_for_each_entry_safe(vphy, vphy_next,
6361 &port->vphys_list, list) {
6362 if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
6363 drsprintk(ioc, ioc_info(ioc,
6364 "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
6365 vphy, port->port_id,
6367 port->vphys_mask &= ~vphy->phy_mask;
6368 list_del(&vphy->list);
6372 if (!port->vphys_mask && !port->sas_address)
6373 port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6378 * _scsih_del_dirty_port_entries - delete dirty port entries from port list
6380 *@ioc: per adapter object
/*
 * _scsih_del_dirty_port_entries - drop hba_port entries that are still
 * dirty after reset reconciliation; entries flagged NEW_PORT are kept
 * (they were just created during recovery). NOTE(review): interior lines
 * are elided in this excerpt (the kfree of the port falls in an elided
 * span).
 */
6384 _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
6386 struct hba_port *port, *port_next;
6388 list_for_each_entry_safe(port, port_next,
6389 &ioc->port_table_list, list) {
6390 if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
6391 port->flags & HBA_PORT_FLAG_NEW_PORT)
6394 drsprintk(ioc, ioc_info(ioc,
6395 "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
6396 port, port->port_id, port->phy_mask));
6397 list_del(&port->list);
6403 * _scsih_sas_port_refresh - Update HBA port table after host reset
6404 * @ioc: per adapter object
/*
 * _scsih_sas_port_refresh - reconcile the persistent hba_port list with a
 * freshly built post-reset port table: mark all old entries dirty, match
 * each new entry against the old list, update/move phys accordingly, and
 * refresh port ids and phy masks (see kernel-doc above).
 * NOTE(review): interior lines are elided in this excerpt, including the
 * switch statement header and several break/continue/free paths.
 */
6407 _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6410 struct hba_port *port_table;
6411 struct hba_port *port_table_entry;
6412 struct hba_port *port_entry = NULL;
6413 int i, j, count = 0, lcount = 0;
6418 drsprintk(ioc, ioc_info(ioc,
6419 "updating ports for sas_host(0x%016llx)\n",
6420 (unsigned long long)ioc->sas_hba.sas_address));
6422 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6424 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6425 __FILE__, __LINE__, __func__);
/* The phy array was sized at host-add time; more phys now is an error. */
6429 if (num_phys > ioc->sas_hba.nr_phys_allocated) {
6430 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6431 __FILE__, __LINE__, __func__);
6434 ioc->sas_hba.num_phys = num_phys;
6436 port_table = kcalloc(ioc->sas_hba.num_phys,
6437 sizeof(struct hba_port), GFP_KERNEL);
6441 port_count = _scsih_get_port_table_after_reset(ioc, port_table);
6445 drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6446 for (j = 0; j < port_count; j++)
6447 drsprintk(ioc, ioc_info(ioc,
6448 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6449 port_table[j].port_id,
6450 port_table[j].phy_mask, port_table[j].sas_address));
/* Assume every pre-reset entry is stale until matched below. */
6452 list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6453 port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6455 drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6456 port_table_entry = NULL;
6457 list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6458 drsprintk(ioc, ioc_info(ioc,
6459 "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6460 port_table_entry->port_id,
6461 port_table_entry->phy_mask,
6462 port_table_entry->sas_address));
6465 for (j = 0; j < port_count; j++) {
6466 ret = _scsih_look_and_get_matched_port_entry(ioc,
6467 &port_table[j], &port_entry, &count);
6469 drsprintk(ioc, ioc_info(ioc,
6470 "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6471 port_table[j].sas_address,
6472 port_table[j].port_id));
/* Dispatch on the match quality returned above (switch header elided). */
6477 case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6478 case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6479 _scsih_add_or_del_phys_from_existing_port(ioc,
6480 port_entry, port_table, j, port_count);
6482 case MATCHED_WITH_ADDR:
6483 sas_addr = port_table[j].sas_address;
6484 for (i = 0; i < port_count; i++) {
6485 if (port_table[i].sas_address == sas_addr)
/* Ambiguous address-only match (multiple candidates): skip updating. */
6489 if (count > 1 || lcount > 1)
6492 _scsih_add_or_del_phys_from_existing_port(ioc,
6493 port_entry, port_table, j, port_count);
6499 if (port_entry->port_id != port_table[j].port_id)
6500 port_entry->port_id = port_table[j].port_id;
6501 port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6502 port_entry->phy_mask = port_table[j].phy_mask;
6505 port_table_entry = NULL;
6509 * _scsih_alloc_vphy - allocate virtual_phy object
6510 * @ioc: per adapter object
6511 * @port_id: Port ID number
6512 * @phy_num: HBA Phy number
6514 * Returns allocated virtual_phy object.
/*
 * _scsih_alloc_vphy - look up (or allocate and enlist) the virtual_phy
 * object for a given port id and HBA phy number (see kernel-doc above).
 * Returns the virtual_phy, or (per the elided error paths) NULL on
 * lookup/allocation failure. NOTE(review): interior lines are elided in
 * this excerpt.
 */
6516 static struct virtual_phy *
6517 _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6519 struct virtual_phy *vphy;
6520 struct hba_port *port;
6522 port = mpt3sas_get_port_by_id(ioc, port_id, 0)
6526 vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
6528 vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
/* First vphy on this port: the list head has not been initialized yet. */
6532 if (!port->vphys_mask)
6533 INIT_LIST_HEAD(&port->vphys_list);
6536 * Enable bit corresponding to HBA phy number on its
6537 * parent hba_port object's vphys_mask field.
6539 port->vphys_mask |= (1 << phy_num);
6540 vphy->phy_mask |= (1 << phy_num);
6542 list_add_tail(&vphy->list, &port->vphys_list);
6545 "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6546 vphy, port->port_id, phy_num);
6552 * _scsih_sas_host_refresh - refreshing sas host object contents
6553 * @ioc: per adapter object
6556 * During port enable, fw will send topology events for every device. It's
6557 * possible that the handles may change from the previous setting, so this
6558 * code keeps the handles updated if they have changed.
/*
 * _scsih_sas_host_refresh - refresh ioc->sas_hba contents from
 * SASIOUnitPage0: update device handles, create missing hba_port entries,
 * (re)detect vSES virtual phys, add phys that appeared after a firmware
 * up/downgrade, and update transport links (see kernel-doc above).
 * NOTE(review): interior lines are elided in this excerpt (original line
 * numbering skips).
 */
6561 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
6566 Mpi2ConfigReply_t mpi_reply;
6567 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6568 u16 attached_handle;
6569 u8 link_rate, port_id;
6570 struct hba_port *port;
6571 Mpi2SasPhyPage0_t phy_pg0;
6574 ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
6575 (u64)ioc->sas_hba.sas_address));
6577 sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
6578 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6579 if (!sas_iounit_pg0) {
6580 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6581 __FILE__, __LINE__, __func__);
6585 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6586 sas_iounit_pg0, sz)) != 0)
6588 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6589 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6591 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6592 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
/* Controller device handle can change across reset; take it from PhyData[0]. */
6594 ioc->sas_hba.handle = le16_to_cpu(
6595 sas_iounit_pg0->PhyData[0].ControllerDevHandle);
6596 port_id = sas_iounit_pg0->PhyData[i].Port;
6597 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6598 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6602 port->port_id = port_id;
6604 "hba_port entry: %p, port: %d is added to hba_port list\n",
6605 port, port->port_id);
6606 if (ioc->shost_recovery)
6607 port->flags = HBA_PORT_FLAG_NEW_PORT;
6608 list_add_tail(&port->list, &ioc->port_table_list);
6611 * Check whether current Phy belongs to HBA vSES device or not.
6613 if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
6614 MPI2_SAS_DEVICE_INFO_SEP &&
6615 (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
6616 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
6619 "failure at %s:%d/%s()!\n",
6620 __FILE__, __LINE__, __func__);
6623 if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6624 MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6627 * Allocate a virtual_phy object for vSES device, if
6628 * this vSES device is hot added.
6630 if (!_scsih_alloc_vphy(ioc, port_id, i))
6632 ioc->sas_hba.phy[i].hba_vphy = 1;
6636 * Add new HBA phys to STL if these new phys got added as part
6637 * of HBA Firmware upgrade/downgrade operation.
6639 if (!ioc->sas_hba.phy[i].phy) {
6640 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
6642 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6643 __FILE__, __LINE__, __func__);
6646 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6647 MPI2_IOCSTATUS_MASK;
6648 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6649 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6650 __FILE__, __LINE__, __func__);
6653 ioc->sas_hba.phy[i].phy_id = i;
6654 mpt3sas_transport_add_host_phy(ioc,
6655 &ioc->sas_hba.phy[i], phy_pg0,
6656 ioc->sas_hba.parent_dev);
6659 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6660 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
/* A device is attached but rate reads low: clamp up to the 1.5G floor. */
6662 if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6663 link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
6664 ioc->sas_hba.phy[i].port =
6665 mpt3sas_get_port_by_id(ioc, port_id, 0);
6666 mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
6667 attached_handle, i, link_rate,
6668 ioc->sas_hba.phy[i].port);
6671 * Clear the phy details if this phy got disabled as part of
6672 * HBA Firmware upgrade/downgrade operation.
6674 for (i = ioc->sas_hba.num_phys;
6675 i < ioc->sas_hba.nr_phys_allocated; i++) {
6676 if (ioc->sas_hba.phy[i].phy &&
6677 ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
6678 SAS_LINK_RATE_1_5_GBPS)
6679 mpt3sas_transport_update_links(ioc,
6680 ioc->sas_hba.sas_address, 0, i,
6681 MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
6684 kfree(sas_iounit_pg0);
6688 * _scsih_sas_host_add - create sas host object
6689 * @ioc: per adapter object
6691 * Creating host side data object, stored in ioc->sas_hba
/*
 * _scsih_sas_host_add - one-time construction of the ioc->sas_hba object:
 * allocates the phy array, reads SAS IO unit pages 0/1, configures device
 * missing delays, registers each host phy with the transport layer, and
 * records the host's handle/SAS address/enclosure info (see kernel-doc
 * above). NOTE(review): interior lines are elided in this excerpt
 * (original line numbering skips).
 */
6694 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
6697 Mpi2ConfigReply_t mpi_reply;
6698 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6699 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
6700 Mpi2SasPhyPage0_t phy_pg0;
6701 Mpi2SasDevicePage0_t sas_device_pg0;
6702 Mpi2SasEnclosurePage0_t enclosure_pg0;
6705 u8 device_missing_delay;
6706 u8 num_phys, port_id;
6707 struct hba_port *port;
6709 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6711 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6712 __FILE__, __LINE__, __func__);
/* Over-allocate to MPT_MAX_HBA_NUM_PHYS so later fw upgrades fit. */
6716 ioc->sas_hba.nr_phys_allocated = max_t(u8,
6717 MPT_MAX_HBA_NUM_PHYS, num_phys);
6718 ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated,
6719 sizeof(struct _sas_phy), GFP_KERNEL);
6720 if (!ioc->sas_hba.phy) {
6721 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6722 __FILE__, __LINE__, __func__);
6725 ioc->sas_hba.num_phys = num_phys;
6727 /* sas_iounit page 0 */
6728 sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
6729 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6730 if (!sas_iounit_pg0) {
6731 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6732 __FILE__, __LINE__, __func__);
6735 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6736 sas_iounit_pg0, sz))) {
6737 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6738 __FILE__, __LINE__, __func__);
6741 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6742 MPI2_IOCSTATUS_MASK;
6743 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6744 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6745 __FILE__, __LINE__, __func__);
6749 /* sas_iounit page 1 */
6750 sz = struct_size(sas_iounit_pg1, PhyData, ioc->sas_hba.num_phys);
6751 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
6752 if (!sas_iounit_pg1) {
6753 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6754 __FILE__, __LINE__, __func__);
6757 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
6758 sas_iounit_pg1, sz))) {
6759 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6760 __FILE__, __LINE__, __func__);
6763 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6764 MPI2_IOCSTATUS_MASK;
6765 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6766 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6767 __FILE__, __LINE__, __func__);
6771 ioc->io_missing_delay =
6772 sas_iounit_pg1->IODeviceMissingDelay;
6773 device_missing_delay =
6774 sas_iounit_pg1->ReportDeviceMissingDelay;
/* UNIT_16 flag means the delay field is expressed in 16-second units. */
6775 if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
6776 ioc->device_missing_delay = (device_missing_delay &
6777 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
6779 ioc->device_missing_delay = device_missing_delay &
6780 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
6782 ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
6783 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
6784 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
6786 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6787 __FILE__, __LINE__, __func__);
6790 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6791 MPI2_IOCSTATUS_MASK;
6792 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6793 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6794 __FILE__, __LINE__, __func__);
6799 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
6800 PhyData[0].ControllerDevHandle);
6802 port_id = sas_iounit_pg0->PhyData[i].Port;
6803 if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
6804 port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
6808 port->port_id = port_id;
6810 "hba_port entry: %p, port: %d is added to hba_port list\n",
6811 port, port->port_id);
6812 list_add_tail(&port->list,
6813 &ioc->port_table_list);
6817 * Check whether current Phy belongs to HBA vSES device or not.
6819 if ((le32_to_cpu(phy_pg0.PhyInfo) &
6820 MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
6821 (phy_pg0.NegotiatedLinkRate >> 4) >=
6822 MPI2_SAS_NEG_LINK_RATE_1_5) {
6824 * Allocate a virtual_phy object for vSES device.
6826 if (!_scsih_alloc_vphy(ioc, port_id, i))
6828 ioc->sas_hba.phy[i].hba_vphy = 1;
6831 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
6832 ioc->sas_hba.phy[i].phy_id = i;
6833 ioc->sas_hba.phy[i].port =
6834 mpt3sas_get_port_by_id(ioc, port_id, 0);
6835 mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
6836 phy_pg0, ioc->sas_hba.parent_dev);
6838 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6839 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
6840 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6841 __FILE__, __LINE__, __func__);
6844 ioc->sas_hba.enclosure_handle =
6845 le16_to_cpu(sas_device_pg0.EnclosureHandle);
6846 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6847 ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6848 ioc->sas_hba.handle,
6849 (u64)ioc->sas_hba.sas_address,
6850 ioc->sas_hba.num_phys);
6852 if (ioc->sas_hba.enclosure_handle) {
6853 if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
6854 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
6855 ioc->sas_hba.enclosure_handle)))
6856 ioc->sas_hba.enclosure_logical_id =
6857 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
/* Common exit: both pages freed whether we got here via error or success. */
6861 kfree(sas_iounit_pg1);
6862 kfree(sas_iounit_pg0);
6866 * _scsih_expander_add - creating expander object
6867 * @ioc: per adapter object
6868 * @handle: expander handle
6870 * Creating expander object, stored in ioc->sas_expander_list.
6872 * Return: 0 for success, else error.
/*
 * _scsih_expander_add - create and register a _sas_node for an expander
 * identified by its device handle: reads expander pages 0/1, recursively
 * adds a missing parent expander first (out-of-order event handling),
 * registers the transport port and each expander phy, and records
 * enclosure info (see kernel-doc above). NOTE(review): interior lines are
 * elided in this excerpt (original line numbering skips).
 */
6875 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6877 struct _sas_node *sas_expander;
6878 struct _enclosure_node *enclosure_dev;
6879 Mpi2ConfigReply_t mpi_reply;
6880 Mpi2ExpanderPage0_t expander_pg0;
6881 Mpi2ExpanderPage1_t expander_pg1;
6884 u64 sas_address, sas_address_parent = 0;
6886 unsigned long flags;
6887 struct _sas_port *mpt3sas_port = NULL;
/* Bail out early while the host or PCI layer is recovering. */
6895 if (ioc->shost_recovery || ioc->pci_error_recovery)
6898 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
6899 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
6900 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6901 __FILE__, __LINE__, __func__);
6905 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6906 MPI2_IOCSTATUS_MASK;
6907 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6908 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6909 __FILE__, __LINE__, __func__);
6913 /* handle out of order topology events */
6914 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
6915 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
6917 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6918 __FILE__, __LINE__, __func__);
6922 port_id = expander_pg0.PhysicalPort;
/* Parent is another expander (not the HBA): ensure it exists first. */
6923 if (sas_address_parent != ioc->sas_hba.sas_address) {
6924 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6925 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6927 mpt3sas_get_port_by_id(ioc, port_id, 0));
6928 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6929 if (!sas_expander) {
/* Recursive call: add the parent expander before this one. */
6930 rc = _scsih_expander_add(ioc, parent_handle);
6936 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6937 sas_address = le64_to_cpu(expander_pg0.SASAddress);
6938 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6939 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
6940 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6945 sas_expander = kzalloc(sizeof(struct _sas_node),
6947 if (!sas_expander) {
6948 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6949 __FILE__, __LINE__, __func__);
6953 sas_expander->handle = handle;
6954 sas_expander->num_phys = expander_pg0.NumPhys;
6955 sas_expander->sas_address_parent = sas_address_parent;
6956 sas_expander->sas_address = sas_address;
6957 sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6958 if (!sas_expander->port) {
6959 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6960 __FILE__, __LINE__, __func__);
6965 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6966 handle, parent_handle,
6967 (u64)sas_expander->sas_address, sas_expander->num_phys);
6969 if (!sas_expander->num_phys) {
6973 sas_expander->phy = kcalloc(sas_expander->num_phys,
6974 sizeof(struct _sas_phy), GFP_KERNEL);
6975 if (!sas_expander->phy) {
6976 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6977 __FILE__, __LINE__, __func__);
6982 INIT_LIST_HEAD(&sas_expander->sas_port_list);
6983 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6984 sas_address_parent, sas_expander->port);
6985 if (!mpt3sas_port) {
6986 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6987 __FILE__, __LINE__, __func__);
6991 sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
6992 sas_expander->rphy = mpt3sas_port->rphy;
/* Register each expander phy (page 1 is per-phy) with the transport. */
6994 for (i = 0 ; i < sas_expander->num_phys ; i++) {
6995 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
6996 &expander_pg1, i, handle))) {
6997 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6998 __FILE__, __LINE__, __func__);
7002 sas_expander->phy[i].handle = handle;
7003 sas_expander->phy[i].phy_id = i;
7004 sas_expander->phy[i].port =
7005 mpt3sas_get_port_by_id(ioc, port_id, 0);
7007 if ((mpt3sas_transport_add_expander_phy(ioc,
7008 &sas_expander->phy[i], expander_pg1,
7009 sas_expander->parent_dev))) {
7010 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7011 __FILE__, __LINE__, __func__);
7017 if (sas_expander->enclosure_handle) {
7019 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7020 sas_expander->enclosure_handle);
7022 sas_expander->enclosure_logical_id =
7023 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7026 _scsih_expander_node_add(ioc, sas_expander);
/* Error unwind: remove the transport port and free the node. */
7032 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
7033 sas_address_parent, sas_expander->port);
7034 kfree(sas_expander);
7039 * mpt3sas_expander_remove - removing expander object
7040 * @ioc: per adapter object
7041 * @sas_address: expander sas_address
7042 * @port: hba port entry
/*
 * Looks up the expander node by SAS address (under sas_node_lock) and hands
 * it to _scsih_expander_node_remove() for teardown.
 */
7045 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7046 struct hba_port *port)
7048 struct _sas_node *sas_expander;
7049 unsigned long flags;
/* Do not tear down nodes while a host reset is in flight (early-out
 * branch; its body is outside this excerpt). */
7051 if (ioc->shost_recovery)
/* sas_node_lock serializes access to the expander list during the lookup. */
7057 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7058 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
7060 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
/* Remove the expander node found above from the driver's topology. */
7062 _scsih_expander_node_remove(ioc, sas_expander);
7066 * _scsih_done - internal SCSI_IO callback handler.
7067 * @ioc: per adapter object
7068 * @smid: system request message index
7069 * @msix_index: MSIX table index supplied by the OS
7070 * @reply: reply message frame(lower 32bit addr)
7072 * Callback handler when sending internal generated SCSI_IO.
7073 * The callback index passed is `ioc->scsih_cb_idx`
7075 * Return: 1 meaning mf should be freed from _base_interrupt
7076 * 0 means the mf is freed from this function.
7079 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
7081 MPI2DefaultReply_t *mpi_reply;
7083 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
/* Ignore stale completions: no internal command outstanding ... */
7084 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
/* ... or the completion's smid does not match the command we issued. */
7086 if (ioc->scsih_cmds.smid != smid)
7088 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
/* MsgLength is counted in 32-bit dwords, hence the *4 for a byte count. */
7090 memcpy(ioc->scsih_cmds.reply, mpi_reply,
7091 mpi_reply->MsgLength*4);
7092 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
/* Clear PENDING and wake the thread waiting on scsih_cmds.done. */
7094 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
7095 complete(&ioc->scsih_cmds.done);
7102 #define MPT3_MAX_LUNS (255)
7106 * _scsih_check_access_status - check access flags
7107 * @ioc: per adapter object
7108 * @sas_address: sas address
7109 * @handle: sas device handle
7110 * @access_status: errors returned during discovery of the device
7112 * Return: 0 for success, else failure
7115 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7116 u16 handle, u8 access_status)
/* Map the firmware discovery access_status to a printable description;
 * NO_ERRORS / SATA_NEEDS_INITIALIZATION are treated as success (the shared
 * fall-through body is not visible in this excerpt). */
7121 switch (access_status) {
7122 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
7123 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
7126 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
7127 desc = "sata capability failed";
7129 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
7130 desc = "sata affiliation conflict";
7132 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
7133 desc = "route not addressable";
7135 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
7136 desc = "smp error not addressable";
7138 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
7139 desc = "device blocked";
/* Every SATA-initialization sub-failure (SIF_*) collapses to one message. */
7141 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
7142 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
7143 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
7144 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
7145 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
7146 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
7147 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
7148 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
7149 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
7150 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
7151 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
7152 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
7153 desc = "sata initialization failed";
/* Reached with desc set for an error status: log the discovery failure. */
7163 ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
7164 desc, (u64)sas_address, handle);
7169 * _scsih_check_device - checking device responsiveness
7170 * @ioc: per adapter object
7171 * @parent_sas_address: sas address of parent expander or sas host
7172 * @handle: attached device handle
7173 * @phy_number: phy number
7174 * @link_rate: new link rate
7177 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
7178 u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
7180 Mpi2ConfigReply_t mpi_reply;
7181 Mpi2SasDevicePage0_t sas_device_pg0;
7182 struct _sas_device *sas_device = NULL;
7183 struct _enclosure_node *enclosure_dev = NULL;
7185 unsigned long flags;
7187 struct scsi_target *starget;
7188 struct MPT3SAS_TARGET *sas_target_priv_data;
7190 struct hba_port *port;
/* Re-read SAS Device Page 0 for this handle to confirm the device state. */
7192 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7193 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
7196 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7197 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7200 /* wide port handling ~ we need only handle device once for the phy that
7201 * is matched in sas device page zero
7203 if (phy_number != sas_device_pg0.PhyNum)
7206 /* check if this is end device */
7207 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7208 if (!(_scsih_is_end_device(device_info)))
/* sas_device_lock is held across the lookup and the cached-field updates
 * below. */
7211 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7212 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7213 port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
7216 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
/* Firmware may hand out a different handle for the same SAS address (e.g.
 * after a reset); resync the cached handle in both the driver object and
 * the scsi_target private data. */
7222 if (unlikely(sas_device->handle != handle)) {
7223 starget = sas_device->starget;
7224 sas_target_priv_data = starget->hostdata;
7225 starget_printk(KERN_INFO, starget,
7226 "handle changed from(0x%04x) to (0x%04x)!!!\n",
7227 sas_device->handle, handle);
7228 sas_target_priv_data->handle = handle;
7229 sas_device->handle = handle;
7230 if (le16_to_cpu(sas_device_pg0.Flags) &
7231 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7232 sas_device->enclosure_level =
7233 sas_device_pg0.EnclosureLevel;
7234 memcpy(sas_device->connector_name,
7235 sas_device_pg0.ConnectorName, 4);
7236 sas_device->connector_name[4] = '\0';
7238 sas_device->enclosure_level = 0;
7239 sas_device->connector_name[0] = '\0';
/* Refresh enclosure info; the chassis slot is only meaningful when the
 * enclosure page's CHASSIS_SLOT_VALID flag is set. */
7242 sas_device->enclosure_handle =
7243 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7244 sas_device->is_chassis_slot_valid = 0;
7245 enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
7246 sas_device->enclosure_handle);
7247 if (enclosure_dev) {
7248 sas_device->enclosure_logical_id =
7249 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7250 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7251 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7252 sas_device->is_chassis_slot_valid = 1;
7253 sas_device->chassis_slot =
7254 enclosure_dev->pg0.ChassisSlot;
7259 /* check if device is present */
7260 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7261 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7262 ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
7267 /* check if there were any issues with discovery */
7268 if (_scsih_check_access_status(ioc, sas_address, handle,
7269 sas_device_pg0.AccessStatus))
/* Device is present and passed the discovery checks: drop the lock and
 * resume (unblock) I/O to it. */
7272 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7273 _scsih_ublock_io_device(ioc, sas_address, port);
7276 sas_device_put(sas_device);
7280 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7282 sas_device_put(sas_device);
7286 * _scsih_add_device - creating sas device object
7287 * @ioc: per adapter object
7288 * @handle: sas device handle
7289 * @phy_num: phy number end device attached to
7290 * @is_pd: is this hidden raid component
7292 * Creating end device object, stored in ioc->sas_device_list.
7294 * Return: 0 for success, non-zero for failure.
7297 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
7300 Mpi2ConfigReply_t mpi_reply;
7301 Mpi2SasDevicePage0_t sas_device_pg0;
7302 struct _sas_device *sas_device;
7303 struct _enclosure_node *enclosure_dev = NULL;
/* Read SAS Device Page 0 to learn everything about the new device. */
7309 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7310 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7311 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7312 __FILE__, __LINE__, __func__);
7316 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7317 MPI2_IOCSTATUS_MASK;
7318 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7319 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7320 __FILE__, __LINE__, __func__);
7324 /* check if this is end device */
7325 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7326 if (!(_scsih_is_end_device(device_info)))
/* Track this handle as "OS device add pending" until registration is done. */
7328 set_bit(handle, ioc->pend_os_device_add);
7329 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7331 /* check if device is present */
7332 if (!(le16_to_cpu(sas_device_pg0.Flags) &
7333 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7334 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
7339 /* check if there were any issues with discovery */
7340 if (_scsih_check_access_status(ioc, sas_address, handle,
7341 sas_device_pg0.AccessStatus))
/* If the device is already on our lists, clear the pending bit and drop
 * the lookup reference (early-out; surrounding branch lines are not
 * visible in this excerpt). */
7344 port_id = sas_device_pg0.PhysicalPort;
7345 sas_device = mpt3sas_get_sdev_by_addr(ioc,
7346 sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
7348 clear_bit(handle, ioc->pend_os_device_add);
7349 sas_device_put(sas_device);
7353 if (sas_device_pg0.EnclosureHandle) {
7355 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7356 le16_to_cpu(sas_device_pg0.EnclosureHandle));
7357 if (enclosure_dev == NULL)
7358 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
7359 sas_device_pg0.EnclosureHandle);
/* Allocate and populate the new _sas_device object. */
7362 sas_device = kzalloc(sizeof(struct _sas_device),
7365 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7366 __FILE__, __LINE__, __func__);
/* Initial reference is owned by this function; dropped at the end. */
7370 kref_init(&sas_device->refcount);
7371 sas_device->handle = handle;
7372 if (_scsih_get_sas_address(ioc,
7373 le16_to_cpu(sas_device_pg0.ParentDevHandle),
7374 &sas_device->sas_address_parent) != 0)
7375 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7376 __FILE__, __LINE__, __func__);
7377 sas_device->enclosure_handle =
7378 le16_to_cpu(sas_device_pg0.EnclosureHandle);
7379 if (sas_device->enclosure_handle != 0)
7381 le16_to_cpu(sas_device_pg0.Slot);
7382 sas_device->device_info = device_info;
7383 sas_device->sas_address = sas_address;
7384 sas_device->phy = sas_device_pg0.PhyNum;
7385 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
7386 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7387 sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
7388 if (!sas_device->port) {
7389 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7390 __FILE__, __LINE__, __func__);
7394 if (le16_to_cpu(sas_device_pg0.Flags)
7395 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7396 sas_device->enclosure_level =
7397 sas_device_pg0.EnclosureLevel;
7398 memcpy(sas_device->connector_name,
7399 sas_device_pg0.ConnectorName, 4);
7400 sas_device->connector_name[4] = '\0';
7402 sas_device->enclosure_level = 0;
7403 sas_device->connector_name[0] = '\0';
7405 /* get enclosure_logical_id & chassis_slot*/
7406 sas_device->is_chassis_slot_valid = 0;
7407 if (enclosure_dev) {
7408 sas_device->enclosure_logical_id =
7409 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7410 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7411 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7412 sas_device->is_chassis_slot_valid = 1;
7413 sas_device->chassis_slot =
7414 enclosure_dev->pg0.ChassisSlot;
7418 /* get device name */
7419 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
7420 sas_device->port_type = sas_device_pg0.MaxPortConnections;
7422 "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
7423 handle, sas_device->sas_address, sas_device->port_type);
/* During initial discovery, park the device on the init list; otherwise
 * add it to the live list right away. */
7425 if (ioc->wait_for_discovery_to_complete)
7426 _scsih_sas_device_init_add(ioc, sas_device);
7428 _scsih_sas_device_add(ioc, sas_device);
7431 sas_device_put(sas_device);
7436 * _scsih_remove_device - removing sas device object
7437 * @ioc: per adapter object
7438 * @sas_device: the sas_device object
7441 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
7442 struct _sas_device *sas_device)
7444 struct MPT3SAS_TARGET *sas_target_priv_data;
/* IBM-branded controllers: switch the predictive-failure (PFA) LED off
 * before the device object goes away. */
7446 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
7447 (sas_device->pfa_led_on)) {
7448 _scsih_turn_off_pfa_led(ioc, sas_device);
7449 sas_device->pfa_led_on = 0;
7453 ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
7455 sas_device->handle, (u64)sas_device->sas_address));
7457 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
/* Mark the target deleted, unblock it so queued I/O can complete/fail,
 * and invalidate the cached firmware handle. */
7460 if (sas_device->starget && sas_device->starget->hostdata) {
7461 sas_target_priv_data = sas_device->starget->hostdata;
7462 sas_target_priv_data->deleted = 1;
7463 _scsih_ublock_io_device(ioc, sas_device->sas_address,
7465 sas_target_priv_data->handle =
7466 MPT3SAS_INVALID_DEVICE_HANDLE;
/* Skip transport-layer port removal when drives are hidden — presumably
 * hidden drives were never registered with the transport layer; confirm
 * against the hide_drives setup code. */
7469 if (!ioc->hide_drives)
7470 mpt3sas_transport_port_remove(ioc,
7471 sas_device->sas_address,
7472 sas_device->sas_address_parent,
7475 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
7476 sas_device->handle, (u64)sas_device->sas_address);
7478 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
7481 ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
7483 sas_device->handle, (u64)sas_device->sas_address));
7484 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7489 * _scsih_sas_topology_change_event_debug - debug for topology event
7490 * @ioc: per adapter object
7491 * @event_data: event data payload
/*
 * Pretty-prints the expander status and the per-phy reason code of every
 * entry in a SAS topology change list event. Debug/logging only; no state
 * is modified.
 */
7495 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7496 Mpi2EventDataSasTopologyChangeList_t *event_data)
7502 char *status_str = NULL;
7503 u8 link_rate, prev_link_rate;
/* Decode the expander-level status first. */
7505 switch (event_data->ExpStatus) {
7506 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7509 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7510 status_str = "remove";
7512 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7514 status_str = "responding";
7516 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7517 status_str = "remove delay";
7520 status_str = "unknown status";
7523 ioc_info(ioc, "sas topology change: (%s)\n", status_str);
7524 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
7525 "start_phy(%02d), count(%d)\n",
7526 le16_to_cpu(event_data->ExpanderDevHandle),
7527 le16_to_cpu(event_data->EnclosureHandle),
7528 event_data->StartPhyNum, event_data->NumEntries);
/* Then one line per phy entry in the event list. */
7529 for (i = 0; i < event_data->NumEntries; i++) {
7530 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7533 phy_number = event_data->StartPhyNum + i;
7534 reason_code = event_data->PHY[i].PhyStatus &
7535 MPI2_EVENT_SAS_TOPO_RC_MASK;
7536 switch (reason_code) {
7537 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7538 status_str = "target add";
7540 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7541 status_str = "target remove";
7543 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7544 status_str = "delay target remove";
7546 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7547 status_str = "link rate change";
7549 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7550 status_str = "target responding";
7553 status_str = "unknown";
/* LinkRate packs the new rate in the upper nibble, previous in the lower. */
7556 link_rate = event_data->PHY[i].LinkRate >> 4;
7557 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7558 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
7559 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
7560 handle, status_str, link_rate, prev_link_rate);
7566 * _scsih_sas_topology_change_event - handle topology changes
7567 * @ioc: per adapter object
7568 * @fw_event: The fw_event_work object
/*
 * Worker for a SAS topology change list event: resolves the parent node
 * (HBA or expander), then walks every phy entry and adds/removes/updates
 * the attached end devices, finally handling expander removal itself.
 */
7573 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7574 struct fw_event_work *fw_event)
7577 u16 parent_handle, handle;
7579 u8 phy_number, max_phys;
7580 struct _sas_node *sas_expander;
7582 unsigned long flags;
7583 u8 link_rate, prev_link_rate;
7584 struct hba_port *port;
7585 Mpi2EventDataSasTopologyChangeList_t *event_data =
7586 (Mpi2EventDataSasTopologyChangeList_t *)
7587 fw_event->event_data;
7589 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7590 _scsih_sas_topology_change_event_debug(ioc, event_data);
/* Nothing to do while the host is being reset or removed. */
7592 if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
/* First topology event: create the host's phy objects; otherwise refresh. */
7595 if (!ioc->sas_hba.num_phys)
7596 _scsih_sas_host_add(ioc);
7598 _scsih_sas_host_refresh(ioc);
/* fw_event->ignore — presumably set when a later event supersedes this
 * one; confirm against the event queueing code. */
7600 if (fw_event->ignore) {
7601 dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
7605 parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
7606 port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
7608 /* handle expander add */
7609 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
7610 if (_scsih_expander_add(ioc, parent_handle) != 0)
/* Resolve the parent's SAS address / phy count: expander if found,
 * otherwise the HBA itself when the handle maps to a host phy. */
7613 spin_lock_irqsave(&ioc->sas_node_lock, flags);
7614 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
7617 sas_address = sas_expander->sas_address;
7618 max_phys = sas_expander->num_phys;
7619 port = sas_expander->port;
7620 } else if (parent_handle < ioc->sas_hba.num_phys) {
7621 sas_address = ioc->sas_hba.sas_address;
7622 max_phys = ioc->sas_hba.num_phys;
7624 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7627 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7629 /* handle siblings events */
7630 for (i = 0; i < event_data->NumEntries; i++) {
7631 if (fw_event->ignore) {
7633 ioc_info(ioc, "ignoring expander event\n"));
7636 if (ioc->remove_host || ioc->pci_error_recovery)
7638 phy_number = event_data->StartPhyNum + i;
7639 if (phy_number >= max_phys)
7641 reason_code = event_data->PHY[i].PhyStatus &
7642 MPI2_EVENT_SAS_TOPO_RC_MASK;
/* Skip vacant phys unless the attached target is being removed. */
7643 if ((event_data->PHY[i].PhyStatus &
7644 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
7645 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
7647 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
/* New link rate in the upper nibble, previous rate in the lower. */
7650 link_rate = event_data->PHY[i].LinkRate >> 4;
7651 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7652 switch (reason_code) {
7653 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7655 if (ioc->shost_recovery)
7658 if (link_rate == prev_link_rate)
7661 mpt3sas_transport_update_links(ioc, sas_address,
7662 handle, phy_number, link_rate, port);
7664 if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
/* Link came up / rate changed: verify the device still responds. */
7667 _scsih_check_device(ioc, sas_address, handle,
7668 phy_number, link_rate);
7670 if (!test_bit(handle, ioc->pend_os_device_add))
7675 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7677 if (ioc->shost_recovery)
7680 mpt3sas_transport_update_links(ioc, sas_address,
7681 handle, phy_number, link_rate, port);
7683 _scsih_add_device(ioc, handle, phy_number, 0);
7686 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7688 _scsih_device_remove_by_handle(ioc, handle);
7693 /* handle expander removal */
7694 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
7696 mpt3sas_expander_remove(ioc, sas_address, port);
7702 * _scsih_sas_device_status_change_event_debug - debug for device event
7704 * @event_data: event data payload
/*
 * Decodes a SAS device status change event's reason code into text and
 * logs it along with the handle/SAS address/task tag. Logging only.
 */
7708 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7709 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7711 char *reason_str = NULL;
7713 switch (event_data->ReasonCode) {
7714 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7715 reason_str = "smart data";
7717 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7718 reason_str = "unsupported device discovered";
7720 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7721 reason_str = "internal device reset";
7723 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7724 reason_str = "internal task abort";
7726 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7727 reason_str = "internal task abort set";
7729 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7730 reason_str = "internal clear task set";
7732 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7733 reason_str = "internal query task";
7735 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
7736 reason_str = "sata init failure";
7738 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7739 reason_str = "internal device reset complete";
7741 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7742 reason_str = "internal task abort complete";
7744 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7745 reason_str = "internal async notification";
7747 case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
7748 reason_str = "expander reduced functionality";
7750 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
7751 reason_str = "expander reduced functionality complete";
7754 reason_str = "unknown reason";
7757 ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
7758 reason_str, le16_to_cpu(event_data->DevHandle),
7759 (u64)le64_to_cpu(event_data->SASAddress),
7760 le16_to_cpu(event_data->TaskTag));
/* SMART events additionally carry sense ASC/ASCQ worth printing. */
7761 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
7762 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7763 event_data->ASC, event_data->ASCQ);
7768 * _scsih_sas_device_status_change_event - handle device status change
7769 * @ioc: per adapter object
7770 * @event_data: The fw event
/*
 * Sets/clears the per-target tm_busy flag around firmware-internal device
 * resets so task management is held off while a reset is in flight.
 */
7774 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7775 Mpi2EventDataSasDeviceStatusChange_t *event_data)
7777 struct MPT3SAS_TARGET *target_priv_data;
7778 struct _sas_device *sas_device;
7780 unsigned long flags;
7782 /* In MPI Revision K (0xC), the internal device reset complete was
7783 * implemented, so avoid setting tm_busy flag for older firmware.
7785 if ((ioc->facts.HeaderVersion >> 8) < 0xC)
/* Only the internal-reset start/complete pair is interesting here. */
7788 if (event_data->ReasonCode !=
7789 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7790 event_data->ReasonCode !=
7791 MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7794 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7795 sas_address = le64_to_cpu(event_data->SASAddress);
7796 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7798 mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
7800 if (!sas_device || !sas_device->starget)
7803 target_priv_data = sas_device->starget->hostdata;
7804 if (!target_priv_data)
/* Reset started: block TM; reset completed: allow TM again. */
7807 if (event_data->ReasonCode ==
7808 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
7809 target_priv_data->tm_busy = 1;
7811 target_priv_data->tm_busy = 0;
7813 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7815 "%s tm_busy flag for handle(0x%04x)\n",
7816 (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
7817 target_priv_data->handle);
7821 sas_device_put(sas_device);
7823 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7828 * _scsih_check_pcie_access_status - check access flags
7829 * @ioc: per adapter object
7831 * @handle: sas device handle
7832 * @access_status: errors returned during discovery of the device
7834 * Return: 0 for success, else failure
7837 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
7838 u16 handle, u8 access_status)
/* Map the PCIe/NVMe discovery access_status to a printable description;
 * NO_ERRORS / NEEDS_INITIALIZATION are treated as success (shared
 * fall-through body not visible in this excerpt). */
7843 switch (access_status) {
7844 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
7845 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
7848 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
7849 desc = "PCIe device capability failed";
/* Blocked devices get a dedicated log: per the message text they are
 * still added to the driver's internal list. */
7851 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
7852 desc = "PCIe device blocked";
7854 "Device with Access Status (%s): wwid(0x%016llx), "
7855 "handle(0x%04x)\n ll only be added to the internal list",
7856 desc, (u64)wwid, handle);
7859 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
7860 desc = "PCIe device mem space access failed";
7862 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
7863 desc = "PCIe device unsupported";
7865 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
7866 desc = "PCIe device MSIx Required";
7868 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
7869 desc = "PCIe device init fail max";
7871 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
7872 desc = "PCIe device status unknown";
7874 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
7875 desc = "nvme ready timeout";
7877 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
7878 desc = "nvme device configuration unsupported";
7880 case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
7881 desc = "nvme identify failed";
7883 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
7884 desc = "nvme qconfig failed";
7886 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
7887 desc = "nvme qcreation failed";
7889 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
7890 desc = "nvme eventcfg failed";
7892 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
7893 desc = "nvme get feature stat failed";
7895 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
7896 desc = "nvme idle timeout";
7898 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
7899 desc = "nvme failure status";
/* Unrecognized status codes are logged numerically. */
7902 ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
7903 access_status, (u64)wwid, handle);
/* Known error statuses are logged with their description. */
7910 ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
7911 desc, (u64)wwid, handle);
7916 * _scsih_pcie_device_remove_from_sml - removing pcie device
7917 * from SML and free up associated memory
7918 * @ioc: per adapter object
7919 * @pcie_device: the pcie_device object
7922 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
7923 struct _pcie_device *pcie_device)
7925 struct MPT3SAS_TARGET *sas_target_priv_data;
7928 ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
7930 pcie_device->handle, (u64)pcie_device->wwid));
7931 if (pcie_device->enclosure_handle != 0)
7933 ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
7935 (u64)pcie_device->enclosure_logical_id,
7936 pcie_device->slot));
7937 if (pcie_device->connector_name[0] != '\0')
7939 ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
7941 pcie_device->enclosure_level,
7942 pcie_device->connector_name));
/* Mark the target deleted, unblock it so queued I/O can drain, and
 * invalidate the cached firmware handle. */
7944 if (pcie_device->starget && pcie_device->starget->hostdata) {
7945 sas_target_priv_data = pcie_device->starget->hostdata;
7946 sas_target_priv_data->deleted = 1;
7947 _scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
7948 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
7951 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7952 pcie_device->handle, (u64)pcie_device->wwid);
7953 if (pcie_device->enclosure_handle != 0)
7954 ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
7955 (u64)pcie_device->enclosure_logical_id,
7957 if (pcie_device->connector_name[0] != '\0')
7958 ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
7959 pcie_device->enclosure_level,
7960 pcie_device->connector_name);
/* DEVICE_BLOCKED devices never made it to the SCSI midlayer as targets,
 * so only remove the target for the non-blocked case. */
7962 if (pcie_device->starget && (pcie_device->access_status !=
7963 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
7964 scsi_remove_target(&pcie_device->starget->dev);
7966 ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
7968 pcie_device->handle, (u64)pcie_device->wwid);
7969 if (pcie_device->enclosure_handle != 0)
7971 ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
7973 (u64)pcie_device->enclosure_logical_id,
7974 pcie_device->slot));
7975 if (pcie_device->connector_name[0] != '\0')
7977 ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
7979 pcie_device->enclosure_level,
7980 pcie_device->connector_name));
/* The serial number string was heap-allocated at discovery time. */
7982 kfree(pcie_device->serial_number);
7987 * _scsih_pcie_check_device - checking device responsiveness
7988 * @ioc: per adapter object
7989 * @handle: attached device handle
7992 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7994 Mpi2ConfigReply_t mpi_reply;
7995 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7997 struct _pcie_device *pcie_device;
7999 unsigned long flags;
8000 struct scsi_target *starget;
8001 struct MPT3SAS_TARGET *sas_target_priv_data;
/* Re-read PCIe Device Page 0 for this handle to confirm device state. */
8004 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8005 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
8008 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
8009 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8012 /* check if this is end device */
8013 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8014 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
8017 wwid = le64_to_cpu(pcie_device_pg0.WWID);
/* pcie_device_lock held across the WWID lookup and the updates below. */
8018 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8019 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8022 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/* Firmware may reassign the handle for the same WWID; resync the cached
 * handle in both the driver object and target private data. */
8026 if (unlikely(pcie_device->handle != handle)) {
8027 starget = pcie_device->starget;
8028 sas_target_priv_data = starget->hostdata;
8029 pcie_device->access_status = pcie_device_pg0.AccessStatus;
8030 starget_printk(KERN_INFO, starget,
8031 "handle changed from(0x%04x) to (0x%04x)!!!\n",
8032 pcie_device->handle, handle);
8033 sas_target_priv_data->handle = handle;
8034 pcie_device->handle = handle;
8036 if (le32_to_cpu(pcie_device_pg0.Flags) &
8037 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8038 pcie_device->enclosure_level =
8039 pcie_device_pg0.EnclosureLevel;
8040 memcpy(&pcie_device->connector_name[0],
8041 &pcie_device_pg0.ConnectorName[0], 4);
8043 pcie_device->enclosure_level = 0;
8044 pcie_device->connector_name[0] = '\0';
8048 /* check if device is present */
8049 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8050 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8051 ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
8053 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8054 pcie_device_put(pcie_device);
8058 /* check if there were any issues with discovery */
8059 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8060 pcie_device_pg0.AccessStatus)) {
8061 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8062 pcie_device_put(pcie_device);
/* Device is present and healthy: drop the lock/reference and resume I/O. */
8066 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8067 pcie_device_put(pcie_device);
8069 _scsih_ublock_io_device(ioc, wwid, NULL);
8075 * _scsih_pcie_add_device - creating pcie device object
8076 * @ioc: per adapter object
8077 * @handle: pcie device handle
8079 * Creating end device object, stored in ioc->pcie_device_list.
8081 * Return: 1 means queue the event later, 0 means complete the event
8084 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8086 Mpi26PCIeDevicePage0_t pcie_device_pg0;
8087 Mpi26PCIeDevicePage2_t pcie_device_pg2;
8088 Mpi2ConfigReply_t mpi_reply;
8089 struct _pcie_device *pcie_device;
8090 struct _enclosure_node *enclosure_dev;
/* Read PCIe Device Page 0 for this handle; bail out if the config request fails. */
8094 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8095 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
8096 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8097 __FILE__, __LINE__, __func__);
8100 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8101 MPI2_IOCSTATUS_MASK;
8102 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8103 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8104 __FILE__, __LINE__, __func__);
/* Mark the handle as pending an OS-level device add until it is fully exposed. */
8108 set_bit(handle, ioc->pend_os_device_add);
8109 wwid = le64_to_cpu(pcie_device_pg0.WWID);
8111 /* check if device is present */
8112 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8113 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
/* NOTE(review): format specifier "0x04%x" looks transposed; "0x%04x" was
 * probably intended — as printed, it emits a literal "04" before the hex value.
 */
8114 ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
8119 /* check if there were any issues with discovery */
8120 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8121 pcie_device_pg0.AccessStatus))
8124 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
8125 (pcie_device_pg0.DeviceInfo))))
/* If a device object with this WWID already exists, the add was already done. */
8128 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
8130 clear_bit(handle, ioc->pend_os_device_add);
8131 pcie_device_put(pcie_device);
8135 /* PCIe Device Page 2 contains read-only information about a
8136 * specific NVMe device; therefore, this page is only
8137 * valid for NVMe devices and skip for pcie devices of type scsi.
8139 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8140 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
/* NOTE(review): MPI2_SAS_DEVICE_PGAD_FORM_HANDLE is a SAS page-address form
 * being used for a PCIe device page read — presumably the numeric form values
 * are identical; confirm against the MPI 2.6 headers.
 */
8141 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
8142 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8145 "failure at %s:%d/%s()!\n", __FILE__,
8146 __LINE__, __func__);
8150 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8151 MPI2_IOCSTATUS_MASK;
8152 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8154 "failure at %s:%d/%s()!\n", __FILE__,
8155 __LINE__, __func__);
/* Allocate and populate the driver's device object from Page 0 contents. */
8160 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
8162 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8163 __FILE__, __LINE__, __func__);
8167 kref_init(&pcie_device->refcount);
8168 pcie_device->id = ioc->pcie_target_id++;
8169 pcie_device->channel = PCIE_CHANNEL;
8170 pcie_device->handle = handle;
8171 pcie_device->access_status = pcie_device_pg0.AccessStatus;
8172 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8173 pcie_device->wwid = wwid;
8174 pcie_device->port_num = pcie_device_pg0.PortNum;
8175 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
8176 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
8178 pcie_device->enclosure_handle =
8179 le16_to_cpu(pcie_device_pg0.EnclosureHandle);
8180 if (pcie_device->enclosure_handle != 0)
8181 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
8183 if (le32_to_cpu(pcie_device_pg0.Flags) &
8184 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8185 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
8186 memcpy(&pcie_device->connector_name[0],
8187 &pcie_device_pg0.ConnectorName[0], 4);
8189 pcie_device->enclosure_level = 0;
8190 pcie_device->connector_name[0] = '\0';
8193 /* get enclosure_logical_id */
8194 if (pcie_device->enclosure_handle) {
8196 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8197 pcie_device->enclosure_handle);
8199 pcie_device->enclosure_logical_id =
8200 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8202 /* TODO -- Add device name once FW supports it */
/* NVMe-only attributes come from Page 2 (MDTS, shutdown latency, reset timeout). */
8203 if (!(mpt3sas_scsih_is_pcie_scsi_device(
8204 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8205 pcie_device->nvme_mdts =
8206 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
8207 pcie_device->shutdown_latency =
8208 le16_to_cpu(pcie_device_pg2.ShutdownLatency);
8210 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
8211 * if drive's RTD3 Entry Latency is greater than IOC's
8212 * max_shutdown_latency.
8214 if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
8215 ioc->max_shutdown_latency =
8216 pcie_device->shutdown_latency;
8217 if (pcie_device_pg2.ControllerResetTO)
8218 pcie_device->reset_timeout =
8219 pcie_device_pg2.ControllerResetTO;
/* 30 s is the fallback reset timeout when Page 2 gives none (and for pcie-scsi). */
8221 pcie_device->reset_timeout = 30;
8223 pcie_device->reset_timeout = 30;
/* During initial discovery queue to the init list; otherwise add/expose now. */
8225 if (ioc->wait_for_discovery_to_complete)
8226 _scsih_pcie_device_init_add(ioc, pcie_device);
8228 _scsih_pcie_device_add(ioc, pcie_device);
8230 pcie_device_put(pcie_device);
8235 * _scsih_pcie_topology_change_event_debug - debug for topology
8237 * @ioc: per adapter object
8238 * @event_data: event data payload
8242 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8243 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
8249 char *status_str = NULL;
8250 u8 link_rate, prev_link_rate;
/* Map the switch-level status to a human-readable string for the log header. */
8252 switch (event_data->SwitchStatus) {
8253 case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
8256 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
8257 status_str = "remove";
8259 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
8261 status_str = "responding";
8263 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
8264 status_str = "remove delay";
8267 status_str = "unknown status";
8270 ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
/* NOTE(review): the two concatenated string literals join as
 * "...enclosure_handle(0x%04x)start_port(%02d)..." — no separator between
 * the enclosure handle and "start_port"; likely a missing ", ".
 */
8271 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
8272 "start_port(%02d), count(%d)\n",
8273 le16_to_cpu(event_data->SwitchDevHandle),
8274 le16_to_cpu(event_data->EnclosureHandle),
8275 event_data->StartPortNum, event_data->NumEntries);
/* Per-port entries: decode each port's status plus old/new link rates. */
8276 for (i = 0; i < event_data->NumEntries; i++) {
8278 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8281 port_number = event_data->StartPortNum + i;
8282 reason_code = event_data->PortEntry[i].PortStatus;
8283 switch (reason_code) {
8284 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8285 status_str = "target add";
8287 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8288 status_str = "target remove";
8290 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
8291 status_str = "delay target remove";
8293 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8294 status_str = "link rate change";
8296 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
8297 status_str = "target responding";
8300 status_str = "unknown";
8303 link_rate = event_data->PortEntry[i].CurrentPortInfo &
8304 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8305 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
8306 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8307 pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
8308 " link rate: new(0x%02x), old(0x%02x)\n", port_number,
8309 handle, status_str, link_rate, prev_link_rate);
8314 * _scsih_pcie_topology_change_event - handle PCIe topology
8316 * @ioc: per adapter object
8317 * @fw_event: The fw_event_work object
8322 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
8323 struct fw_event_work *fw_event)
8328 u8 link_rate, prev_link_rate;
8329 unsigned long flags;
8331 Mpi26EventDataPCIeTopologyChangeList_t *event_data =
8332 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
8333 struct _pcie_device *pcie_device;
8335 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8336 _scsih_pcie_topology_change_event_debug(ioc, event_data);
/* Skip processing entirely while the host is resetting or being removed. */
8338 if (ioc->shost_recovery || ioc->remove_host ||
8339 ioc->pci_error_recovery)
8342 if (fw_event->ignore) {
8343 dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
8347 /* handle siblings events */
8348 for (i = 0; i < event_data->NumEntries; i++) {
/* fw_event->ignore can be set asynchronously, so re-check every iteration. */
8349 if (fw_event->ignore) {
8351 ioc_info(ioc, "ignoring switch event\n"));
8354 if (ioc->remove_host || ioc->pci_error_recovery)
8356 reason_code = event_data->PortEntry[i].PortStatus;
8358 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8362 link_rate = event_data->PortEntry[i].CurrentPortInfo
8363 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8364 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
8365 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8367 switch (reason_code) {
8368 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8369 if (ioc->shost_recovery)
8371 if (link_rate == prev_link_rate)
8373 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8376 _scsih_pcie_check_device(ioc, handle);
8378 /* This code after this point handles the test case
8379 * where a device has been added, however it is returning
8380 * BUSY for some time. Then, before the Device Missing
8381 * Delay expires and the device becomes READY, the
8382 * device is removed and added back.
8384 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8385 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
8386 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8389 pcie_device_put(pcie_device);
8393 if (!test_bit(handle, ioc->pend_os_device_add))
8397 ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
/* Rewrite the port status in-place so this entry is reprocessed as DEV_ADDED. */
8399 event_data->PortEntry[i].PortStatus &= 0xF0;
8400 event_data->PortEntry[i].PortStatus |=
8401 MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
8403 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8404 if (ioc->shost_recovery)
8406 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
8409 rc = _scsih_pcie_add_device(ioc, handle);
8411 /* mark entry vacant */
8412 /* TODO This needs to be reviewed and fixed,
8413 * we dont have an entry
8414 * to make an event void like vacant
8416 event_data->PortEntry[i].PortStatus |=
8417 MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
8420 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8421 _scsih_pcie_device_remove_by_handle(ioc, handle);
8428 * _scsih_pcie_device_status_change_event_debug - debug for device event
8430 * @event_data: event data payload
8434 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8435 Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
8437 char *reason_str = NULL;
/* Translate the firmware reason code into a readable string for logging. */
8439 switch (event_data->ReasonCode) {
8440 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
8441 reason_str = "smart data";
8443 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
8444 reason_str = "unsupported device discovered";
8446 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
8447 reason_str = "internal device reset";
8449 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
8450 reason_str = "internal task abort";
8452 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8453 reason_str = "internal task abort set";
8455 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8456 reason_str = "internal clear task set";
8458 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
8459 reason_str = "internal query task";
8461 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
8462 reason_str = "device init failure";
8464 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8465 reason_str = "internal device reset complete";
8467 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8468 reason_str = "internal task abort complete";
8470 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
8471 reason_str = "internal async notification";
8473 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
8474 reason_str = "pcie hot reset failed";
8477 reason_str = "unknown reason";
8481 ioc_info(ioc, "PCIE device status change: (%s)\n"
8482 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
8483 reason_str, le16_to_cpu(event_data->DevHandle),
8484 (u64)le64_to_cpu(event_data->WWID),
8485 le16_to_cpu(event_data->TaskTag));
/* SMART-data events carry additional sense info; continue the same log line. */
8486 if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
8487 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
8488 event_data->ASC, event_data->ASCQ);
8493 * _scsih_pcie_device_status_change_event - handle device status
8495 * @ioc: per adapter object
8496 * @fw_event: The fw_event_work object
8500 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8501 struct fw_event_work *fw_event)
8503 struct MPT3SAS_TARGET *target_priv_data;
8504 struct _pcie_device *pcie_device;
8506 unsigned long flags;
8507 Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
8508 (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
8509 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8510 _scsih_pcie_device_status_change_event_debug(ioc,
/* Only internal-device-reset start/complete events affect driver state. */
8513 if (event_data->ReasonCode !=
8514 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
8515 event_data->ReasonCode !=
8516 MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
8519 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8520 wwid = le64_to_cpu(event_data->WWID);
8521 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8523 if (!pcie_device || !pcie_device->starget)
8526 target_priv_data = pcie_device->starget->hostdata;
8527 if (!target_priv_data)
/* tm_busy gates task management while an internal device reset is in flight. */
8530 if (event_data->ReasonCode ==
8531 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
8532 target_priv_data->tm_busy = 1;
8534 target_priv_data->tm_busy = 0;
8537 pcie_device_put(pcie_device);
8539 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8543 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
8545 * @ioc: per adapter object
8546 * @event_data: event data payload
8550 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8551 Mpi2EventDataSasEnclDevStatusChange_t *event_data)
8553 char *reason_str = NULL;
8555 switch (event_data->ReasonCode) {
8556 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8557 reason_str = "enclosure add";
8559 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8560 reason_str = "enclosure remove";
8563 reason_str = "unknown reason";
/* NOTE(review): the "number slots(%d)" field is fed from StartSlot, not a
 * slot-count member — confirm against the MPI event structure whether this
 * label or the argument is the one that is wrong.
 */
8567 ioc_info(ioc, "enclosure status change: (%s)\n"
8568 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
8570 le16_to_cpu(event_data->EnclosureHandle),
8571 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
8572 le16_to_cpu(event_data->StartSlot));
8576 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
8577 * @ioc: per adapter object
8578 * @fw_event: The fw_event_work object
8582 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8583 struct fw_event_work *fw_event)
8585 Mpi2ConfigReply_t mpi_reply;
8586 struct _enclosure_node *enclosure_dev = NULL;
8587 Mpi2EventDataSasEnclDevStatusChange_t *event_data =
8588 (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
8590 u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
8592 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8593 _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
8594 (Mpi2EventDataSasEnclDevStatusChange_t *)
8595 fw_event->event_data);
8596 if (ioc->shost_recovery)
/* Look up any existing enclosure object for this handle before branching. */
8599 if (enclosure_handle)
8601 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8603 switch (event_data->ReasonCode) {
8604 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
/* Add: allocate a node, read Enclosure Page 0 into it, then link it in. */
8605 if (!enclosure_dev) {
8607 kzalloc(sizeof(struct _enclosure_node),
8609 if (!enclosure_dev) {
8610 ioc_info(ioc, "failure at %s:%d/%s()!\n",
8611 __FILE__, __LINE__, __func__);
8614 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8615 &enclosure_dev->pg0,
8616 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
/* Free the node if the config read fails — never list a half-initialized one. */
8619 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8620 MPI2_IOCSTATUS_MASK)) {
8621 kfree(enclosure_dev);
8625 list_add_tail(&enclosure_dev->list,
8626 &ioc->enclosure_list);
8629 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
/* Remove: unlink and free only if we actually track this enclosure. */
8630 if (enclosure_dev) {
8631 list_del(&enclosure_dev->list);
8632 kfree(enclosure_dev);
8641 * _scsih_sas_broadcast_primitive_event - handle broadcast events
8642 * @ioc: per adapter object
8643 * @fw_event: The fw_event_work object
8647 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
8648 struct fw_event_work *fw_event)
8650 struct scsi_cmnd *scmd;
8651 struct scsi_device *sdev;
8652 struct scsiio_tracker *st;
8655 struct MPT3SAS_DEVICE *sas_device_priv_data;
8656 u32 termination_count;
8658 Mpi2SCSITaskManagementReply_t *mpi_reply;
8659 Mpi2EventDataSasBroadcastPrimitive_t *event_data =
8660 (Mpi2EventDataSasBroadcastPrimitive_t *)
8661 fw_event->event_data;
8663 unsigned long flags;
8666 u8 task_abort_retries;
/* Serialize against other task-management users for the whole event. */
8668 mutex_lock(&ioc->tm_cmds.mutex);
8669 ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
8670 __func__, event_data->PhyNum, event_data->PortWidth);
8672 _scsih_block_io_all_device(ioc);
8674 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8675 mpi_reply = ioc->tm_cmds.reply;
8676 broadcast_aen_retry:
8678 /* sanity checks for retrying this loop */
/* At most 5 passes over the outstanding-command table before giving up. */
8679 if (max_retries++ == 5) {
8680 dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
8682 } else if (max_retries > 1)
8684 ioc_info(ioc, "%s: %d retry\n",
8685 __func__, max_retries - 1));
8687 termination_count = 0;
/* Walk every outstanding SCSI IO (by smid) and QUERY_TASK each candidate. */
8689 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
8690 if (ioc->shost_recovery)
8692 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
8695 st = scsi_cmd_priv(scmd);
8696 sdev = scmd->device;
8697 sas_device_priv_data = sdev->hostdata;
8698 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
8700 /* skip hidden raid components */
8701 if (sas_device_priv_data->sas_target->flags &
8702 MPT_TARGET_FLAGS_RAID_COMPONENT)
8705 if (sas_device_priv_data->sas_target->flags &
8706 MPT_TARGET_FLAGS_VOLUME)
8708 /* skip PCIe devices */
8709 if (sas_device_priv_data->sas_target->flags &
8710 MPT_TARGET_FLAGS_PCIE_DEVICE)
8713 handle = sas_device_priv_data->sas_target->handle;
8714 lun = sas_device_priv_data->lun;
8717 if (ioc->shost_recovery)
/* Lock is dropped around the blocking TM request and retaken afterwards. */
8720 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8721 r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
8722 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
8723 st->msix_io, 30, 0);
8725 sdev_printk(KERN_WARNING, sdev,
8726 "mpt3sas_scsih_issue_tm: FAILED when sending "
8727 "QUERY_TASK: scmd(%p)\n", scmd);
8728 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8729 goto broadcast_aen_retry;
8731 ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
8732 & MPI2_IOCSTATUS_MASK;
8733 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8734 sdev_printk(KERN_WARNING, sdev,
8735 "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
8737 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8738 goto broadcast_aen_retry;
8741 /* see if IO is still owned by IOC and target */
8742 if (mpi_reply->ResponseCode ==
8743 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
8744 mpi_reply->ResponseCode ==
8745 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
8746 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
/* The IO is still owned by the IOC: ABORT_TASK it, up to 60 attempts. */
8749 task_abort_retries = 0;
8751 if (task_abort_retries++ == 60) {
8753 ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
8755 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8756 goto broadcast_aen_retry;
8759 if (ioc->shost_recovery)
8762 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
8763 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
8764 st->smid, st->msix_io, 30, 0);
8765 if (r == FAILED || st->cb_idx != 0xFF) {
8766 sdev_printk(KERN_WARNING, sdev,
8767 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
8768 "scmd(%p)\n", scmd);
8772 if (task_abort_retries > 1)
8773 sdev_printk(KERN_WARNING, sdev,
8774 "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
8776 task_abort_retries - 1, scmd);
8778 termination_count += le32_to_cpu(mpi_reply->TerminationCount);
8779 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
/* Another broadcast AEN arrived while processing: start the scan over. */
8782 if (ioc->broadcast_aen_pending) {
8785 "%s: loop back due to pending AEN\n",
8787 ioc->broadcast_aen_pending = 0;
8788 goto broadcast_aen_retry;
8792 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8796 ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
8797 __func__, query_count, termination_count));
8799 ioc->broadcast_aen_busy = 0;
8800 if (!ioc->shost_recovery)
8801 _scsih_ublock_io_all_device(ioc);
8802 mutex_unlock(&ioc->tm_cmds.mutex);
8806 * _scsih_sas_discovery_event - handle discovery events
8807 * @ioc: per adapter object
8808 * @fw_event: The fw_event_work object
8812 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
8813 struct fw_event_work *fw_event)
8815 Mpi2EventDataSasDiscovery_t *event_data =
8816 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
8818 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
8819 ioc_info(ioc, "discovery event: (%s)",
8820 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
8822 if (event_data->DiscoveryStatus)
8823 pr_cont("discovery_status(0x%08x)",
8824 le32_to_cpu(event_data->DiscoveryStatus));
/* First discovery-started event with no phys yet: bring up the SAS host. */
8828 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
8829 !ioc->sas_hba.num_phys) {
8830 if (disable_discovery > 0 && ioc->shost_recovery) {
8831 /* Wait for the reset to complete */
/* NOTE(review): this busy-waits on shost_recovery — presumably the loop
 * body sleeps between polls; confirm in the full source.
 */
8832 while (ioc->shost_recovery)
8835 _scsih_sas_host_add(ioc);
8840 * _scsih_sas_device_discovery_error_event - display SAS device discovery error
8842 * @ioc: per adapter object
8843 * @fw_event: The fw_event_work object
8847 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
8848 struct fw_event_work *fw_event)
8850 Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
8851 (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
/* Log-only handler: report SMP failures/timeouts seen during discovery. */
8853 switch (event_data->ReasonCode) {
8854 case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
8855 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
8856 le16_to_cpu(event_data->DevHandle),
8857 (u64)le64_to_cpu(event_data->SASAddress),
8858 event_data->PhysicalPort);
8860 case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
8861 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
8862 le16_to_cpu(event_data->DevHandle),
8863 (u64)le64_to_cpu(event_data->SASAddress),
8864 event_data->PhysicalPort);
8872 * _scsih_pcie_enumeration_event - handle enumeration events
8873 * @ioc: per adapter object
8874 * @fw_event: The fw_event_work object
8878 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
8879 struct fw_event_work *fw_event)
8881 Mpi26EventDataPCIeEnumeration_t *event_data =
8882 (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
/* Log-only handler; emits nothing unless event-work debugging is enabled. */
8884 if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
8887 ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
8888 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
8889 "started" : "completed",
8891 if (event_data->EnumerationStatus)
8892 pr_cont("enumeration_status(0x%08x)",
8893 le32_to_cpu(event_data->EnumerationStatus));
8898 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
8899 * @ioc: per adapter object
8900 * @handle: device handle for physical disk
8901 * @phys_disk_num: physical disk number
8903 * Return: 0 for success, else failure.
8906 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
8908 Mpi2RaidActionRequest_t *mpi_request;
8909 Mpi2RaidActionReply_t *mpi_reply;
/* MPI2-generation HBAs do not support this RAID action; nothing to do. */
8916 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
/* scsih_cmds is a single-slot command channel: take its mutex, then claim it. */
8919 mutex_lock(&ioc->scsih_cmds.mutex);
8921 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
8922 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
8926 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
8928 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
8930 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
8931 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
/* Build the PHYSDISK_HIDDEN RAID-action request in the claimed message frame. */
8936 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
8937 ioc->scsih_cmds.smid = smid;
8938 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
8940 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
8941 mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
8942 mpi_request->PhysDiskNum = phys_disk_num;
8945 ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
8946 handle, phys_disk_num));
/* Fire the request and wait up to 10 seconds for the completion. */
8948 init_completion(&ioc->scsih_cmds.done);
8949 ioc->put_smid_default(ioc, smid);
8950 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
8952 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
8953 mpt3sas_check_cmd_timeout(ioc,
8954 ioc->scsih_cmds.status, mpi_request,
8955 sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
8960 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
8962 mpi_reply = ioc->scsih_cmds.reply;
8963 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
8964 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
8965 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
8968 ioc_status &= MPI2_IOCSTATUS_MASK;
8969 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8971 ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
8972 ioc_status, log_info));
8976 ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
/* Release the command slot and mutex; escalate to a hard reset on timeout. */
8980 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8981 mutex_unlock(&ioc->scsih_cmds.mutex);
8984 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
8989 * _scsih_reprobe_lun - reprobing lun
8990 * @sdev: scsi device struct
8991 * @no_uld_attach: sdev->no_uld_attach flag setting
8995 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
/* Toggle upper-level-driver attachment (hide/expose as a raid component)
 * and force the midlayer to reprobe the LUN so the change takes effect.
 */
8997 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
8998 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
8999 sdev->no_uld_attach ? "hiding" : "exposing");
9000 WARN_ON(scsi_device_reprobe(sdev));
9004 * _scsih_sas_volume_add - add new volume
9005 * @ioc: per adapter object
9006 * @element: IR config element data
9010 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
9011 Mpi2EventIrConfigElement_t *element)
9013 struct _raid_device *raid_device;
9014 unsigned long flags;
9016 u16 handle = le16_to_cpu(element->VolDevHandle);
9019 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9021 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9022 __FILE__, __LINE__, __func__);
/* Skip the add if a raid device with this WWID is already tracked. */
9026 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9027 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
9028 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9033 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9035 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9036 __FILE__, __LINE__, __func__);
9040 raid_device->id = ioc->sas_id++;
9041 raid_device->channel = RAID_CHANNEL;
9042 raid_device->handle = handle;
9043 raid_device->wwid = wwid;
9044 _scsih_raid_device_add(ioc, raid_device);
/* Outside initial discovery, expose the volume to the midlayer immediately;
 * during discovery it is instead considered as a possible boot device.
 */
9045 if (!ioc->wait_for_discovery_to_complete) {
9046 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9047 raid_device->id, 0);
9049 _scsih_raid_device_remove(ioc, raid_device);
9051 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9052 _scsih_determine_boot_device(ioc, raid_device, 1);
9053 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9058 * _scsih_sas_volume_delete - delete volume
9059 * @ioc: per adapter object
9060 * @handle: volume device handle
9064 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
9066 struct _raid_device *raid_device;
9067 unsigned long flags;
9068 struct MPT3SAS_TARGET *sas_target_priv_data;
9069 struct scsi_target *starget = NULL;
9071 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9072 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
/* Mark the target deleted under the lock so no new IO is started to it. */
9074 if (raid_device->starget) {
9075 starget = raid_device->starget;
9076 sas_target_priv_data = starget->hostdata;
9077 sas_target_priv_data->deleted = 1;
9079 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
9080 raid_device->handle, (u64)raid_device->wwid);
9081 list_del(&raid_device->list);
9084 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
/* scsi_remove_target may sleep, so it runs after the spinlock is dropped. */
9086 scsi_remove_target(&starget->dev);
9090 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
9091 * @ioc: per adapter object
9092 * @element: IR config element data
9096 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
9097 Mpi2EventIrConfigElement_t *element)
9099 struct _sas_device *sas_device;
9100 struct scsi_target *starget = NULL;
9101 struct MPT3SAS_TARGET *sas_target_priv_data;
9102 unsigned long flags;
9103 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9105 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9106 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
/* Drop the raid-component association: clear volume linkage and pd bit. */
9108 sas_device->volume_handle = 0;
9109 sas_device->volume_wwid = 0;
9110 clear_bit(handle, ioc->pd_handles);
9111 if (sas_device->starget && sas_device->starget->hostdata) {
9112 starget = sas_device->starget;
9113 sas_target_priv_data = starget->hostdata;
9114 sas_target_priv_data->flags &=
9115 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
9118 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9122 /* exposing raid component */
/* NULL second arg -> _scsih_reprobe_lun clears no_uld_attach (expose). */
9124 starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
9126 sas_device_put(sas_device);
9130 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
9131 * @ioc: per adapter object
9132 * @element: IR config element data
9136 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
9137 Mpi2EventIrConfigElement_t *element)
9139 struct _sas_device *sas_device;
9140 struct scsi_target *starget = NULL;
9141 struct MPT3SAS_TARGET *sas_target_priv_data;
9142 unsigned long flags;
9143 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9144 u16 volume_handle = 0;
9145 u64 volume_wwid = 0;
/* Resolve the owning volume before taking the device lock. */
9147 mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
9149 mpt3sas_config_get_volume_wwid(ioc, volume_handle,
9152 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9153 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
/* Mark the disk as a hidden raid component and record its volume linkage. */
9155 set_bit(handle, ioc->pd_handles);
9156 if (sas_device->starget && sas_device->starget->hostdata) {
9157 starget = sas_device->starget;
9158 sas_target_priv_data = starget->hostdata;
9159 sas_target_priv_data->flags |=
9160 MPT_TARGET_FLAGS_RAID_COMPONENT;
9161 sas_device->volume_handle = volume_handle;
9162 sas_device->volume_wwid = volume_wwid;
9165 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9169 /* hiding raid component */
9170 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
/* Non-NULL second arg -> _scsih_reprobe_lun sets no_uld_attach (hide). */
9173 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
9175 sas_device_put(sas_device);
9179 * _scsih_sas_pd_delete - delete pd component
9180 * @ioc: per adapter object
9181 * @element: IR config element data
9185 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
9186 Mpi2EventIrConfigElement_t *element)
/* Thin wrapper: remove the physical-disk device identified by the element. */
9188 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9190 _scsih_device_remove_by_handle(ioc, handle);
9194 * _scsih_sas_pd_add - add pd component
9195 * @ioc: per adapter object
9196 * @element: IR config element data
9200 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
9201 Mpi2EventIrConfigElement_t *element)
9203 struct _sas_device *sas_device;
9204 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9205 Mpi2ConfigReply_t mpi_reply;
9206 Mpi2SasDevicePage0_t sas_device_pg0;
9211 set_bit(handle, ioc->pd_handles);
/* Already-known device: just enable fast path for it and drop the reference. */
9213 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9215 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9216 sas_device_put(sas_device);
/* Unknown device: read SAS Device Page 0 so it can be added from scratch. */
9220 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
9221 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
9222 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9223 __FILE__, __LINE__, __func__);
9227 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9228 MPI2_IOCSTATUS_MASK;
9229 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9230 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9231 __FILE__, __LINE__, __func__);
/* Refresh transport-layer link info via the parent before adding the device. */
9235 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9236 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9237 mpt3sas_transport_update_links(ioc, sas_address, handle,
9238 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9239 mpt3sas_get_port_by_id(ioc,
9240 sas_device_pg0.PhysicalPort, 0));
9242 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9243 _scsih_add_device(ioc, handle, 0, 1);
9247 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
9248 * @ioc: per adapter object
9249 * @event_data: event data payload
9253 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9254 Mpi2EventDataIrConfigChangeList_t *event_data)
9256 Mpi2EventIrConfigElement_t *element;
9259 char *reason_str = NULL, *element_str = NULL;
9261 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9263 ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
9264 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
9265 "foreign" : "native",
9266 event_data->NumElements);
/* Decode each config element's reason code and element type for the log. */
9267 for (i = 0; i < event_data->NumElements; i++, element++) {
9268 switch (element->ReasonCode) {
9269 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9272 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9273 reason_str = "remove";
9275 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
9276 reason_str = "no change";
9278 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9279 reason_str = "hide";
9281 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9282 reason_str = "unhide";
9284 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9285 reason_str = "volume_created";
9287 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9288 reason_str = "volume_deleted";
9290 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9291 reason_str = "pd_created";
9293 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9294 reason_str = "pd_deleted";
9297 reason_str = "unknown reason";
9300 element_type = le16_to_cpu(element->ElementFlags) &
9301 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
9302 switch (element_type) {
9303 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
9304 element_str = "volume";
9306 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
9307 element_str = "phys disk";
9309 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
9310 element_str = "hot spare";
9313 element_str = "unknown element";
9316 pr_info("\t(%s:%s), vol handle(0x%04x), " \
9317 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
9318 reason_str, le16_to_cpu(element->VolDevHandle),
9319 le16_to_cpu(element->PhysDiskDevHandle),
9320 element->PhysDiskNum);
/*
 * NOTE(review): elided listing — break/brace lines are missing between
 * numbered rows.
 *
 * Purpose: worker-thread handler for MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST.
 * Walks the event's element list and adds/deletes volumes and hides/exposes
 * physical disks according to each element's ReasonCode.
 */
9325 * _scsih_sas_ir_config_change_event - handle ir configuration change events
9326 * @ioc: per adapter object
9327 * @fw_event: The fw_event_work object
9331 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
9332 struct fw_event_work *fw_event)
9334 Mpi2EventIrConfigElement_t *element;
9337 Mpi2EventDataIrConfigChangeList_t *event_data =
9338 (Mpi2EventDataIrConfigChangeList_t *)
9339 fw_event->event_data;
/* Optional debug dump, gated by logging level and hide_ir_msg. */
9341 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9342 (!ioc->hide_ir_msg))
9343 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
/* Foreign configs are not surfaced as volumes on this host. */
9345 foreign_config = (le32_to_cpu(event_data->Flags) &
9346 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
9348 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
/*
 * During shost recovery on non-MPI2 HBAs, only run the RAID fastpath for
 * HIDE elements; full processing presumably resumes after recovery —
 * TODO confirm against the elided early-return path.
 */
9349 if (ioc->shost_recovery &&
9350 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
9351 for (i = 0; i < event_data->NumElements; i++, element++) {
9352 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
9353 _scsih_ir_fastpath(ioc,
9354 le16_to_cpu(element->PhysDiskDevHandle),
9355 element->PhysDiskNum);
/* Normal path: dispatch per-element on ReasonCode (breaks elided). */
9360 for (i = 0; i < event_data->NumElements; i++, element++) {
9362 switch (element->ReasonCode) {
9363 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9364 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9365 if (!foreign_config)
9366 _scsih_sas_volume_add(ioc, element);
9368 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9369 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9370 if (!foreign_config)
9371 _scsih_sas_volume_delete(ioc,
9372 le16_to_cpu(element->VolDevHandle));
/* On WarpDrive the PDs are managed differently, so hide/expose is skipped. */
9374 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9375 if (!ioc->is_warpdrive)
9376 _scsih_sas_pd_hide(ioc, element);
9378 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9379 if (!ioc->is_warpdrive)
9380 _scsih_sas_pd_expose(ioc, element);
9382 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9383 if (!ioc->is_warpdrive)
9384 _scsih_sas_pd_add(ioc, element);
9386 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9387 if (!ioc->is_warpdrive)
9388 _scsih_sas_pd_delete(ioc, element);
/*
 * NOTE(review): elided listing — returns/breaks/braces missing between
 * numbered rows.
 *
 * Purpose: handle an IR volume state-change event. MISSING/FAILED deletes the
 * volume from the SCSI midlayer; ONLINE/DEGRADED/OPTIMAL registers the volume
 * (allocating a _raid_device and calling scsi_add_device) if not yet known.
 */
9395 * _scsih_sas_ir_volume_event - IR volume event
9396 * @ioc: per adapter object
9397 * @fw_event: The fw_event_work object
9401 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
9402 struct fw_event_work *fw_event)
9405 unsigned long flags;
9406 struct _raid_device *raid_device;
9410 Mpi2EventDataIrVolume_t *event_data =
9411 (Mpi2EventDataIrVolume_t *) fw_event->event_data;
/* Bail out during controller reset (early return elided). */
9413 if (ioc->shost_recovery)
/* Only STATE_CHANGED reason codes are of interest here. */
9416 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
9419 handle = le16_to_cpu(event_data->VolDevHandle);
9420 state = le32_to_cpu(event_data->NewValue);
9421 if (!ioc->hide_ir_msg)
9423 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9425 le32_to_cpu(event_data->PreviousValue),
/* switch (state) — the switch header line itself is elided. */
9428 case MPI2_RAID_VOL_STATE_MISSING:
9429 case MPI2_RAID_VOL_STATE_FAILED:
9430 _scsih_sas_volume_delete(ioc, handle);
9433 case MPI2_RAID_VOL_STATE_ONLINE:
9434 case MPI2_RAID_VOL_STATE_DEGRADED:
9435 case MPI2_RAID_VOL_STATE_OPTIMAL:
/* If the volume is already tracked, nothing to add (check elided). */
9437 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9438 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9439 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9444 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
9446 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9447 __FILE__, __LINE__, __func__);
9451 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
9453 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9454 __FILE__, __LINE__, __func__);
/* Allocate the next SCSI target id on the dedicated RAID channel. */
9458 raid_device->id = ioc->sas_id++;
9459 raid_device->channel = RAID_CHANNEL;
9460 raid_device->handle = handle;
9461 raid_device->wwid = wwid;
9462 _scsih_raid_device_add(ioc, raid_device);
9463 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9464 raid_device->id, 0);
/* On scsi_add_device failure, roll back the internal list entry. */
9466 _scsih_raid_device_remove(ioc, raid_device);
9469 case MPI2_RAID_VOL_STATE_INITIALIZING:
/*
 * NOTE(review): elided listing — returns/breaks/braces missing between
 * numbered rows.
 *
 * Purpose: handle an IR physical-disk state-change event. When a PD becomes
 * active (online/degraded/rebuilding/optimal/hot-spare) it is recorded in
 * pd_handles and, if not already known, its SAS device page 0 is read, the
 * transport links are refreshed and the device is added.
 */
9476 * _scsih_sas_ir_physical_disk_event - PD event
9477 * @ioc: per adapter object
9478 * @fw_event: The fw_event_work object
9482 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
9483 struct fw_event_work *fw_event)
9485 u16 handle, parent_handle;
9487 struct _sas_device *sas_device;
9488 Mpi2ConfigReply_t mpi_reply;
9489 Mpi2SasDevicePage0_t sas_device_pg0;
9491 Mpi2EventDataIrPhysicalDisk_t *event_data =
9492 (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
/* Skip processing during controller reset (early return elided). */
9495 if (ioc->shost_recovery)
9498 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
9501 handle = le16_to_cpu(event_data->PhysDiskDevHandle);
9502 state = le32_to_cpu(event_data->NewValue);
9504 if (!ioc->hide_ir_msg)
9506 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
9508 le32_to_cpu(event_data->PreviousValue),
/* switch (state) — the switch header line itself is elided. */
9512 case MPI2_RAID_PD_STATE_ONLINE:
9513 case MPI2_RAID_PD_STATE_DEGRADED:
9514 case MPI2_RAID_PD_STATE_REBUILDING:
9515 case MPI2_RAID_PD_STATE_OPTIMAL:
9516 case MPI2_RAID_PD_STATE_HOT_SPARE:
/* Track PD handles so they can be hidden from the OS (non-WarpDrive). */
9518 if (!ioc->is_warpdrive)
9519 set_bit(handle, ioc->pd_handles);
/* Already tracked: just drop the acquired reference (branch elided). */
9521 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9523 sas_device_put(sas_device);
9527 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9528 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9530 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9531 __FILE__, __LINE__, __func__);
9535 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9536 MPI2_IOCSTATUS_MASK;
9537 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9538 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9539 __FILE__, __LINE__, __func__);
9543 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
/* Refresh transport links before adding; link rate updated later on. */
9544 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9545 mpt3sas_transport_update_links(ioc, sas_address, handle,
9546 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9547 mpt3sas_get_port_by_id(ioc,
9548 sas_device_pg0.PhysicalPort, 0));
9550 _scsih_add_device(ioc, handle, 0, 1);
/* Inactive PD states: intentionally ignored (no action lines visible). */
9554 case MPI2_RAID_PD_STATE_OFFLINE:
9555 case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
9556 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
/*
 * NOTE(review): elided listing — break lines missing between cases.
 *
 * Purpose: pretty-print an IR RAID operation status event (operation name,
 * volume handle, percent complete). Debug-only; no state is modified.
 */
9563 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
9564 * @ioc: per adapter object
9565 * @event_data: event data payload
9569 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
9570 Mpi2EventDataIrOperationStatus_t *event_data)
9572 char *reason_str = NULL;
9574 switch (event_data->RAIDOperation) {
9575 case MPI2_EVENT_IR_RAIDOP_RESYNC:
9576 reason_str = "resync";
9578 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
9579 reason_str = "online capacity expansion";
9581 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
9582 reason_str = "consistency check";
9584 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
9585 reason_str = "background init";
9587 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
9588 reason_str = "make data consistent";
/* Unknown operations presumably return without logging (lines elided). */
9595 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
9597 le16_to_cpu(event_data->VolDevHandle),
9598 event_data->PercentComplete);
/*
 * NOTE(review): elided listing.
 *
 * Purpose: handle an IR operation status event. Optionally logs it, and for
 * RESYNC operations caches the percent-complete on the matching _raid_device
 * (used by the raid transport layer) under raid_device_lock.
 */
9602 * _scsih_sas_ir_operation_status_event - handle RAID operation events
9603 * @ioc: per adapter object
9604 * @fw_event: The fw_event_work object
9608 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
9609 struct fw_event_work *fw_event)
9611 Mpi2EventDataIrOperationStatus_t *event_data =
9612 (Mpi2EventDataIrOperationStatus_t *)
9613 fw_event->event_data;
/* NOTE(review): 'static' local pointer looks suspicious but matches the
 * upstream driver; it is only used under the lock within this call. */
9614 static struct _raid_device *raid_device;
9615 unsigned long flags;
9618 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9619 (!ioc->hide_ir_msg))
9620 _scsih_sas_ir_operation_status_event_debug(ioc,
9623 /* code added for raid transport support */
9624 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
9626 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9627 handle = le16_to_cpu(event_data->VolDevHandle);
9628 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
/* NULL-check of raid_device is elided between these rows. */
9630 raid_device->percent_complete =
9631 event_data->PercentComplete;
9632 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
/*
 * Purpose: before a post-reset device scan, mark every known target as
 * deleted; targets rediscovered during the scan get the flag cleared, and
 * leftovers are later removed as unresponding.
 */
9637 * _scsih_prep_device_scan - initialize parameters prior to device scan
9638 * @ioc: per adapter object
9640 * Set the deleted flag prior to device scan. If the device is found during
9641 * the scan, then we clear the deleted flag.
9644 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
9646 struct MPT3SAS_DEVICE *sas_device_priv_data;
9647 struct scsi_device *sdev;
9649 shost_for_each_device(sdev, ioc->shost) {
9650 sas_device_priv_data = sdev->hostdata;
9651 if (sas_device_priv_data && sas_device_priv_data->sas_target)
9652 sas_device_priv_data->sas_target->deleted = 1;
/*
 * NOTE(review): elided listing — some brace/else lines missing.
 *
 * Purpose: after reset, re-apply the firmware/module-configured queue depth
 * to every scsi_device: NVMe targets get max_nvme_qd, SSP targets get
 * wide/narrow port depth based on port_type, SATA gets max_sata_qd.
 */
9657 * _scsih_update_device_qdepth - Update QD during Reset.
9658 * @ioc: per adapter object
9662 _scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
9664 struct MPT3SAS_DEVICE *sas_device_priv_data;
9665 struct MPT3SAS_TARGET *sas_target_priv_data;
9666 struct _sas_device *sas_device;
9667 struct scsi_device *sdev;
9670 ioc_info(ioc, "Update devices with firmware reported queue depth\n");
9671 shost_for_each_device(sdev, ioc->shost) {
9672 sas_device_priv_data = sdev->hostdata;
9673 if (sas_device_priv_data && sas_device_priv_data->sas_target) {
9674 sas_target_priv_data = sas_device_priv_data->sas_target;
9675 sas_device = sas_device_priv_data->sas_target->sas_dev;
9676 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
9677 qdepth = ioc->max_nvme_qd;
/* port_type > 1 means a wide port (multiple phys) — wider QD allowed. */
9678 else if (sas_device &&
9679 sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
9680 qdepth = (sas_device->port_type > 1) ?
9681 ioc->max_wideport_qd : ioc->max_narrowport_qd;
9682 else if (sas_device &&
9683 sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
9684 qdepth = ioc->max_sata_qd;
9687 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
/*
 * NOTE(review): elided listing — continue/brace lines missing between
 * numbered rows.
 *
 * Purpose: after host reset, match a freshly-read SAS Device Page 0 against
 * the driver's sas_device_list (by SAS address, slot and port). On a match,
 * mark the device responding, clear tm_busy/deleted on the target, refresh
 * enclosure info, and fix up the device handle if firmware reassigned it.
 */
9693 * _scsih_mark_responding_sas_device - mark a sas_devices as responding
9694 * @ioc: per adapter object
9695 * @sas_device_pg0: SAS Device page 0
9697 * After host reset, find out whether devices are still responding.
9698 * Used in _scsih_remove_unresponsive_sas_devices.
9701 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
9702 Mpi2SasDevicePage0_t *sas_device_pg0)
9704 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9705 struct scsi_target *starget;
9706 struct _sas_device *sas_device = NULL;
9707 struct _enclosure_node *enclosure_dev = NULL;
9708 unsigned long flags;
9709 struct hba_port *port = mpt3sas_get_port_by_id(
9710 ioc, sas_device_pg0->PhysicalPort, 0);
/* Resolve the enclosure this device sits in, if any. */
9712 if (sas_device_pg0->EnclosureHandle) {
9714 mpt3sas_scsih_enclosure_find_by_handle(ioc,
9715 le16_to_cpu(sas_device_pg0->EnclosureHandle));
9716 if (enclosure_dev == NULL)
9717 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
9718 sas_device_pg0->EnclosureHandle);
9720 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9721 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
/* Match on SAS address + slot + port; non-matches continue (elided). */
9722 if (sas_device->sas_address != le64_to_cpu(
9723 sas_device_pg0->SASAddress))
9725 if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
9727 if (sas_device->port != port)
9729 sas_device->responding = 1;
9730 starget = sas_device->starget;
9731 if (starget && starget->hostdata) {
9732 sas_target_priv_data = starget->hostdata;
9733 sas_target_priv_data->tm_busy = 0;
9734 sas_target_priv_data->deleted = 0;
9736 sas_target_priv_data = NULL;
9738 starget_printk(KERN_INFO, starget,
9739 "handle(0x%04x), sas_addr(0x%016llx)\n",
9740 le16_to_cpu(sas_device_pg0->DevHandle),
9741 (unsigned long long)
9742 sas_device->sas_address);
9744 if (sas_device->enclosure_handle != 0)
9745 starget_printk(KERN_INFO, starget,
9746 "enclosure logical id(0x%016llx), slot(%d)\n",
9747 (unsigned long long)
9748 sas_device->enclosure_logical_id,
/* Refresh enclosure-level / connector-name from the new page 0. */
9751 if (le16_to_cpu(sas_device_pg0->Flags) &
9752 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
9753 sas_device->enclosure_level =
9754 sas_device_pg0->EnclosureLevel;
9755 memcpy(&sas_device->connector_name[0],
9756 &sas_device_pg0->ConnectorName[0], 4);
9758 sas_device->enclosure_level = 0;
9759 sas_device->connector_name[0] = '\0';
9762 sas_device->enclosure_handle =
9763 le16_to_cpu(sas_device_pg0->EnclosureHandle);
9764 sas_device->is_chassis_slot_valid = 0;
9765 if (enclosure_dev) {
9766 sas_device->enclosure_logical_id = le64_to_cpu(
9767 enclosure_dev->pg0.EnclosureLogicalID);
9768 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
9769 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
9770 sas_device->is_chassis_slot_valid = 1;
9771 sas_device->chassis_slot =
9772 enclosure_dev->pg0.ChassisSlot;
/* Firmware may hand out a new DevHandle after reset — resync it. */
9776 if (sas_device->handle == le16_to_cpu(
9777 sas_device_pg0->DevHandle))
9779 pr_info("\thandle changed from(0x%04x)!!!\n",
9780 sas_device->handle);
9781 sas_device->handle = le16_to_cpu(
9782 sas_device_pg0->DevHandle);
9783 if (sas_target_priv_data)
9784 sas_target_priv_data->handle =
9785 le16_to_cpu(sas_device_pg0->DevHandle);
9789 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/*
 * NOTE(review): elided listing — the do/while loop construct around the
 * GET_NEXT_HANDLE iteration is partially missing.
 *
 * Purpose: after reset, rebuild ioc->enclosure_list from scratch by freeing
 * the old list and iterating Enclosure Page 0 with GET_NEXT_HANDLE until the
 * config request fails or IOCStatus is non-zero.
 */
9793 * _scsih_create_enclosure_list_after_reset - Free Existing list,
9794 * And create enclosure list by scanning all Enclosure Page(0)s
9795 * @ioc: per adapter object
9798 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
9800 struct _enclosure_node *enclosure_dev;
9801 Mpi2ConfigReply_t mpi_reply;
9802 u16 enclosure_handle;
9805 /* Free existing enclosure list */
9806 mpt3sas_free_enclosure_list(ioc);
9808 /* Re constructing enclosure list after reset*/
9809 enclosure_handle = 0xFFFF;
9812 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
9813 if (!enclosure_dev) {
9814 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9815 __FILE__, __LINE__, __func__);
9818 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
9819 &enclosure_dev->pg0,
9820 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
/* Loop termination: config error or bad IOCStatus frees the node and stops. */
9823 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
9824 MPI2_IOCSTATUS_MASK)) {
9825 kfree(enclosure_dev);
9828 list_add_tail(&enclosure_dev->list,
9829 &ioc->enclosure_list);
/* Advance the GET_NEXT_HANDLE cursor to this enclosure's handle. */
9831 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
/*
 * Purpose: after host reset, walk all SAS Device Page 0s via GET_NEXT_HANDLE
 * and mark each still-present end device as responding.
 */
9836 * _scsih_search_responding_sas_devices -
9837 * @ioc: per adapter object
9839 * After host reset, find out whether devices are still responding.
9843 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
9845 Mpi2SasDevicePage0_t sas_device_pg0;
9846 Mpi2ConfigReply_t mpi_reply;
9851 ioc_info(ioc, "search for end-devices: start\n");
/* Nothing to do if no SAS devices were known before reset (goto elided). */
9853 if (list_empty(&ioc->sas_device_list))
9857 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9858 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9860 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9861 MPI2_IOCSTATUS_MASK;
9862 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9864 handle = le16_to_cpu(sas_device_pg0.DevHandle);
9865 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
/* Only end devices are relevant here; expanders handled separately. */
9866 if (!(_scsih_is_end_device(device_info)))
9868 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
9872 ioc_info(ioc, "search for end-devices: complete\n");
/*
 * NOTE(review): elided listing — continue/brace lines missing between
 * numbered rows.
 *
 * Purpose: after host reset, match a freshly-read PCIe Device Page 0 against
 * pcie_device_list (by WWID and slot). On a match, mark the device
 * responding, clear tm_busy/deleted, refresh enclosure/connector info, and
 * resync the device handle if firmware reassigned it.
 */
9876 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
9877 * @ioc: per adapter object
9878 * @pcie_device_pg0: PCIe Device page 0
9880 * After host reset, find out whether devices are still responding.
9881 * Used in _scsih_remove_unresponding_devices.
9884 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
9885 Mpi26PCIeDevicePage0_t *pcie_device_pg0)
9887 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
9888 struct scsi_target *starget;
9889 struct _pcie_device *pcie_device;
9890 unsigned long flags;
9892 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9893 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
/* Identity match: WWID + slot. */
9894 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
9895 && (pcie_device->slot == le16_to_cpu(
9896 pcie_device_pg0->Slot))) {
9897 pcie_device->access_status =
9898 pcie_device_pg0->AccessStatus;
9899 pcie_device->responding = 1;
9900 starget = pcie_device->starget;
9901 if (starget && starget->hostdata) {
9902 sas_target_priv_data = starget->hostdata;
9903 sas_target_priv_data->tm_busy = 0;
9904 sas_target_priv_data->deleted = 0;
9906 sas_target_priv_data = NULL;
9908 starget_printk(KERN_INFO, starget,
9909 "handle(0x%04x), wwid(0x%016llx) ",
9910 pcie_device->handle,
9911 (unsigned long long)pcie_device->wwid);
9912 if (pcie_device->enclosure_handle != 0)
9913 starget_printk(KERN_INFO, starget,
9914 "enclosure logical id(0x%016llx), "
9916 (unsigned long long)
9917 pcie_device->enclosure_logical_id,
/* Enclosure-level info is only valid on MPI2.5+ controllers. */
9921 if (((le32_to_cpu(pcie_device_pg0->Flags)) &
9922 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
9923 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
9924 pcie_device->enclosure_level =
9925 pcie_device_pg0->EnclosureLevel;
9926 memcpy(&pcie_device->connector_name[0],
9927 &pcie_device_pg0->ConnectorName[0], 4);
9929 pcie_device->enclosure_level = 0;
9930 pcie_device->connector_name[0] = '\0';
/* Firmware may hand out a new DevHandle after reset — resync it. */
9933 if (pcie_device->handle == le16_to_cpu(
9934 pcie_device_pg0->DevHandle))
9936 pr_info("\thandle changed from(0x%04x)!!!\n",
9937 pcie_device->handle);
9938 pcie_device->handle = le16_to_cpu(
9939 pcie_device_pg0->DevHandle);
9940 if (sas_target_priv_data)
9941 sas_target_priv_data->handle =
9942 le16_to_cpu(pcie_device_pg0->DevHandle);
9948 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
/*
 * Purpose: after host reset, walk all PCIe Device Page 0s via GET_NEXT_HANDLE
 * and mark each still-present NVMe/PCIe-SCSI end device as responding.
 */
9952 * _scsih_search_responding_pcie_devices -
9953 * @ioc: per adapter object
9955 * After host reset, find out whether devices are still responding.
9959 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
9961 Mpi26PCIeDevicePage0_t pcie_device_pg0;
9962 Mpi2ConfigReply_t mpi_reply;
9967 ioc_info(ioc, "search for end-devices: start\n");
/* Nothing to do if no PCIe devices were known before reset (goto elided). */
9969 if (list_empty(&ioc->pcie_device_list))
9973 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9974 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9976 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9977 MPI2_IOCSTATUS_MASK;
9978 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9979 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
9980 __func__, ioc_status,
9981 le32_to_cpu(mpi_reply.IOCLogInfo));
9984 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9985 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
/* Skip anything that is not an NVMe or PCIe-SCSI device. */
9986 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
9988 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
9991 ioc_info(ioc, "search for PCIe end-devices: complete\n");
/*
 * NOTE(review): elided listing — goto/brace lines missing between numbered
 * rows; the lock is dropped and retaken around the WarpDrive re-init.
 *
 * Purpose: after host reset, find the raid volume matching @wwid on
 * raid_device_list, mark it responding, clear the target's deleted flag,
 * re-derive WarpDrive direct-IO properties, and resync the handle if
 * firmware reassigned it.
 */
9995 * _scsih_mark_responding_raid_device - mark a raid_device as responding
9996 * @ioc: per adapter object
9997 * @wwid: world wide identifier for raid volume
9998 * @handle: device handle
10000 * After host reset, find out whether devices are still responding.
10001 * Used in _scsih_remove_unresponsive_raid_devices.
10004 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
10007 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
10008 struct scsi_target *starget;
10009 struct _raid_device *raid_device;
10010 unsigned long flags;
10012 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10013 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
10014 if (raid_device->wwid == wwid && raid_device->starget) {
10015 starget = raid_device->starget;
10016 if (starget && starget->hostdata) {
10017 sas_target_priv_data = starget->hostdata;
10018 sas_target_priv_data->deleted = 0;
10020 sas_target_priv_data = NULL;
10021 raid_device->responding = 1;
/* Lock is released before the (sleepable) WarpDrive re-init below. */
10022 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10023 starget_printk(KERN_INFO, raid_device->starget,
10024 "handle(0x%04x), wwid(0x%016llx)\n", handle,
10025 (unsigned long long)raid_device->wwid);
10028 * WARPDRIVE: The handles of the PDs might have changed
10029 * across the host reset so re-initialize the
10030 * required data for Direct IO
10032 mpt3sas_init_warpdrive_properties(ioc, raid_device);
10033 spin_lock_irqsave(&ioc->raid_device_lock, flags);
/* Handle unchanged: unlock and return (goto/return elided). */
10034 if (raid_device->handle == handle) {
10035 spin_unlock_irqrestore(&ioc->raid_device_lock,
10039 pr_info("\thandle changed from(0x%04x)!!!\n",
10040 raid_device->handle);
10041 raid_device->handle = handle;
10042 if (sas_target_priv_data)
10043 sas_target_priv_data->handle = handle;
10044 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10048 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
/*
 * Purpose: after host reset (IR firmware only), walk Raid Volume Page 1 via
 * GET_NEXT_HANDLE, read each volume's Page 0, and mark healthy volumes
 * (optimal/online/degraded) as responding. Then rebuild the pd_handles
 * bitmap from Phys Disk Page 0 (skipped on WarpDrive).
 */
10052 * _scsih_search_responding_raid_devices -
10053 * @ioc: per adapter object
10055 * After host reset, find out whether devices are still responding.
10059 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
10061 Mpi2RaidVolPage1_t volume_pg1;
10062 Mpi2RaidVolPage0_t volume_pg0;
10063 Mpi2RaidPhysDiskPage0_t pd_pg0;
10064 Mpi2ConfigReply_t mpi_reply;
/* No IR firmware means no volumes to check (early return elided). */
10069 if (!ioc->ir_firmware)
10072 ioc_info(ioc, "search for raid volumes: start\n");
10074 if (list_empty(&ioc->raid_device_list))
10078 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10079 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10080 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10081 MPI2_IOCSTATUS_MASK;
10082 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10084 handle = le16_to_cpu(volume_pg1.DevHandle);
10086 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10087 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10088 sizeof(Mpi2RaidVolPage0_t)))
/* Only volumes in a usable state are marked responding. */
10091 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10092 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10093 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
10094 _scsih_mark_responding_raid_device(ioc,
10095 le64_to_cpu(volume_pg1.WWID), handle);
10098 /* refresh the pd_handles */
10099 if (!ioc->is_warpdrive) {
10100 phys_disk_num = 0xFF;
10101 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
10102 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10103 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10105 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10106 MPI2_IOCSTATUS_MASK;
10107 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10109 phys_disk_num = pd_pg0.PhysDiskNum;
10110 handle = le16_to_cpu(pd_pg0.DevHandle);
10111 set_bit(handle, ioc->pd_handles);
10115 ioc_info(ioc, "search for responding raid volumes: complete\n");
/*
 * NOTE(review): elided listing — continue/goto lines missing between
 * numbered rows.
 *
 * Purpose: after host reset, match a freshly-read Expander Page 0 against
 * sas_expander_list (by SAS address and port). On a match, mark it
 * responding, refresh enclosure info, and resync the expander handle (and
 * all per-phy handles) if firmware reassigned it.
 */
10119 * _scsih_mark_responding_expander - mark a expander as responding
10120 * @ioc: per adapter object
10121 * @expander_pg0:SAS Expander Config Page0
10123 * After host reset, find out whether devices are still responding.
10124 * Used in _scsih_remove_unresponsive_expanders.
10127 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
10128 Mpi2ExpanderPage0_t *expander_pg0)
10130 struct _sas_node *sas_expander = NULL;
10131 unsigned long flags;
10133 struct _enclosure_node *enclosure_dev = NULL;
10134 u16 handle = le16_to_cpu(expander_pg0->DevHandle);
10135 u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
10136 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
10137 struct hba_port *port = mpt3sas_get_port_by_id(
10138 ioc, expander_pg0->PhysicalPort, 0);
10140 if (enclosure_handle)
10142 mpt3sas_scsih_enclosure_find_by_handle(ioc,
10145 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10146 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
/* Identity match: SAS address + port (continues elided). */
10147 if (sas_expander->sas_address != sas_address)
10149 if (sas_expander->port != port)
10151 sas_expander->responding = 1;
10153 if (enclosure_dev) {
10154 sas_expander->enclosure_logical_id =
10155 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
10156 sas_expander->enclosure_handle =
10157 le16_to_cpu(expander_pg0->EnclosureHandle);
10160 if (sas_expander->handle == handle)
/* Firmware may hand out a new DevHandle after reset — resync all phys. */
10162 pr_info("\texpander(0x%016llx): handle changed" \
10163 " from(0x%04x) to (0x%04x)!!!\n",
10164 (unsigned long long)sas_expander->sas_address,
10165 sas_expander->handle, handle);
10166 sas_expander->handle = handle;
10167 for (i = 0 ; i < sas_expander->num_phys ; i++)
10168 sas_expander->phy[i].handle = handle;
10172 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
/*
 * Purpose: after host reset, walk all Expander Page 0s via GET_NEXT_HNDL and
 * mark each still-present expander as responding.
 */
10176 * _scsih_search_responding_expanders -
10177 * @ioc: per adapter object
10179 * After host reset, find out whether devices are still responding.
10183 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
10185 Mpi2ExpanderPage0_t expander_pg0;
10186 Mpi2ConfigReply_t mpi_reply;
10192 ioc_info(ioc, "search for expanders: start\n");
/* Nothing to do if no expanders were known before reset (goto elided). */
10194 if (list_empty(&ioc->sas_expander_list))
10198 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10199 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10201 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10202 MPI2_IOCSTATUS_MASK;
10203 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10206 handle = le16_to_cpu(expander_pg0.DevHandle);
10207 sas_address = le64_to_cpu(expander_pg0.SASAddress);
10208 port = expander_pg0.PhysicalPort;
/* Port id is only meaningful with multipath_on_hba enabled. */
10210 "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10211 handle, (unsigned long long)sas_address,
10212 (ioc->multipath_on_hba ?
10213 port : MULTIPATH_DISABLED_PORT_ID));
10214 _scsih_mark_responding_expander(ioc, &expander_pg0);
10218 ioc_info(ioc, "search for expanders: complete\n");
/*
 * NOTE(review): elided listing — an INIT_LIST_HEAD(&head) and some else
 * branches are missing between numbered rows.
 *
 * Purpose: final step of post-reset rescan. Everything not marked
 * "responding" by the search passes above is removed: SAS end devices, PCIe
 * devices, RAID volumes, and expanders. The responding flag is cleared on
 * survivors so the next reset starts clean, then all I/O is unblocked.
 */
10222 * _scsih_remove_unresponding_devices - removing unresponding devices
10223 * @ioc: per adapter object
10226 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
10228 struct _sas_device *sas_device, *sas_device_next;
10229 struct _sas_node *sas_expander, *sas_expander_next;
10230 struct _raid_device *raid_device, *raid_device_next;
10231 struct _pcie_device *pcie_device, *pcie_device_next;
10232 struct list_head tmp_list;
10233 unsigned long flags;
10236 ioc_info(ioc, "removing unresponding devices: start\n");
10238 /* removing unresponding end devices */
10239 ioc_info(ioc, "removing unresponding devices: end-devices\n");
10241 * Iterate, pulling off devices marked as non-responding. We become the
10242 * owner for the reference the list had on any object we prune.
10244 spin_lock_irqsave(&ioc->sas_device_lock, flags);
10247 * Clean up the sas_device_init_list list as
10248 * driver goes for fresh scan as part of diag reset.
10250 list_for_each_entry_safe(sas_device, sas_device_next,
10251 &ioc->sas_device_init_list, list) {
10252 list_del_init(&sas_device->list);
10253 sas_device_put(sas_device);
/* Move non-responders to a private list; reset the flag on the rest. */
10256 list_for_each_entry_safe(sas_device, sas_device_next,
10257 &ioc->sas_device_list, list) {
10258 if (!sas_device->responding)
10259 list_move_tail(&sas_device->list, &head);
10261 sas_device->responding = 0;
10263 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10266 * Now, uninitialize and remove the unresponding devices we pruned.
10268 list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
10269 _scsih_remove_device(ioc, sas_device);
10270 list_del_init(&sas_device->list);
10271 sas_device_put(sas_device);
/* Same pattern for PCIe devices. */
10274 ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
10275 INIT_LIST_HEAD(&head);
10276 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10278 * Clean up the pcie_device_init_list list as
10279 * driver goes for fresh scan as part of diag reset.
10281 list_for_each_entry_safe(pcie_device, pcie_device_next,
10282 &ioc->pcie_device_init_list, list) {
10283 list_del_init(&pcie_device->list);
10284 pcie_device_put(pcie_device);
10287 list_for_each_entry_safe(pcie_device, pcie_device_next,
10288 &ioc->pcie_device_list, list) {
10289 if (!pcie_device->responding)
10290 list_move_tail(&pcie_device->list, &head);
10292 pcie_device->responding = 0;
10294 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10296 list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
10297 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
10298 list_del_init(&pcie_device->list);
10299 pcie_device_put(pcie_device);
10302 /* removing unresponding volumes */
10303 if (ioc->ir_firmware) {
10304 ioc_info(ioc, "removing unresponding devices: volumes\n");
10305 list_for_each_entry_safe(raid_device, raid_device_next,
10306 &ioc->raid_device_list, list) {
10307 if (!raid_device->responding)
10308 _scsih_sas_volume_delete(ioc,
10309 raid_device->handle);
10311 raid_device->responding = 0;
10315 /* removing unresponding expanders */
10316 ioc_info(ioc, "removing unresponding devices: expanders\n");
10317 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10318 INIT_LIST_HEAD(&tmp_list);
10319 list_for_each_entry_safe(sas_expander, sas_expander_next,
10320 &ioc->sas_expander_list, list) {
10321 if (!sas_expander->responding)
10322 list_move_tail(&sas_expander->list, &tmp_list);
10324 sas_expander->responding = 0;
10326 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10327 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
10329 _scsih_expander_node_remove(ioc, sas_expander);
10332 ioc_info(ioc, "removing unresponding devices: complete\n");
10334 /* unblock devices */
10335 _scsih_ublock_io_all_device(ioc);
/*
 * Purpose: for each phy of @sas_expander, read Expander Page 1 and push the
 * attached device handle and negotiated link rate into the SAS transport
 * layer. Used after reset when handles/link rates may have changed.
 * @handle: current devhandle of the expander (used for the config request).
 */
10339 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
10340 struct _sas_node *sas_expander, u16 handle)
10342 Mpi2ExpanderPage1_t expander_pg1;
10343 Mpi2ConfigReply_t mpi_reply;
10346 for (i = 0 ; i < sas_expander->num_phys ; i++) {
10347 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
10348 &expander_pg1, i, handle))) {
10349 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10350 __FILE__, __LINE__, __func__);
/* Negotiated link rate lives in the upper nibble of the rate field. */
10354 mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
10355 le16_to_cpu(expander_pg1.AttachedDevHandle), i,
10356 expander_pg1.NegotiatedLinkRate >> 4,
10357 sas_expander->port);
10362 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
10363 * @ioc: per adapter object
10366 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
10368 Mpi2ExpanderPage0_t expander_pg0;
10369 Mpi2SasDevicePage0_t sas_device_pg0;
10370 Mpi26PCIeDevicePage0_t pcie_device_pg0;
10371 Mpi2RaidVolPage1_t volume_pg1;
10372 Mpi2RaidVolPage0_t volume_pg0;
10373 Mpi2RaidPhysDiskPage0_t pd_pg0;
10374 Mpi2EventIrConfigElement_t element;
10375 Mpi2ConfigReply_t mpi_reply;
10376 u8 phys_disk_num, port_id;
10378 u16 handle, parent_handle;
10380 struct _sas_device *sas_device;
10381 struct _pcie_device *pcie_device;
10382 struct _sas_node *expander_device;
10383 static struct _raid_device *raid_device;
10385 unsigned long flags;
10387 ioc_info(ioc, "scan devices: start\n");
10389 _scsih_sas_host_refresh(ioc);
10391 ioc_info(ioc, "\tscan devices: expanders start\n");
10395 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10396 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10397 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10398 MPI2_IOCSTATUS_MASK;
10399 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10400 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10401 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10404 handle = le16_to_cpu(expander_pg0.DevHandle);
10405 spin_lock_irqsave(&ioc->sas_node_lock, flags);
10406 port_id = expander_pg0.PhysicalPort;
10407 expander_device = mpt3sas_scsih_expander_find_by_sas_address(
10408 ioc, le64_to_cpu(expander_pg0.SASAddress),
10409 mpt3sas_get_port_by_id(ioc, port_id, 0));
10410 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10411 if (expander_device)
10412 _scsih_refresh_expander_links(ioc, expander_device,
10415 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10417 (u64)le64_to_cpu(expander_pg0.SASAddress));
10418 _scsih_expander_add(ioc, handle);
10419 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10421 (u64)le64_to_cpu(expander_pg0.SASAddress));
10425 ioc_info(ioc, "\tscan devices: expanders complete\n");
10427 if (!ioc->ir_firmware)
10430 ioc_info(ioc, "\tscan devices: phys disk start\n");
10433 phys_disk_num = 0xFF;
10434 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10435 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10437 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10438 MPI2_IOCSTATUS_MASK;
10439 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10440 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10441 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10444 phys_disk_num = pd_pg0.PhysDiskNum;
10445 handle = le16_to_cpu(pd_pg0.DevHandle);
10446 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
10448 sas_device_put(sas_device);
10451 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10452 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
10455 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10456 MPI2_IOCSTATUS_MASK;
10457 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10458 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
10459 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10462 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10463 if (!_scsih_get_sas_address(ioc, parent_handle,
10465 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10467 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10468 port_id = sas_device_pg0.PhysicalPort;
10469 mpt3sas_transport_update_links(ioc, sas_address,
10470 handle, sas_device_pg0.PhyNum,
10471 MPI2_SAS_NEG_LINK_RATE_1_5,
10472 mpt3sas_get_port_by_id(ioc, port_id, 0));
10473 set_bit(handle, ioc->pd_handles);
10475 /* This will retry adding the end device.
10476 * _scsih_add_device() will decide on retries and
10477 * return "1" when it should be retried
10479 while (_scsih_add_device(ioc, handle, retry_count++,
10483 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10485 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10489 ioc_info(ioc, "\tscan devices: phys disk complete\n");
10491 ioc_info(ioc, "\tscan devices: volumes start\n");
10495 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10496 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10497 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10498 MPI2_IOCSTATUS_MASK;
10499 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10500 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10501 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10504 handle = le16_to_cpu(volume_pg1.DevHandle);
10505 spin_lock_irqsave(&ioc->raid_device_lock, flags);
10506 raid_device = _scsih_raid_device_find_by_wwid(ioc,
10507 le64_to_cpu(volume_pg1.WWID));
10508 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10511 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10512 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10513 sizeof(Mpi2RaidVolPage0_t)))
10515 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10516 MPI2_IOCSTATUS_MASK;
10517 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10518 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10519 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10522 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10523 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10524 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
10525 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
10526 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
10527 element.VolDevHandle = volume_pg1.DevHandle;
10528 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
10529 volume_pg1.DevHandle);
10530 _scsih_sas_volume_add(ioc, &element);
10531 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
10532 volume_pg1.DevHandle);
10536 ioc_info(ioc, "\tscan devices: volumes complete\n");
10540 ioc_info(ioc, "\tscan devices: end devices start\n");
10544 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10545 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10547 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10548 MPI2_IOCSTATUS_MASK;
10549 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10550 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10551 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10554 handle = le16_to_cpu(sas_device_pg0.DevHandle);
10555 if (!(_scsih_is_end_device(
10556 le32_to_cpu(sas_device_pg0.DeviceInfo))))
10558 port_id = sas_device_pg0.PhysicalPort;
10559 sas_device = mpt3sas_get_sdev_by_addr(ioc,
10560 le64_to_cpu(sas_device_pg0.SASAddress),
10561 mpt3sas_get_port_by_id(ioc, port_id, 0));
10563 sas_device_put(sas_device);
10566 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10567 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
10568 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10570 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10571 mpt3sas_transport_update_links(ioc, sas_address, handle,
10572 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
10573 mpt3sas_get_port_by_id(ioc, port_id, 0));
10575 /* This will retry adding the end device.
10576 * _scsih_add_device() will decide on retries and
10577 * return "1" when it should be retried
10579 while (_scsih_add_device(ioc, handle, retry_count++,
10583 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10585 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10588 ioc_info(ioc, "\tscan devices: end devices complete\n");
10589 ioc_info(ioc, "\tscan devices: pcie end devices start\n");
10593 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
10594 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10596 ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
10597 & MPI2_IOCSTATUS_MASK;
10598 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10599 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10600 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10603 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
10604 if (!(_scsih_is_nvme_pciescsi_device(
10605 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
10607 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
10608 le64_to_cpu(pcie_device_pg0.WWID));
10610 pcie_device_put(pcie_device);
10614 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
10615 _scsih_pcie_add_device(ioc, handle);
10617 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
10618 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
10621 ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
10622 ioc_info(ioc, "scan devices: complete\n");
/*
 * NOTE(review): this chunk appears to have physical lines elided by the
 * extraction that produced it (kernel-doc openers, braces, blank lines are
 * missing, and each surviving line is prefixed with its original line
 * number).  Restore from the upstream mpt3sas driver before compiling.
 */
10626 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
10627 * @ioc: per adapter object
10629 * The handler for doing any required cleanup or initialization.
10631 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
/* Pre-reset phase: as visible here, only emits a task-management debug
 * message; no teardown is performed in this handler. */
10633 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
10637 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
10639 * @ioc: per adapter object
10641 * The handler for doing any required cleanup or initialization.
10644 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
/* NOTE(review): the unmatched ')' below suggests an enclosing
 * dtmprintk( ... wrapper line was elided by extraction — confirm against
 * the upstream source. */
10647 ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
/* Abort any internal scsih command in flight: mark it RESET, release its
 * smid and wake the waiter so it does not hang across the reset. */
10648 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
10649 ioc->scsih_cmds.status |= MPT3_CMD_RESET;
10650 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
10651 complete(&ioc->scsih_cmds.done);
/* Same treatment for an outstanding task-management command. */
10653 if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
10654 ioc->tm_cmds.status |= MPT3_CMD_RESET;
10655 mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
10656 complete(&ioc->tm_cmds.done);
/* Clear the pending device-add and device-remove-in-progress bitmaps,
 * then drop queued firmware events and flush running SCSI commands. */
10659 memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
10660 memset(ioc->device_remove_in_progress, 0,
10661 ioc->device_remove_in_progress_sz);
10662 _scsih_fw_event_cleanup_queue(ioc);
10663 _scsih_flush_running_cmds(ioc);
10667 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
10668 * @ioc: per adapter object
10670 * The handler for doing any required cleanup or initialization.
10673 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
10675 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
/* Skip the rescan when discovery is disabled and the HBA reported no
 * phys; otherwise re-sync driver state with the firmware after reset. */
10676 if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
/* Multipath configurations additionally refresh port and virtual-phy
 * state, which can change across a controller reset. */
10677 if (ioc->multipath_on_hba) {
10678 _scsih_sas_port_refresh(ioc);
10679 _scsih_update_vphys_after_reset(ioc);
/* Re-validate every device class known before the reset (SAS, PCIe/NVMe,
 * RAID, expanders) and delete whatever did not respond. */
10681 _scsih_prep_device_scan(ioc);
10682 _scsih_create_enclosure_list_after_reset(ioc);
10683 _scsih_search_responding_sas_devices(ioc);
10684 _scsih_search_responding_pcie_devices(ioc);
10685 _scsih_search_responding_raid_devices(ioc);
10686 _scsih_search_responding_expanders(ioc);
10687 _scsih_error_recovery_delete_devices(ioc);
10692 * _mpt3sas_fw_work - delayed task for processing firmware events
10693 * @ioc: per adapter object
10694 * @fw_event: The fw_event_work object
/* Runs in process (workqueue) context; dispatches one dequeued firmware
 * event to its handler.  NOTE(review): the per-case "break;" lines of the
 * switch below appear to have been elided by extraction — each case is a
 * separate handler in the upstream source, not fall-through. */
10698 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
10700 ioc->current_event = fw_event;
10701 _scsih_fw_event_del_from_list(ioc, fw_event);
10703 /* the queue is being flushed so ignore this event */
10704 if (ioc->remove_host || ioc->pci_error_recovery) {
10705 fw_event_work_put(fw_event);
10706 ioc->current_event = NULL;
10710 switch (fw_event->event) {
10711 case MPT3SAS_PROCESS_TRIGGER_DIAG:
10712 mpt3sas_process_trigger_data(ioc,
10713 (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
10714 fw_event->event_data);
10716 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
/* Wait out any host recovery in progress before pruning devices. */
10717 while (scsi_host_in_recovery(ioc->shost) ||
10718 ioc->shost_recovery) {
10720 * If we're unloading or cancelling the work, bail.
10721 * Otherwise, this can become an infinite loop.
10723 if (ioc->remove_host || ioc->fw_events_cleanup)
10727 _scsih_remove_unresponding_devices(ioc);
10728 _scsih_del_dirty_vphy(ioc);
10729 _scsih_del_dirty_port_entries(ioc);
10730 if (ioc->is_gen35_ioc)
10731 _scsih_update_device_qdepth(ioc);
10732 _scsih_scan_for_devices_after_reset(ioc);
10734 * If diag reset has occurred during the driver load
10735 * then driver has to complete the driver load operation
10736 * by executing the following items:
10737 *- Register the devices from sas_device_init_list to SML
10738 *- clear is_driver_loading flag,
10739 *- start the watchdog thread.
10740 * In happy driver load path, above things are taken care of when
10741 * driver executes scsih_scan_finished().
10743 if (ioc->is_driver_loading)
10744 _scsih_complete_devices_scanning(ioc);
10745 _scsih_set_nvme_max_shutdown_latency(ioc);
10747 case MPT3SAS_PORT_ENABLE_COMPLETE:
10748 ioc->start_scan = 0;
/* Apply module-parameter missing delays only when both were supplied. */
10749 if (missing_delay[0] != -1 && missing_delay[1] != -1)
10750 mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
10753 ioc_info(ioc, "port enable: complete from worker thread\n"));
10755 case MPT3SAS_TURN_ON_PFA_LED:
10756 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
10758 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
10759 _scsih_sas_topology_change_event(ioc, fw_event);
10761 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10762 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
10763 _scsih_sas_device_status_change_event_debug(ioc,
10764 (Mpi2EventDataSasDeviceStatusChange_t *)
10765 fw_event->event_data);
10767 case MPI2_EVENT_SAS_DISCOVERY:
10768 _scsih_sas_discovery_event(ioc, fw_event);
10770 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10771 _scsih_sas_device_discovery_error_event(ioc, fw_event);
10773 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10774 _scsih_sas_broadcast_primitive_event(ioc, fw_event);
10776 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10777 _scsih_sas_enclosure_dev_status_change_event(ioc,
10780 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10781 _scsih_sas_ir_config_change_event(ioc, fw_event);
10783 case MPI2_EVENT_IR_VOLUME:
10784 _scsih_sas_ir_volume_event(ioc, fw_event);
10786 case MPI2_EVENT_IR_PHYSICAL_DISK:
10787 _scsih_sas_ir_physical_disk_event(ioc, fw_event);
10789 case MPI2_EVENT_IR_OPERATION_STATUS:
10790 _scsih_sas_ir_operation_status_event(ioc, fw_event);
10792 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10793 _scsih_pcie_device_status_change_event(ioc, fw_event);
10795 case MPI2_EVENT_PCIE_ENUMERATION:
10796 _scsih_pcie_enumeration_event(ioc, fw_event);
10798 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10799 _scsih_pcie_topology_change_event(ioc, fw_event);
10800 ioc->current_event = NULL;
/* Drop the work item's reference and clear the in-progress marker. */
10804 fw_event_work_put(fw_event);
10805 ioc->current_event = NULL;
10809 * _firmware_event_work
10810 * @work: The fw_event_work object
10813 * wrappers for the work thread handling firmware events
/* Workqueue entry point: recover the fw_event_work from the embedded
 * work_struct and hand it to _mpt3sas_fw_work() for dispatch. */
10817 _firmware_event_work(struct work_struct *work)
10819 struct fw_event_work *fw_event = container_of(work,
10820 struct fw_event_work, work);
10822 _mpt3sas_fw_work(fw_event->ioc, fw_event);
10826 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
10827 * @ioc: per adapter object
10828 * @msix_index: MSIX table index supplied by the OS
10829 * @reply: reply message frame(lower 32bit addr)
10830 * Context: interrupt.
10832 * This function merely adds a new work task into ioc->firmware_event_thread.
10833 * The tasks are worked from _firmware_event_work in user context.
10835 * Return: 1 meaning mf should be freed from _base_interrupt
10836 * 0 means the mf is freed from this function.
/* NOTE(review): "break;"/"return" lines inside the switch below appear to
 * have been elided by extraction; compare with the upstream source. */
10839 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
10842 struct fw_event_work *fw_event;
10843 Mpi2EventNotificationReply_t *mpi_reply;
10846 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
10848 /* events turned off due to host reset */
10849 if (ioc->pci_error_recovery)
10852 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
10854 if (unlikely(!mpi_reply)) {
10855 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
10856 __FILE__, __LINE__, __func__);
10860 event = le16_to_cpu(mpi_reply->Event);
/* Feed every event except LOG_ENTRY_ADDED to the diag-trigger logic. */
10862 if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
10863 mpt3sas_trigger_event(ioc, event, 0);
10867 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
10869 Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
10870 (Mpi2EventDataSasBroadcastPrimitive_t *)
10871 mpi_reply->EventData;
/* Only the asynchronous-event primitive is queued for processing. */
10873 if (baen_data->Primitive !=
10874 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
/* Coalesce: if a broadcast AEN is already being worked, just count it. */
10877 if (ioc->broadcast_aen_busy) {
10878 ioc->broadcast_aen_pending++;
10881 ioc->broadcast_aen_busy = 1;
10885 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
/* Handle device-delete portions at ISR time (fast path). */
10886 _scsih_check_topo_delete_events(ioc,
10887 (Mpi2EventDataSasTopologyChangeList_t *)
10888 mpi_reply->EventData);
10890 * No need to add the topology change list
10891 * event to fw event work queue when
10892 * diag reset is going on. Since during diag
10893 * reset driver scan the devices by reading
10894 * sas device page0's not by processing the
10897 if (ioc->shost_recovery)
10900 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
10901 _scsih_check_pcie_topo_remove_events(ioc,
10902 (Mpi26EventDataPCIeTopologyChangeList_t *)
10903 mpi_reply->EventData);
10904 if (ioc->shost_recovery)
10907 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
10908 _scsih_check_ir_config_unhide_events(ioc,
10909 (Mpi2EventDataIrConfigChangeList_t *)
10910 mpi_reply->EventData);
10912 case MPI2_EVENT_IR_VOLUME:
10913 _scsih_check_volume_delete_events(ioc,
10914 (Mpi2EventDataIrVolume_t *)
10915 mpi_reply->EventData);
10917 case MPI2_EVENT_LOG_ENTRY_ADDED:
10919 Mpi2EventDataLogEntryAdded_t *log_entry;
/* Log entries are only interpreted on WarpDrive controllers. */
10922 if (!ioc->is_warpdrive)
10925 log_entry = (Mpi2EventDataLogEntryAdded_t *)
10926 mpi_reply->EventData;
10927 log_code = le32_to_cpu(*(__le32 *)log_entry->LogData);
10929 if (le16_to_cpu(log_entry->LogEntryQualifier)
10930 != MPT2_WARPDRIVE_LOGENTRY)
/* Map WarpDrive log codes to operator-facing warnings/errors. */
10933 switch (log_code) {
10934 case MPT2_WARPDRIVE_LC_SSDT:
10935 ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10937 case MPT2_WARPDRIVE_LC_SSDLW:
10938 ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
10940 case MPT2_WARPDRIVE_LC_SSDLF:
10941 ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
10943 case MPT2_WARPDRIVE_LC_BRMF:
10944 ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
10950 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
10951 _scsih_sas_device_status_change_event(ioc,
10952 (Mpi2EventDataSasDeviceStatusChange_t *)
10953 mpi_reply->EventData);
/* These events need no ISR-time pre-processing; they are simply queued. */
10955 case MPI2_EVENT_IR_OPERATION_STATUS:
10956 case MPI2_EVENT_SAS_DISCOVERY:
10957 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
10958 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
10959 case MPI2_EVENT_IR_PHYSICAL_DISK:
10960 case MPI2_EVENT_PCIE_ENUMERATION:
10961 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
10964 case MPI2_EVENT_TEMP_THRESHOLD:
10965 _scsih_temp_threshold_events(ioc,
10966 (Mpi2EventDataTemperature_t *)
10967 mpi_reply->EventData);
10969 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
10970 ActiveCableEventData =
10971 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
10972 switch (ActiveCableEventData->ReasonCode) {
10973 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
10974 ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
10975 ActiveCableEventData->ReceptacleID);
10976 pr_notice("cannot be powered and devices connected\n");
10977 pr_notice("to this active cable will not be seen\n");
10978 pr_notice("This active cable requires %d mW of power\n",
10980 ActiveCableEventData->ActiveCablePowerRequirement));
10983 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
10984 ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
10985 ActiveCableEventData->ReceptacleID);
10987 "is not running at optimal speed(12 Gb/s rate)\n");
10993 default: /* ignore the rest */
/* Copy the variable-length event data into a work item and queue it for
 * _firmware_event_work(); EventDataLength is in 4-byte units. */
10997 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
10998 fw_event = alloc_fw_event_work(sz);
11000 ioc_err(ioc, "failure at %s:%d/%s()!\n",
11001 __FILE__, __LINE__, __func__);
11005 memcpy(fw_event->event_data, mpi_reply->EventData, sz);
11006 fw_event->ioc = ioc;
11007 fw_event->VF_ID = mpi_reply->VF_ID;
11008 fw_event->VP_ID = mpi_reply->VP_ID;
11009 fw_event->event = event;
11010 _scsih_fw_event_add(ioc, fw_event);
11011 fw_event_work_put(fw_event);
11016 * _scsih_expander_node_remove - removing expander device from list.
11017 * @ioc: per adapter object
11018 * @sas_expander: the sas_device object
11020 * Removing object and freeing associated memory from the
11021 * ioc->sas_expander_list.
11024 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
11025 struct _sas_node *sas_expander)
11027 struct _sas_port *mpt3sas_port, *next;
11028 unsigned long flags;
11031 /* remove sibling ports attached to this expander */
11032 list_for_each_entry_safe(mpt3sas_port, next,
11033 &sas_expander->sas_port_list, port_list) {
/* Host recovery tears devices down elsewhere; stop iterating here. */
11034 if (ioc->shost_recovery)
/* End devices are removed directly; child expanders recurse through
 * mpt3sas_expander_remove(). */
11036 if (mpt3sas_port->remote_identify.device_type ==
11038 mpt3sas_device_remove_by_sas_address(ioc,
11039 mpt3sas_port->remote_identify.sas_address,
11040 mpt3sas_port->hba_port);
11041 else if (mpt3sas_port->remote_identify.device_type ==
11042 SAS_EDGE_EXPANDER_DEVICE ||
11043 mpt3sas_port->remote_identify.device_type ==
11044 SAS_FANOUT_EXPANDER_DEVICE)
11045 mpt3sas_expander_remove(ioc,
11046 mpt3sas_port->remote_identify.sas_address,
11047 mpt3sas_port->hba_port);
11050 port_id = sas_expander->port->port_id;
/* Detach the expander from the SAS transport layer. */
11052 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
11053 sas_expander->sas_address_parent, sas_expander->port);
11056 "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
11057 sas_expander->handle, (unsigned long long)
11058 sas_expander->sas_address,
/* Unlink from ioc->sas_expander_list under the node lock, then free. */
11061 spin_lock_irqsave(&ioc->sas_node_lock, flags);
11062 list_del(&sas_expander->list);
11063 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
11065 kfree(sas_expander->phy);
11066 kfree(sas_expander);
11070 * _scsih_nvme_shutdown - NVMe shutdown notification
11071 * @ioc: per adapter object
11073 * Sending IoUnitControl request with shutdown operation code to alert IOC that
11074 * the host system is shutting down so that IOC can issue NVMe shutdown to
11075 * NVMe drives attached to it.
11078 _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
11080 Mpi26IoUnitControlRequest_t *mpi_request;
11081 Mpi26IoUnitControlReply_t *mpi_reply;
11084 /* are there any NVMe devices ? */
11085 if (list_empty(&ioc->pcie_device_list))
/* Serialize use of the shared scsih internal-command slot. */
11088 mutex_lock(&ioc->scsih_cmds.mutex);
11090 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
11091 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
11095 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11097 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11100 "%s: failed obtaining a smid\n", __func__);
11101 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
/* Build and fire the IO_UNIT_CONTROL / SHUTDOWN request. */
11105 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11106 ioc->scsih_cmds.smid = smid;
11107 memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
11108 mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
11109 mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
11111 init_completion(&ioc->scsih_cmds.done);
11112 ioc->put_smid_default(ioc, smid);
11113 /* Wait for max_shutdown_latency seconds */
11115 "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
11116 ioc->max_shutdown_latency);
11117 wait_for_completion_timeout(&ioc->scsih_cmds.done,
11118 ioc->max_shutdown_latency*HZ);
11120 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11121 ioc_err(ioc, "%s: timeout\n", __func__);
/* Report the IOC's completion status when a reply frame was returned. */
11125 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11126 mpi_reply = ioc->scsih_cmds.reply;
11127 ioc_info(ioc, "Io Unit Control shutdown (complete):"
11128 "ioc_status(0x%04x), loginfo(0x%08x)\n",
11129 le16_to_cpu(mpi_reply->IOCStatus),
11130 le32_to_cpu(mpi_reply->IOCLogInfo));
11133 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11134 mutex_unlock(&ioc->scsih_cmds.mutex);
11139 * _scsih_ir_shutdown - IR shutdown notification
11140 * @ioc: per adapter object
11142 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
11143 * the host system is shutting down.
11146 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
11148 Mpi2RaidActionRequest_t *mpi_request;
11149 Mpi2RaidActionReply_t *mpi_reply;
11152 /* is IR firmware build loaded ? */
11153 if (!ioc->ir_firmware)
11156 /* are there any volumes ? */
11157 if (list_empty(&ioc->raid_device_list))
/* Serialize use of the shared scsih internal-command slot. */
11160 mutex_lock(&ioc->scsih_cmds.mutex);
11162 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
11163 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
11166 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
11168 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
11170 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
11171 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
/* Build and fire the RAID_ACTION / SYSTEM_SHUTDOWN_INITIATED request. */
11175 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
11176 ioc->scsih_cmds.smid = smid;
11177 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
11179 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
11180 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
/* hide_ir_msg suppresses IR chatter (e.g. on WarpDrive). */
11182 if (!ioc->hide_ir_msg)
11183 ioc_info(ioc, "IR shutdown (sending)\n");
11184 init_completion(&ioc->scsih_cmds.done);
11185 ioc->put_smid_default(ioc, smid);
11186 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
11188 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
11189 ioc_err(ioc, "%s: timeout\n", __func__);
11193 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
11194 mpi_reply = ioc->scsih_cmds.reply;
11195 if (!ioc->hide_ir_msg)
11196 ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
11197 le16_to_cpu(mpi_reply->IOCStatus),
11198 le32_to_cpu(mpi_reply->IOCLogInfo));
11202 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
11203 mutex_unlock(&ioc->scsih_cmds.mutex);
11207 * _scsih_get_shost_and_ioc - get shost and ioc
11208 * and verify whether they are NULL or not
11209 * @pdev: PCI device struct
11210 * @shost: address of scsi host pointer
11211 * @ioc: address of HBA adapter pointer
11213 * Return zero if *shost and *ioc are not NULL otherwise return error number.
/* NOTE(review): the return statements of this helper appear to have been
 * elided by extraction; per the kernel-doc above, each NULL check should
 * return an error number and the tail should return 0. */
11216 _scsih_get_shost_and_ioc(struct pci_dev *pdev,
11217 struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
11219 *shost = pci_get_drvdata(pdev);
11220 if (*shost == NULL) {
11221 dev_err(&pdev->dev, "pdev's driver data is null\n");
11225 *ioc = shost_priv(*shost);
11226 if (*ioc == NULL) {
11227 dev_err(&pdev->dev, "shost's private data is null\n");
11235 * scsih_remove - detach and remove add host
11236 * @pdev: PCI device struct
11238 * Routine called when unloading the driver.
11240 static void scsih_remove(struct pci_dev *pdev)
11242 struct Scsi_Host *shost;
11243 struct MPT3SAS_ADAPTER *ioc;
11244 struct _sas_port *mpt3sas_port, *next_port;
11245 struct _raid_device *raid_device, *next;
11246 struct MPT3SAS_TARGET *sas_target_priv_data;
11247 struct _pcie_device *pcie_device, *pcienext;
11248 struct workqueue_struct *wq;
11249 unsigned long flags;
11250 Mpi2ConfigReply_t mpi_reply;
11251 struct hba_port *port, *port_next;
11253 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11256 ioc->remove_host = 1;
/* Surprise removal: the device is gone, so just flush what is queued. */
11258 if (!pci_device_is_present(pdev)) {
11259 mpt3sas_base_pause_mq_polling(ioc);
11260 _scsih_flush_running_cmds(ioc);
11263 _scsih_fw_event_cleanup_queue(ioc);
/* Detach the firmware-event workqueue under the lock, then destroy it. */
11265 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11266 wq = ioc->firmware_event_thread;
11267 ioc->firmware_event_thread = NULL;
11268 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11270 destroy_workqueue(wq);
11272 * Copy back the unmodified ioc page1. so that on next driver load,
11273 * current modified changes on ioc page1 won't take effect.
11275 if (ioc->is_aero_ioc)
11276 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11277 &ioc->ioc_pg1_copy);
11278 /* release all the volumes */
11279 _scsih_ir_shutdown(ioc);
11280 mpt3sas_destroy_debugfs(ioc);
11281 sas_remove_host(shost);
/* Tear down RAID volume targets and driver-side raid_device objects. */
11282 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
11284 if (raid_device->starget) {
11285 sas_target_priv_data =
11286 raid_device->starget->hostdata;
11287 sas_target_priv_data->deleted = 1;
11288 scsi_remove_target(&raid_device->starget->dev);
11290 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
11291 raid_device->handle, (u64)raid_device->wwid);
11292 _scsih_raid_device_remove(ioc, raid_device);
/* Tear down PCIe/NVMe devices; drop the list's reference on each. */
11294 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
11296 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
11297 list_del_init(&pcie_device->list);
11298 pcie_device_put(pcie_device);
11301 /* free ports attached to the sas_host */
11302 list_for_each_entry_safe(mpt3sas_port, next_port,
11303 &ioc->sas_hba.sas_port_list, port_list) {
11304 if (mpt3sas_port->remote_identify.device_type ==
11306 mpt3sas_device_remove_by_sas_address(ioc,
11307 mpt3sas_port->remote_identify.sas_address,
11308 mpt3sas_port->hba_port);
11309 else if (mpt3sas_port->remote_identify.device_type ==
11310 SAS_EDGE_EXPANDER_DEVICE ||
11311 mpt3sas_port->remote_identify.device_type ==
11312 SAS_FANOUT_EXPANDER_DEVICE)
11313 mpt3sas_expander_remove(ioc,
11314 mpt3sas_port->remote_identify.sas_address,
11315 mpt3sas_port->hba_port);
/* Release the hba_port table entries. */
11318 list_for_each_entry_safe(port, port_next,
11319 &ioc->port_table_list, list) {
11320 list_del(&port->list);
11324 /* free phys attached to the sas_host */
11325 if (ioc->sas_hba.num_phys) {
11326 kfree(ioc->sas_hba.phy);
11327 ioc->sas_hba.phy = NULL;
11328 ioc->sas_hba.num_phys = 0;
/* Final detach: base layer, ctl interface, global ioc list, shost ref. */
11331 mpt3sas_base_detach(ioc);
11332 mpt3sas_ctl_release(ioc);
11333 spin_lock(&gioc_lock);
11334 list_del(&ioc->list);
11335 spin_unlock(&gioc_lock);
11336 scsi_host_put(shost);
11340 * scsih_shutdown - routine call during system shutdown
11341 * @pdev: PCI device struct
11344 scsih_shutdown(struct pci_dev *pdev)
11346 struct Scsi_Host *shost;
11347 struct MPT3SAS_ADAPTER *ioc;
11348 struct workqueue_struct *wq;
11349 unsigned long flags;
11350 Mpi2ConfigReply_t mpi_reply;
11352 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11355 ioc->remove_host = 1;
/* Surprise removal: device already gone, just flush queued commands. */
11357 if (!pci_device_is_present(pdev)) {
11358 mpt3sas_base_pause_mq_polling(ioc);
11359 _scsih_flush_running_cmds(ioc);
11362 _scsih_fw_event_cleanup_queue(ioc);
/* Detach the firmware-event workqueue under the lock, then destroy it. */
11364 spin_lock_irqsave(&ioc->fw_event_lock, flags);
11365 wq = ioc->firmware_event_thread;
11366 ioc->firmware_event_thread = NULL;
11367 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11369 destroy_workqueue(wq);
11371 * Copy back the unmodified ioc page1 so that on next driver load,
11372 * current modified changes on ioc page1 won't take effect.
11374 if (ioc->is_aero_ioc)
11375 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11376 &ioc->ioc_pg1_copy);
/* Notify IR and NVMe subsystems of shutdown, then quiesce the IOC:
 * mask interrupts, stop the watchdog, soft-reset, release IRQ/MSI-X. */
11378 _scsih_ir_shutdown(ioc);
11379 _scsih_nvme_shutdown(ioc);
11380 mpt3sas_base_mask_interrupts(ioc);
11381 mpt3sas_base_stop_watchdog(ioc);
/* shost_recovery is set around the soft reset so the reset path treats
 * the IOC as recovering while it is made ready. */
11382 ioc->shost_recovery = 1;
11383 mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
11384 ioc->shost_recovery = 0;
11385 mpt3sas_base_free_irq(ioc);
11386 mpt3sas_base_disable_msix(ioc);
11391 * _scsih_probe_boot_devices - reports 1st device
11392 * @ioc: per adapter object
11394 * If specified in bios page 2, this routine reports the 1st
11395 * device scsi-ml or sas transport for persistent boot device
11396 * purposes. Please refer to function _scsih_determine_boot_device()
11399 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
11403 struct _sas_device *sas_device;
11404 struct _raid_device *raid_device;
11405 struct _pcie_device *pcie_device;
11407 u64 sas_address_parent;
11409 unsigned long flags;
11412 struct hba_port *port;
11414 /* no Bios, return immediately */
11415 if (!ioc->bios_pg3.BiosVersion)
/* Boot device priority: requested, then requested-alternate, then
 * current (as recorded during device discovery). */
11419 if (ioc->req_boot_device.device) {
11420 device = ioc->req_boot_device.device;
11421 channel = ioc->req_boot_device.channel;
11422 } else if (ioc->req_alt_boot_device.device) {
11423 device = ioc->req_alt_boot_device.device;
11424 channel = ioc->req_alt_boot_device.channel;
11425 } else if (ioc->current_boot_device.device) {
11426 device = ioc->current_boot_device.device;
11427 channel = ioc->current_boot_device.channel;
/* The channel tells us which driver object type "device" points at. */
11433 if (channel == RAID_CHANNEL) {
11434 raid_device = device;
11436 * If this boot vd is already registered with SML then
11437 * no need to register it again as part of device scanning
11438 * after diag reset during driver load operation.
11440 if (raid_device->starget)
11442 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11443 raid_device->id, 0);
11445 _scsih_raid_device_remove(ioc, raid_device);
11446 } else if (channel == PCIE_CHANNEL) {
11447 pcie_device = device;
11449 * If this boot NVMe device is already registered with SML then
11450 * no need to register it again as part of device scanning
11451 * after diag reset during driver load operation.
11453 if (pcie_device->starget)
/* Promote the boot NVMe device from the init list to the live list
 * before handing it to the SCSI midlayer. */
11455 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11456 tid = pcie_device->id;
11457 list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
11458 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11459 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
11461 _scsih_pcie_device_remove(ioc, pcie_device);
11463 sas_device = device;
11465 * If this boot sas/sata device is already registered with SML
11466 * then no need to register it again as part of device scanning
11467 * after diag reset during driver load operation.
11469 if (sas_device->starget)
/* Snapshot identity fields under the lock and move the device onto the
 * live sas_device_list before registering the transport port. */
11471 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11472 handle = sas_device->handle;
11473 sas_address_parent = sas_device->sas_address_parent;
11474 sas_address = sas_device->sas_address;
11475 port = sas_device->port;
11476 list_move_tail(&sas_device->list, &ioc->sas_device_list);
11477 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/* WarpDrive-style configurations hide member drives from the OS. */
11479 if (ioc->hide_drives)
/* On port-add failure remove the device; if the target never appeared
 * (and we are not still loading), undo the transport registration too. */
11485 if (!mpt3sas_transport_port_add(ioc, handle,
11486 sas_address_parent, port)) {
11487 _scsih_sas_device_remove(ioc, sas_device);
11488 } else if (!sas_device->starget) {
11489 if (!ioc->is_driver_loading) {
11490 mpt3sas_transport_port_remove(ioc,
11492 sas_address_parent, port);
11493 _scsih_sas_device_remove(ioc, sas_device);
11500 * _scsih_probe_raid - reporting raid volumes to scsi-ml
11501 * @ioc: per adapter object
11503 * Called during initial loading of the driver.
11506 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
11508 struct _raid_device *raid_device, *raid_next;
/* Register each not-yet-registered volume with the SCSI midlayer;
 * on failure, drop the driver-side raid_device object. */
11511 list_for_each_entry_safe(raid_device, raid_next,
11512 &ioc->raid_device_list, list) {
11513 if (raid_device->starget)
11515 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11516 raid_device->id, 0);
11518 _scsih_raid_device_remove(ioc, raid_device);
/*
 * get_next_sas_device - fetch the first entry of sas_device_init_list.
 * @ioc: per adapter object
 *
 * Takes a reference (sas_device_get) on the entry for the caller while
 * holding sas_device_lock; returns NULL when the init list is empty.
 * NOTE(review): the trailing "return sas_device;" is elided in this extract.
 */
11522 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
11524 struct _sas_device *sas_device = NULL;
11525 unsigned long flags;
11527 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11528 if (!list_empty(&ioc->sas_device_init_list)) {
11529 sas_device = list_first_entry(&ioc->sas_device_init_list,
11530 struct _sas_device, list);
/* Pin the entry so it survives after the lock is dropped. */
11531 sas_device_get(sas_device);
11533 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
/*
 * sas_device_make_active - move a sas device onto the active sas_device_list.
 * @ioc: per adapter object
 * @sas_device: device to activate (caller holds a reference)
 *
 * Reference discipline: the source list's reference is dropped (put) when
 * the entry is unlinked, and a fresh reference is taken (get) for the
 * destination list, all under sas_device_lock.
 */
11538 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11539 struct _sas_device *sas_device)
11541 unsigned long flags;
11543 spin_lock_irqsave(&ioc->sas_device_lock, flags);
11546 * Since we dropped the lock during the call to port_add(), we need to
11547 * be careful here that somebody else didn't move or delete this item
11548 * while we were busy with other things.
11550 * If it was on the list, we need a put() for the reference the list
11551 * had. Either way, we need a get() for the destination list.
11553 if (!list_empty(&sas_device->list)) {
11554 list_del_init(&sas_device->list);
11555 sas_device_put(sas_device);
11558 sas_device_get(sas_device);
11559 list_add_tail(&sas_device->list, &ioc->sas_device_list);
11561 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11565 * _scsih_probe_sas - reporting sas devices to sas transport
11566 * @ioc: per adapter object
11568 * Called during initial loading of the driver.
/* NOTE(review): several lines are elided in this extract (the early return
 * for hide_drives, the "continue" after the failed port_add, and the final
 * port argument of port_remove) -- confirm against the full file. */
11571 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
11573 struct _sas_device *sas_device;
/* WarpDrive-style configs hide physical drives from the OS. */
11575 if (ioc->hide_drives)
/* Drain the init list; each iteration owns one reference from
 * get_next_sas_device() and must put it on every path. */
11578 while ((sas_device = get_next_sas_device(ioc))) {
11579 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
11580 sas_device->sas_address_parent, sas_device->port)) {
/* Transport registration failed: drop the device entirely. */
11581 _scsih_sas_device_remove(ioc, sas_device);
11582 sas_device_put(sas_device);
11584 } else if (!sas_device->starget) {
11586 * When asyn scanning is enabled, its not possible to
11587 * remove devices while scanning is turned on due to an
11588 * oops in scsi_sysfs_add_sdev()->add_device()->
11589 * sysfs_addrm_start()
11591 if (!ioc->is_driver_loading) {
11592 mpt3sas_transport_port_remove(ioc,
11593 sas_device->sas_address,
11594 sas_device->sas_address_parent,
11596 _scsih_sas_device_remove(ioc, sas_device);
11597 sas_device_put(sas_device);
/* Success: promote the device to the active list. */
11601 sas_device_make_active(ioc, sas_device);
11602 sas_device_put(sas_device);
11607 * get_next_pcie_device - Get the next pcie device
11608 * @ioc: per adapter object
11610 * Get the next pcie device from pcie_device_init_list list.
11612 * Return: pcie device structure if pcie_device_init_list list is not empty
11613 * otherwise returns NULL
/* A reference is taken (pcie_device_get) for the caller under
 * pcie_device_lock, mirroring get_next_sas_device() above. */
11615 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
11617 struct _pcie_device *pcie_device = NULL;
11618 unsigned long flags;
11620 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11621 if (!list_empty(&ioc->pcie_device_init_list)) {
11622 pcie_device = list_first_entry(&ioc->pcie_device_init_list,
11623 struct _pcie_device, list);
/* Pin the entry so it survives after the lock is dropped. */
11624 pcie_device_get(pcie_device);
11626 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11628 return pcie_device;
11632 * pcie_device_make_active - Add pcie device to pcie_device_list list
11633 * @ioc: per adapter object
11634 * @pcie_device: pcie device object
11636 * Add the pcie device which has registered with SCSI Transport Later to
11637 * pcie_device_list list
/* Same put-then-get reference dance as sas_device_make_active(): drop the
 * source list's reference when unlinking, take a fresh one for the
 * destination list, all under pcie_device_lock. */
11639 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
11640 struct _pcie_device *pcie_device)
11642 unsigned long flags;
11644 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11646 if (!list_empty(&pcie_device->list)) {
11647 list_del_init(&pcie_device->list);
11648 pcie_device_put(pcie_device);
11650 pcie_device_get(pcie_device);
11651 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
11653 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11657 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
11658 * @ioc: per adapter object
11660 * Called during initial loading of the driver.
/* NOTE(review): several control-flow lines (continue statements, the
 * "if (rc)" guard after scsi_add_device) are elided in this extract. */
11663 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
11665 struct _pcie_device *pcie_device;
11668 /* PCIe Device List */
/* Each iteration owns one reference from get_next_pcie_device() and must
 * release it (pcie_device_put) on every path. */
11669 while ((pcie_device = get_next_pcie_device(ioc))) {
/* Already registered with the midlayer: nothing to do. */
11670 if (pcie_device->starget) {
11671 pcie_device_put(pcie_device);
/* Blocked devices are kept on the active list but not exposed. */
11674 if (pcie_device->access_status ==
11675 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
11676 pcie_device_make_active(ioc, pcie_device);
11677 pcie_device_put(pcie_device);
/* NVMe/PCIe devices are exposed on the dedicated PCIE_CHANNEL. */
11680 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
11681 pcie_device->id, 0);
11683 _scsih_pcie_device_remove(ioc, pcie_device);
11684 pcie_device_put(pcie_device);
11686 } else if (!pcie_device->starget) {
11688 * When async scanning is enabled, its not possible to
11689 * remove devices while scanning is turned on due to an
11690 * oops in scsi_sysfs_add_sdev()->add_device()->
11691 * sysfs_addrm_start()
11693 if (!ioc->is_driver_loading) {
11694 /* TODO-- Need to find out whether this condition will
11697 _scsih_pcie_device_remove(ioc, pcie_device);
11698 pcie_device_put(pcie_device);
11702 pcie_device_make_active(ioc, pcie_device);
11703 pcie_device_put(pcie_device);
11708 * _scsih_probe_devices - probing for devices
11709 * @ioc: per adapter object
11711 * Called during initial loading of the driver.
11714 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
11716 u16 volume_mapping_flags;
11718 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
11719 return; /* return when IOC doesn't support initiator mode */
/* Boot devices (set in BIOS page 2) are registered first so they get the
 * lowest scsi device ids. */
11721 _scsih_probe_boot_devices(ioc);
11723 if (ioc->ir_firmware) {
/* IR (integrated RAID) firmware: the volume-mapping mode decides whether
 * RAID volumes or bare SAS devices are reported first, which controls
 * their relative target-id ordering. */
11724 volume_mapping_flags =
11725 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
11726 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
11727 if (volume_mapping_flags ==
11728 MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
11729 _scsih_probe_raid(ioc);
11730 _scsih_probe_sas(ioc);
11732 _scsih_probe_sas(ioc);
11733 _scsih_probe_raid(ioc);
/* Non-IR firmware: plain SAS plus PCIe/NVMe devices. */
11736 _scsih_probe_sas(ioc);
11737 _scsih_probe_pcie(ioc);
11742 * scsih_scan_start - scsi lld callback for .scan_start
11743 * @shost: SCSI host pointer
11745 * The shost has the ability to discover targets on its own instead
11746 * of scanning the entire bus. In our implemention, we will kick off
11747 * firmware discovery.
11750 scsih_scan_start(struct Scsi_Host *shost)
11752 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
/* Arm the host trace diag buffer: explicit module parameter wins,
 * otherwise fall back to the manufacturing-page-11 sizing hint. */
11754 if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
11755 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
11756 else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
11757 mpt3sas_enable_diag_buffer(ioc, 1);
/* disable_discovery module parameter skips firmware discovery entirely. */
11759 if (disable_discovery > 0)
/* start_scan is polled by scsih_scan_finished() until discovery ends. */
11762 ioc->start_scan = 1;
11763 rc = mpt3sas_port_enable(ioc);
11766 ioc_info(ioc, "port enable: FAILED\n");
11770 * _scsih_complete_devices_scanning - add the devices to sml and
11771 * complete ioc initialization.
11772 * @ioc: per adapter object
11776 static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
/* Runs once port enable succeeded: report discovered devices to the SCSI
 * midlayer (if a diag reset deferred that), then restart the watchdog and
 * clear the driver-loading flag so normal event handling resumes. */
11779 if (ioc->wait_for_discovery_to_complete) {
11780 ioc->wait_for_discovery_to_complete = 0;
11781 _scsih_probe_devices(ioc);
11784 mpt3sas_base_start_watchdog(ioc);
11785 ioc->is_driver_loading = 0;
11789 * scsih_scan_finished - scsi lld callback for .scan_finished
11790 * @shost: SCSI host pointer
11791 * @time: elapsed time of the scan in jiffies
11793 * This function will be called periodicallyn until it returns 1 with the
11794 * scsi_host and the elapsed time of the scan in jiffies. In our implemention,
11795 * we wait for firmware discovery to complete, then return 1.
/* NOTE(review): the "return 0/1" statements and the declaration of
 * ioc_state are elided in this extract -- confirm against the full file. */
11798 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
11800 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11802 int issue_hard_reset = 0;
11804 if (disable_discovery > 0) {
11805 ioc->is_driver_loading = 0;
11806 ioc->wait_for_discovery_to_complete = 0;
/* Hard cap: give up on port enable after 300 seconds of polling. */
11810 if (time >= (300 * HZ)) {
11811 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11812 ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
11813 ioc->is_driver_loading = 0;
/* Discovery still in progress: check the IOC didn't fault/coredump. */
11817 if (ioc->start_scan) {
11818 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
11819 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
11820 mpt3sas_print_fault_code(ioc, ioc_state &
11821 MPI2_DOORBELL_DATA_MASK);
11822 issue_hard_reset = 1;
11824 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
11825 MPI2_IOC_STATE_COREDUMP) {
11826 mpt3sas_base_coredump_info(ioc, ioc_state &
11827 MPI2_DOORBELL_DATA_MASK);
/* Let the firmware finish writing its coredump before resetting. */
11828 mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
11829 issue_hard_reset = 1;
/* A diag reset during load aborts port enable; not treated as fatal. */
11835 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
11837 "port enable: aborted due to diag reset\n");
11838 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11841 if (ioc->start_scan_failed) {
11842 ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
11843 ioc->start_scan_failed);
11844 ioc->is_driver_loading = 0;
11845 ioc->wait_for_discovery_to_complete = 0;
/* Unrecoverable: flag the host for removal. */
11846 ioc->remove_host = 1;
11850 ioc_info(ioc, "port enable: SUCCESS\n");
11851 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11852 _scsih_complete_devices_scanning(ioc);
11855 if (issue_hard_reset) {
11856 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
11857 if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
11858 ioc->is_driver_loading = 0;
11864 * scsih_map_queues - map reply queues with request queues
11865 * @shost: SCSI host pointer
11867 static void scsih_map_queues(struct Scsi_Host *shost)
11869 struct MPT3SAS_ADAPTER *ioc =
11870 (struct MPT3SAS_ADAPTER *)shost->hostdata;
11871 struct blk_mq_queue_map *map;
11872 int i, qoff, offset;
/* Reply queues below iopoll_q_start_index are interrupt-driven (MSI-X);
 * the remainder are polled (HCTX_TYPE_POLL). */
11873 int nr_msix_vectors = ioc->iopoll_q_start_index;
11874 int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
/* Single hardware queue: nothing to map. */
11876 if (shost->nr_hw_queues == 1)
11879 for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
11880 map = &shost->tag_set.map[i];
11881 map->nr_queues = 0;
11883 if (i == HCTX_TYPE_DEFAULT) {
/* High-IOPS queues are reserved; they are the PCI irq-vector offset. */
11885 nr_msix_vectors - ioc->high_iops_queues;
11886 offset = ioc->high_iops_queues;
11887 } else if (i == HCTX_TYPE_POLL)
11888 map->nr_queues = iopoll_q_count;
/* The DEFAULT map must never be empty. */
11890 if (!map->nr_queues)
11891 BUG_ON(i == HCTX_TYPE_DEFAULT);
11894 * The poll queue(s) doesn't have an IRQ (and hence IRQ
11895 * affinity), so use the regular blk-mq cpu mapping
11897 map->queue_offset = qoff;
11898 if (i != HCTX_TYPE_POLL)
11899 blk_mq_pci_map_queues(map, ioc->pdev, offset);
11901 blk_mq_map_queues(map);
11903 qoff += map->nr_queues;
11907 /* shost template for SAS 2.0 HBA devices */
/* Unlike the SAS 3.0 template below, this one does not set .map_queues or
 * .mq_poll (no multi-hw-queue / io_uring polling on SAS 2.0 parts). */
11908 static const struct scsi_host_template mpt2sas_driver_template = {
11909 .module = THIS_MODULE,
11910 .name = "Fusion MPT SAS Host",
11911 .proc_name = MPT2SAS_DRIVER_NAME,
11912 .queuecommand = scsih_qcmd,
11913 .target_alloc = scsih_target_alloc,
11914 .slave_alloc = scsih_slave_alloc,
11915 .device_configure = scsih_device_configure,
11916 .target_destroy = scsih_target_destroy,
11917 .slave_destroy = scsih_slave_destroy,
11918 .scan_finished = scsih_scan_finished,
11919 .scan_start = scsih_scan_start,
11920 .change_queue_depth = scsih_change_queue_depth,
11921 .eh_abort_handler = scsih_abort,
11922 .eh_device_reset_handler = scsih_dev_reset,
11923 .eh_target_reset_handler = scsih_target_reset,
11924 .eh_host_reset_handler = scsih_host_reset,
11925 .bios_param = scsih_bios_param,
11928 .sg_tablesize = MPT2SAS_SG_DEPTH,
11929 .max_sectors = 32767,
11931 .shost_groups = mpt3sas_host_groups,
11932 .sdev_groups = mpt3sas_dev_groups,
11933 .track_queue_depth = 1,
/* Per-command private data used by the driver's scsiio tracking. */
11934 .cmd_size = sizeof(struct scsiio_tracker),
11937 /* raid transport support for SAS 2.0 HBA devices */
/* .cookie ties these raid-class callbacks to hosts created from the
 * mpt2sas host template above. */
11938 static struct raid_function_template mpt2sas_raid_functions = {
11939 .cookie = &mpt2sas_driver_template,
11940 .is_raid = scsih_is_raid,
11941 .get_resync = scsih_get_resync,
11942 .get_state = scsih_get_state,
11945 /* shost template for SAS 3.0 HBA devices */
/* Adds .map_queues and .mq_poll over the SAS 2.0 template, enabling
 * multiple hardware queues and polled (HCTX_TYPE_POLL) I/O. */
11946 static const struct scsi_host_template mpt3sas_driver_template = {
11947 .module = THIS_MODULE,
11948 .name = "Fusion MPT SAS Host",
11949 .proc_name = MPT3SAS_DRIVER_NAME,
11950 .queuecommand = scsih_qcmd,
11951 .target_alloc = scsih_target_alloc,
11952 .slave_alloc = scsih_slave_alloc,
11953 .device_configure = scsih_device_configure,
11954 .target_destroy = scsih_target_destroy,
11955 .slave_destroy = scsih_slave_destroy,
11956 .scan_finished = scsih_scan_finished,
11957 .scan_start = scsih_scan_start,
11958 .change_queue_depth = scsih_change_queue_depth,
11959 .eh_abort_handler = scsih_abort,
11960 .eh_device_reset_handler = scsih_dev_reset,
11961 .eh_target_reset_handler = scsih_target_reset,
11962 .eh_host_reset_handler = scsih_host_reset,
11963 .bios_param = scsih_bios_param,
11966 .sg_tablesize = MPT3SAS_SG_DEPTH,
11967 .max_sectors = 32767,
11968 .max_segment_size = 0xffffffff,
11969 .cmd_per_lun = 128,
11970 .shost_groups = mpt3sas_host_groups,
11971 .sdev_groups = mpt3sas_dev_groups,
11972 .track_queue_depth = 1,
11973 .cmd_size = sizeof(struct scsiio_tracker),
11974 .map_queues = scsih_map_queues,
11975 .mq_poll = mpt3sas_blk_mq_poll,
11978 /* raid transport support for SAS 3.0 HBA devices */
/* Same callbacks as the SAS 2.0 variant; only the .cookie (owning host
 * template) differs. */
11979 static struct raid_function_template mpt3sas_raid_functions = {
11980 .cookie = &mpt3sas_driver_template,
11981 .is_raid = scsih_is_raid,
11982 .get_resync = scsih_get_resync,
11983 .get_state = scsih_get_state,
11987 * _scsih_determine_hba_mpi_version - determine in which MPI version class
11988 * this device belongs to.
11989 * @pdev: PCI device struct
11991 * return MPI2_VERSION for SAS 2.0 HBA devices,
11992 * MPI25_VERSION for SAS 3.0 HBA devices, and
11993 * MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices
/* NOTE(review): the default case (presumably returning 0, given the
 * "hba_mpi_version == 0" check in _scsih_probe) is elided in this extract. */
11996 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
11999 switch (pdev->device) {
/* SAS 2.0 generation (20xx/21xx/22xx/23xx families + SSS6200/MPI EP). */
12000 case MPI2_MFGPAGE_DEVID_SSS6200:
12001 case MPI2_MFGPAGE_DEVID_SAS2004:
12002 case MPI2_MFGPAGE_DEVID_SAS2008:
12003 case MPI2_MFGPAGE_DEVID_SAS2108_1:
12004 case MPI2_MFGPAGE_DEVID_SAS2108_2:
12005 case MPI2_MFGPAGE_DEVID_SAS2108_3:
12006 case MPI2_MFGPAGE_DEVID_SAS2116_1:
12007 case MPI2_MFGPAGE_DEVID_SAS2116_2:
12008 case MPI2_MFGPAGE_DEVID_SAS2208_1:
12009 case MPI2_MFGPAGE_DEVID_SAS2208_2:
12010 case MPI2_MFGPAGE_DEVID_SAS2208_3:
12011 case MPI2_MFGPAGE_DEVID_SAS2208_4:
12012 case MPI2_MFGPAGE_DEVID_SAS2208_5:
12013 case MPI2_MFGPAGE_DEVID_SAS2208_6:
12014 case MPI2_MFGPAGE_DEVID_SAS2308_1:
12015 case MPI2_MFGPAGE_DEVID_SAS2308_2:
12016 case MPI2_MFGPAGE_DEVID_SAS2308_3:
12017 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
12018 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
12019 return MPI2_VERSION;
/* SAS 3.0 generation (3004/3008/3108 families). */
12020 case MPI25_MFGPAGE_DEVID_SAS3004:
12021 case MPI25_MFGPAGE_DEVID_SAS3008:
12022 case MPI25_MFGPAGE_DEVID_SAS3108_1:
12023 case MPI25_MFGPAGE_DEVID_SAS3108_2:
12024 case MPI25_MFGPAGE_DEVID_SAS3108_5:
12025 case MPI25_MFGPAGE_DEVID_SAS3108_6:
12026 return MPI25_VERSION;
/* MPI 2.6 generation (32xx/33xx/34xx/35xx/36xx/38xx/39xx + Atlas switch),
 * including the security-mode variants of the 3816/3916. */
12027 case MPI26_MFGPAGE_DEVID_SAS3216:
12028 case MPI26_MFGPAGE_DEVID_SAS3224:
12029 case MPI26_MFGPAGE_DEVID_SAS3316_1:
12030 case MPI26_MFGPAGE_DEVID_SAS3316_2:
12031 case MPI26_MFGPAGE_DEVID_SAS3316_3:
12032 case MPI26_MFGPAGE_DEVID_SAS3316_4:
12033 case MPI26_MFGPAGE_DEVID_SAS3324_1:
12034 case MPI26_MFGPAGE_DEVID_SAS3324_2:
12035 case MPI26_MFGPAGE_DEVID_SAS3324_3:
12036 case MPI26_MFGPAGE_DEVID_SAS3324_4:
12037 case MPI26_MFGPAGE_DEVID_SAS3508:
12038 case MPI26_MFGPAGE_DEVID_SAS3508_1:
12039 case MPI26_MFGPAGE_DEVID_SAS3408:
12040 case MPI26_MFGPAGE_DEVID_SAS3516:
12041 case MPI26_MFGPAGE_DEVID_SAS3516_1:
12042 case MPI26_MFGPAGE_DEVID_SAS3416:
12043 case MPI26_MFGPAGE_DEVID_SAS3616:
12044 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
12045 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
12046 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
12047 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
12048 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
12049 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
12050 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
12051 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
12052 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
12053 return MPI26_VERSION;
12059 * _scsih_probe - attach and add scsi host
12060 * @pdev: PCI device struct
12061 * @id: pci device id
12063 * Return: 0 success, anything else error.
/* NOTE(review): this extract elides many source lines (returns, braces,
 * case/default labels, error-path labels) -- the annotations below describe
 * only what the visible code shows. */
12066 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
12068 struct MPT3SAS_ADAPTER *ioc;
12069 struct Scsi_Host *shost = NULL;
12071 u16 hba_mpi_version;
12072 int iopoll_q_count = 0;
12074 /* Determine in which MPI version class this pci device belongs */
12075 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
12076 if (hba_mpi_version == 0)
12079 /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
12080 * for other generation HBA's return with -ENODEV
12082 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
12085 /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
12086 * for other generation HBA's return with -ENODEV
12088 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
12089 || hba_mpi_version == MPI26_VERSION)))
/* Per-generation setup: host template, instance id counter, driver name. */
12092 switch (hba_mpi_version) {
/* SAS 2.0 parts: ASPM must be disabled before use. */
12094 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
12095 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
12096 /* Use mpt2sas driver host template for SAS 2.0 HBA's */
12097 shost = scsi_host_alloc(&mpt2sas_driver_template,
12098 sizeof(struct MPT3SAS_ADAPTER));
12101 ioc = shost_priv(shost);
12102 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12103 ioc->hba_mpi_version_belonged = hba_mpi_version;
12104 ioc->id = mpt2_ids++;
12105 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
12106 switch (pdev->device) {
12107 case MPI2_MFGPAGE_DEVID_SSS6200:
/* SSS6200 is a WarpDrive: IR messages are hidden from logs. */
12108 ioc->is_warpdrive = 1;
12109 ioc->hide_ir_msg = 1;
12111 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
12112 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
12113 ioc->is_mcpu_endpoint = 1;
12116 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
/* multipath_on_hba module param: default off for SAS 2.0. */
12120 if (multipath_on_hba == -1 || multipath_on_hba == 0)
12121 ioc->multipath_on_hba = 0;
12123 ioc->multipath_on_hba = 1;
12126 case MPI25_VERSION:
12127 case MPI26_VERSION:
12128 /* Use mpt3sas driver host template for SAS 3.0 HBA's */
12129 shost = scsi_host_alloc(&mpt3sas_driver_template,
12130 sizeof(struct MPT3SAS_ADAPTER));
12133 ioc = shost_priv(shost);
12134 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12135 ioc->hba_mpi_version_belonged = hba_mpi_version;
12136 ioc->id = mpt3_ids++;
12137 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
12138 switch (pdev->device) {
12139 case MPI26_MFGPAGE_DEVID_SAS3508:
12140 case MPI26_MFGPAGE_DEVID_SAS3508_1:
12141 case MPI26_MFGPAGE_DEVID_SAS3408:
12142 case MPI26_MFGPAGE_DEVID_SAS3516:
12143 case MPI26_MFGPAGE_DEVID_SAS3516_1:
12144 case MPI26_MFGPAGE_DEVID_SAS3416:
12145 case MPI26_MFGPAGE_DEVID_SAS3616:
12146 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
12147 ioc->is_gen35_ioc = 1;
/* Invalid/tampered security-mode parts: logged, not usable. */
12149 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
12150 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
12151 dev_err(&pdev->dev,
12152 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
12153 pdev->device, pdev->subsystem_vendor,
12154 pdev->subsystem_device);
12156 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
12157 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
12158 dev_err(&pdev->dev,
12159 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
12160 pdev->device, pdev->subsystem_vendor,
12161 pdev->subsystem_device);
12163 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
12164 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
12165 dev_info(&pdev->dev,
12166 "HBA is in Configurable Secure mode\n");
12168 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
12169 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
/* 3816/3916 are Aero/Sea controllers; they count as gen3.5 too. */
12170 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
12173 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
/* Combined reply queues: SAS3 C0+ silicon and all MPI 2.6 parts. */
12175 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
12176 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
12177 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
12178 ioc->combined_reply_queue = 1;
12179 if (ioc->is_gen35_ioc)
12180 ioc->combined_reply_index_count =
12181 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
12183 ioc->combined_reply_index_count =
12184 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
/* multipath_on_hba default: off for pre-gen3.5, on for gen3.5. */
12187 switch (ioc->is_gen35_ioc) {
12189 if (multipath_on_hba == -1 || multipath_on_hba == 0)
12190 ioc->multipath_on_hba = 0;
12192 ioc->multipath_on_hba = 1;
12195 if (multipath_on_hba == -1 || multipath_on_hba > 0)
12196 ioc->multipath_on_hba = 1;
12198 ioc->multipath_on_hba = 0;
/* Register this adapter on the driver-global ioc list. */
12209 INIT_LIST_HEAD(&ioc->list);
12210 spin_lock(&gioc_lock);
12211 list_add_tail(&ioc->list, &mpt3sas_ioc_list);
12212 spin_unlock(&gioc_lock);
12213 ioc->shost = shost;
/* Wire up the module-scope callback indexes registered at init time. */
12215 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
12216 ioc->tm_cb_idx = tm_cb_idx;
12217 ioc->ctl_cb_idx = ctl_cb_idx;
12218 ioc->base_cb_idx = base_cb_idx;
12219 ioc->port_enable_cb_idx = port_enable_cb_idx;
12220 ioc->transport_cb_idx = transport_cb_idx;
12221 ioc->scsih_cb_idx = scsih_cb_idx;
12222 ioc->config_cb_idx = config_cb_idx;
12223 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
12224 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
12225 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
12226 ioc->logging_level = logging_level;
12227 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
12228 /* Host waits for minimum of six seconds */
12229 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
12231 * Enable MEMORY MOVE support flag.
12233 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
12234 /* Enable ADDITIONAL QUERY support flag. */
12235 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
12237 ioc->enable_sdev_max_qd = enable_sdev_max_qd;
12239 /* misc semaphores and spin locks */
12240 mutex_init(&ioc->reset_in_progress_mutex);
12241 mutex_init(&ioc->hostdiag_unlock_mutex);
12242 /* initializing pci_access_mutex lock */
12243 mutex_init(&ioc->pci_access_mutex);
12244 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
12245 spin_lock_init(&ioc->scsi_lookup_lock);
12246 spin_lock_init(&ioc->sas_device_lock);
12247 spin_lock_init(&ioc->sas_node_lock);
12248 spin_lock_init(&ioc->fw_event_lock);
12249 spin_lock_init(&ioc->raid_device_lock);
12250 spin_lock_init(&ioc->pcie_device_lock);
12251 spin_lock_init(&ioc->diag_trigger_lock);
12253 INIT_LIST_HEAD(&ioc->sas_device_list);
12254 INIT_LIST_HEAD(&ioc->sas_device_init_list);
12255 INIT_LIST_HEAD(&ioc->sas_expander_list);
12256 INIT_LIST_HEAD(&ioc->enclosure_list);
12257 INIT_LIST_HEAD(&ioc->pcie_device_list);
12258 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
12259 INIT_LIST_HEAD(&ioc->fw_event_list);
12260 INIT_LIST_HEAD(&ioc->raid_device_list);
12261 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
12262 INIT_LIST_HEAD(&ioc->delayed_tr_list);
12263 INIT_LIST_HEAD(&ioc->delayed_sc_list);
12264 INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
12265 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
12266 INIT_LIST_HEAD(&ioc->reply_queue_list);
12267 INIT_LIST_HEAD(&ioc->port_table_list);
/* e.g. "mpt3sas_cm0" -- used as the log prefix by ioc_info() et al. */
12269 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
12271 /* init shost parameters */
12272 shost->max_cmd_len = 32;
12273 shost->max_lun = max_lun;
12274 shost->transportt = mpt3sas_transport_template;
12275 shost->unique_id = ioc->id;
12277 if (ioc->is_mcpu_endpoint) {
12278 /* mCPU MPI support 64K max IO */
12279 shost->max_sectors = 128;
12280 ioc_info(ioc, "The max_sectors value is set to %d\n",
12281 shost->max_sectors);
/* Validate the max_sectors module param; clamp to [64, 32767] and
 * round down to an even value. 0xFFFF means "not specified". */
12283 if (max_sectors != 0xFFFF) {
12284 if (max_sectors < 64) {
12285 shost->max_sectors = 64;
12286 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
12288 } else if (max_sectors > 32767) {
12289 shost->max_sectors = 32767;
12290 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
12293 shost->max_sectors = max_sectors & 0xFFFE;
12294 ioc_info(ioc, "The max_sectors value is set to %d\n",
12295 shost->max_sectors);
12299 /* register EEDP capabilities with SCSI layer */
12300 if (prot_mask >= 0)
12301 scsi_host_set_prot(shost, (prot_mask & 0x07));
12303 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
12304 | SHOST_DIF_TYPE2_PROTECTION
12305 | SHOST_DIF_TYPE3_PROTECTION);
12307 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
/* Ordered workqueue serializes firmware event processing per adapter. */
12310 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
12311 "fw_event_%s%d", ioc->driver_name, ioc->id);
12312 ioc->firmware_event_thread = alloc_ordered_workqueue(
12313 ioc->firmware_event_name, 0);
12314 if (!ioc->firmware_event_thread) {
12315 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12316 __FILE__, __LINE__, __func__);
12318 goto out_thread_fail;
/* Shared host-wide tag set only for gen3.5 IOCs when enabled by param. */
12321 shost->host_tagset = 0;
12323 if (ioc->is_gen35_ioc && host_tagset_enable)
12324 shost->host_tagset = 1;
12326 ioc->is_driver_loading = 1;
12327 if ((mpt3sas_base_attach(ioc))) {
12328 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12329 __FILE__, __LINE__, __func__);
12331 goto out_attach_fail;
/* WarpDrive: decide drive visibility from mfg page 10 / volume count. */
12334 if (ioc->is_warpdrive) {
12335 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
12336 ioc->hide_drives = 0;
12337 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
12338 ioc->hide_drives = 1;
12340 if (mpt3sas_get_num_volumes(ioc))
12341 ioc->hide_drives = 1;
12343 ioc->hide_drives = 0;
12346 ioc->hide_drives = 0;
12348 shost->nr_hw_queues = 1;
12350 if (shost->host_tagset) {
12351 shost->nr_hw_queues =
12352 ioc->reply_queue_count - ioc->high_iops_queues;
12355 ioc->reply_queue_count - ioc->iopoll_q_start_index;
/* 3 maps (default/read/poll) when poll queues exist, else just default. */
12357 shost->nr_maps = iopoll_q_count ? 3 : 1;
12359 dev_info(&ioc->pdev->dev,
12360 "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
12361 shost->can_queue, shost->nr_hw_queues);
12364 rv = scsi_add_host(shost, &pdev->dev);
12366 ioc_err(ioc, "failure at %s:%d/%s()!\n",
12367 __FILE__, __LINE__, __func__);
12368 goto out_add_shost_fail;
12371 scsi_scan_host(shost);
12372 mpt3sas_setup_debugfs(ioc);
/* Error unwind: undo attach, workqueue, global-list entry, host ref,
 * in reverse order of acquisition. */
12374 out_add_shost_fail:
12375 mpt3sas_base_detach(ioc);
12377 destroy_workqueue(ioc->firmware_event_thread);
12379 spin_lock(&gioc_lock);
12380 list_del(&ioc->list);
12381 spin_unlock(&gioc_lock);
12382 scsi_host_put(shost);
12387 * scsih_suspend - power management suspend main entry point
12388 * @dev: Device struct
12390 * Return: 0 success, anything else error.
/* Quiesce order: stop watchdog, block new requests, shut down NVMe
 * namespaces, then release adapter resources. */
12392 static int __maybe_unused
12393 scsih_suspend(struct device *dev)
12395 struct pci_dev *pdev = to_pci_dev(dev);
12396 struct Scsi_Host *shost;
12397 struct MPT3SAS_ADAPTER *ioc;
12400 rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12404 mpt3sas_base_stop_watchdog(ioc);
12405 scsi_block_requests(shost);
12406 _scsih_nvme_shutdown(ioc);
12407 ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
12408 pdev, pci_name(pdev));
12410 mpt3sas_base_free_resources(ioc);
12415 * scsih_resume - power management resume main entry point
12416 * @dev: Device struct
12418 * Return: 0 success, anything else error.
/* Mirror of scsih_suspend(): remap resources, hard-reset the IOC to a
 * known state, unblock requests, restart the watchdog. */
12420 static int __maybe_unused
12421 scsih_resume(struct device *dev)
12423 struct pci_dev *pdev = to_pci_dev(dev);
12424 struct Scsi_Host *shost;
12425 struct MPT3SAS_ADAPTER *ioc;
12426 pci_power_t device_state = pdev->current_state;
12429 r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12433 ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
12434 pdev, pci_name(pdev), device_state);
12437 r = mpt3sas_base_map_resources(ioc);
12440 ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
12441 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
12442 scsi_unblock_requests(shost);
12443 mpt3sas_base_start_watchdog(ioc);
12448 * scsih_pci_error_detected - Called when a PCI error is detected.
12449 * @pdev: PCI device struct
12450 * @state: PCI channel state
12452 * Description: Called when a PCI error is detected.
12454 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
12456 static pci_ers_result_t
12457 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
12459 struct Scsi_Host *shost;
12460 struct MPT3SAS_ADAPTER *ioc;
12462 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12463 return PCI_ERS_RESULT_DISCONNECT;
12465 ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
12468 case pci_channel_io_normal:
/* Non-fatal: let scsih_pci_mmio_enabled() gather state. */
12469 return PCI_ERS_RESULT_CAN_RECOVER;
12470 case pci_channel_io_frozen:
12471 /* Fatal error, prepare for slot reset */
/* pci_error_recovery suppresses resets/IO until slot_reset clears it. */
12472 ioc->pci_error_recovery = 1;
12473 scsi_block_requests(ioc->shost);
12474 mpt3sas_base_stop_watchdog(ioc);
12475 mpt3sas_base_free_resources(ioc);
12476 return PCI_ERS_RESULT_NEED_RESET;
12477 case pci_channel_io_perm_failure:
12478 /* Permanent error, prepare for device removal */
12479 ioc->pci_error_recovery = 1;
12480 mpt3sas_base_stop_watchdog(ioc);
12481 mpt3sas_base_pause_mq_polling(ioc);
/* Fail back all outstanding commands before the device disappears. */
12482 _scsih_flush_running_cmds(ioc);
12483 return PCI_ERS_RESULT_DISCONNECT;
12485 return PCI_ERS_RESULT_NEED_RESET;
12489 * scsih_pci_slot_reset - Called when PCI slot has been reset.
12490 * @pdev: PCI device struct
12492 * Description: This routine is called by the pci error recovery
12493 * code after the PCI slot has been reset, just before we
12494 * should resume normal operations.
12496 static pci_ers_result_t
12497 scsih_pci_slot_reset(struct pci_dev *pdev)
12499 struct Scsi_Host *shost;
12500 struct MPT3SAS_ADAPTER *ioc;
12503 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12504 return PCI_ERS_RESULT_DISCONNECT;
12506 ioc_info(ioc, "PCI error: slot reset callback!!\n");
/* Clear the recovery flag set in scsih_pci_error_detected() so the
 * hard reset below is allowed to proceed. */
12508 ioc->pci_error_recovery = 0;
12510 pci_restore_state(pdev);
12511 rc = mpt3sas_base_map_resources(ioc);
12513 return PCI_ERS_RESULT_DISCONNECT;
12515 ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
12516 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
12518 ioc_warn(ioc, "hard reset: %s\n",
12519 (rc == 0) ? "success" : "failed");
12522 return PCI_ERS_RESULT_RECOVERED;
12524 return PCI_ERS_RESULT_DISCONNECT;
12528 * scsih_pci_resume() - resume normal ops after PCI reset
12529 * @pdev: pointer to PCI device
12531 * Called when the error recovery driver tells us that its
12532 * OK to resume normal operation. Use completion to allow
12533 * halted scsi ops to resume.
12536 scsih_pci_resume(struct pci_dev *pdev)
12538 struct Scsi_Host *shost;
12539 struct MPT3SAS_ADAPTER *ioc;
12541 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12544 ioc_info(ioc, "PCI error: resume callback!!\n");
/* Undo what scsih_pci_error_detected() froze: restart the watchdog and
 * let blocked SCSI requests flow again. */
12546 mpt3sas_base_start_watchdog(ioc);
12547 scsi_unblock_requests(ioc->shost);
12551 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
12552 * @pdev: pointer to PCI device
12554 static pci_ers_result_t
12555 scsih_pci_mmio_enabled(struct pci_dev *pdev)
12557 struct Scsi_Host *shost;
12558 struct MPT3SAS_ADAPTER *ioc;
12560 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12561 return PCI_ERS_RESULT_DISCONNECT;
12563 ioc_info(ioc, "PCI error: mmio enabled callback!!\n")
12565 /* TODO - dump whatever for debugging purposes */
12567 /* This called only if scsih_pci_error_detected returns
12568 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
12569 * works, no need to reset slot.
12571 return PCI_ERS_RESULT_RECOVERED;
12575 * scsih_ncq_prio_supp - Check for NCQ command priority support
12576 * @sdev: scsi device struct
12578 * This is called when a user indicates they would like to enable
12579 * ncq command priorities. This works only on SATA devices.
/* Reads bit 4 of byte 213 of VPD page 0x89 (ATA Information) under RCU.
 * NOTE(review): the rcu_read_lock()/unlock() and "goto out" lines appear
 * elided in this extract -- confirm against the full file. */
12581 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
12583 struct scsi_vpd *vpd;
12584 bool ncq_prio_supp = false;
12587 vpd = rcu_dereference(sdev->vpd_pg89);
/* Page must be present and long enough to contain byte 213. */
12588 if (!vpd || vpd->len < 214)
12591 ncq_prio_supp = (vpd->data[213] >> 4) & 1;
12595 return ncq_prio_supp;
/* PCI ID table: one entry per supported controller; matched against
 * mpt3sas_driver.id_table by the PCI core at probe time. */
12598 * The pci device ids are defined in mpi/mpi2_cnfg.h.
12600 static const struct pci_device_id mpt3sas_pci_table[] = {
12601 /* Spitfire ~ 2004 */
12602 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
12603 PCI_ANY_ID, PCI_ANY_ID },
12604 /* Falcon ~ 2008 */
12605 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
12606 PCI_ANY_ID, PCI_ANY_ID },
12607 /* Liberator ~ 2108 */
12608 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
12609 PCI_ANY_ID, PCI_ANY_ID },
12610 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
12611 PCI_ANY_ID, PCI_ANY_ID },
12612 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
12613 PCI_ANY_ID, PCI_ANY_ID },
12614 /* Meteor ~ 2116 */
12615 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
12616 PCI_ANY_ID, PCI_ANY_ID },
12617 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
12618 PCI_ANY_ID, PCI_ANY_ID },
12619 /* Thunderbolt ~ 2208 */
12620 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
12621 PCI_ANY_ID, PCI_ANY_ID },
12622 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
12623 PCI_ANY_ID, PCI_ANY_ID },
12624 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
12625 PCI_ANY_ID, PCI_ANY_ID },
12626 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
12627 PCI_ANY_ID, PCI_ANY_ID },
12628 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
12629 PCI_ANY_ID, PCI_ANY_ID },
12630 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
12631 PCI_ANY_ID, PCI_ANY_ID },
12632 /* Mustang ~ 2308 */
12633 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
12634 PCI_ANY_ID, PCI_ANY_ID },
12635 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
12636 PCI_ANY_ID, PCI_ANY_ID },
12637 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
12638 PCI_ANY_ID, PCI_ANY_ID },
12639 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
12640 PCI_ANY_ID, PCI_ANY_ID },
12641 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
12642 PCI_ANY_ID, PCI_ANY_ID },
/* SSS6200 */
12644 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
12645 PCI_ANY_ID, PCI_ANY_ID },
12646 /* Fury ~ 3004 and 3008 */
12647 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
12648 PCI_ANY_ID, PCI_ANY_ID },
12649 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
12650 PCI_ANY_ID, PCI_ANY_ID },
12651 /* Invader ~ 3108 */
12652 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
12653 PCI_ANY_ID, PCI_ANY_ID },
12654 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
12655 PCI_ANY_ID, PCI_ANY_ID },
12656 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
12657 PCI_ANY_ID, PCI_ANY_ID },
12658 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
12659 PCI_ANY_ID, PCI_ANY_ID },
12660 /* Cutlass ~ 3216 and 3224 */
12661 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
12662 PCI_ANY_ID, PCI_ANY_ID },
12663 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
12664 PCI_ANY_ID, PCI_ANY_ID },
12665 /* Intruder ~ 3316 and 3324 */
12666 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
12667 PCI_ANY_ID, PCI_ANY_ID },
12668 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
12669 PCI_ANY_ID, PCI_ANY_ID },
12670 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
12671 PCI_ANY_ID, PCI_ANY_ID },
12672 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
12673 PCI_ANY_ID, PCI_ANY_ID },
12674 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
12675 PCI_ANY_ID, PCI_ANY_ID },
12676 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
12677 PCI_ANY_ID, PCI_ANY_ID },
12678 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
12679 PCI_ANY_ID, PCI_ANY_ID },
12680 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
12681 PCI_ANY_ID, PCI_ANY_ID },
12682 /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
12683 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
12684 PCI_ANY_ID, PCI_ANY_ID },
12685 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
12686 PCI_ANY_ID, PCI_ANY_ID },
12687 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
12688 PCI_ANY_ID, PCI_ANY_ID },
12689 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
12690 PCI_ANY_ID, PCI_ANY_ID },
12691 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
12692 PCI_ANY_ID, PCI_ANY_ID },
12693 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
12694 PCI_ANY_ID, PCI_ANY_ID },
12695 /* Mercator ~ 3616*/
12696 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
12697 PCI_ANY_ID, PCI_ANY_ID },
12699 /* Aero SI 0x00E1 Configurable Secure
12700 * 0x00E2 Hard Secure
12702 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
12703 PCI_ANY_ID, PCI_ANY_ID },
12704 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
12705 PCI_ANY_ID, PCI_ANY_ID },
12708 * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
12710 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
12711 PCI_ANY_ID, PCI_ANY_ID },
12712 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
12713 PCI_ANY_ID, PCI_ANY_ID },
12715 /* Atlas PCIe Switch Management Port */
12716 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
12717 PCI_ANY_ID, PCI_ANY_ID },
12719 /* Sea SI 0x00E5 Configurable Secure
12720 * 0x00E6 Hard Secure
12722 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
12723 PCI_ANY_ID, PCI_ANY_ID },
12724 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12725 PCI_ANY_ID, PCI_ANY_ID },
12728 * ATTO Branded ExpressSAS H12xx GT
12730 { MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12731 PCI_ANY_ID, PCI_ANY_ID },
12734 * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
12736 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
12737 PCI_ANY_ID, PCI_ANY_ID },
12738 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
12739 PCI_ANY_ID, PCI_ANY_ID },
12741 {0} /* Terminating entry */
/* Expose the ID table so userspace (depmod/modprobe) can autoload us. */
12743 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
/* PCI error-recovery (AER/EEH) callback table, wired into mpt3sas_driver. */
12745 static struct pci_error_handlers _mpt3sas_err_handler = {
12746 .error_detected = scsih_pci_error_detected,
12747 .mmio_enabled = scsih_pci_mmio_enabled,
12748 .slot_reset = scsih_pci_slot_reset,
12749 .resume = scsih_pci_resume,
/* System suspend/resume PM ops, referenced by mpt3sas_driver.driver.pm. */
12752 static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);
/* pci_driver glue: binds mpt3sas_pci_table IDs to probe/remove/shutdown,
 * error recovery, and power-management callbacks. */
12754 static struct pci_driver mpt3sas_driver = {
12755 .name = MPT3SAS_DRIVER_NAME,
12756 .id_table = mpt3sas_pci_table,
12757 .probe = _scsih_probe,
12758 .remove = scsih_remove,
12759 .shutdown = scsih_shutdown,
12760 .err_handler = &_mpt3sas_err_handler,
12761 .driver.pm = &scsih_pm_ops,
12765 * scsih_init - main entry point for this driver.
12767 * Return: 0 success, anything else error.
/* Register every MPT reply-descriptor callback with the base driver;
 * each registration returns an index used later to dispatch completions. */
12775 mpt3sas_base_initialize_callback_handler();
12777 /* queuecommand callback handler */
12778 scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
12780 /* task management callback handler */
12781 tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
12783 /* base internal commands callback handler */
12784 base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
/* port enable callback handler */
12785 port_enable_cb_idx = mpt3sas_base_register_callback_handler(
12786 mpt3sas_port_enable_done);
12788 /* transport internal commands callback handler */
12789 transport_cb_idx = mpt3sas_base_register_callback_handler(
12790 mpt3sas_transport_done);
12792 /* scsih internal commands callback handler */
12793 scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
12795 /* configuration page API internal commands callback handler */
12796 config_cb_idx = mpt3sas_base_register_callback_handler(
12797 mpt3sas_config_done);
12799 /* ctl module callback handler */
12800 ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
/* Completion handlers for internally generated task management --
 * presumably target-reset (tr), volume target-reset, and SAS I/O unit
 * control requests, judging by the handler names; confirm in full source. */
12802 tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
12803 _scsih_tm_tr_complete);
12805 tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
12806 _scsih_tm_volume_tr_complete);
12808 tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
12809 _scsih_sas_control_complete);
12811 mpt3sas_init_debugfs();
12816 * scsih_exit - exit point for this driver (when it is a module).
12818 * Return: 0 success, anything else error.
/* Release every callback index registered in scsih_init(), in the same
 * order they were registered. */
12824 mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
12825 mpt3sas_base_release_callback_handler(tm_cb_idx);
12826 mpt3sas_base_release_callback_handler(base_cb_idx);
12827 mpt3sas_base_release_callback_handler(port_enable_cb_idx);
12828 mpt3sas_base_release_callback_handler(transport_cb_idx);
12829 mpt3sas_base_release_callback_handler(scsih_cb_idx);
12830 mpt3sas_base_release_callback_handler(config_cb_idx);
12831 mpt3sas_base_release_callback_handler(ctl_cb_idx);
12833 mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
12834 mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
12835 mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
12837 /* raid transport support */
/* Release only the raid templates that were attached: hbas_to_enumerate
 * == 1 skips the mpt3sas template, == 2 skips the mpt2sas template. */
12838 if (hbas_to_enumerate != 1)
12839 raid_class_release(mpt3sas_raid_template);
12840 if (hbas_to_enumerate != 2)
12841 raid_class_release(mpt2sas_raid_template);
12842 sas_release_transport(mpt3sas_transport_template);
12843 mpt3sas_exit_debugfs();
12847 * _mpt3sas_init - main entry point for this driver.
12849 * Return: 0 success, anything else error.
12852 _mpt3sas_init(void)
12856 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
12857 MPT3SAS_DRIVER_VERSION);
/* Attach the SAS transport class first; everything else depends on it. */
12859 mpt3sas_transport_template =
12860 sas_attach_transport(&mpt3sas_transport_functions);
/* NOTE(review): the failure-path return statement for this check (and
 * several error-path lines below) appear to be elided from this view. */
12861 if (!mpt3sas_transport_template)
12864 /* No need attach mpt3sas raid functions template
12865 * if hbas_to_enumarate value is one.
12867 if (hbas_to_enumerate != 1) {
12868 mpt3sas_raid_template =
12869 raid_class_attach(&mpt3sas_raid_functions);
/* Attach failed: undo the transport attach before bailing out. */
12870 if (!mpt3sas_raid_template) {
12871 sas_release_transport(mpt3sas_transport_template);
12876 /* No need to attach mpt2sas raid functions template
12877 * if hbas_to_enumarate value is two
12879 if (hbas_to_enumerate != 2) {
12880 mpt2sas_raid_template =
12881 raid_class_attach(&mpt2sas_raid_functions);
12882 if (!mpt2sas_raid_template) {
12883 sas_release_transport(mpt3sas_transport_template);
/* Register all reply callbacks, then the ctl (ioctl) interface, and
 * finally the PCI driver itself; ctl is torn down if registration fails. */
12888 error = scsih_init();
12894 mpt3sas_ctl_init(hbas_to_enumerate);
12896 error = pci_register_driver(&mpt3sas_driver);
12898 mpt3sas_ctl_exit(hbas_to_enumerate);
12906 * _mpt3sas_exit - exit point for this driver (when it is a module).
12910 _mpt3sas_exit(void)
12912 pr_info("mpt3sas version %s unloading\n",
12913 MPT3SAS_DRIVER_VERSION);
/* Unregister the PCI driver first so no new probes race the teardown,
 * then tear down the ctl (ioctl) interface. A matching scsih_exit() call
 * presumably follows but is elided from this view -- confirm. */
12915 pci_unregister_driver(&mpt3sas_driver);
12917 mpt3sas_ctl_exit(hbas_to_enumerate);
/* Module load/unload entry points. */
12922 module_init(_mpt3sas_init);
12923 module_exit(_mpt3sas_exit);