2 * Management Module Support for MPT (Message Passing Technology) based
5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
6 * Copyright (C) 2012-2014 LSI Corporation
7 * Copyright (C) 2013-2014 Avago Technologies
8 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25 * solely responsible for determining the appropriateness of using and
26 * distributing the Program and assumes all risks associated with its
27 * exercise of rights under this Agreement, including but not limited to
28 * the risks and costs of program errors, damage to or loss of data,
29 * programs or equipment, and unavailability or interruption of operations.
31 * DISCLAIMER OF LIABILITY
32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
40 * You should have received a copy of the GNU General Public License
41 * along with this program; if not, write to the Free Software
42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
46 #include <linux/kernel.h>
47 #include <linux/module.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/slab.h>
51 #include <linux/types.h>
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/compat.h>
55 #include <linux/poll.h>
58 #include <linux/uaccess.h>
60 #include "mpt3sas_base.h"
61 #include "mpt3sas_ctl.h"
64 static struct fasync_struct *async_queue;
65 static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);
69 * enum block_state - blocking state
70 * @NON_BLOCKING: non blocking
73 * These states are for ioctls that need to wait for a response
74 * from firmware, so they probably require sleep.
82 * _ctl_display_some_debug - debug routine
83 * @ioc: per adapter object
84 * @smid: system request message index
85 * @calling_function_name: string pass from calling function
86 * @mpi_reply: reply message frame
89 * Function for displaying debug info helpful when debugging issues
/*
 * Debug-only helper: builds a short human-readable description of the MPI
 * request held in message frame @smid and prints it (plus IOC status /
 * SCSI status details from @mpi_reply when one is supplied).
 * No-op unless MPT_DEBUG_IOCTL is set in ioc->logging_level.
 */
93 _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
94 char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
96 Mpi2ConfigRequest_t *mpi_request;
/* bail out early when ioctl debugging is not enabled */
99 if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
/* fetch the request frame so we can decode its Function code below */
102 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
103 switch (mpi_request->Function) {
104 case MPI2_FUNCTION_SCSI_IO_REQUEST:
106 Mpi2SCSIIORequest_t *scsi_request =
107 (Mpi2SCSIIORequest_t *)mpi_request;
/* describe SCSI IO by opcode (CDB byte 0) and CDB length (IoFlags low nibble) */
109 snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
110 "scsi_io, cmd(0x%02x), cdb_len(%d)",
111 scsi_request->CDB.CDB32[0],
112 le16_to_cpu(scsi_request->IoFlags) & 0xF);
113 desc = ioc->tmp_string;
116 case MPI2_FUNCTION_SCSI_TASK_MGMT:
119 case MPI2_FUNCTION_IOC_INIT:
122 case MPI2_FUNCTION_IOC_FACTS:
125 case MPI2_FUNCTION_CONFIG:
127 Mpi2ConfigRequest_t *config_request =
128 (Mpi2ConfigRequest_t *)mpi_request;
/* describe config request by page type / extended type / page number */
130 snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
131 "config, type(0x%02x), ext_type(0x%02x), number(%d)",
132 (config_request->Header.PageType &
133 MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
134 config_request->Header.PageNumber);
135 desc = ioc->tmp_string;
138 case MPI2_FUNCTION_PORT_FACTS:
141 case MPI2_FUNCTION_PORT_ENABLE:
142 desc = "port_enable";
144 case MPI2_FUNCTION_EVENT_NOTIFICATION:
145 desc = "event_notification";
147 case MPI2_FUNCTION_FW_DOWNLOAD:
148 desc = "fw_download";
150 case MPI2_FUNCTION_FW_UPLOAD:
153 case MPI2_FUNCTION_RAID_ACTION:
154 desc = "raid_action";
156 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
158 Mpi2SCSIIORequest_t *scsi_request =
159 (Mpi2SCSIIORequest_t *)mpi_request;
/* same decoding as SCSI IO, labelled as a RAID passthrough */
161 snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
162 "raid_pass, cmd(0x%02x), cdb_len(%d)",
163 scsi_request->CDB.CDB32[0],
164 le16_to_cpu(scsi_request->IoFlags) & 0xF);
165 desc = ioc->tmp_string;
168 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
169 desc = "sas_iounit_cntl";
171 case MPI2_FUNCTION_SATA_PASSTHROUGH:
174 case MPI2_FUNCTION_DIAG_BUFFER_POST:
175 desc = "diag_buffer_post";
177 case MPI2_FUNCTION_DIAG_RELEASE:
178 desc = "diag_release";
180 case MPI2_FUNCTION_SMP_PASSTHROUGH:
181 desc = "smp_passthrough";
/* one summary line per request, tagged with who asked for the dump */
188 pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
189 ioc->name, calling_function_name, desc, smid);
/* when a reply frame is available, report firmware status/loginfo */
194 if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
196 "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
197 ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
198 le32_to_cpu(mpi_reply->IOCLogInfo))
/* for SCSI IO replies, also identify the target device (SAS or PCIe) */
200 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
201 mpi_request->Function ==
202 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
203 Mpi2SCSIIOReply_t *scsi_reply =
204 (Mpi2SCSIIOReply_t *)mpi_reply;
205 struct _sas_device *sas_device = NULL;
206 struct _pcie_device *pcie_device = NULL;
/* lookup takes a reference; released via sas_device_put() below */
208 sas_device = mpt3sas_get_sdev_by_handle(ioc,
209 le16_to_cpu(scsi_reply->DevHandle));
211 pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
212 ioc->name, (unsigned long long)
213 sas_device->sas_address, sas_device->phy);
215 "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
216 ioc->name, (unsigned long long)
217 sas_device->enclosure_logical_id, sas_device->slot);
218 sas_device_put(sas_device);
/* not a SAS device: try the NVMe/PCIe device list with the same handle */
221 pcie_device = mpt3sas_get_pdev_by_handle(ioc,
222 le16_to_cpu(scsi_reply->DevHandle));
225 "\tWWID(0x%016llx), port(%d)\n", ioc->name,
226 (unsigned long long)pcie_device->wwid,
227 pcie_device->port_num);
228 if (pcie_device->enclosure_handle != 0)
230 "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
231 ioc->name, (unsigned long long)
232 pcie_device->enclosure_logical_id,
234 pcie_device_put(pcie_device);
/* finally, report SCSI state/status when either is non-zero */
237 if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
239 "\tscsi_state(0x%02x), scsi_status"
240 "(0x%02x)\n", ioc->name,
241 scsi_reply->SCSIState,
242 scsi_reply->SCSIStatus);
247 * mpt3sas_ctl_done - ctl module completion routine
248 * @ioc: per adapter object
249 * @smid: system request message index
250 * @msix_index: MSIX table index supplied by the OS
251 * @reply: reply message frame(lower 32bit addr)
254 * The callback handler when using ioc->ctl_cb_idx.
256 * Return: 1 meaning mf should be freed from _base_interrupt
257 * 0 means the mf is freed from this function.
/*
 * Completion callback for ioctl-initiated commands (ioc->ctl_cb_idx).
 * Copies the reply frame — and any sense data / NVMe error response —
 * into ioc->ctl_cmds, then wakes the waiter via complete().
 */
260 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
263 MPI2DefaultReply_t *mpi_reply;
264 Mpi2SCSIIOReply_t *scsiio_reply;
265 Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply;
266 const void *sense_data;
/* ignore completions we did not initiate or that target another smid */
269 if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
271 if (ioc->ctl_cmds.smid != smid)
273 ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
274 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
/* MsgLength is in 32-bit dwords, hence the *4 for a byte count */
276 memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
277 ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
/* stash autosense data for SCSI IO / RAID passthrough replies */
279 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
280 mpi_reply->Function ==
281 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
282 scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
283 if (scsiio_reply->SCSIState &
284 MPI2_SCSI_STATE_AUTOSENSE_VALID) {
/* clamp to the sense buffer size to avoid overrunning ctl_cmds.sense */
285 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
286 le32_to_cpu(scsiio_reply->SenseCount));
287 sense_data = mpt3sas_base_get_sense_buffer(ioc,
289 memcpy(ioc->ctl_cmds.sense, sense_data, sz);
293 * Get Error Response data for NVMe device. The ctl_cmds.sense
294 * buffer is used to store the Error Response data.
296 if (mpi_reply->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
298 (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
299 sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
300 le16_to_cpu(nvme_error_reply->ErrorResponseCount));
301 sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
302 memcpy(ioc->ctl_cmds.sense, sense_data, sz);
/* log the completed request/reply pair when ioctl debug is on */
306 _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
307 ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
/* unblock the ioctl path waiting in wait_for_completion_timeout() */
308 complete(&ioc->ctl_cmds.done);
313 * _ctl_check_event_type - determines when an event needs logging
314 * @ioc: per adapter object
315 * @event: firmware event
317 * The bitmask in ioc->event_type[] indicates which events should be
318 * be saved in the driver event_log. This bitmask is set by application.
320 * Return: 1 when event should be captured, or zero means no match.
/*
 * Return non-zero when @event is enabled in the application-set
 * event_type[] bitmask and the event log exists; events must be in
 * the range 1..127.
 */
323 _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
/* reject out-of-range events and the case where no log was allocated */
328 if (event >= 128 || !event || !ioc->event_log)
/* bit position within one 32-bit word of the mask */
331 desired_event = (1 << (event % 32));
/* NOTE(review): i presumably indexes event_type[] by event/32 — confirm */
335 return desired_event & ioc->event_type[i];
339 * mpt3sas_ctl_add_to_event_log - add event
340 * @ioc: per adapter object
341 * @mpi_reply: reply message frame
/*
 * Record a firmware event into the circular driver event log (when its
 * type is enabled) and notify any waiting/async userspace readers.
 */
344 mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
345 Mpi2EventNotificationReply_t *mpi_reply)
347 struct MPT3_IOCTL_EVENTS *event_log;
350 u32 sz, event_data_sz;
356 event = le16_to_cpu(mpi_reply->Event);
358 if (_ctl_check_event_type(ioc, event)) {
360 /* insert entry into circular event_log */
361 i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
362 event_log = ioc->event_log;
363 event_log[i].event = event;
/* context is a monotonically increasing sequence number */
364 event_log[i].context = ioc->event_context++;
/* EventDataLength is in dwords; clamp the copy to the slot size */
366 event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
367 sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
368 memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
369 memcpy(event_log[i].data, mpi_reply->EventData, sz);
373 /* This aen_event_read_flag flag is set until the
374 * application has read the event log.
375 * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
377 if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
378 (send_aen && !ioc->aen_event_read_flag)) {
379 ioc->aen_event_read_flag = 1;
/* wake poll()ers and deliver SIGIO to fasync subscribers */
380 wake_up_interruptible(&ctl_poll_wait);
382 kill_fasync(&async_queue, SIGIO, POLL_IN);
387 * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
388 * @ioc: per adapter object
389 * @msix_index: MSIX table index supplied by the OS
390 * @reply: reply message frame(lower 32bit addr)
391 * Context: interrupt.
393 * This function merely adds a new work task into ioc->firmware_event_thread.
394 * The tasks are worked from _firmware_event_work in user context.
396 * Return: 1 meaning mf should be freed from _base_interrupt
397 * 0 means the mf is freed from this function.
/*
 * ISR-context event callback: translate the reply address into a
 * virtual reply frame and hand it to mpt3sas_ctl_add_to_event_log().
 */
400 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
403 Mpi2EventNotificationReply_t *mpi_reply;
405 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
407 mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
412 * _ctl_verify_adapter - validates ioc_number passed from application
414 * @iocpp: The ioc pointer is returned in this.
415 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
416 * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
418 * Return: (-1) means error, else ioc_number.
/*
 * Walk mpt3sas_ioc_list under gioc_lock looking for the adapter with
 * @ioc_number that also belongs to the requesting ioctl device's MPI
 * generation; returns it via @iocpp.
 */
421 _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
424 struct MPT3SAS_ADAPTER *ioc;
426 /* global ioc lock to protect controller on list operations */
427 spin_lock(&gioc_lock);
428 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
429 if (ioc->id != ioc_number)
431 /* Check whether this ioctl command is from right
432 * ioctl device or not, if not continue the search.
434 version = ioc->hba_mpi_version_belonged;
435 /* MPI25_VERSION and MPI26_VERSION uses same ioctl
/* mpt3ctl accepts both gen3 (MPI25) and gen3.5 (MPI26) adapters */
438 if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) {
439 if ((version == MPI25_VERSION) ||
440 (version == MPI26_VERSION))
/* otherwise the adapter's generation must match exactly */
445 if (version != mpi_version)
449 spin_unlock(&gioc_lock);
/* no matching adapter found */
453 spin_unlock(&gioc_lock);
459 * mpt3sas_ctl_pre_reset_handler - reset callback handler (for ctl)
460 * @ioc: per adapter object
462 * The handler for doing any required cleanup or initialization.
/* Before a host reset: release every registered-but-not-yet-released
 * diag buffer so the firmware is not left holding them across reset. */
464 void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
469 dtmprintk(ioc, pr_info(MPT3SAS_FMT
470 "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
471 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
/* skip buffers that were never registered with the firmware */
472 if (!(ioc->diag_buffer_status[i] &
473 MPT3_DIAG_BUFFER_IS_REGISTERED))
/* skip buffers already released */
475 if ((ioc->diag_buffer_status[i] &
476 MPT3_DIAG_BUFFER_IS_RELEASED))
478 mpt3sas_send_diag_release(ioc, i, &issue_reset);
483 * mpt3sas_ctl_after_reset_handler - reset callback handler (for ctl)
484 * @ioc: per adapter object
486 * The handler for doing any required cleanup or initialization.
/* After the reset has been issued: fail any in-flight ioctl command by
 * marking it reset, freeing its smid and completing the waiter. */
488 void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
490 dtmprintk(ioc, pr_info(MPT3SAS_FMT
491 "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
492 if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
493 ioc->ctl_cmds.status |= MPT3_CMD_RESET;
494 mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
/* wake the ioctl thread blocked on ctl_cmds.done */
495 complete(&ioc->ctl_cmds.done);
500 * mpt3sas_ctl_reset_done_handler - reset callback handler (for ctl)
501 * @ioc: per adapter object
503 * The handler for doing any required cleanup or initialization.
/* After reset completes: flag still-registered, unreleased diag buffers
 * as having gone through a diag reset so userspace can re-register. */
505 void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
509 dtmprintk(ioc, pr_info(MPT3SAS_FMT
510 "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
512 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
513 if (!(ioc->diag_buffer_status[i] &
514 MPT3_DIAG_BUFFER_IS_REGISTERED))
516 if ((ioc->diag_buffer_status[i] &
517 MPT3_DIAG_BUFFER_IS_RELEASED))
519 ioc->diag_buffer_status[i] |=
520 MPT3_DIAG_BUFFER_IS_DIAG_RESET;
530 * Called when application request fasyn callback handler.
/* fasync file op: (un)subscribe @filep on the shared async_queue so it
 * receives SIGIO when a new event is logged (see kill_fasync above). */
533 _ctl_fasync(int fd, struct file *filep, int mode)
535 return fasync_helper(fd, filep, mode, &async_queue);
/* poll file op: readable when any adapter has an unread AEN event. */
545 _ctl_poll(struct file *filep, poll_table *wait)
547 struct MPT3SAS_ADAPTER *ioc;
/* register on the wait queue that event logging wakes */
549 poll_wait(filep, &ctl_poll_wait, wait);
551 /* global ioc lock to protect controller on list operations */
552 spin_lock(&gioc_lock);
553 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
554 if (ioc->aen_event_read_flag) {
555 spin_unlock(&gioc_lock);
556 return EPOLLIN | EPOLLRDNORM;
559 spin_unlock(&gioc_lock);
564 * _ctl_set_task_mid - assign an active smid to tm request
565 * @ioc: per adapter object
566 * @karg: (struct mpt3_ioctl_command)
567 * @tm_request: pointer to mf from user space
569 * Return: 0 when an smid if found, else fail.
570 * during failure, the reply frame is filled.
/*
 * For an ABORT_TASK/QUERY_TASK TM request from userspace: scan the
 * active SCSI IO trackers for a command matching the request's device
 * handle and LUN, and fill in tm_request->TaskMID. On no match, build
 * a synthetic TM reply and copy it back to the user's reply buffer.
 */
573 _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
574 Mpi2SCSITaskManagementRequest_t *tm_request)
579 struct scsi_cmnd *scmd;
580 struct MPT3SAS_DEVICE *priv_data;
581 Mpi2SCSITaskManagementReply_t *tm_reply;
/* only abort/query task types carry a TaskMID to resolve */
586 if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
588 else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
593 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
595 handle = le16_to_cpu(tm_request->DevHandle);
/* walk active smids from the top looking for a matching command */
596 for (smid = ioc->scsiio_depth; smid && !found; smid--) {
597 struct scsiio_tracker *st;
599 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
602 if (lun != scmd->device->lun)
604 priv_data = scmd->device->hostdata;
605 if (priv_data->sas_target == NULL)
607 if (priv_data->sas_target->handle != handle)
/* match: record this command's smid as the TM target */
609 st = scsi_cmd_priv(scmd);
610 tm_request->TaskMID = cpu_to_le16(st->smid);
/* no active mid found: synthesize a reply so userspace sees a result */
615 dctlprintk(ioc, pr_info(MPT3SAS_FMT
616 "%s: handle(0x%04x), lun(%d), no active mid!!\n",
618 desc, le16_to_cpu(tm_request->DevHandle), lun));
619 tm_reply = ioc->ctl_cmds.reply;
620 tm_reply->DevHandle = tm_request->DevHandle;
621 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
622 tm_reply->TaskType = tm_request->TaskType;
/* MsgLength is expressed in dwords */
623 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
624 tm_reply->VP_ID = tm_request->VP_ID;
625 tm_reply->VF_ID = tm_request->VF_ID;
626 sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
627 if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
629 pr_err("failure at %s:%d/%s()!\n", __FILE__,
634 dctlprintk(ioc, pr_info(MPT3SAS_FMT
635 "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
636 desc, le16_to_cpu(tm_request->DevHandle), lun,
637 le16_to_cpu(tm_request->TaskMID)));
642 * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
643 * @ioc: per adapter object
644 * @karg: (struct mpt3_ioctl_command)
645 * @mf: pointer to mf in user space
/*
 * Core of the MPT3COMMAND ioctl: copies a user-built MPI request frame
 * into a driver message frame, allocates DMA buffers for any data-in /
 * data-out transfer, dispatches the request per its MPI Function code,
 * waits for completion, and copies reply/sense/data back to userspace.
 * On timeout it escalates to a target reset or full host reset.
 */
648 _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
651 MPI2RequestHeader_t *mpi_request = NULL, *request;
652 MPI2DefaultReply_t *mpi_reply;
653 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
654 struct _pcie_device *pcie_device = NULL;
661 void *data_out = NULL;
662 dma_addr_t data_out_dma = 0;
663 size_t data_out_sz = 0;
664 void *data_in = NULL;
665 dma_addr_t data_in_dma = 0;
666 size_t data_in_sz = 0;
668 u16 wait_state_count;
669 u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
670 u8 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
/* only one ioctl command may be outstanding at a time */
674 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
675 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
676 ioc->name, __func__);
/* poll (up to 10 iterations) for the IOC to reach OPERATIONAL */
681 wait_state_count = 0;
682 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
683 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
684 if (wait_state_count++ == 10) {
686 "%s: failed due to ioc not operational\n",
687 ioc->name, __func__);
692 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
694 "%s: waiting for operational state(count=%d)\n",
696 __func__, wait_state_count);
698 if (wait_state_count)
699 pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
700 ioc->name, __func__);
/* staging buffer for the user's request frame */
702 mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
705 "%s: failed obtaining a memory for mpi_request\n",
706 ioc->name, __func__);
711 /* Check for overflow and wraparound */
712 if (karg.data_sge_offset * 4 > ioc->request_sz ||
713 karg.data_sge_offset > (UINT_MAX / 4)) {
718 /* copy in request message frame from user */
719 if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
720 pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
/* TM requests use a high-priority smid; everything else uses the
 * first reserved internal SCSI IO smid */
726 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
727 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
729 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
730 ioc->name, __func__);
735 /* Use first reserved smid for passthrough ioctls */
736 smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
740 ioc->ctl_cmds.status = MPT3_CMD_PENDING;
741 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
742 request = mpt3sas_base_get_msg_frame(ioc, smid);
743 memcpy(request, mpi_request, karg.data_sge_offset*4);
744 ioc->ctl_cmds.smid = smid;
745 data_out_sz = karg.data_out_size;
746 data_in_sz = karg.data_in_size;
/* device-addressed functions must carry a valid device handle */
748 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
749 mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
750 mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
751 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH ||
752 mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
754 device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
755 if (!device_handle || (device_handle >
756 ioc->facts.MaxDevHandle)) {
758 mpt3sas_base_free_smid(ioc, smid);
763 /* obtain dma-able memory for data transfer */
764 if (data_out_sz) /* WRITE */ {
765 data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
768 pr_err("failure at %s:%d/%s()!\n", __FILE__,
771 mpt3sas_base_free_smid(ioc, smid);
/* pull the outbound payload from userspace into the DMA buffer */
774 if (copy_from_user(data_out, karg.data_out_buf_ptr,
776 pr_err("failure at %s:%d/%s()!\n", __FILE__,
779 mpt3sas_base_free_smid(ioc, smid);
784 if (data_in_sz) /* READ */ {
785 data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
788 pr_err("failure at %s:%d/%s()!\n", __FILE__,
791 mpt3sas_base_free_smid(ioc, smid);
/* location of the SGE within the message frame, per user's offset */
796 psge = (void *)request + (karg.data_sge_offset*4);
798 /* send command to firmware */
799 _ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
801 init_completion(&ioc->ctl_cmds.done);
/* dispatch per MPI Function: each arm builds SG/PRP and posts smid */
802 switch (mpi_request->Function) {
803 case MPI2_FUNCTION_NVME_ENCAPSULATED:
805 nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
807 * Get the Physical Address of the sense buffer.
808 * Use Error Response buffer address field to hold the sense
810 * Clear the internal sense buffer, which will potentially hold
811 * the Completion Queue Entry on return, or 0 if no Entry.
812 * Build the PRPs and set direction bits.
/* high dword from sense_dma, low dword from per-smid sense buffer */
815 nvme_encap_request->ErrorResponseBaseAddress =
816 cpu_to_le64(ioc->sense_dma & 0xFFFFFFFF00000000UL);
817 nvme_encap_request->ErrorResponseBaseAddress |=
818 cpu_to_le64(le32_to_cpu(
819 mpt3sas_base_get_sense_buffer_dma(ioc, smid)));
820 nvme_encap_request->ErrorResponseAllocationLength =
821 cpu_to_le16(NVME_ERROR_RESPONSE_SIZE);
822 memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
823 ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
824 data_out_dma, data_out_sz, data_in_dma, data_in_sz);
/* refuse to post to a device that is being removed */
825 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
826 dtmprintk(ioc, pr_info(MPT3SAS_FMT "handle(0x%04x) :"
827 "ioctl failed due to device removal in progress\n",
828 ioc->name, device_handle));
829 mpt3sas_base_free_smid(ioc, smid);
833 mpt3sas_base_put_smid_nvme_encap(ioc, smid);
836 case MPI2_FUNCTION_SCSI_IO_REQUEST:
837 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
839 Mpi2SCSIIORequest_t *scsiio_request =
840 (Mpi2SCSIIORequest_t *)request;
841 scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
842 scsiio_request->SenseBufferLowAddress =
843 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
844 memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
845 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
846 dtmprintk(ioc, pr_info(MPT3SAS_FMT
847 "handle(0x%04x) :ioctl failed due to device removal in progress\n",
848 ioc->name, device_handle));
849 mpt3sas_base_free_smid(ioc, smid);
853 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
854 data_in_dma, data_in_sz);
/* direct SCSI IO uses the fast-path post; RAID pass uses default */
855 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
856 ioc->put_smid_scsi_io(ioc, smid, device_handle);
858 mpt3sas_base_put_smid_default(ioc, smid);
861 case MPI2_FUNCTION_SCSI_TASK_MGMT:
863 Mpi2SCSITaskManagementRequest_t *tm_request =
864 (Mpi2SCSITaskManagementRequest_t *)request;
866 dtmprintk(ioc, pr_info(MPT3SAS_FMT
867 "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
869 le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
870 ioc->got_task_abort_from_ioctl = 1;
/* abort/query must resolve a live TaskMID first */
871 if (tm_request->TaskType ==
872 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
873 tm_request->TaskType ==
874 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
875 if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
876 mpt3sas_base_free_smid(ioc, smid);
877 ioc->got_task_abort_from_ioctl = 0;
881 ioc->got_task_abort_from_ioctl = 0;
883 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
884 dtmprintk(ioc, pr_info(MPT3SAS_FMT
885 "handle(0x%04x) :ioctl failed due to device removal in progress\n",
886 ioc->name, device_handle));
887 mpt3sas_base_free_smid(ioc, smid);
/* mark the device busy with TM, then post as high priority */
891 mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
892 tm_request->DevHandle));
893 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
894 data_in_dma, data_in_sz);
895 mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
898 case MPI2_FUNCTION_SMP_PASSTHROUGH:
900 Mpi2SmpPassthroughRequest_t *smp_request =
901 (Mpi2SmpPassthroughRequest_t *)mpi_request;
904 /* ioc determines which port to use */
905 smp_request->PhysicalPort = 0xFF;
906 if (smp_request->PassthroughFlags &
907 MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
908 data = (u8 *)&smp_request->SGL;
910 if (unlikely(data_out == NULL)) {
911 pr_err("failure at %s:%d/%s()!\n",
912 __FILE__, __LINE__, __func__);
913 mpt3sas_base_free_smid(ioc, smid);
/* SMP PHY CONTROL (0x91) link/hard reset: suppress loginfo noise */
920 if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
921 ioc->ioc_link_reset_in_progress = 1;
922 ioc->ignore_loginfos = 1;
924 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
926 mpt3sas_base_put_smid_default(ioc, smid);
929 case MPI2_FUNCTION_SATA_PASSTHROUGH:
931 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
932 dtmprintk(ioc, pr_info(MPT3SAS_FMT
933 "handle(0x%04x) :ioctl failed due to device removal in progress\n",
934 ioc->name, device_handle));
935 mpt3sas_base_free_smid(ioc, smid);
939 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
941 mpt3sas_base_put_smid_default(ioc, smid);
944 case MPI2_FUNCTION_FW_DOWNLOAD:
945 case MPI2_FUNCTION_FW_UPLOAD:
947 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
949 mpt3sas_base_put_smid_default(ioc, smid);
952 case MPI2_FUNCTION_TOOLBOX:
954 Mpi2ToolboxCleanRequest_t *toolbox_request =
955 (Mpi2ToolboxCleanRequest_t *)mpi_request;
/* CLI tool uses IEEE SG format; others use plain MPI SG */
957 if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) {
958 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
959 data_in_dma, data_in_sz);
961 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
962 data_in_dma, data_in_sz);
964 mpt3sas_base_put_smid_default(ioc, smid);
967 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
969 Mpi2SasIoUnitControlRequest_t *sasiounit_request =
970 (Mpi2SasIoUnitControlRequest_t *)mpi_request;
972 if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
973 || sasiounit_request->Operation ==
974 MPI2_SAS_OP_PHY_LINK_RESET) {
975 ioc->ioc_link_reset_in_progress = 1;
976 ioc->ignore_loginfos = 1;
978 /* drop to default case for posting the request */
982 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
983 data_in_dma, data_in_sz);
984 mpt3sas_base_put_smid_default(ioc, smid);
/* wait for completion, enforcing the minimum ioctl timeout */
988 if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
989 timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
991 timeout = karg.timeout;
992 wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
993 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
994 Mpi2SCSITaskManagementRequest_t *tm_request =
995 (Mpi2SCSITaskManagementRequest_t *)mpi_request;
996 mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
997 tm_request->DevHandle));
998 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
999 } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
1000 mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
1001 ioc->ioc_link_reset_in_progress) {
/* restore the loginfo suppression set up before posting */
1002 ioc->ioc_link_reset_in_progress = 0;
1003 ioc->ignore_loginfos = 0;
/* timed out without a completion: diagnose and escalate */
1005 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1007 mpt3sas_base_check_cmd_timeout(ioc,
1008 ioc->ctl_cmds.status, mpi_request,
1009 karg.data_sge_offset);
1010 goto issue_host_reset;
1013 mpi_reply = ioc->ctl_cmds.reply;
1015 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
1016 (ioc->logging_level & MPT_DEBUG_TM)) {
1017 Mpi2SCSITaskManagementReply_t *tm_reply =
1018 (Mpi2SCSITaskManagementReply_t *)mpi_reply;
1020 pr_info(MPT3SAS_FMT "TASK_MGMT: " \
1021 "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
1022 "TerminationCount(0x%08x)\n", ioc->name,
1023 le16_to_cpu(tm_reply->IOCStatus),
1024 le32_to_cpu(tm_reply->IOCLogInfo),
1025 le32_to_cpu(tm_reply->TerminationCount));
1028 /* copy out xdata to user */
1030 if (copy_to_user(karg.data_in_buf_ptr, data_in,
1032 pr_err("failure at %s:%d/%s()!\n", __FILE__,
1033 __LINE__, __func__);
1039 /* copy out reply message frame to user */
1040 if (karg.max_reply_bytes) {
1041 sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
1042 if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
1044 pr_err("failure at %s:%d/%s()!\n", __FILE__,
1045 __LINE__, __func__);
1051 /* copy out sense/NVMe Error Response to user */
1052 if (karg.max_sense_bytes && (mpi_request->Function ==
1053 MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
1054 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
1055 MPI2_FUNCTION_NVME_ENCAPSULATED)) {
1056 if (karg.sense_data_ptr == NULL) {
1057 pr_info(MPT3SAS_FMT "Response buffer provided"
1058 " by application is NULL; Response data will"
1059 " not be returned.\n", ioc->name);
/* NVMe replies hold an error response, not SCSI sense data */
1062 sz_arg = (mpi_request->Function ==
1063 MPI2_FUNCTION_NVME_ENCAPSULATED) ? NVME_ERROR_RESPONSE_SIZE :
1064 SCSI_SENSE_BUFFERSIZE;
1065 sz = min_t(u32, karg.max_sense_bytes, sz_arg);
1066 if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
1068 pr_err("failure at %s:%d/%s()!\n", __FILE__,
1069 __LINE__, __func__);
/* timeout path: try a target reset for device IO, else host reset */
1078 if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
1079 mpi_request->Function ==
1080 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
1081 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
1082 pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
1084 le16_to_cpu(mpi_request->FunctionDependent1));
1085 mpt3sas_halt_firmware(ioc);
1086 pcie_device = mpt3sas_get_pdev_by_handle(ioc,
1087 le16_to_cpu(mpi_request->FunctionDependent1));
/* NVMe devices may need their own reset timeout/method */
1088 if (pcie_device && (!ioc->tm_custom_handling))
1089 mpt3sas_scsih_issue_locked_tm(ioc,
1090 le16_to_cpu(mpi_request->FunctionDependent1),
1091 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
1092 0, pcie_device->reset_timeout,
1095 mpt3sas_scsih_issue_locked_tm(ioc,
1096 le16_to_cpu(mpi_request->FunctionDependent1),
1097 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
1098 0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
1100 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
/* common cleanup: drop device ref, free DMA buffers, release state */
1105 pcie_device_put(pcie_device);
1107 /* free memory associated with sg buffers */
1109 pci_free_consistent(ioc->pdev, data_in_sz, data_in,
1113 pci_free_consistent(ioc->pdev, data_out_sz, data_out,
1117 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1122 * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
1123 * @ioc: per adapter object
1124 * @arg: user space buffer containing ioctl content
/*
 * MPT3IOCINFO handler: fill a mpt3_ioctl_iocinfo with adapter identity
 * (PCI ids/location, firmware/BIOS/driver versions, adapter type) and
 * copy it to the user buffer at @arg.
 */
1127 _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1129 struct mpt3_ioctl_iocinfo karg;
1131 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
/* zero first so unset/reserved fields don't leak stack data */
1134 memset(&karg, 0 , sizeof(karg));
1136 karg.port_number = ioc->pfacts[0].PortNumber;
1137 karg.hw_rev = ioc->pdev->revision;
1138 karg.pci_id = ioc->pdev->device;
1139 karg.subsystem_device = ioc->pdev->subsystem_device;
1140 karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
1141 karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
1142 karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
1143 karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
1144 karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
1145 karg.firmware_version = ioc->facts.FWVersion.Word;
/* driver_version is "<name>-<version>", built per MPI generation */
1146 strcpy(karg.driver_version, ioc->driver_name);
1147 strcat(karg.driver_version, "-");
1148 switch (ioc->hba_mpi_version_belonged) {
1150 if (ioc->is_warpdrive)
1151 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
1153 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
1154 strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
1158 if (ioc->is_gen35_ioc)
1159 karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
1161 karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
1162 strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
1165 karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
1167 if (copy_to_user(arg, &karg, sizeof(karg))) {
1168 pr_err("failure at %s:%d/%s()!\n",
1169 __FILE__, __LINE__, __func__);
1176 * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
1177 * @ioc: per adapter object
1178 * @arg: user space buffer containing ioctl content
/*
 * MPT3EVENTQUERY handler: report the event-log capacity and the
 * currently enabled event-type bitmask back to userspace.
 */
1181 _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1183 struct mpt3_ioctl_eventquery karg;
1185 if (copy_from_user(&karg, arg, sizeof(karg))) {
1186 pr_err("failure at %s:%d/%s()!\n",
1187 __FILE__, __LINE__, __func__);
1191 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1194 karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
/* return the driver's active event mask to the caller */
1195 memcpy(karg.event_types, ioc->event_type,
1196 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
1198 if (copy_to_user(arg, &karg, sizeof(karg))) {
1199 pr_err("failure at %s:%d/%s()!\n",
1200 __FILE__, __LINE__, __func__);
1207 * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
1208 * @ioc: per adapter object
1209 * @arg: user space buffer containing ioctl content
 *
 * Installs the user-supplied event-type mask, validates it with the
 * firmware layer, and (re)initializes the in-kernel event log.
1212 _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1214 struct mpt3_ioctl_eventenable karg;
1216 if (copy_from_user(&karg, arg, sizeof(karg))) {
1217 pr_err("failure at %s:%d/%s()!\n",
1218 __FILE__, __LINE__, __func__);
1222 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
/* adopt the requested mask, then let the base driver sanitize it */
1225 memcpy(ioc->event_type, karg.event_types,
1226 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
1227 mpt3sas_base_validate_event_type(ioc, ioc->event_type);
1231 /* initialize event_log */
1232 ioc->event_context = 0;
1233 ioc->aen_event_read_flag = 0;
1234 ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
1235 sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
1236 if (!ioc->event_log) {
1237 pr_err("failure at %s:%d/%s()!\n",
1238 __FILE__, __LINE__, __func__);
1245 * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
1246 * @ioc: per adapter object
1247 * @arg: user space buffer containing ioctl content
 *
 * Copies up to MPT3SAS_CTL_EVENT_LOG_SIZE logged events into the
 * caller's buffer (bounded by the caller's max_data_size) and clears the
 * async-event flag so SIGIO notification can fire again.
1250 _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1252 struct mpt3_ioctl_eventreport karg;
1253 u32 number_bytes, max_events, max;
1254 struct mpt3_ioctl_eventreport __user *uarg = arg;
1256 if (copy_from_user(&karg, arg, sizeof(karg))) {
1257 pr_err("failure at %s:%d/%s()!\n",
1258 __FILE__, __LINE__, __func__);
1262 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
/* payload capacity = caller's buffer minus the fixed ioctl header */
1265 number_bytes = karg.hdr.max_data_size -
1266 sizeof(struct mpt3_ioctl_header);
1267 max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
1268 max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);
1270 /* If fewer than 1 event is requested, there must have
1271 * been some type of error.
1273 if (!max || !ioc->event_log)
1276 number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
1277 if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
1278 pr_err("failure at %s:%d/%s()!\n",
1279 __FILE__, __LINE__, __func__);
1283 /* reset flag so SIGIO can restart */
1284 ioc->aen_event_read_flag = 0;
1289 * _ctl_do_reset - main handler for MPT3HARDRESET opcode
1290 * @ioc: per adapter object
1291 * @arg: user space buffer containing ioctl content
 *
 * Triggers a forced ("big hammer") hard reset of the controller unless
 * the adapter is already recovering or still loading.
1294 _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1296 struct mpt3_ioctl_diag_reset karg;
1299 if (copy_from_user(&karg, arg, sizeof(karg))) {
1300 pr_err("failure at %s:%d/%s()!\n",
1301 __FILE__, __LINE__, __func__);
/* refuse while recovery/load is in progress — reset would collide */
1305 if (ioc->shost_recovery || ioc->pci_error_recovery ||
1306 ioc->is_driver_loading)
1309 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1312 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1313 pr_info(MPT3SAS_FMT "host reset: %s\n",
1314 ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
1319 * _ctl_btdh_search_sas_device - searching for sas device
1320 * @ioc: per adapter object
1321 * @btdh: btdh ioctl payload
 *
 * Bidirectional bus/target <-> device-handle lookup over the SAS device
 * list: wildcard bus/id (0xFFFFFFFF) resolves handle -> bus/id, and a
 * wildcard handle (0xFFFF) resolves bus/id -> handle.
1324 _ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
1325 struct mpt3_ioctl_btdh_mapping *btdh)
1327 struct _sas_device *sas_device;
1328 unsigned long flags;
1331 if (list_empty(&ioc->sas_device_list))
/* walk the list under the sas_device spinlock */
1334 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1335 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
1336 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1337 btdh->handle == sas_device->handle) {
1338 btdh->bus = sas_device->channel;
1339 btdh->id = sas_device->id;
1342 } else if (btdh->bus == sas_device->channel && btdh->id ==
1343 sas_device->id && btdh->handle == 0xFFFF) {
1344 btdh->handle = sas_device->handle;
1350 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1355 * _ctl_btdh_search_pcie_device - searching for pcie device
1356 * @ioc: per adapter object
1357 * @btdh: btdh ioctl payload
 *
 * Same bidirectional bus/target <-> handle mapping as the SAS variant,
 * but over the NVMe/PCIe device list under pcie_device_lock.
1360 _ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc,
1361 struct mpt3_ioctl_btdh_mapping *btdh)
1363 struct _pcie_device *pcie_device;
1364 unsigned long flags;
1367 if (list_empty(&ioc->pcie_device_list))
1370 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1371 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
/* wildcard bus+id: translate handle -> bus/id */
1372 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1373 btdh->handle == pcie_device->handle) {
1374 btdh->bus = pcie_device->channel;
1375 btdh->id = pcie_device->id;
/* wildcard handle: translate bus/id -> handle */
1378 } else if (btdh->bus == pcie_device->channel && btdh->id ==
1379 pcie_device->id && btdh->handle == 0xFFFF) {
1380 btdh->handle = pcie_device->handle;
1386 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1391 * _ctl_btdh_search_raid_device - searching for raid device
1392 * @ioc: per adapter object
1393 * @btdh: btdh ioctl payload
 *
 * Same bidirectional bus/target <-> handle mapping as the SAS/PCIe
 * variants, over the RAID volume list under raid_device_lock.
1396 _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
1397 struct mpt3_ioctl_btdh_mapping *btdh)
1399 struct _raid_device *raid_device;
1400 unsigned long flags;
1403 if (list_empty(&ioc->raid_device_list))
1406 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1407 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
/* wildcard bus+id: translate handle -> bus/id */
1408 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1409 btdh->handle == raid_device->handle) {
1410 btdh->bus = raid_device->channel;
1411 btdh->id = raid_device->id;
/* wildcard handle: translate bus/id -> handle */
1414 } else if (btdh->bus == raid_device->channel && btdh->id ==
1415 raid_device->id && btdh->handle == 0xFFFF) {
1416 btdh->handle = raid_device->handle;
1422 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1427 * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
1428 * @ioc: per adapter object
1429 * @arg: user space buffer containing ioctl content
 *
 * Tries each device class in turn (SAS, then PCIe, then RAID) until the
 * requested bus/target <-> handle mapping resolves, then writes the
 * filled-in payload back to user space.
1432 _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1434 struct mpt3_ioctl_btdh_mapping karg;
1437 if (copy_from_user(&karg, arg, sizeof(karg))) {
1438 pr_err("failure at %s:%d/%s()!\n",
1439 __FILE__, __LINE__, __func__);
1443 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
/* fall through the searches until one of them succeeds */
1446 rc = _ctl_btdh_search_sas_device(ioc, &karg);
1448 rc = _ctl_btdh_search_pcie_device(ioc, &karg);
1450 _ctl_btdh_search_raid_device(ioc, &karg);
1452 if (copy_to_user(arg, &karg, sizeof(karg))) {
1453 pr_err("failure at %s:%d/%s()!\n",
1454 __FILE__, __LINE__, __func__);
1461 * _ctl_diag_capability - return diag buffer capability
1462 * @ioc: per adapter object
1463 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
1465 * returns 1 when diag buffer support is enabled in firmware
 *
 * Checks the IOCFacts capability bit that corresponds to the requested
 * diagnostic buffer type; unknown types are not supported.
1468 _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
1472 switch (buffer_type) {
1473 case MPI2_DIAG_BUF_TYPE_TRACE:
1474 if (ioc->facts.IOCCapabilities &
1475 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
1478 case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
1479 if (ioc->facts.IOCCapabilities &
1480 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
1483 case MPI2_DIAG_BUF_TYPE_EXTENDED:
1484 if (ioc->facts.IOCCapabilities &
1485 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
1494 * _ctl_diag_register_2 - wrapper for registering diag buffer support
1495 * @ioc: per adapter object
1496 * @diag_register: the diag_register struct passed in from user space
 *
 * Allocates (or reuses) a DMA-coherent diagnostic buffer of the
 * requested type/size and posts it to firmware with a
 * DIAG_BUFFER_POST request, waiting for the reply under ctl_cmds.
 * On timeout a forced host reset is issued.
1500 _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
1501 struct mpt3_diag_register *diag_register)
1504 void *request_data = NULL;
1505 dma_addr_t request_data_dma;
1506 u32 request_data_sz = 0;
1507 Mpi2DiagBufferPostRequest_t *mpi_request;
1508 Mpi2DiagBufferPostReply_t *mpi_reply;
1515 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
/* --- precondition checks: IOC operational, command slot free,
 * capability present, not already registered, size 4-byte aligned --- */
1518 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
1519 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1521 "%s: failed due to ioc not operational\n",
1522 ioc->name, __func__);
1527 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
1528 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
1529 ioc->name, __func__);
1534 buffer_type = diag_register->buffer_type;
1535 if (!_ctl_diag_capability(ioc, buffer_type)) {
1537 "%s: doesn't have capability for buffer_type(0x%02x)\n",
1538 ioc->name, __func__, buffer_type);
1542 if (ioc->diag_buffer_status[buffer_type] &
1543 MPT3_DIAG_BUFFER_IS_REGISTERED) {
1545 "%s: already has a registered buffer for buffer_type(0x%02x)\n",
1546 ioc->name, __func__,
1551 if (diag_register->requested_buffer_size % 4) {
1553 "%s: the requested_buffer_size is not 4 byte aligned\n",
1554 ioc->name, __func__);
/* reserve a message frame for the firmware request */
1558 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1560 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
1561 ioc->name, __func__);
1567 ioc->ctl_cmds.status = MPT3_CMD_PENDING;
1568 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1569 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1570 ioc->ctl_cmds.smid = smid;
/* record the registration parameters in per-type adapter state */
1572 request_data = ioc->diag_buffer[buffer_type];
1573 request_data_sz = diag_register->requested_buffer_size;
1574 ioc->unique_id[buffer_type] = diag_register->unique_id;
1575 ioc->diag_buffer_status[buffer_type] = 0;
1576 memcpy(ioc->product_specific[buffer_type],
1577 diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
1578 ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
/* a previously allocated buffer of a different size cannot be reused */
1581 request_data_dma = ioc->diag_buffer_dma[buffer_type];
1582 if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
1583 pci_free_consistent(ioc->pdev,
1584 ioc->diag_buffer_sz[buffer_type],
1585 request_data, request_data_dma);
1586 request_data = NULL;
/* allocate a fresh DMA-coherent buffer when none is cached */
1590 if (request_data == NULL) {
1591 ioc->diag_buffer_sz[buffer_type] = 0;
1592 ioc->diag_buffer_dma[buffer_type] = 0;
1593 request_data = pci_alloc_consistent(
1594 ioc->pdev, request_data_sz, &request_data_dma);
1595 if (request_data == NULL) {
1596 pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
1597 " for diag buffers, requested size(%d)\n",
1598 ioc->name, __func__, request_data_sz);
1599 mpt3sas_base_free_smid(ioc, smid);
1602 ioc->diag_buffer[buffer_type] = request_data;
1603 ioc->diag_buffer_sz[buffer_type] = request_data_sz;
1604 ioc->diag_buffer_dma[buffer_type] = request_data_dma;
/* build and fire the DIAG_BUFFER_POST request */
1607 mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
1608 mpi_request->BufferType = diag_register->buffer_type;
1609 mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
1610 mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
1611 mpi_request->BufferLength = cpu_to_le32(request_data_sz);
1612 mpi_request->VF_ID = 0; /* TODO */
1613 mpi_request->VP_ID = 0;
1615 dctlprintk(ioc, pr_info(MPT3SAS_FMT
1616 "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
1617 ioc->name, __func__, request_data,
1618 (unsigned long long)request_data_dma,
1619 le32_to_cpu(mpi_request->BufferLength)));
1621 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
1622 mpi_request->ProductSpecific[i] =
1623 cpu_to_le32(ioc->product_specific[buffer_type][i]);
/* synchronous: wait for the reply with a default ioctl timeout */
1625 init_completion(&ioc->ctl_cmds.done);
1626 mpt3sas_base_put_smid_default(ioc, smid);
1627 wait_for_completion_timeout(&ioc->ctl_cmds.done,
1628 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
1630 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1632 mpt3sas_base_check_cmd_timeout(ioc,
1633 ioc->ctl_cmds.status, mpi_request,
1634 sizeof(Mpi2DiagBufferPostRequest_t)/4);
1635 goto issue_host_reset;
1638 /* process the completed Reply Message Frame */
1639 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
1640 pr_err(MPT3SAS_FMT "%s: no reply message\n",
1641 ioc->name, __func__);
1646 mpi_reply = ioc->ctl_cmds.reply;
1647 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
1649 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1650 ioc->diag_buffer_status[buffer_type] |=
1651 MPT3_DIAG_BUFFER_IS_REGISTERED;
1652 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
1653 ioc->name, __func__));
1656 "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
1657 ioc->name, __func__,
1658 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
/* timeout path: reset the controller, then release the buffer on error */
1664 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1668 if (rc && request_data)
1669 pci_free_consistent(ioc->pdev, request_data_sz,
1670 request_data, request_data_dma);
1672 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1677 * mpt3sas_enable_diag_buffer - enabling diag_buffers support driver load time
1678 * @ioc: per adapter object
1679 * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1
1681 * This is called when command line option diag_buffer_enable is enabled
1682 * at driver load time.
 *
 * Bit 2 additionally selects the extended buffer.  Each selected type is
 * registered with a fixed 2 MB buffer and a driver-chosen unique_id.
1685 mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
1687 struct mpt3_diag_register diag_register;
1689 memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
1691 if (bits_to_register & 1) {
1692 pr_info(MPT3SAS_FMT "registering trace buffer support\n",
/* trace buffers also arm the master trigger for FW fault/reset */
1694 ioc->diag_trigger_master.MasterData =
1695 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
1696 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
1697 /* register for 2MB buffers */
1698 diag_register.requested_buffer_size = 2 * (1024 * 1024);
1699 diag_register.unique_id = 0x7075900;
1700 _ctl_diag_register_2(ioc, &diag_register);
1703 if (bits_to_register & 2) {
1704 pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
1706 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
1707 /* register for 2MB buffers */
1708 diag_register.requested_buffer_size = 2 * (1024 * 1024);
1709 diag_register.unique_id = 0x7075901;
1710 _ctl_diag_register_2(ioc, &diag_register);
1713 if (bits_to_register & 4) {
1714 pr_info(MPT3SAS_FMT "registering extended buffer support\n",
1716 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
1717 /* register for 2MB buffers */
1718 diag_register.requested_buffer_size = 2 * (1024 * 1024);
/* NOTE(review): same unique_id (0x7075901) as the snapshot buffer above —
 * looks like a copy/paste; verify against upstream intent */
1719 diag_register.unique_id = 0x7075901;
1720 _ctl_diag_register_2(ioc, &diag_register);
1725 * _ctl_diag_register - application register with driver
1726 * @ioc: per adapter object
1727 * @arg: user space buffer containing ioctl content
1729 * This will allow the driver to setup any required buffers that will be
1730 * needed by firmware to communicate with the driver.
 *
 * Thin ioctl wrapper: copies the request in and delegates to
 * _ctl_diag_register_2().
1733 _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1735 struct mpt3_diag_register karg;
1738 if (copy_from_user(&karg, arg, sizeof(karg))) {
1739 pr_err("failure at %s:%d/%s()!\n",
1740 __FILE__, __LINE__, __func__);
1744 rc = _ctl_diag_register_2(ioc, &karg);
1749 * _ctl_diag_unregister - application unregister with driver
1750 * @ioc: per adapter object
1751 * @arg: user space buffer containing ioctl content
1753 * This will allow the driver to cleanup any memory allocated for diag
1754 * messages and to free up any resources.
 *
 * The buffer type is encoded in the low byte of unique_id.  The buffer
 * must be registered AND already released (ownership back with the
 * driver) before it may be freed.
1757 _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1759 struct mpt3_diag_unregister karg;
1761 dma_addr_t request_data_dma;
1762 u32 request_data_sz;
1765 if (copy_from_user(&karg, arg, sizeof(karg))) {
1766 pr_err("failure at %s:%d/%s()!\n",
1767 __FILE__, __LINE__, __func__);
1771 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
/* low byte of unique_id selects the diag buffer type */
1774 buffer_type = karg.unique_id & 0x000000ff;
1775 if (!_ctl_diag_capability(ioc, buffer_type)) {
1777 "%s: doesn't have capability for buffer_type(0x%02x)\n",
1778 ioc->name, __func__, buffer_type);
1782 if ((ioc->diag_buffer_status[buffer_type] &
1783 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
1785 "%s: buffer_type(0x%02x) is not registered\n",
1786 ioc->name, __func__, buffer_type);
1789 if ((ioc->diag_buffer_status[buffer_type] &
1790 MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
1792 "%s: buffer_type(0x%02x) has not been released\n",
1793 ioc->name, __func__, buffer_type);
/* the full unique_id must match the registration */
1797 if (karg.unique_id != ioc->unique_id[buffer_type]) {
1799 "%s: unique_id(0x%08x) is not registered\n",
1800 ioc->name, __func__, karg.unique_id);
1804 request_data = ioc->diag_buffer[buffer_type];
1805 if (!request_data) {
1807 "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
1808 ioc->name, __func__, buffer_type);
/* free the DMA-coherent buffer and clear the per-type bookkeeping */
1812 request_data_sz = ioc->diag_buffer_sz[buffer_type];
1813 request_data_dma = ioc->diag_buffer_dma[buffer_type];
1814 pci_free_consistent(ioc->pdev, request_data_sz,
1815 request_data, request_data_dma);
1816 ioc->diag_buffer[buffer_type] = NULL;
1817 ioc->diag_buffer_status[buffer_type] = 0;
1822 * _ctl_diag_query - query relevant info associated with diag buffers
1823 * @ioc: per adapter object
1824 * @arg: user space buffer containing ioctl content
1826 * The application will send only buffer_type and unique_id. Driver will
1827 * inspect unique_id first, if valid, fill in all the info. If unique_id is
1828 * 0x00, the driver will return info specified by Buffer Type.
1831 _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1833 struct mpt3_diag_query karg;
1838 if (copy_from_user(&karg, arg, sizeof(karg))) {
1839 pr_err("failure at %s:%d/%s()!\n",
1840 __FILE__, __LINE__, __func__);
1844 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1847 karg.application_flags = 0;
1848 buffer_type = karg.buffer_type;
1850 if (!_ctl_diag_capability(ioc, buffer_type)) {
1852 "%s: doesn't have capability for buffer_type(0x%02x)\n",
1853 ioc->name, __func__, buffer_type);
1857 if ((ioc->diag_buffer_status[buffer_type] &
1858 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
1860 "%s: buffer_type(0x%02x) is not registered\n",
1861 ioc->name, __func__, buffer_type);
/* non-zero high bytes mean the caller supplied a specific unique_id */
1865 if (karg.unique_id & 0xffffff00) {
1866 if (karg.unique_id != ioc->unique_id[buffer_type]) {
1868 "%s: unique_id(0x%08x) is not registered\n",
1869 ioc->name, __func__, karg.unique_id);
1874 request_data = ioc->diag_buffer[buffer_type];
1875 if (!request_data) {
1877 "%s: doesn't have buffer for buffer_type(0x%02x)\n",
1878 ioc->name, __func__, buffer_type);
/* released buffers are app-owned; otherwise firmware still has access */
1882 if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)
1883 karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
1884 MPT3_APP_FLAGS_BUFFER_VALID);
1886 karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
1887 MPT3_APP_FLAGS_BUFFER_VALID |
1888 MPT3_APP_FLAGS_FW_BUFFER_ACCESS);
1890 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
1891 karg.product_specific[i] =
1892 ioc->product_specific[buffer_type][i];
1894 karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
1895 karg.driver_added_buffer_size = 0;
1896 karg.unique_id = ioc->unique_id[buffer_type];
1897 karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
1899 if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
1901 "%s: unable to write mpt3_diag_query data @ %p\n",
1902 ioc->name, __func__, arg);
1909 * mpt3sas_send_diag_release - Diag Release Message
1910 * @ioc: per adapter object
1911 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
1912 * @issue_reset: specifies whether host reset is required.
 *
 * Sends a DIAG_RELEASE request so firmware stops writing into the
 * buffer and ownership returns to the host.  On command timeout,
 * *issue_reset tells the caller whether a host reset is needed.
1916 mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
1919 Mpi2DiagReleaseRequest_t *mpi_request;
1920 Mpi2DiagReleaseReply_t *mpi_reply;
1926 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
/* if the IOC is not operational (e.g. faulted) the buffer is already
 * effectively released — just mark it so and skip the firmware request */
1932 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
1933 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1934 if (ioc->diag_buffer_status[buffer_type] &
1935 MPT3_DIAG_BUFFER_IS_REGISTERED)
1936 ioc->diag_buffer_status[buffer_type] |=
1937 MPT3_DIAG_BUFFER_IS_RELEASED;
1938 dctlprintk(ioc, pr_info(MPT3SAS_FMT
1939 "%s: skipping due to FAULT state\n", ioc->name,
1945 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
1946 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
1947 ioc->name, __func__);
1952 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1954 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
1955 ioc->name, __func__);
/* build the DIAG_RELEASE frame and send it synchronously */
1960 ioc->ctl_cmds.status = MPT3_CMD_PENDING;
1961 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1962 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1963 ioc->ctl_cmds.smid = smid;
1965 mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
1966 mpi_request->BufferType = buffer_type;
1967 mpi_request->VF_ID = 0; /* TODO */
1968 mpi_request->VP_ID = 0;
1970 init_completion(&ioc->ctl_cmds.done);
1971 mpt3sas_base_put_smid_default(ioc, smid);
1972 wait_for_completion_timeout(&ioc->ctl_cmds.done,
1973 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
/* on timeout, let the caller decide about a host reset */
1975 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1976 *issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
1977 ioc->ctl_cmds.status, mpi_request,
1978 sizeof(Mpi2DiagReleaseRequest_t)/4);
1983 /* process the completed Reply Message Frame */
1984 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
1985 pr_err(MPT3SAS_FMT "%s: no reply message\n",
1986 ioc->name, __func__);
1991 mpi_reply = ioc->ctl_cmds.reply;
1992 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
1994 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1995 ioc->diag_buffer_status[buffer_type] |=
1996 MPT3_DIAG_BUFFER_IS_RELEASED;
1997 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
1998 ioc->name, __func__));
2001 "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
2002 ioc->name, __func__,
2003 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
2008 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
2013 * _ctl_diag_release - request to send Diag Release Message to firmware
2015 * @arg: user space buffer containing ioctl content
2017 * This allows ownership of the specified buffer to returned to the driver,
2018 * allowing an application to read the buffer without fear that firmware is
2019 * overwriting information in the buffer.
2022 _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2024 struct mpt3_diag_release karg;
2030 if (copy_from_user(&karg, arg, sizeof(karg))) {
2031 pr_err("failure at %s:%d/%s()!\n",
2032 __FILE__, __LINE__, __func__);
2036 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
/* low byte of unique_id selects the diag buffer type */
2039 buffer_type = karg.unique_id & 0x000000ff;
2040 if (!_ctl_diag_capability(ioc, buffer_type)) {
2042 "%s: doesn't have capability for buffer_type(0x%02x)\n",
2043 ioc->name, __func__, buffer_type);
2047 if ((ioc->diag_buffer_status[buffer_type] &
2048 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
2050 "%s: buffer_type(0x%02x) is not registered\n",
2051 ioc->name, __func__, buffer_type);
2055 if (karg.unique_id != ioc->unique_id[buffer_type]) {
2057 "%s: unique_id(0x%08x) is not registered\n",
2058 ioc->name, __func__, karg.unique_id);
/* releasing twice is an error */
2062 if (ioc->diag_buffer_status[buffer_type] &
2063 MPT3_DIAG_BUFFER_IS_RELEASED) {
2065 "%s: buffer_type(0x%02x) is already released\n",
2066 ioc->name, __func__,
2071 request_data = ioc->diag_buffer[buffer_type];
2073 if (!request_data) {
2075 "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
2076 ioc->name, __func__, buffer_type);
2080 /* buffers were already released due to a host (diag) reset: just
 * update the status bits, no firmware message needed */
2081 if ((ioc->diag_buffer_status[buffer_type] &
2082 MPT3_DIAG_BUFFER_IS_DIAG_RESET)) {
2083 ioc->diag_buffer_status[buffer_type] |=
2084 MPT3_DIAG_BUFFER_IS_RELEASED;
2085 ioc->diag_buffer_status[buffer_type] &=
2086 ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
2088 "%s: buffer_type(0x%02x) was released due to host reset\n",
2089 ioc->name, __func__, buffer_type);
/* normal path: ask firmware to release; reset host on timeout */
2093 rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
2096 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2102 * _ctl_diag_read_buffer - request for copy of the diag buffer
2103 * @ioc: per adapter object
2104 * @arg: user space buffer containing ioctl content
 *
 * Copies a (4-byte aligned) window of the registered diag buffer to the
 * caller; when MPT3_FLAGS_REREGISTER is set and the buffer has been
 * released, re-posts the buffer to firmware so tracing resumes.
2107 _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2109 struct mpt3_diag_read_buffer karg;
2110 struct mpt3_diag_read_buffer __user *uarg = arg;
2111 void *request_data, *diag_data;
2112 Mpi2DiagBufferPostRequest_t *mpi_request;
2113 Mpi2DiagBufferPostReply_t *mpi_reply;
2116 unsigned long request_size, copy_size;
2121 if (copy_from_user(&karg, arg, sizeof(karg))) {
2122 pr_err("failure at %s:%d/%s()!\n",
2123 __FILE__, __LINE__, __func__);
2127 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
/* low byte of unique_id selects the diag buffer type */
2130 buffer_type = karg.unique_id & 0x000000ff;
2131 if (!_ctl_diag_capability(ioc, buffer_type)) {
2133 "%s: doesn't have capability for buffer_type(0x%02x)\n",
2134 ioc->name, __func__, buffer_type);
2138 if (karg.unique_id != ioc->unique_id[buffer_type]) {
2140 "%s: unique_id(0x%08x) is not registered\n",
2141 ioc->name, __func__, karg.unique_id);
2145 request_data = ioc->diag_buffer[buffer_type];
2146 if (!request_data) {
2148 "%s: doesn't have buffer for buffer_type(0x%02x)\n",
2149 ioc->name, __func__, buffer_type);
2153 request_size = ioc->diag_buffer_sz[buffer_type];
/* offset and length must both be dword aligned */
2155 if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
2156 pr_err(MPT3SAS_FMT "%s: either the starting_offset " \
2157 "or bytes_to_read are not 4 byte aligned\n", ioc->name,
2162 if (karg.starting_offset > request_size)
2165 diag_data = (void *)(request_data + karg.starting_offset);
2166 dctlprintk(ioc, pr_info(MPT3SAS_FMT
2167 "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
2168 ioc->name, __func__,
2169 diag_data, karg.starting_offset, karg.bytes_to_read));
2171 /* Truncate data on requests that are too large (also guards
 * against pointer-arithmetic wraparound on bytes_to_read) */
2172 if ((diag_data + karg.bytes_to_read < diag_data) ||
2173 (diag_data + karg.bytes_to_read > request_data + request_size))
2174 copy_size = request_size - karg.starting_offset;
2176 copy_size = karg.bytes_to_read;
2178 if (copy_to_user((void __user *)uarg->diagnostic_data,
2179 diag_data, copy_size)) {
2181 "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
2182 ioc->name, __func__, diag_data);
/* done unless the caller asked to re-register the buffer */
2186 if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
2189 dctlprintk(ioc, pr_info(MPT3SAS_FMT
2190 "%s: Reregister buffer_type(0x%02x)\n",
2191 ioc->name, __func__, buffer_type));
/* still registered with firmware — nothing to re-post */
2192 if ((ioc->diag_buffer_status[buffer_type] &
2193 MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
2194 dctlprintk(ioc, pr_info(MPT3SAS_FMT
2195 "%s: buffer_type(0x%02x) is still registered\n",
2196 ioc->name, __func__, buffer_type));
2199 /* Get a free request frame and save the message context.
2202 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
2203 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
2204 ioc->name, __func__);
2209 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
2211 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
2212 ioc->name, __func__);
/* rebuild and resend the DIAG_BUFFER_POST with the saved parameters */
2218 ioc->ctl_cmds.status = MPT3_CMD_PENDING;
2219 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
2220 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2221 ioc->ctl_cmds.smid = smid;
2223 mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
2224 mpi_request->BufferType = buffer_type;
2225 mpi_request->BufferLength =
2226 cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
2227 mpi_request->BufferAddress =
2228 cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
2229 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
2230 mpi_request->ProductSpecific[i] =
2231 cpu_to_le32(ioc->product_specific[buffer_type][i]);
2232 mpi_request->VF_ID = 0; /* TODO */
2233 mpi_request->VP_ID = 0;
2235 init_completion(&ioc->ctl_cmds.done);
2236 mpt3sas_base_put_smid_default(ioc, smid);
2237 wait_for_completion_timeout(&ioc->ctl_cmds.done,
2238 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
2240 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
2242 mpt3sas_base_check_cmd_timeout(ioc,
2243 ioc->ctl_cmds.status, mpi_request,
2244 sizeof(Mpi2DiagBufferPostRequest_t)/4);
2245 goto issue_host_reset;
2248 /* process the completed Reply Message Frame */
2249 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
2250 pr_err(MPT3SAS_FMT "%s: no reply message\n",
2251 ioc->name, __func__);
2256 mpi_reply = ioc->ctl_cmds.reply;
2257 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
2259 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
2260 ioc->diag_buffer_status[buffer_type] |=
2261 MPT3_DIAG_BUFFER_IS_REGISTERED;
2262 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
2263 ioc->name, __func__));
2266 "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
2267 ioc->name, __func__,
2268 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
2274 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2278 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
2284 #ifdef CONFIG_COMPAT
2286 * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
2287 * @ioc: per adapter object
2288 * @cmd: ioctl opcode
2289 * @arg: (struct mpt3_ioctl_command32)
2291 * MPT3COMMAND32 - Handle 32bit applications running on 64bit os.
 *
 * Widens the 32-bit ioctl command struct field-by-field (compat_ptr for
 * every user pointer) and forwards it to _ctl_do_mpt_command().
2294 _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
2297 struct mpt3_ioctl_command32 karg32;
2298 struct mpt3_ioctl_command32 __user *uarg;
2299 struct mpt3_ioctl_command karg;
/* size in the ioctl encoding must match the 32-bit struct */
2301 if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32))
2304 uarg = (struct mpt3_ioctl_command32 __user *) arg;
2306 if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
2307 pr_err("failure at %s:%d/%s()!\n",
2308 __FILE__, __LINE__, __func__);
2312 memset(&karg, 0, sizeof(struct mpt3_ioctl_command));
2313 karg.hdr.ioc_number = karg32.hdr.ioc_number;
2314 karg.hdr.port_number = karg32.hdr.port_number;
2315 karg.hdr.max_data_size = karg32.hdr.max_data_size;
2316 karg.timeout = karg32.timeout;
2317 karg.max_reply_bytes = karg32.max_reply_bytes;
2318 karg.data_in_size = karg32.data_in_size;
2319 karg.data_out_size = karg32.data_out_size;
2320 karg.max_sense_bytes = karg32.max_sense_bytes;
2321 karg.data_sge_offset = karg32.data_sge_offset;
/* user pointers need compat_ptr() translation on 64-bit kernels */
2322 karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
2323 karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
2324 karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
2325 karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
2326 return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2331 * _ctl_ioctl_main - main ioctl entry point
2332 * @file: (struct file)
2333 * @cmd: ioctl opcode
2334 * @arg: user space data buffer
2335 * @compat: handles 32 bit applications in 64bit os
2336 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
2337 * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
 *
 * Reads the common ioctl header, resolves the target adapter, takes
 * pci_access_mutex plus the ctl_cmds mutex (trylock for O_NONBLOCK
 * files), then dispatches on the opcode after validating _IOC_SIZE
 * against the expected payload struct.
2340 _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
2341 u8 compat, u16 mpi_version)
2343 struct MPT3SAS_ADAPTER *ioc;
2344 struct mpt3_ioctl_header ioctl_header;
2345 enum block_state state;
2348 /* get IOCTL header */
2349 if (copy_from_user(&ioctl_header, (char __user *)arg,
2350 sizeof(struct mpt3_ioctl_header))) {
2351 pr_err("failure at %s:%d/%s()!\n",
2352 __FILE__, __LINE__, __func__);
2356 if (_ctl_verify_adapter(ioctl_header.ioc_number,
2357 &ioc, mpi_version) == -1 || !ioc)
2360 /* pci_access_mutex lock acquired by ioctl path */
2361 mutex_lock(&ioc->pci_access_mutex);
/* adapter in recovery/teardown: refuse the ioctl */
2363 if (ioc->shost_recovery || ioc->pci_error_recovery ||
2364 ioc->is_driver_loading || ioc->remove_host) {
2366 goto out_unlock_pciaccess;
/* non-blocking opens must not sleep on the ctl_cmds mutex */
2369 state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
2370 if (state == NON_BLOCKING) {
2371 if (!mutex_trylock(&ioc->ctl_cmds.mutex)) {
2373 goto out_unlock_pciaccess;
2375 } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
2377 goto out_unlock_pciaccess;
/* opcode dispatch; each case checks the encoded payload size first */
2383 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo))
2384 ret = _ctl_getiocinfo(ioc, arg);
2386 #ifdef CONFIG_COMPAT
2391 struct mpt3_ioctl_command __user *uarg;
2392 struct mpt3_ioctl_command karg;
2394 #ifdef CONFIG_COMPAT
/* 32-bit caller: translate via the compat shim */
2396 ret = _ctl_compat_mpt_command(ioc, cmd, arg);
2400 if (copy_from_user(&karg, arg, sizeof(karg))) {
2401 pr_err("failure at %s:%d/%s()!\n",
2402 __FILE__, __LINE__, __func__);
2407 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
2409 ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2413 case MPT3EVENTQUERY:
2414 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery))
2415 ret = _ctl_eventquery(ioc, arg);
2417 case MPT3EVENTENABLE:
2418 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable))
2419 ret = _ctl_eventenable(ioc, arg);
2421 case MPT3EVENTREPORT:
2422 ret = _ctl_eventreport(ioc, arg);
2425 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset))
2426 ret = _ctl_do_reset(ioc, arg);
2428 case MPT3BTDHMAPPING:
2429 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping))
2430 ret = _ctl_btdh_mapping(ioc, arg);
2432 case MPT3DIAGREGISTER:
2433 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register))
2434 ret = _ctl_diag_register(ioc, arg);
2436 case MPT3DIAGUNREGISTER:
2437 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister))
2438 ret = _ctl_diag_unregister(ioc, arg);
2441 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query))
2442 ret = _ctl_diag_query(ioc, arg);
2444 case MPT3DIAGRELEASE:
2445 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release))
2446 ret = _ctl_diag_release(ioc, arg);
2448 case MPT3DIAGREADBUFFER:
2449 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer))
2450 ret = _ctl_diag_read_buffer(ioc, arg);
2453 dctlprintk(ioc, pr_info(MPT3SAS_FMT
2454 "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
2458 mutex_unlock(&ioc->ctl_cmds.mutex);
2459 out_unlock_pciaccess:
2460 mutex_unlock(&ioc->pci_access_mutex);
/*
 * Thin unlocked_ioctl / compat_ioctl wrappers: each one forwards to
 * _ctl_ioctl_main() with a compat flag (0 = native, 1 = 32-bit app on
 * 64-bit kernel) and an MPI version mask selecting which adapters the
 * originating device node (mpt3ctl vs mpt2ctl) may address.
 * NOTE(review): surrounding lines (return types, braces, returns) are
 * not visible in this extract.
 */
2465 * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
2466 * @file: (struct file)
2467 * @cmd: ioctl opcode
2471 _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2475 /* pass MPI25_VERSION | MPI26_VERSION value,
2476 * to indicate that this ioctl cmd
2477 * came from mpt3ctl ioctl device.
2479 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0,
2480 MPI25_VERSION | MPI26_VERSION);
2485 * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked)
2486 * @file: (struct file)
2487 * @cmd: ioctl opcode
2491 _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2495 /* pass MPI2_VERSION value, to indicate that this ioctl cmd
2496 * came from mpt2ctl ioctl device.
2498 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION);
2501 #ifdef CONFIG_COMPAT
2503 * _ctl_ioctl_compat - main ioctl entry point (compat)
2508 * This routine handles 32 bit applications in 64bit os.
2511 _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2515 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1,
2516 MPI25_VERSION | MPI26_VERSION);
2521 * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
2526 * This routine handles 32 bit applications in 64bit os.
2529 _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2533 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION);
2538 /* scsi host attributes */
/*
 * Read-only shost attributes reporting firmware/BIOS/MPI/product
 * versions. Each handler fetches the adapter private data from the
 * Scsi_Host embedded in @cdev and formats a cached IOC facts / config
 * page field into @buf.
 */
2540 * _ctl_version_fw_show - firmware version
2541 * @cdev: pointer to embedded class device
2543 * @buf: the buffer returned
2545 * A sysfs 'read-only' shost attribute.
2548 _ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
2551 struct Scsi_Host *shost = class_to_shost(cdev);
2552 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
/* FWVersion.Word is decomposed byte-wise: major.minor.unit.dev */
2554 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
2555 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2556 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2557 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2558 ioc->facts.FWVersion.Word & 0x000000FF);
2560 static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
2563 * _ctl_version_bios_show - bios version
2564 * @cdev: pointer to embedded class device
2566 * @buf: the buffer returned
2568 * A sysfs 'read-only' shost attribute.
2571 _ctl_version_bios_show(struct device *cdev, struct device_attribute *attr,
2574 struct Scsi_Host *shost = class_to_shost(cdev);
2575 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
/* BiosVersion is little-endian on the wire; convert before unpacking */
2577 u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2579 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
2580 (version & 0xFF000000) >> 24,
2581 (version & 0x00FF0000) >> 16,
2582 (version & 0x0000FF00) >> 8,
2583 version & 0x000000FF);
2585 static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
2588 * _ctl_version_mpi_show - MPI (message passing interface) version
2589 * @cdev: pointer to embedded class device
2591 * @buf: the buffer returned
2593 * A sysfs 'read-only' shost attribute.
2596 _ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr,
2599 struct Scsi_Host *shost = class_to_shost(cdev);
2600 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2602 return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
2603 ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
2605 static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
2608 * _ctl_version_product_show - product name
2609 * @cdev: pointer to embedded class device
2611 * @buf: the buffer returned
2613 * A sysfs 'read-only' shost attribute.
2616 _ctl_version_product_show(struct device *cdev, struct device_attribute *attr,
2619 struct Scsi_Host *shost = class_to_shost(cdev);
2620 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
/* NOTE(review): output bounded to 16 bytes, presumably the
 * ChipName field width in Manufacturing Page 0 — confirm */
2622 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
2624 static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL);
2627 * _ctl_version_nvdata_persistent_show - nvdata persistent version
2628 * @cdev: pointer to embedded class device
2630 * @buf: the buffer returned
2632 * A sysfs 'read-only' shost attribute.
2635 _ctl_version_nvdata_persistent_show(struct device *cdev,
2636 struct device_attribute *attr, char *buf)
2638 struct Scsi_Host *shost = class_to_shost(cdev);
2639 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
/* cached IO Unit Page 0 value; endian-converted for display */
2641 return snprintf(buf, PAGE_SIZE, "%08xh\n",
2642 le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
2644 static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
2645 _ctl_version_nvdata_persistent_show, NULL);
2648 * _ctl_version_nvdata_default_show - nvdata default version
2649 * @cdev: pointer to embedded class device
2651 * @buf: the buffer returned
2653 * A sysfs 'read-only' shost attribute.
2656 _ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute
2659 struct Scsi_Host *shost = class_to_shost(cdev);
2660 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2662 return snprintf(buf, PAGE_SIZE, "%08xh\n",
2663 le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
2665 static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
2666 _ctl_version_nvdata_default_show, NULL);
2669 * _ctl_board_name_show - board name
2670 * @cdev: pointer to embedded class device
2672 * @buf: the buffer returned
2674 * A sysfs 'read-only' shost attribute.
2677 _ctl_board_name_show(struct device *cdev, struct device_attribute *attr,
2680 struct Scsi_Host *shost = class_to_shost(cdev);
2681 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
/* NOTE(review): 16-byte bound presumably matches the BoardName
 * field width in Manufacturing Page 0 — confirm */
2683 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
2685 static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
2688 * _ctl_board_assembly_show - board assembly name
2689 * @cdev: pointer to embedded class device
2691 * @buf: the buffer returned
2693 * A sysfs 'read-only' shost attribute.
2696 _ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
2699 struct Scsi_Host *shost = class_to_shost(cdev);
2700 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2702 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
2704 static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);
2707 * _ctl_board_tracer_show - board tracer number
2708 * @cdev: pointer to embedded class device
2710 * @buf: the buffer returned
2712 * A sysfs 'read-only' shost attribute.
2715 _ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
2718 struct Scsi_Host *shost = class_to_shost(cdev);
2719 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2721 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
2723 static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);
2726 * _ctl_io_delay_show - io missing delay
2727 * @cdev: pointer to embedded class device
2729 * @buf: the buffer returned
2731 * This is the firmware implementation for debouncing device
2734 * A sysfs 'read-only' shost attribute.
2737 _ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
2740 struct Scsi_Host *shost = class_to_shost(cdev);
2741 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2743 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
2745 static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);
2748 * _ctl_device_delay_show - device missing delay
2749 * @cdev: pointer to embedded class device
2751 * @buf: the buffer returned
2753 * This is the firmware implementation for debouncing device
2756 * A sysfs 'read-only' shost attribute.
2759 _ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
2762 struct Scsi_Host *shost = class_to_shost(cdev);
2763 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2765 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
2767 static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);
2770 * _ctl_fw_queue_depth_show - global credits
2771 * @cdev: pointer to embedded class device
2773 * @buf: the buffer returned
2775 * This is firmware queue depth limit
2777 * A sysfs 'read-only' shost attribute.
2780 _ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
2783 struct Scsi_Host *shost = class_to_shost(cdev);
2784 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2786 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
2788 static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
2791 * _ctl_sas_address_show - sas address
2792 * @cdev: pointer to embedded class device
2794 * @buf: the buffer returned
2796 * This is the controller sas address
2798 * A sysfs 'read-only' shost attribute.
2801 _ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
2805 struct Scsi_Host *shost = class_to_shost(cdev);
2806 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2808 return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
2809 (unsigned long long)ioc->sas_hba.sas_address);
2811 static DEVICE_ATTR(host_sas_address, S_IRUGO,
2812 _ctl_host_sas_address_show, NULL);
2815 * _ctl_logging_level_show - logging level
2816 * @cdev: pointer to embedded class device
2818 * @buf: the buffer returned
2820 * A sysfs 'read/write' shost attribute.
2823 _ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
2826 struct Scsi_Host *shost = class_to_shost(cdev);
2827 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2829 return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
/* store: parse a hex mask from userspace and apply it immediately */
2832 _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
2833 const char *buf, size_t count)
2835 struct Scsi_Host *shost = class_to_shost(cdev);
2836 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
/* reject input that does not scan as a single hex value */
2839 if (sscanf(buf, "%x", &val) != 1)
2842 ioc->logging_level = val;
2843 pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
2844 ioc->logging_level);
2847 static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
2848 _ctl_logging_level_store);
2851 * _ctl_fwfault_debug_show - show/store fwfault_debug
2852 * @cdev: pointer to embedded class device
2854 * @buf: the buffer returned
2856 * mpt3sas_fwfault_debug is command line option
2857 * A sysfs 'read/write' shost attribute.
2860 _ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
2863 struct Scsi_Host *shost = class_to_shost(cdev);
2864 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2866 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
2869 _ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
2870 const char *buf, size_t count)
2872 struct Scsi_Host *shost = class_to_shost(cdev);
2873 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2876 if (sscanf(buf, "%d", &val) != 1)
2879 ioc->fwfault_debug = val;
2880 pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
2881 ioc->fwfault_debug);
2884 static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
2885 _ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
2888 * _ctl_ioc_reset_count_show - ioc reset count
2889 * @cdev: pointer to embedded class device
2891 * @buf: the buffer returned
2893 * Number of host resets this adapter has gone through
2895 * A sysfs 'read-only' shost attribute.
2898 _ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
2901 struct Scsi_Host *shost = class_to_shost(cdev);
2902 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2904 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
2906 static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);
2909 * _ctl_ioc_reply_queue_count_show - number of reply queues
2910 * @cdev: pointer to embedded class device
2912 * @buf: the buffer returned
2914 * This is number of reply queues
2916 * A sysfs 'read-only' shost attribute.
2919 _ctl_ioc_reply_queue_count_show(struct device *cdev,
2920 struct device_attribute *attr, char *buf)
2922 u8 reply_queue_count;
2923 struct Scsi_Host *shost = class_to_shost(cdev);
2924 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
/* multiple reply queues only exist when MSI-X is supported and enabled;
 * otherwise a single (legacy) queue is reported */
2926 if ((ioc->facts.IOCCapabilities &
2927 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
2928 reply_queue_count = ioc->reply_queue_count;
2930 reply_queue_count = 1;
2932 return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
2934 static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show,
2938 * _ctl_BRM_status_show - Backup Rail Monitor Status
2939 * @cdev: pointer to embedded class device
2941 * @buf: the buffer returned
2943 * Reads IO Unit Page 3 and reports BRM status (warpdrive only)
2945 * A sysfs 'read-only' shost attribute.
2948 _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
2951 struct Scsi_Host *shost = class_to_shost(cdev);
2952 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2953 Mpi2IOUnitPage3_t *io_unit_pg3 = NULL;
2954 Mpi2ConfigReply_t mpi_reply;
2955 u16 backup_rail_monitor_status = 0;
/* attribute only meaningful on warpdrive controllers */
2960 if (!ioc->is_warpdrive) {
2961 pr_err(MPT3SAS_FMT "%s: BRM attribute is only for"
2962 " warpdrive\n", ioc->name, __func__);
2965 /* pci_access_mutex lock acquired by sysfs show path */
2966 mutex_lock(&ioc->pci_access_mutex);
/* bail out while PCI error recovery / host removal is in progress */
2967 if (ioc->pci_error_recovery || ioc->remove_host) {
2968 mutex_unlock(&ioc->pci_access_mutex);
2972 /* allocate up to GPIOVal 36 entries */
2973 sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
2974 io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
2976 pr_err(MPT3SAS_FMT "%s: failed allocating memory "
2977 "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz);
2981 if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
2984 "%s: failed reading iounit_pg3\n", ioc->name,
/* check firmware completion status of the config page read */
2989 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
2990 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2991 pr_err(MPT3SAS_FMT "%s: iounit_pg3 failed with "
2992 "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status);
/* GPIOVal[24] must exist for the BRM bit to be valid */
2996 if (io_unit_pg3->GPIOCount < 25) {
2997 pr_err(MPT3SAS_FMT "%s: iounit_pg3->GPIOCount less than "
2998 "25 entries, detected (%d) entries\n", ioc->name, __func__,
2999 io_unit_pg3->GPIOCount);
3003 /* BRM status is in bit zero of GPIOVal[24] */
3004 backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]);
3005 rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
3009 mutex_unlock(&ioc->pci_access_mutex);
3012 static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL);
/* Header layout at the start of a posted diag (trace) buffer;
 * member definitions are not visible in this extract. */
3014 struct DIAG_BUFFER_START {
3025 * _ctl_host_trace_buffer_size_show - host buffer size (trace only)
3026 * @cdev: pointer to embedded class device
3028 * @buf: the buffer returned
3030 * A sysfs 'read-only' shost attribute.
3033 _ctl_host_trace_buffer_size_show(struct device *cdev,
3034 struct device_attribute *attr, char *buf)
3036 struct Scsi_Host *shost = class_to_shost(cdev);
3037 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3039 struct DIAG_BUFFER_START *request_data;
/* no trace buffer allocated */
3041 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
3043 "%s: host_trace_buffer is not registered\n",
3044 ioc->name, __func__);
/* buffer allocated but not registered with firmware */
3048 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3049 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
3051 "%s: host_trace_buffer is not registered\n",
3052 ioc->name, __func__);
3056 request_data = (struct DIAG_BUFFER_START *)
3057 ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
/* only trust the header's Size field for known DiagVersion values and
 * the expected signature in Reserved3 (0x4742444c == "GBDL" bytes) */
3058 if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
3059 le32_to_cpu(request_data->DiagVersion) == 0x01000000 ||
3060 le32_to_cpu(request_data->DiagVersion) == 0x01010000) &&
3061 le32_to_cpu(request_data->Reserved3) == 0x4742444c)
3062 size = le32_to_cpu(request_data->Size);
/* cache the size for subsequent host_trace_buffer reads */
3064 ioc->ring_buffer_sz = size;
3065 return snprintf(buf, PAGE_SIZE, "%d\n", size);
3067 static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
3068 _ctl_host_trace_buffer_size_show, NULL);
3071 * _ctl_host_trace_buffer_show - firmware ring buffer (trace only)
3072 * @cdev: pointer to embedded class device
3074 * @buf: the buffer returned
3076 * A sysfs 'read/write' shost attribute.
3078 * You will only be able to read 4k bytes of ring buffer at a time.
3079 * In order to read beyond 4k bytes, you will have to write out the
3080 * offset to the same attribute, it will move the pointer.
3083 _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
3086 struct Scsi_Host *shost = class_to_shost(cdev);
3087 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3091 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
3093 "%s: host_trace_buffer is not registered\n",
3094 ioc->name, __func__);
3098 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3099 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
3101 "%s: host_trace_buffer is not registered\n",
3102 ioc->name, __func__);
/* offset past the end of the buffer yields no data */
3106 if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
/* clamp the copy to one page minus the terminating byte */
3109 size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
3110 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
/* NOTE(review): diag_buffer[0] is indexed directly here, presumably
 * because MPI2_DIAG_BUF_TYPE_TRACE == 0 — confirm against MPI headers */
3111 request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset;
3112 memcpy(buf, request_data, size);
/* store: set the read offset (decimal) for the next show */
3117 _ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
3118 const char *buf, size_t count)
3120 struct Scsi_Host *shost = class_to_shost(cdev);
3121 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3124 if (sscanf(buf, "%d", &val) != 1)
3127 ioc->ring_buffer_offset = val;
3130 static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
3131 _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store);
3134 /*****************************************/
3137 * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
3138 * @cdev: pointer to embedded class device
3140 * @buf: the buffer returned
3142 * A sysfs 'read/write' shost attribute.
3144 * This is a mechanism to post/release host_trace_buffers
3147 _ctl_host_trace_buffer_enable_show(struct device *cdev,
3148 struct device_attribute *attr, char *buf)
3150 struct Scsi_Host *shost = class_to_shost(cdev);
3151 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
/* "off" = no buffer or not registered; "release" = registered but
 * released back from firmware; "post" = registered and active */
3153 if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
3154 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3155 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
3156 return snprintf(buf, PAGE_SIZE, "off\n");
3157 else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3158 MPT3_DIAG_BUFFER_IS_RELEASED))
3159 return snprintf(buf, PAGE_SIZE, "release\n");
3161 return snprintf(buf, PAGE_SIZE, "post\n");
/* store: accepts the strings "post" or "release" */
3165 _ctl_host_trace_buffer_enable_store(struct device *cdev,
3166 struct device_attribute *attr, const char *buf, size_t count)
3168 struct Scsi_Host *shost = class_to_shost(cdev);
3169 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3171 struct mpt3_diag_register diag_register;
3174 /* don't allow post/release to occur while recovery is active */
3175 if (ioc->shost_recovery || ioc->remove_host ||
3176 ioc->pci_error_recovery || ioc->is_driver_loading)
3179 if (sscanf(buf, "%9s", str) != 1)
3182 if (!strcmp(str, "post")) {
3183 /* exit out if host buffers are already posted */
3184 if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
3185 (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3186 MPT3_DIAG_BUFFER_IS_REGISTERED) &&
3187 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3188 MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
3190 memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
3191 pr_info(MPT3SAS_FMT "posting host trace buffers\n",
3193 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
/* 1 MB trace buffer with a driver-reserved unique id */
3194 diag_register.requested_buffer_size = (1024 * 1024);
3195 diag_register.unique_id = 0x7075900;
3196 ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
3197 _ctl_diag_register_2(ioc, &diag_register);
3198 } else if (!strcmp(str, "release")) {
3199 /* exit out if host buffers are already released */
3200 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
3202 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3203 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)
3205 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3206 MPT3_DIAG_BUFFER_IS_RELEASED))
3208 pr_info(MPT3SAS_FMT "releasing host trace buffer\n",
3210 mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
3217 static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
3218 _ctl_host_trace_buffer_enable_show,
3219 _ctl_host_trace_buffer_enable_store);
3221 /*********** diagnostic trigger support *********************************/
3224 * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute
3225 * @cdev: pointer to embedded class device
3227 * @buf: the buffer returned
3229 * A sysfs 'read/write' shost attribute.
3232 _ctl_diag_trigger_master_show(struct device *cdev,
3233 struct device_attribute *attr, char *buf)
3236 struct Scsi_Host *shost = class_to_shost(cdev);
3237 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3238 unsigned long flags;
/* copy the binary trigger structure out under the trigger lock */
3241 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3242 rc = sizeof(struct SL_WH_MASTER_TRIGGER_T);
3243 memcpy(buf, &ioc->diag_trigger_master, rc);
3244 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3249 * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute
3250 * @cdev: pointer to embedded class device
3252 * @buf: the buffer containing the trigger structure
3255 * A sysfs 'read/write' shost attribute.
3258 _ctl_diag_trigger_master_store(struct device *cdev,
3259 struct device_attribute *attr, const char *buf, size_t count)
3262 struct Scsi_Host *shost = class_to_shost(cdev);
3263 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3264 unsigned long flags;
/* accept at most a full SL_WH_MASTER_TRIGGER_T; zero first so a
 * short write leaves no stale bytes */
3267 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3268 rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
3269 memset(&ioc->diag_trigger_master, 0,
3270 sizeof(struct SL_WH_MASTER_TRIGGER_T));
3271 memcpy(&ioc->diag_trigger_master, buf, rc);
/* FW-fault and adapter-reset triggers are always forced on */
3272 ioc->diag_trigger_master.MasterData |=
3273 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
3274 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3277 static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR,
3278 _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store);
3282 * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute
3283 * @cdev: pointer to embedded class device
3285 * @buf: the buffer returned
3287 * A sysfs 'read/write' shost attribute.
3290 _ctl_diag_trigger_event_show(struct device *cdev,
3291 struct device_attribute *attr, char *buf)
3293 struct Scsi_Host *shost = class_to_shost(cdev);
3294 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3295 unsigned long flags;
3298 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3299 rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T);
3300 memcpy(buf, &ioc->diag_trigger_event, rc);
3301 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3306 * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute
3307 * @cdev: pointer to embedded class device
3309 * @buf: the buffer containing the trigger structure
3312 * A sysfs 'read/write' shost attribute.
3315 _ctl_diag_trigger_event_store(struct device *cdev,
3316 struct device_attribute *attr, const char *buf, size_t count)
3319 struct Scsi_Host *shost = class_to_shost(cdev);
3320 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3321 unsigned long flags;
/* zero then copy at most a full structure; clamp ValidEntries so a
 * malformed write cannot claim more entries than the array holds */
3324 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3325 sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
3326 memset(&ioc->diag_trigger_event, 0,
3327 sizeof(struct SL_WH_EVENT_TRIGGERS_T));
3328 memcpy(&ioc->diag_trigger_event, buf, sz);
3329 if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
3330 ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
3331 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3334 static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
3335 _ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store);
3339 * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
3340 * @cdev: pointer to embedded class device
3342 * @buf: the buffer returned
3344 * A sysfs 'read/write' shost attribute.
3347 _ctl_diag_trigger_scsi_show(struct device *cdev,
3348 struct device_attribute *attr, char *buf)
3350 struct Scsi_Host *shost = class_to_shost(cdev);
3351 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3352 unsigned long flags;
3355 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3356 rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
3357 memcpy(buf, &ioc->diag_trigger_scsi, rc);
3358 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3363 * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
3364 * @cdev: pointer to embedded class device
3366 * @buf: the buffer containing the trigger structure
3369 * A sysfs 'read/write' shost attribute.
3372 _ctl_diag_trigger_scsi_store(struct device *cdev,
3373 struct device_attribute *attr, const char *buf, size_t count)
3375 struct Scsi_Host *shost = class_to_shost(cdev);
3376 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3377 unsigned long flags;
3380 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3381 sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
/* BUG(review): memset size names the wrong struct — the object is a
 * struct SL_WH_SCSI_TRIGGERS_T, but sizeof(struct SL_WH_EVENT_TRIGGERS_T)
 * is used; should be sizeof(ioc->diag_trigger_scsi). If the event struct
 * is smaller, stale trailing bytes survive a short write (fixed upstream). */
3382 memset(&ioc->diag_trigger_scsi, 0,
3383 sizeof(struct SL_WH_EVENT_TRIGGERS_T));
3384 memcpy(&ioc->diag_trigger_scsi, buf, sz);
3385 if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
3386 ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
3387 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3390 static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
3391 _ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store);
3395 * _ctl_diag_trigger_mpi_show - show the diag_trigger_mpi attribute
3396 * @cdev: pointer to embedded class device
3398 * @buf: the buffer returned
3400 * A sysfs 'read/write' shost attribute.
3403 _ctl_diag_trigger_mpi_show(struct device *cdev,
3404 struct device_attribute *attr, char *buf)
3406 struct Scsi_Host *shost = class_to_shost(cdev);
3407 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3408 unsigned long flags;
3411 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3412 rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
3413 memcpy(buf, &ioc->diag_trigger_mpi, rc);
3414 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3419 * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute
3420 * @cdev: pointer to embedded class device
3422 * @buf: the buffer containing the trigger structure
3425 * A sysfs 'read/write' shost attribute.
3428 _ctl_diag_trigger_mpi_store(struct device *cdev,
3429 struct device_attribute *attr, const char *buf, size_t count)
3431 struct Scsi_Host *shost = class_to_shost(cdev);
3432 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3433 unsigned long flags;
/* zero then copy at most a full structure; sizeof(object) form here
 * avoids the wrong-struct mistake seen in the scsi variant */
3436 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3437 sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
3438 memset(&ioc->diag_trigger_mpi, 0,
3439 sizeof(ioc->diag_trigger_mpi));
3440 memcpy(&ioc->diag_trigger_mpi, buf, sz);
3441 if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
3442 ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
3443 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3447 static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
3448 _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store);
3450 /*********** diagnostic trigger support *** END ****************************/
3452 /*****************************************/
/* Table of shost sysfs attributes exported by this driver.
 * NOTE(review): presumably NULL-terminated in the full file — the
 * terminator line is not visible in this extract. */
3454 struct device_attribute *mpt3sas_host_attrs[] = {
3455 &dev_attr_version_fw,
3456 &dev_attr_version_bios,
3457 &dev_attr_version_mpi,
3458 &dev_attr_version_product,
3459 &dev_attr_version_nvdata_persistent,
3460 &dev_attr_version_nvdata_default,
3461 &dev_attr_board_name,
3462 &dev_attr_board_assembly,
3463 &dev_attr_board_tracer,
3465 &dev_attr_device_delay,
3466 &dev_attr_logging_level,
3467 &dev_attr_fwfault_debug,
3468 &dev_attr_fw_queue_depth,
3469 &dev_attr_host_sas_address,
3470 &dev_attr_ioc_reset_count,
3471 &dev_attr_host_trace_buffer_size,
3472 &dev_attr_host_trace_buffer,
3473 &dev_attr_host_trace_buffer_enable,
3474 &dev_attr_reply_queue_count,
3475 &dev_attr_diag_trigger_master,
3476 &dev_attr_diag_trigger_event,
3477 &dev_attr_diag_trigger_scsi,
3478 &dev_attr_diag_trigger_mpi,
3479 &dev_attr_BRM_status,
3483 /* device attributes */
3486 * _ctl_device_sas_address_show - sas address
3487 * @dev: pointer to embedded class device
3489 * @buf: the buffer returned
3491 * This is the sas address for the target
3493 * A sysfs 'read-only' sdev attribute.
3496 _ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr,
3499 struct scsi_device *sdev = to_scsi_device(dev);
3500 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3502 return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
3503 (unsigned long long)sas_device_priv_data->sas_target->sas_address);
3505 static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
3508 * _ctl_device_handle_show - device handle
3509 * @dev: pointer to embedded class device
3511 * @buf: the buffer returned
3513 * This is the firmware assigned device handle
3515 * A sysfs 'read-only' sdev attribute.
3518 _ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
3521 struct scsi_device *sdev = to_scsi_device(dev);
3522 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3524 return snprintf(buf, PAGE_SIZE, "0x%04x\n",
3525 sas_device_priv_data->sas_target->handle);
3527 static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
3530 * _ctl_device_ncq_io_prio_show - send prioritized io commands to device
3531 * @dev: pointer to embedded device
3533 * @buf: the buffer returned
3535 * A sysfs 'read/write' sdev attribute, only works with SATA
3538 _ctl_device_ncq_prio_enable_show(struct device *dev,
3539 struct device_attribute *attr, char *buf)
3541 struct scsi_device *sdev = to_scsi_device(dev);
3542 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3544 return snprintf(buf, PAGE_SIZE, "%d\n",
3545 sas_device_priv_data->ncq_prio_enable);
/* store: parse a boolean; reject if the device lacks NCQ prio support */
3549 _ctl_device_ncq_prio_enable_store(struct device *dev,
3550 struct device_attribute *attr,
3551 const char *buf, size_t count)
3553 struct scsi_device *sdev = to_scsi_device(dev);
3554 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3555 bool ncq_prio_enable = 0;
3557 if (kstrtobool(buf, &ncq_prio_enable))
3560 if (!scsih_ncq_prio_supp(sdev))
3563 sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
/* Table of per-device (sdev) sysfs attributes. */
3566 static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR,
3567 _ctl_device_ncq_prio_enable_show,
3568 _ctl_device_ncq_prio_enable_store);
3570 struct device_attribute *mpt3sas_dev_attrs[] = {
3571 &dev_attr_sas_address,
3572 &dev_attr_sas_device_handle,
3573 &dev_attr_sas_ncq_prio_enable,
3577 /* file operations table for mpt3ctl device */
3578 static const struct file_operations ctl_fops = {
3579 .owner = THIS_MODULE,
3580 .unlocked_ioctl = _ctl_ioctl,
3582 .fasync = _ctl_fasync,
3583 #ifdef CONFIG_COMPAT
3584 .compat_ioctl = _ctl_ioctl_compat,
3588 /* file operations table for mpt2ctl device */
3589 static const struct file_operations ctl_gen2_fops = {
3590 .owner = THIS_MODULE,
3591 .unlocked_ioctl = _ctl_mpt2_ioctl,
3593 .fasync = _ctl_fasync,
3594 #ifdef CONFIG_COMPAT
3595 .compat_ioctl = _ctl_mpt2_ioctl_compat,
/* misc char devices backing /dev/mpt3ctl and /dev/mpt2ctl */
3599 static struct miscdevice ctl_dev = {
3600 .minor = MPT3SAS_MINOR,
3601 .name = MPT3SAS_DEV_NAME,
3605 static struct miscdevice gen2_ctl_dev = {
3606 .minor = MPT2SAS_MINOR,
3607 .name = MPT2SAS_DEV_NAME,
3608 .fops = &ctl_gen2_fops,
3612 * mpt3sas_ctl_init - main entry point for ctl.
3613 * @hbas_to_enumerate: 1 = mpt2sas only, 2 = mpt3sas only, else both
3616 mpt3sas_ctl_init(ushort hbas_to_enumerate)
/* registration failure is logged but deliberately non-fatal */
3620 /* Don't register mpt3ctl ioctl device if
3621 * hbas_to_enumerate is one.
3623 if (hbas_to_enumerate != 1)
3624 if (misc_register(&ctl_dev) < 0)
3625 pr_err("%s can't register misc device [minor=%d]\n",
3626 MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);
3628 /* Don't register mpt2ctl ioctl device if
3629 * hbas_to_enumerate is two.
3631 if (hbas_to_enumerate != 2)
3632 if (misc_register(&gen2_ctl_dev) < 0)
3633 pr_err("%s can't register misc device [minor=%d]\n",
3634 MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR);
3636 init_waitqueue_head(&ctl_poll_wait);
3640 * mpt3sas_ctl_exit - exit point for ctl
3641 * @hbas_to_enumerate: ?
3644 mpt3sas_ctl_exit(ushort hbas_to_enumerate)
3646 struct MPT3SAS_ADAPTER *ioc;
3649 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
3651 /* free memory associated to diag buffers */
3652 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
3653 if (!ioc->diag_buffer[i])
3655 if (!(ioc->diag_buffer_status[i] &
3656 MPT3_DIAG_BUFFER_IS_REGISTERED))
3658 if ((ioc->diag_buffer_status[i] &
3659 MPT3_DIAG_BUFFER_IS_RELEASED))
3661 pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
3662 ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
3663 ioc->diag_buffer[i] = NULL;
3664 ioc->diag_buffer_status[i] = 0;
3667 kfree(ioc->event_log);
3669 if (hbas_to_enumerate != 1)
3670 misc_deregister(&ctl_dev);
3671 if (hbas_to_enumerate != 2)
3672 misc_deregister(&gen2_ctl_dev);