/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2016-2017 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>

#include "smartpqi.h"
#include "smartpqi_sis.h"
#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif
#define DRIVER_VERSION		"1.1.4-115"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		4
#define DRIVER_REVISION		115

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))
MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);
enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{ NONE, "none" },
	{ REBOOT, "reboot" },
	{ PANIC, "panic" },
};
static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};
static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");
static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};
static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}
#define SA_RAID_0		0	/* also used for RAID 00 */
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff
static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}
static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}
static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
	void *hostdata = shost_priv(shost);

	return *((struct pqi_ctrl_info **)hostdata);
}
static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}
static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}
static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}
static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}
static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}
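/*
 * Bookkeeping used while quiescing the controller: threads executing inside
 * the driver are counted as "busy", and threads parked waiting for blocked
 * requests are counted as "blocked".  pqi_ctrl_wait_until_quiesced() below
 * simply waits for the busy count to drop to the blocked count.
 */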
static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}
static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}
static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}
static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}
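/*
 * DMA-map a single buffer and describe it with one SG descriptor;
 * CISS_SG_LAST marks it as the only element of the chain.
 */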
static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, int data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
		return 0;

	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
		data_direction);
	if (pci_dma_mapping_error(pci_dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}
static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	int data_direction)
{
	int i;

	if (data_direction == PCI_DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		pci_unmap_single(pci_dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, int *pci_direction)
{
	u8 *cdb;
	int pci_dir;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)buffer_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case SOP_WRITE_FLAG:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
		break;
	}

	*pci_direction = pci_dir;

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, pci_dir);
}
static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}
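/*
 * I/O request slots come from a fixed per-controller pool and are claimed
 * without a lock: the first slot whose refcount goes from 0 to 1 wins.
 * next_io_request_slot is only a starting hint, so races on it are benign.
 */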
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}
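/*
 * The BMIC/CISS helpers that follow all use the same pattern: build a RAID
 * path request (which DMA-maps the caller's buffer into a single SG
 * descriptor), submit it synchronously, then unmap that descriptor.
 */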
515 static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
516 struct bmic_identify_controller *buffer)
520 struct pqi_raid_path_request request;
522 rc = pqi_build_raid_path_request(ctrl_info, &request,
523 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
524 sizeof(*buffer), 0, &pci_direction);
528 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
531 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
537 static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
538 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
542 struct pqi_raid_path_request request;
544 rc = pqi_build_raid_path_request(ctrl_info, &request,
545 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
550 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
553 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
559 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
560 struct pqi_scsi_dev *device,
561 struct bmic_identify_physical_device *buffer,
562 size_t buffer_length)
566 u16 bmic_device_index;
567 struct pqi_raid_path_request request;
569 rc = pqi_build_raid_path_request(ctrl_info, &request,
570 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
571 buffer_length, 0, &pci_direction);
575 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
576 request.cdb[2] = (u8)bmic_device_index;
577 request.cdb[9] = (u8)(bmic_device_index >> 8);
579 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
580 0, NULL, NO_TIMEOUT);
582 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
588 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
589 enum bmic_flush_cache_shutdown_event shutdown_event)
592 struct pqi_raid_path_request request;
594 struct bmic_flush_cache *flush_cache;
	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
600 if (pqi_ctrl_offline(ctrl_info))
603 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
607 flush_cache->shutdown_event = shutdown_event;
609 rc = pqi_build_raid_path_request(ctrl_info, &request,
610 SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
611 sizeof(*flush_cache), 0, &pci_direction);
615 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
616 0, NULL, NO_TIMEOUT);
618 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
627 static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
628 void *buffer, size_t buffer_length)
631 struct pqi_raid_path_request request;
634 rc = pqi_build_raid_path_request(ctrl_info, &request,
635 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
636 buffer_length, 0, &pci_direction);
640 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
641 0, NULL, NO_TIMEOUT);
643 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
651 struct bmic_host_wellness_driver_version {
653 u8 driver_version_tag[2];
654 __le16 driver_version_length;
655 char driver_version[32];
661 static int pqi_write_driver_version_to_host_wellness(
662 struct pqi_ctrl_info *ctrl_info)
665 struct bmic_host_wellness_driver_version *buffer;
666 size_t buffer_length;
668 buffer_length = sizeof(*buffer);
670 buffer = kmalloc(buffer_length, GFP_KERNEL);
674 buffer->start_tag[0] = '<';
675 buffer->start_tag[1] = 'H';
676 buffer->start_tag[2] = 'W';
677 buffer->start_tag[3] = '>';
678 buffer->driver_version_tag[0] = 'D';
679 buffer->driver_version_tag[1] = 'V';
680 put_unaligned_le16(sizeof(buffer->driver_version),
681 &buffer->driver_version_length);
682 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
683 sizeof(buffer->driver_version) - 1);
684 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
685 buffer->end_tag[0] = 'Z';
686 buffer->end_tag[1] = 'Z';
688 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
697 struct bmic_host_wellness_time {
702 u8 dont_write_tag[2];
708 static int pqi_write_current_time_to_host_wellness(
709 struct pqi_ctrl_info *ctrl_info)
712 struct bmic_host_wellness_time *buffer;
713 size_t buffer_length;
718 buffer_length = sizeof(*buffer);
720 buffer = kmalloc(buffer_length, GFP_KERNEL);
724 buffer->start_tag[0] = '<';
725 buffer->start_tag[1] = 'H';
726 buffer->start_tag[2] = 'W';
727 buffer->start_tag[3] = '>';
728 buffer->time_tag[0] = 'T';
729 buffer->time_tag[1] = 'D';
730 put_unaligned_le16(sizeof(buffer->time),
731 &buffer->time_length);
733 local_time = ktime_get_real_seconds();
734 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
735 year = tm.tm_year + 1900;
737 buffer->time[0] = bin2bcd(tm.tm_hour);
738 buffer->time[1] = bin2bcd(tm.tm_min);
739 buffer->time[2] = bin2bcd(tm.tm_sec);
741 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
742 buffer->time[5] = bin2bcd(tm.tm_mday);
743 buffer->time[6] = bin2bcd(year / 100);
744 buffer->time[7] = bin2bcd(year % 100);
746 buffer->dont_write_tag[0] = 'D';
747 buffer->dont_write_tag[1] = 'W';
748 buffer->end_tag[0] = 'Z';
749 buffer->end_tag[1] = 'Z';
751 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
758 #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
760 static void pqi_update_time_worker(struct work_struct *work)
763 struct pqi_ctrl_info *ctrl_info;
765 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
768 if (pqi_ctrl_offline(ctrl_info))
771 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
773 dev_warn(&ctrl_info->pci_dev->dev,
774 "error updating time on controller\n");
776 schedule_delayed_work(&ctrl_info->update_time_work,
777 PQI_UPDATE_TIME_WORK_INTERVAL);
780 static inline void pqi_schedule_update_time_worker(
781 struct pqi_ctrl_info *ctrl_info)
783 schedule_delayed_work(&ctrl_info->update_time_work, 0);
786 static inline void pqi_cancel_update_time_worker(
787 struct pqi_ctrl_info *ctrl_info)
789 cancel_delayed_work_sync(&ctrl_info->update_time_work);
792 static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
793 void *buffer, size_t buffer_length)
797 struct pqi_raid_path_request request;
799 rc = pqi_build_raid_path_request(ctrl_info, &request,
800 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
804 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
807 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
813 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
817 size_t lun_list_length;
818 size_t lun_data_length;
819 size_t new_lun_list_length;
820 void *lun_data = NULL;
821 struct report_lun_header *report_lun_header;
823 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
824 if (!report_lun_header) {
829 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
830 sizeof(*report_lun_header));
834 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
837 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
839 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
845 if (lun_list_length == 0) {
846 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
850 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
854 new_lun_list_length = get_unaligned_be32(
855 &((struct report_lun_header *)lun_data)->list_length);
857 if (new_lun_list_length > lun_list_length) {
858 lun_list_length = new_lun_list_length;
864 kfree(report_lun_header);
876 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
879 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
883 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
886 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
889 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
890 struct report_phys_lun_extended **physdev_list,
891 struct report_log_lun_extended **logdev_list)
894 size_t logdev_list_length;
895 size_t logdev_data_length;
896 struct report_log_lun_extended *internal_logdev_list;
897 struct report_log_lun_extended *logdev_data;
898 struct report_lun_header report_lun_header;
900 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
902 dev_err(&ctrl_info->pci_dev->dev,
903 "report physical LUNs failed\n");
905 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
907 dev_err(&ctrl_info->pci_dev->dev,
908 "report logical LUNs failed\n");
	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */
914 logdev_data = *logdev_list;
918 get_unaligned_be32(&logdev_data->header.list_length);
920 memset(&report_lun_header, 0, sizeof(report_lun_header));
922 (struct report_log_lun_extended *)&report_lun_header;
923 logdev_list_length = 0;
926 logdev_data_length = sizeof(struct report_lun_header) +
929 internal_logdev_list = kmalloc(logdev_data_length +
930 sizeof(struct report_log_lun_extended), GFP_KERNEL);
931 if (!internal_logdev_list) {
937 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
938 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
939 sizeof(struct report_log_lun_extended_entry));
940 put_unaligned_be32(logdev_list_length +
941 sizeof(struct report_log_lun_extended_entry),
942 &internal_logdev_list->header.list_length);
945 *logdev_list = internal_logdev_list;
950 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
951 int bus, int target, int lun)
954 device->target = target;
958 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
966 scsi3addr = device->scsi3addr;
967 lunid = get_unaligned_le32(scsi3addr);
969 if (pqi_is_hba_lunid(scsi3addr)) {
970 /* The specified device is the controller. */
971 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
972 device->target_lun_valid = true;
976 if (pqi_is_logical_device(device)) {
977 if (device->is_external_raid_device) {
978 bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
979 target = (lunid >> 16) & 0x3fff;
982 bus = PQI_RAID_VOLUME_BUS;
984 lun = lunid & 0x3fff;
986 pqi_set_bus_target_lun(device, bus, target, lun);
987 device->target_lun_valid = true;
992 * Defer target and LUN assignment for non-controller physical devices
993 * because the SAS transport layer will make these assignments later.
995 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
998 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
999 struct pqi_scsi_dev *device)
1005 raid_level = SA_RAID_UNKNOWN;
1007 buffer = kmalloc(64, GFP_KERNEL);
1009 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1010 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
1012 raid_level = buffer[8];
1013 if (raid_level > SA_RAID_MAX)
1014 raid_level = SA_RAID_UNKNOWN;
1019 device->raid_level = raid_level;
1022 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1023 struct pqi_scsi_dev *device, struct raid_map *raid_map)
1027 u32 r5or6_blocks_per_row;
1028 unsigned int num_phys_disks;
1029 unsigned int num_raid_map_entries;
1031 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1033 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1034 err_msg = "RAID map too small";
1038 if (raid_map_size > sizeof(*raid_map)) {
1039 err_msg = "RAID map too large";
1043 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1044 (get_unaligned_le16(&raid_map->data_disks_per_row) +
1045 get_unaligned_le16(&raid_map->metadata_disks_per_row));
1046 num_raid_map_entries = num_phys_disks *
1047 get_unaligned_le16(&raid_map->row_cnt);
1049 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
1050 err_msg = "invalid number of map entries in RAID map";
1054 if (device->raid_level == SA_RAID_1) {
1055 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1056 err_msg = "invalid RAID-1 map";
1059 } else if (device->raid_level == SA_RAID_ADM) {
1060 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
1061 err_msg = "invalid RAID-1(ADM) map";
1064 } else if ((device->raid_level == SA_RAID_5 ||
1065 device->raid_level == SA_RAID_6) &&
1066 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1068 r5or6_blocks_per_row =
1069 get_unaligned_le16(&raid_map->strip_size) *
1070 get_unaligned_le16(&raid_map->data_disks_per_row);
1071 if (r5or6_blocks_per_row == 0) {
1072 err_msg = "invalid RAID-5 or RAID-6 map";
1080 dev_warn(&ctrl_info->pci_dev->dev,
1081 "logical device %08x%08x %s\n",
1082 *((u32 *)&device->scsi3addr),
1083 *((u32 *)&device->scsi3addr[4]), err_msg);
1088 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1089 struct pqi_scsi_dev *device)
1093 struct pqi_raid_path_request request;
1094 struct raid_map *raid_map;
1096 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1100 rc = pqi_build_raid_path_request(ctrl_info, &request,
1101 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
1102 sizeof(*raid_map), 0, &pci_direction);
1106 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
1109 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
1115 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1119 device->raid_map = raid_map;
1129 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
1130 struct pqi_scsi_dev *device)
1136 buffer = kmalloc(64, GFP_KERNEL);
1140 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1141 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
1145 #define RAID_BYPASS_STATUS 4
1146 #define RAID_BYPASS_CONFIGURED 0x1
1147 #define RAID_BYPASS_ENABLED 0x2
1149 bypass_status = buffer[RAID_BYPASS_STATUS];
1150 device->raid_bypass_configured =
1151 (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1152 if (device->raid_bypass_configured &&
1153 (bypass_status & RAID_BYPASS_ENABLED) &&
1154 pqi_get_raid_map(ctrl_info, device) == 0)
1155 device->raid_bypass_enabled = true;
/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */
1165 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1166 struct pqi_scsi_dev *device)
1170 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1171 bool volume_offline = true;
1173 struct ciss_vpd_logical_volume_status *vpd;
1175 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1179 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1180 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1184 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1185 volume_status) + vpd->page_length;
1186 if (page_length < sizeof(*vpd))
1189 volume_status = vpd->volume_status;
1190 volume_flags = get_unaligned_be32(&vpd->flags);
1191 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1196 device->volume_status = volume_status;
1197 device->volume_offline = volume_offline;
1200 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1201 struct pqi_scsi_dev *device)
1206 buffer = kmalloc(64, GFP_KERNEL);
1210 /* Send an inquiry to the device to see what it is. */
1211 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1215 scsi_sanitize_inquiry_string(&buffer[8], 8);
1216 scsi_sanitize_inquiry_string(&buffer[16], 16);
1218 device->devtype = buffer[0] & 0x1f;
1219 memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1220 memcpy(device->model, &buffer[16], sizeof(device->model));
1222 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
1223 if (device->is_external_raid_device) {
1224 device->raid_level = SA_RAID_UNKNOWN;
1225 device->volume_status = CISS_LV_OK;
1226 device->volume_offline = false;
1228 pqi_get_raid_level(ctrl_info, device);
1229 pqi_get_raid_bypass_status(ctrl_info, device);
1230 pqi_get_volume_status(ctrl_info, device);
1240 static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1241 struct pqi_scsi_dev *device,
1242 struct bmic_identify_physical_device *id_phys)
1246 memset(id_phys, 0, sizeof(*id_phys));
1248 rc = pqi_identify_physical_device(ctrl_info, device,
1249 id_phys, sizeof(*id_phys));
1251 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1255 device->queue_depth =
1256 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1257 device->device_type = id_phys->device_type;
1258 device->active_path_index = id_phys->active_path_number;
1259 device->path_map = id_phys->redundant_path_present_map;
1260 memcpy(&device->box,
1261 &id_phys->alternate_paths_phys_box_on_port,
1262 sizeof(device->box));
1263 memcpy(&device->phys_connector,
1264 &id_phys->alternate_paths_phys_connector,
1265 sizeof(device->phys_connector));
1266 device->bay = id_phys->phys_bay_in_box;
1269 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1270 struct pqi_scsi_dev *device)
1273 static const char unknown_state_str[] =
1274 "Volume is in an unknown state (%u)";
1275 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1277 switch (device->volume_status) {
1279 status = "Volume online";
1281 case CISS_LV_FAILED:
1282 status = "Volume failed";
1284 case CISS_LV_NOT_CONFIGURED:
1285 status = "Volume not configured";
1287 case CISS_LV_DEGRADED:
1288 status = "Volume degraded";
1290 case CISS_LV_READY_FOR_RECOVERY:
1291 status = "Volume ready for recovery operation";
1293 case CISS_LV_UNDERGOING_RECOVERY:
1294 status = "Volume undergoing recovery";
1296 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1297 status = "Wrong physical drive was replaced";
1299 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1300 status = "A physical drive not properly connected";
1302 case CISS_LV_HARDWARE_OVERHEATING:
1303 status = "Hardware is overheating";
1305 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1306 status = "Hardware has overheated";
1308 case CISS_LV_UNDERGOING_EXPANSION:
1309 status = "Volume undergoing expansion";
1311 case CISS_LV_NOT_AVAILABLE:
1312 status = "Volume waiting for transforming volume";
1314 case CISS_LV_QUEUED_FOR_EXPANSION:
1315 status = "Volume queued for expansion";
1317 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1318 status = "Volume disabled due to SCSI ID conflict";
1320 case CISS_LV_EJECTED:
1321 status = "Volume has been ejected";
1323 case CISS_LV_UNDERGOING_ERASE:
1324 status = "Volume undergoing background erase";
1326 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1327 status = "Volume ready for predictive spare rebuild";
1329 case CISS_LV_UNDERGOING_RPI:
1330 status = "Volume undergoing rapid parity initialization";
1332 case CISS_LV_PENDING_RPI:
1333 status = "Volume queued for rapid parity initialization";
1335 case CISS_LV_ENCRYPTED_NO_KEY:
1336 status = "Encrypted volume inaccessible - key not present";
1338 case CISS_LV_UNDERGOING_ENCRYPTION:
1339 status = "Volume undergoing encryption process";
1341 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1342 status = "Volume undergoing encryption re-keying process";
1344 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1345 status = "Volume encrypted but encryption is disabled";
1347 case CISS_LV_PENDING_ENCRYPTION:
1348 status = "Volume pending migration to encrypted state";
1350 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1351 status = "Volume pending encryption rekeying";
1353 case CISS_LV_NOT_SUPPORTED:
1354 status = "Volume not supported on this controller";
1356 case CISS_LV_STATUS_UNAVAILABLE:
1357 status = "Volume status not available";
1360 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1361 unknown_state_str, device->volume_status);
1362 status = unknown_state_buffer;
1366 dev_info(&ctrl_info->pci_dev->dev,
1367 "scsi %d:%d:%d:%d %s\n",
1368 ctrl_info->scsi_host->host_no,
1369 device->bus, device->target, device->lun, status);
1372 static void pqi_rescan_worker(struct work_struct *work)
1374 struct pqi_ctrl_info *ctrl_info;
1376 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1379 pqi_scan_scsi_devices(ctrl_info);
1382 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1383 struct pqi_scsi_dev *device)
1387 if (pqi_is_logical_device(device))
1388 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1389 device->target, device->lun);
1391 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1396 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1397 struct pqi_scsi_dev *device)
1399 if (pqi_is_logical_device(device))
1400 scsi_remove_device(device->sdev);
1402 pqi_remove_sas_device(device);
1405 /* Assumes the SCSI device list lock is held. */
1407 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1408 int bus, int target, int lun)
1410 struct pqi_scsi_dev *device;
1412 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1413 scsi_device_list_entry)
1414 if (device->bus == bus && device->target == target &&
1421 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1422 struct pqi_scsi_dev *dev2)
1424 if (dev1->is_physical_device != dev2->is_physical_device)
1427 if (dev1->is_physical_device)
1428 return dev1->wwid == dev2->wwid;
1430 return memcmp(dev1->volume_id, dev2->volume_id,
1431 sizeof(dev1->volume_id)) == 0;
1434 enum pqi_find_result {
1440 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1441 struct pqi_scsi_dev *device_to_find,
1442 struct pqi_scsi_dev **matching_device)
1444 struct pqi_scsi_dev *device;
1446 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1447 scsi_device_list_entry) {
1448 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1449 device->scsi3addr)) {
1450 *matching_device = device;
1451 if (pqi_device_equal(device_to_find, device)) {
1452 if (device_to_find->volume_offline)
1453 return DEVICE_CHANGED;
1456 return DEVICE_CHANGED;
1460 return DEVICE_NOT_FOUND;
1463 #define PQI_DEV_INFO_BUFFER_LENGTH 128
1465 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1466 char *action, struct pqi_scsi_dev *device)
1469 char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1471 count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
1472 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1474 if (device->target_lun_valid)
1475 count += snprintf(buffer + count,
1476 PQI_DEV_INFO_BUFFER_LENGTH - count,
1481 count += snprintf(buffer + count,
1482 PQI_DEV_INFO_BUFFER_LENGTH - count,
1485 if (pqi_is_logical_device(device))
1486 count += snprintf(buffer + count,
1487 PQI_DEV_INFO_BUFFER_LENGTH - count,
1489 *((u32 *)&device->scsi3addr),
1490 *((u32 *)&device->scsi3addr[4]));
1492 count += snprintf(buffer + count,
1493 PQI_DEV_INFO_BUFFER_LENGTH - count,
1494 " %016llx", device->sas_address);
1496 count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
1498 scsi_device_type(device->devtype),
1502 if (pqi_is_logical_device(device)) {
1503 if (device->devtype == TYPE_DISK)
1504 count += snprintf(buffer + count,
1505 PQI_DEV_INFO_BUFFER_LENGTH - count,
1506 "SSDSmartPathCap%c En%c %-12s",
1507 device->raid_bypass_configured ? '+' : '-',
1508 device->raid_bypass_enabled ? '+' : '-',
1509 pqi_raid_level_to_string(device->raid_level));
1511 count += snprintf(buffer + count,
1512 PQI_DEV_INFO_BUFFER_LENGTH - count,
1513 "AIO%c", device->aio_enabled ? '+' : '-');
1514 if (device->devtype == TYPE_DISK ||
1515 device->devtype == TYPE_ZBC)
1516 count += snprintf(buffer + count,
1517 PQI_DEV_INFO_BUFFER_LENGTH - count,
1518 " qd=%-6d", device->queue_depth);
1521 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
1524 /* Assumes the SCSI device list lock is held. */
1526 static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1527 struct pqi_scsi_dev *new_device)
1529 existing_device->devtype = new_device->devtype;
1530 existing_device->device_type = new_device->device_type;
1531 existing_device->bus = new_device->bus;
1532 if (new_device->target_lun_valid) {
1533 existing_device->target = new_device->target;
1534 existing_device->lun = new_device->lun;
1535 existing_device->target_lun_valid = true;
1538 /* By definition, the scsi3addr and wwid fields are already the same. */
1540 existing_device->is_physical_device = new_device->is_physical_device;
1541 existing_device->is_external_raid_device =
1542 new_device->is_external_raid_device;
1543 existing_device->aio_enabled = new_device->aio_enabled;
1544 memcpy(existing_device->vendor, new_device->vendor,
1545 sizeof(existing_device->vendor));
1546 memcpy(existing_device->model, new_device->model,
1547 sizeof(existing_device->model));
1548 existing_device->sas_address = new_device->sas_address;
1549 existing_device->raid_level = new_device->raid_level;
1550 existing_device->queue_depth = new_device->queue_depth;
1551 existing_device->aio_handle = new_device->aio_handle;
1552 existing_device->volume_status = new_device->volume_status;
1553 existing_device->active_path_index = new_device->active_path_index;
1554 existing_device->path_map = new_device->path_map;
1555 existing_device->bay = new_device->bay;
1556 memcpy(existing_device->box, new_device->box,
1557 sizeof(existing_device->box));
1558 memcpy(existing_device->phys_connector, new_device->phys_connector,
1559 sizeof(existing_device->phys_connector));
1560 existing_device->offload_to_mirror = 0;
1561 kfree(existing_device->raid_map);
1562 existing_device->raid_map = new_device->raid_map;
1563 existing_device->raid_bypass_configured =
1564 new_device->raid_bypass_configured;
1565 existing_device->raid_bypass_enabled =
1566 new_device->raid_bypass_enabled;
1568 /* To prevent this from being freed later. */
1569 new_device->raid_map = NULL;
1572 static inline void pqi_free_device(struct pqi_scsi_dev *device)
1575 kfree(device->raid_map);
/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */
1585 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1586 struct pqi_scsi_dev *device)
1588 unsigned long flags;
1590 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1591 list_del(&device->scsi_device_list_entry);
1592 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1594 /* Allow the device structure to be freed later. */
1595 device->keep_device = false;
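/*
 * Reconcile the driver's internal device list against a freshly reported
 * list: existing entries are updated in place, vanished devices are queued
 * for removal, and new devices are queued to be exposed to the SCSI ML.
 */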
1598 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1599 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1603 unsigned long flags;
1604 enum pqi_find_result find_result;
1605 struct pqi_scsi_dev *device;
1606 struct pqi_scsi_dev *next;
1607 struct pqi_scsi_dev *matching_device;
1608 LIST_HEAD(add_list);
1609 LIST_HEAD(delete_list);
	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock.  That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */
1618 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1620 /* Assume that all devices in the existing list have gone away. */
1621 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1622 scsi_device_list_entry)
1623 device->device_gone = true;
1625 for (i = 0; i < num_new_devices; i++) {
1626 device = new_device_list[i];
1628 find_result = pqi_scsi_find_entry(ctrl_info, device,
1631 switch (find_result) {
1634 * The newly found device is already in the existing
1637 device->new_device = false;
1638 matching_device->device_gone = false;
1639 pqi_scsi_update_device(matching_device, device);
1641 case DEVICE_NOT_FOUND:
1643 * The newly found device is NOT in the existing device
1646 device->new_device = true;
1648 case DEVICE_CHANGED:
1650 * The original device has gone away and we need to add
1653 device->new_device = true;
1658 /* Process all devices that have gone away. */
1659 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1660 scsi_device_list_entry) {
1661 if (device->device_gone) {
1662 list_del(&device->scsi_device_list_entry);
1663 list_add_tail(&device->delete_list_entry, &delete_list);
1667 /* Process all new devices. */
1668 for (i = 0; i < num_new_devices; i++) {
1669 device = new_device_list[i];
1670 if (!device->new_device)
1672 if (device->volume_offline)
1674 list_add_tail(&device->scsi_device_list_entry,
1675 &ctrl_info->scsi_device_list);
1676 list_add_tail(&device->add_list_entry, &add_list);
1677 /* To prevent this device structure from being freed later. */
1678 device->keep_device = true;
1681 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1683 /* Remove all devices that have gone away. */
1684 list_for_each_entry_safe(device, next, &delete_list,
1685 delete_list_entry) {
1686 if (device->volume_offline) {
1687 pqi_dev_info(ctrl_info, "offline", device);
1688 pqi_show_volume_status(ctrl_info, device);
1690 pqi_dev_info(ctrl_info, "removed", device);
1693 pqi_remove_device(ctrl_info, device);
1694 list_del(&device->delete_list_entry);
1695 pqi_free_device(device);
	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
1702 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1703 scsi_device_list_entry) {
1704 if (device->sdev && device->queue_depth !=
1705 device->advertised_queue_depth) {
1706 device->advertised_queue_depth = device->queue_depth;
1707 scsi_change_queue_depth(device->sdev,
1708 device->advertised_queue_depth);
1712 /* Expose any new devices. */
1713 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1714 if (!device->sdev) {
1715 pqi_dev_info(ctrl_info, "added", device);
1716 rc = pqi_add_device(ctrl_info, device);
1718 dev_warn(&ctrl_info->pci_dev->dev,
1719 "scsi %d:%d:%d:%d addition failed, device not added\n",
1720 ctrl_info->scsi_host->host_no,
1721 device->bus, device->target,
1723 pqi_fixup_botched_add(ctrl_info, device);
1729 static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1731 bool is_supported = false;
1733 switch (device->devtype) {
1737 case TYPE_MEDIUM_CHANGER:
1738 case TYPE_ENCLOSURE:
1739 is_supported = true;
1743 * Only support the HBA controller itself as a RAID
1744 * controller. If it's a RAID controller other than
1745 * the HBA itself (an external RAID controller, for
1746 * example), we don't support it.
1748 if (pqi_is_hba_lunid(device->scsi3addr))
1749 is_supported = true;
1753 return is_supported;
static inline bool pqi_skip_device(u8 *scsi3addr)
{
	/* Ignore all masked devices. */
	if (MASKED_DEVICE(scsi3addr))
		return true;

	return false;
}
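/*
 * Device discovery: fetch the physical and logical LUN lists from the
 * controller, build and interrogate a pqi_scsi_dev for each reported LUN,
 * then hand the surviving entries to pqi_update_device_list().
 */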
1765 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1769 LIST_HEAD(new_device_list_head);
1770 struct report_phys_lun_extended *physdev_list = NULL;
1771 struct report_log_lun_extended *logdev_list = NULL;
1772 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1773 struct report_log_lun_extended_entry *log_lun_ext_entry;
1774 struct bmic_identify_physical_device *id_phys = NULL;
1777 struct pqi_scsi_dev **new_device_list = NULL;
1778 struct pqi_scsi_dev *device;
1779 struct pqi_scsi_dev *next;
1780 unsigned int num_new_devices;
1781 unsigned int num_valid_devices;
1782 bool is_physical_device;
1784 static char *out_of_memory_msg =
1785 "failed to allocate memory, device discovery stopped";
1787 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1793 get_unaligned_be32(&physdev_list->header.list_length)
1794 / sizeof(physdev_list->lun_entries[0]);
1800 get_unaligned_be32(&logdev_list->header.list_length)
1801 / sizeof(logdev_list->lun_entries[0]);
1805 if (num_physicals) {
		/*
		 * We need this buffer for calls to pqi_get_physical_disk_info()
		 * below.  We allocate it here instead of inside
		 * pqi_get_physical_disk_info() because it's a fairly large
		 * buffer.
		 */
1812 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1814 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1821 num_new_devices = num_physicals + num_logicals;
1823 new_device_list = kmalloc_array(num_new_devices,
1824 sizeof(*new_device_list),
1826 if (!new_device_list) {
1827 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1832 for (i = 0; i < num_new_devices; i++) {
1833 device = kzalloc(sizeof(*device), GFP_KERNEL);
1835 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1840 list_add_tail(&device->new_device_list_entry,
1841 &new_device_list_head);
1845 num_valid_devices = 0;
1847 for (i = 0; i < num_new_devices; i++) {
1849 if (i < num_physicals) {
1850 is_physical_device = true;
1851 phys_lun_ext_entry = &physdev_list->lun_entries[i];
1852 log_lun_ext_entry = NULL;
1853 scsi3addr = phys_lun_ext_entry->lunid;
1855 is_physical_device = false;
1856 phys_lun_ext_entry = NULL;
1858 &logdev_list->lun_entries[i - num_physicals];
1859 scsi3addr = log_lun_ext_entry->lunid;
1862 if (is_physical_device && pqi_skip_device(scsi3addr))
1866 device = list_next_entry(device, new_device_list_entry);
1868 device = list_first_entry(&new_device_list_head,
1869 struct pqi_scsi_dev, new_device_list_entry);
1871 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1872 device->is_physical_device = is_physical_device;
1873 if (!is_physical_device)
1874 device->is_external_raid_device =
1875 pqi_is_external_raid_addr(scsi3addr);
1877 /* Gather information about the device. */
1878 rc = pqi_get_device_info(ctrl_info, device);
1879 if (rc == -ENOMEM) {
1880 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1885 if (device->is_physical_device)
1886 dev_warn(&ctrl_info->pci_dev->dev,
1887 "obtaining device info failed, skipping physical device %016llx\n",
1889 &phys_lun_ext_entry->wwid));
1891 dev_warn(&ctrl_info->pci_dev->dev,
1892 "obtaining device info failed, skipping logical device %08x%08x\n",
1893 *((u32 *)&device->scsi3addr),
1894 *((u32 *)&device->scsi3addr[4]));
1899 if (!pqi_is_supported_device(device))
1902 pqi_assign_bus_target_lun(device);
1904 if (device->is_physical_device) {
1905 device->wwid = phys_lun_ext_entry->wwid;
1906 if ((phys_lun_ext_entry->device_flags &
1907 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1908 phys_lun_ext_entry->aio_handle)
1909 device->aio_enabled = true;
1911 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1912 sizeof(device->volume_id));
1915 switch (device->devtype) {
1918 case TYPE_ENCLOSURE:
1919 if (device->is_physical_device) {
1920 device->sas_address =
1921 get_unaligned_be64(&device->wwid);
1922 if (device->devtype == TYPE_DISK ||
1923 device->devtype == TYPE_ZBC) {
1924 device->aio_handle =
1925 phys_lun_ext_entry->aio_handle;
1926 pqi_get_physical_disk_info(ctrl_info,
1933 new_device_list[num_valid_devices++] = device;
1936 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1939 list_for_each_entry_safe(device, next, &new_device_list_head,
1940 new_device_list_entry) {
1941 if (device->keep_device)
1943 list_del(&device->new_device_list_entry);
1944 pqi_free_device(device);
1947 kfree(new_device_list);
1948 kfree(physdev_list);
1955 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1957 unsigned long flags;
1958 struct pqi_scsi_dev *device;
1961 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1963 device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
1964 struct pqi_scsi_dev, scsi_device_list_entry);
1966 list_del(&device->scsi_device_list_entry);
1968 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
1975 pqi_remove_device(ctrl_info, device);
1976 pqi_free_device(device);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	mutex_lock(&ctrl_info->scan_mutex);

	rc = pqi_update_scsi_devices(ctrl_info);
	if (rc)
		pqi_schedule_rescan_worker_delayed(ctrl_info);

	mutex_unlock(&ctrl_info->scan_mutex);

	return rc;
}
static void pqi_scan_start(struct Scsi_Host *shost)
{
	pqi_scan_scsi_devices(shost_to_hba(shost));
}
2003 /* Returns TRUE if scan is finished. */
static int pqi_scan_finished(struct Scsi_Host *shost,
	unsigned long elapsed_time)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_priv(shost);

	return !mutex_is_locked(&ctrl_info->scan_mutex);
}
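/*
 * Taking and immediately releasing the mutex below is enough to wait for an
 * in-progress scan (or LUN reset) to complete, because the worker holds the
 * corresponding mutex for the full duration of the operation.
 */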
static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->scan_mutex);
	mutex_unlock(&ctrl_info->scan_mutex);
}

static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}
static inline void pqi_set_encryption_info(
	struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
	u64 first_block)
{
	u32 volume_blk_size;

	/*
	 * Set the encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is equal to the LBA.
	 * For other block sizes, tweak value is (LBA * block size) / 512.
	 */
	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;

	encryption_info->data_encryption_key_index =
		get_unaligned_le16(&raid_map->data_encryption_key_index);
	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
}
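/*
 * Example: on a volume with a 4096-byte block size, an I/O starting at
 * LBA 100 gets a tweak value of (100 * 4096) / 512 = 800.
 */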
/*
 * Attempt to perform RAID bypass mapping for a logical volume I/O.
 */
2052 #define PQI_RAID_BYPASS_INELIGIBLE 1
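/*
 * The bypass path below only handles simple READ/WRITE CDBs whose block
 * range maps onto a single row and column of the RAID map; anything else
 * is punted back to the normal RAID path via PQI_RAID_BYPASS_INELIGIBLE.
 */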
2054 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2055 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2056 struct pqi_queue_group *queue_group)
2058 struct raid_map *raid_map;
2059 bool is_write = false;
2067 u32 first_row_offset;
2068 u32 last_row_offset;
2073 u32 r5or6_blocks_per_row;
2074 u64 r5or6_first_row;
2076 u32 r5or6_first_row_offset;
2077 u32 r5or6_last_row_offset;
2078 u32 r5or6_first_column;
2079 u32 r5or6_last_column;
2080 u16 data_disks_per_row;
2081 u32 total_disks_per_row;
2082 u16 layout_map_count;
2094 int offload_to_mirror;
2095 struct pqi_encryption_info *encryption_info_ptr;
2096 struct pqi_encryption_info encryption_info;
2097 #if BITS_PER_LONG == 32
2101 /* Check for valid opcode, get LBA and block count. */
2102 switch (scmd->cmnd[0]) {
2107 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2108 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2109 block_cnt = (u32)scmd->cmnd[4];
2117 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2118 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2124 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2125 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2131 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2132 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2135 /* Process via normal I/O path. */
2136 return PQI_RAID_BYPASS_INELIGIBLE;
2139 /* Check for write to non-RAID-0. */
2140 if (is_write && device->raid_level != SA_RAID_0)
2141 return PQI_RAID_BYPASS_INELIGIBLE;
2143 if (unlikely(block_cnt == 0))
2144 return PQI_RAID_BYPASS_INELIGIBLE;
2146 last_block = first_block + block_cnt - 1;
2147 raid_map = device->raid_map;
2149 /* Check for invalid block or wraparound. */
2150 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2151 last_block < first_block)
2152 return PQI_RAID_BYPASS_INELIGIBLE;
2154 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2155 strip_size = get_unaligned_le16(&raid_map->strip_size);
2156 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2158 /* Calculate stripe information for the request. */
2159 blocks_per_row = data_disks_per_row * strip_size;
2160 #if BITS_PER_LONG == 32
2161 tmpdiv = first_block;
2162 do_div(tmpdiv, blocks_per_row);
2164 tmpdiv = last_block;
2165 do_div(tmpdiv, blocks_per_row);
2167 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2168 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2169 tmpdiv = first_row_offset;
2170 do_div(tmpdiv, strip_size);
2171 first_column = tmpdiv;
2172 tmpdiv = last_row_offset;
2173 do_div(tmpdiv, strip_size);
2174 last_column = tmpdiv;
2176 first_row = first_block / blocks_per_row;
2177 last_row = last_block / blocks_per_row;
2178 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2179 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2180 first_column = first_row_offset / strip_size;
2181 last_column = last_row_offset / strip_size;
2184 /* If this isn't a single row/column then give to the controller. */
2185 if (first_row != last_row || first_column != last_column)
2186 return PQI_RAID_BYPASS_INELIGIBLE;
2188 /* Proceeding with driver mapping. */
2189 total_disks_per_row = data_disks_per_row +
2190 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2191 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2192 get_unaligned_le16(&raid_map->row_cnt);
2193 map_index = (map_row * total_disks_per_row) + first_column;
2196 if (device->raid_level == SA_RAID_1) {
2197 if (device->offload_to_mirror)
2198 map_index += data_disks_per_row;
2199 device->offload_to_mirror = !device->offload_to_mirror;
2200 } else if (device->raid_level == SA_RAID_ADM) {
		/*
		 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
		 * divisible by 3.
		 */
2206 offload_to_mirror = device->offload_to_mirror;
2207 if (offload_to_mirror == 0) {
2208 /* use physical disk in the first mirrored group. */
2209 map_index %= data_disks_per_row;
2213 * Determine mirror group that map_index
2216 current_group = map_index / data_disks_per_row;
2218 if (offload_to_mirror != current_group) {
2220 layout_map_count - 1) {
2222 * Select raid index from
2225 map_index += data_disks_per_row;
2229 * Select raid index from first
2232 map_index %= data_disks_per_row;
2236 } while (offload_to_mirror != current_group);
2239 /* Set mirror group to use next time. */
2241 (offload_to_mirror >= layout_map_count - 1) ?
2242 0 : offload_to_mirror + 1;
2243 WARN_ON(offload_to_mirror >= layout_map_count);
2244 device->offload_to_mirror = offload_to_mirror;
		/*
		 * Avoid direct use of device->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of device->layout_map_count -1.
		 */
2250 } else if ((device->raid_level == SA_RAID_5 ||
2251 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2253 /* Verify first and last block are in same RAID group */
2254 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2255 stripesize = r5or6_blocks_per_row * layout_map_count;
2256 #if BITS_PER_LONG == 32
2257 tmpdiv = first_block;
2258 first_group = do_div(tmpdiv, stripesize);
2259 tmpdiv = first_group;
2260 do_div(tmpdiv, r5or6_blocks_per_row);
2261 first_group = tmpdiv;
2262 tmpdiv = last_block;
2263 last_group = do_div(tmpdiv, stripesize);
2264 tmpdiv = last_group;
2265 do_div(tmpdiv, r5or6_blocks_per_row);
2266 last_group = tmpdiv;
2268 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2269 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2271 if (first_group != last_group)
2272 return PQI_RAID_BYPASS_INELIGIBLE;
2274 /* Verify request is in a single row of RAID 5/6 */
2275 #if BITS_PER_LONG == 32
2276 tmpdiv = first_block;
2277 do_div(tmpdiv, stripesize);
2278 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2279 tmpdiv = last_block;
2280 do_div(tmpdiv, stripesize);
2281 r5or6_last_row = r0_last_row = tmpdiv;
2283 first_row = r5or6_first_row = r0_first_row =
2284 first_block / stripesize;
2285 r5or6_last_row = r0_last_row = last_block / stripesize;
2287 if (r5or6_first_row != r5or6_last_row)
2288 return PQI_RAID_BYPASS_INELIGIBLE;
2290 /* Verify request is in a single column */
2291 #if BITS_PER_LONG == 32
2292 tmpdiv = first_block;
2293 first_row_offset = do_div(tmpdiv, stripesize);
2294 tmpdiv = first_row_offset;
2295 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2296 r5or6_first_row_offset = first_row_offset;
2297 tmpdiv = last_block;
2298 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2299 tmpdiv = r5or6_last_row_offset;
2300 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2301 tmpdiv = r5or6_first_row_offset;
2302 do_div(tmpdiv, strip_size);
2303 first_column = r5or6_first_column = tmpdiv;
2304 tmpdiv = r5or6_last_row_offset;
2305 do_div(tmpdiv, strip_size);
2306 r5or6_last_column = tmpdiv;
#else
2308 first_row_offset = r5or6_first_row_offset =
2309 (u32)((first_block % stripesize) %
2310 r5or6_blocks_per_row);
2312 r5or6_last_row_offset =
2313 (u32)((last_block % stripesize) %
2314 r5or6_blocks_per_row);
2316 first_column = r5or6_first_row_offset / strip_size;
2317 r5or6_first_column = first_column;
2318 r5or6_last_column = r5or6_last_row_offset / strip_size;
#endif
2320 if (r5or6_first_column != r5or6_last_column)
2321 return PQI_RAID_BYPASS_INELIGIBLE;
2323 /* Request is eligible */
2325 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2326 get_unaligned_le16(&raid_map->row_cnt);
2328 map_index = (first_group *
2329 (get_unaligned_le16(&raid_map->row_cnt) *
2330 total_disks_per_row)) +
2331 (map_row * total_disks_per_row) + first_column;
2334 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2335 return PQI_RAID_BYPASS_INELIGIBLE;
2337 aio_handle = raid_map->disk_data[map_index].aio_handle;
2338 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2339 first_row * strip_size +
2340 (first_row_offset - first_column * strip_size);
2341 disk_block_cnt = block_cnt;
2343 /* Handle differing logical/physical block sizes. */
2344 if (raid_map->phys_blk_shift) {
2345 disk_block <<= raid_map->phys_blk_shift;
2346 disk_block_cnt <<= raid_map->phys_blk_shift;
2349 if (unlikely(disk_block_cnt > 0xffff))
2350 return PQI_RAID_BYPASS_INELIGIBLE;
2352 /* Build the new CDB for the physical disk I/O. */
2353 if (disk_block > 0xffffffff) {
2354 cdb[0] = is_write ? WRITE_16 : READ_16;
2356 put_unaligned_be64(disk_block, &cdb[2]);
2357 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2362 cdb[0] = is_write ? WRITE_10 : READ_10;
2364 put_unaligned_be32((u32)disk_block, &cdb[2]);
2366 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2371 if (get_unaligned_le16(&raid_map->flags) &
2372 RAID_MAP_ENCRYPTION_ENABLED) {
2373 pqi_set_encryption_info(&encryption_info, raid_map, first_block);
2375 encryption_info_ptr = &encryption_info;
} else {
2377 encryption_info_ptr = NULL;
}
2380 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2381 cdb, cdb_length, queue_group, encryption_info_ptr, true);
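/*
 * At this point the request has been remapped from the logical volume onto a
 * single physical disk: aio_handle selects the disk and the rebuilt CDB
 * addresses disk_block/disk_block_cnt directly.  The request is submitted on
 * the AIO path with raid_bypass set to true so that a failure of the bypass
 * attempt is not treated as a dead device (see the !io_request->raid_bypass
 * check in pqi_process_aio_io_error) and the command can be retried on the
 * normal RAID path.
 */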
2384 #define PQI_STATUS_IDLE 0x0
2386 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2387 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2389 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2390 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2391 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2392 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2393 #define PQI_DEVICE_STATE_ERROR 0x4
2395 #define PQI_MODE_READY_TIMEOUT_SECS 30
2396 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2398 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2400 struct pqi_device_registers __iomem *pqi_registers;
2401 unsigned long timeout;
2405 pqi_registers = ctrl_info->pqi_registers;
2406 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2409 signature = readq(&pqi_registers->signature);
2410 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2411 sizeof(signature)) == 0)
2413 if (time_after(jiffies, timeout)) {
2414 dev_err(&ctrl_info->pci_dev->dev,
2415 "timed out waiting for PQI signature\n");
2418 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2422 status = readb(&pqi_registers->function_and_status_code);
2423 if (status == PQI_STATUS_IDLE)
2425 if (time_after(jiffies, timeout)) {
2426 dev_err(&ctrl_info->pci_dev->dev,
2427 "timed out waiting for PQI IDLE\n");
2430 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2434 if (readl(&pqi_registers->device_status) ==
2435 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2437 if (time_after(jiffies, timeout)) {
2438 dev_err(&ctrl_info->pci_dev->dev,
2439 "timed out waiting for PQI all registers ready\n");
2442 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
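/*
 * The polling sequence above waits, for up to PQI_MODE_READY_TIMEOUT_SECS
 * with PQI_MODE_READY_POLL_INTERVAL_MSECS between reads, for three conditions
 * in turn: the PQI device signature, a function and status code of
 * PQI_STATUS_IDLE, and a device status of
 * PQI_DEVICE_STATE_ALL_REGISTERS_READY.  Only then is the controller
 * considered ready for PQI-mode operation.
 */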
2448 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2450 struct pqi_scsi_dev *device;
2452 device = io_request->scmd->device->hostdata;
2453 device->raid_bypass_enabled = false;
2454 device->aio_enabled = false;
2457 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
2459 struct pqi_ctrl_info *ctrl_info;
2460 struct pqi_scsi_dev *device;
2462 device = sdev->hostdata;
2463 if (device->device_offline)
2466 device->device_offline = true;
2467 scsi_device_set_state(sdev, SDEV_OFFLINE);
2468 ctrl_info = shost_to_hba(sdev->host);
2469 pqi_schedule_rescan_worker(ctrl_info);
2470 dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n",
2471 path, ctrl_info->scsi_host->host_no, device->bus,
2472 device->target, device->lun);
2475 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2479 struct scsi_cmnd *scmd;
2480 struct pqi_raid_error_info *error_info;
2481 size_t sense_data_length;
2484 struct scsi_sense_hdr sshdr;
2486 scmd = io_request->scmd;
2490 error_info = io_request->error_info;
2491 scsi_status = error_info->status;
2494 switch (error_info->data_out_result) {
2495 case PQI_DATA_IN_OUT_GOOD:
2497 case PQI_DATA_IN_OUT_UNDERFLOW:
2499 xfer_count = get_unaligned_le32(&error_info->data_out_transferred);
2500 residual_count = scsi_bufflen(scmd) - xfer_count;
2501 scsi_set_resid(scmd, residual_count);
2502 if (xfer_count < scmd->underflow)
2503 host_byte = DID_SOFT_ERROR;
2505 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2506 case PQI_DATA_IN_OUT_ABORTED:
2507 host_byte = DID_ABORT;
2509 case PQI_DATA_IN_OUT_TIMEOUT:
2510 host_byte = DID_TIME_OUT;
2512 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2513 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2514 case PQI_DATA_IN_OUT_BUFFER_ERROR:
2515 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2516 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2517 case PQI_DATA_IN_OUT_ERROR:
2518 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2519 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2520 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2521 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2522 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2523 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2524 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2525 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2526 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2527 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2529 host_byte = DID_ERROR;
2533 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2534 if (sense_data_length == 0)
2536 sense_data_length = get_unaligned_le16(&error_info->response_data_length);
2537 if (sense_data_length) {
2538 if (sense_data_length > sizeof(error_info->data))
2539 sense_data_length = sizeof(error_info->data);
2541 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2542 scsi_normalize_sense(error_info->data,
2543 sense_data_length, &sshdr) &&
2544 sshdr.sense_key == HARDWARE_ERROR &&
2545 sshdr.asc == 0x3e &&
2546 sshdr.ascq == 0x1) {
2547 pqi_take_device_offline(scmd->device, "RAID");
2548 host_byte = DID_NO_CONNECT;
2551 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2552 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2553 memcpy(scmd->sense_buffer, error_info->data, sense_data_length);
2557 scmd->result = scsi_status;
2558 set_host_byte(scmd, host_byte);
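/*
 * Summary of the RAID-path error handling above: the firmware's
 * data_out_result is mapped to a SCSI host byte (underflow may become
 * DID_SOFT_ERROR, aborts DID_ABORT, timeouts DID_TIME_OUT, the listed
 * transport/PCIe errors DID_ERROR) and any sense data is copied back for the
 * midlayer.  A CHECK CONDITION with sense key HARDWARE ERROR and ASC/ASCQ
 * 0x3e/0x01 is treated as a vanished device: it is taken offline and the
 * command completes with DID_NO_CONNECT.
 */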
2561 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2565 struct scsi_cmnd *scmd;
2566 struct pqi_aio_error_info *error_info;
2567 size_t sense_data_length;
2570 bool device_offline;
2572 scmd = io_request->scmd;
2573 error_info = io_request->error_info;
2575 sense_data_length = 0;
2576 device_offline = false;
2578 switch (error_info->service_response) {
2579 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2580 scsi_status = error_info->status;
2582 case PQI_AIO_SERV_RESPONSE_FAILURE:
2583 switch (error_info->status) {
2584 case PQI_AIO_STATUS_IO_ABORTED:
2585 scsi_status = SAM_STAT_TASK_ABORTED;
2587 case PQI_AIO_STATUS_UNDERRUN:
2588 scsi_status = SAM_STAT_GOOD;
2589 residual_count = get_unaligned_le32(
2590 &error_info->residual_count);
2591 scsi_set_resid(scmd, residual_count);
2592 xfer_count = scsi_bufflen(scmd) - residual_count;
2593 if (xfer_count < scmd->underflow)
2594 host_byte = DID_SOFT_ERROR;
2596 case PQI_AIO_STATUS_OVERRUN:
2597 scsi_status = SAM_STAT_GOOD;
2599 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2600 pqi_aio_path_disabled(io_request);
2601 scsi_status = SAM_STAT_GOOD;
2602 io_request->status = -EAGAIN;
2604 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2605 case PQI_AIO_STATUS_INVALID_DEVICE:
2606 if (!io_request->raid_bypass) {
2607 device_offline = true;
2608 pqi_take_device_offline(scmd->device, "AIO");
2609 host_byte = DID_NO_CONNECT;
2611 scsi_status = SAM_STAT_CHECK_CONDITION;
2613 case PQI_AIO_STATUS_IO_ERROR:
2615 scsi_status = SAM_STAT_CHECK_CONDITION;
2619 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2620 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2621 scsi_status = SAM_STAT_GOOD;
2623 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2624 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2626 scsi_status = SAM_STAT_CHECK_CONDITION;
2630 if (error_info->data_present) {
2632 sense_data_length = get_unaligned_le16(&error_info->data_length);
2633 if (sense_data_length) {
2634 if (sense_data_length > sizeof(error_info->data))
2635 sense_data_length = sizeof(error_info->data);
2636 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2637 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2638 memcpy(scmd->sense_buffer, error_info->data, sense_data_length);
2643 if (device_offline && sense_data_length == 0)
2644 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1);
2647 scmd->result = scsi_status;
2648 set_host_byte(scmd, host_byte);
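/*
 * For AIO-path errors the service_response/status pair decides the outcome:
 * underrun and overrun complete with GOOD status (underrun may still raise
 * DID_SOFT_ERROR), PQI_AIO_STATUS_AIO_PATH_DISABLED turns off the AIO path
 * for the device and marks the request status -EAGAIN, and "no path to
 * device"/"invalid device" offlines the device unless the request was a RAID
 * bypass attempt.
 */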
2651 static void pqi_process_io_error(unsigned int iu_type,
2652 struct pqi_io_request *io_request)
switch (iu_type) {
2655 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2656 pqi_process_raid_io_error(io_request);
2658 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2659 pqi_process_aio_io_error(io_request);
2664 static int pqi_interpret_task_management_response(
2665 struct pqi_task_management_response *response)
2669 switch (response->response_code) {
2670 case SOP_TMF_COMPLETE:
2671 case SOP_TMF_FUNCTION_SUCCEEDED:
2682 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2683 struct pqi_queue_group *queue_group)
2685 unsigned int num_responses;
2688 struct pqi_io_request *io_request;
2689 struct pqi_io_response *response;
2693 oq_ci = queue_group->oq_ci_copy;
2696 oq_pi = *queue_group->oq_pi;
2701 response = queue_group->oq_element_array +
2702 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2704 request_id = get_unaligned_le16(&response->request_id);
2705 WARN_ON(request_id >= ctrl_info->max_io_slots);
2707 io_request = &ctrl_info->io_request_pool[request_id];
2708 WARN_ON(atomic_read(&io_request->refcount) == 0);
2710 switch (response->header.iu_type) {
2711 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2712 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2713 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2715 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2716 io_request->status =
2717 pqi_interpret_task_management_response((void *)response);
2720 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2721 pqi_aio_path_disabled(io_request);
2722 io_request->status = -EAGAIN;
2724 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2725 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2726 io_request->error_info = ctrl_info->error_buffer +
2727 (get_unaligned_le16(&response->error_index) *
2728 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2729 pqi_process_io_error(response->header.iu_type, io_request);
2733 dev_err(&ctrl_info->pci_dev->dev,
2734 "unexpected IU type: 0x%x\n",
2735 response->header.iu_type);
2739 io_request->io_complete_callback(io_request,
2740 io_request->context);
2743 * Note that the I/O request structure CANNOT BE TOUCHED after
2744 * returning from the I/O completion callback!
2747 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2750 if (num_responses) {
2751 queue_group->oq_ci_copy = oq_ci;
2752 writel(oq_ci, queue_group->oq_ci);
2755 return num_responses;
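/*
 * Each response element is matched to its originating request through
 * request_id, which indexes directly into ctrl_info->io_request_pool.  Error
 * responses additionally carry error_index, which locates the matching slot
 * in ctrl_info->error_buffer (error_index * PQI_ERROR_BUFFER_ELEMENT_LENGTH).
 * The consumer index is written back to the controller once, after the whole
 * batch of responses has been drained.
 */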
2758 static inline unsigned int pqi_num_elements_free(unsigned int pi,
2759 unsigned int ci, unsigned int elements_in_queue)
2761 unsigned int num_elements_used;
if (pi >= ci)
2764 num_elements_used = pi - ci;
else
2766 num_elements_used = elements_in_queue - ci + pi;
2768 return elements_in_queue - num_elements_used - 1;
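/*
 * Worked example of the ring accounting above (illustrative values only):
 * with elements_in_queue = 32, pi = 5 and ci = 10 the queue has wrapped, so
 * num_elements_used = 32 - 10 + 5 = 27 and the function returns
 * 32 - 27 - 1 = 4.  One element is always left unused so that a full ring can
 * be distinguished from an empty one.
 */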
2771 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
2772 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2776 unsigned long flags;
2778 struct pqi_queue_group *queue_group;
2780 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2781 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2784 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2786 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2787 iq_ci = *queue_group->iq_ci[RAID_PATH];
2789 if (pqi_num_elements_free(iq_pi, iq_ci,
2790 ctrl_info->num_elements_per_iq))
2793 spin_unlock_irqrestore(
2794 &queue_group->submit_lock[RAID_PATH], flags);
2796 if (pqi_ctrl_offline(ctrl_info))
2800 next_element = queue_group->iq_element_array[RAID_PATH] +
2801 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2803 memcpy(next_element, iu, iu_length);
2805 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
2806 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2809 * This write notifies the controller that an IU is available to be
2812 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2814 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
2817 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2818 struct pqi_event *event)
2820 struct pqi_event_acknowledge_request request;
2822 memset(&request, 0, sizeof(request));
2824 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2825 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2826 &request.header.iu_length);
2827 request.event_type = event->event_type;
2828 request.event_id = event->event_id;
2829 request.additional_event_id = event->additional_event_id;
2831 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
2834 static void pqi_event_worker(struct work_struct *work)
2837 struct pqi_ctrl_info *ctrl_info;
2838 struct pqi_event *event;
2840 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2842 pqi_ctrl_busy(ctrl_info);
2843 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
2844 if (pqi_ctrl_offline(ctrl_info))
2847 pqi_schedule_rescan_worker_delayed(ctrl_info);
2849 event = ctrl_info->events;
2850 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
2851 if (event->pending) {
2852 event->pending = false;
2853 pqi_acknowledge_event(ctrl_info, event);
2859 pqi_ctrl_unbusy(ctrl_info);
2862 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
2864 static void pqi_heartbeat_timer_handler(struct timer_list *t)
2867 u32 heartbeat_count;
2868 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
2871 pqi_check_ctrl_health(ctrl_info);
2872 if (pqi_ctrl_offline(ctrl_info))
2875 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
2876 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
2878 if (num_interrupts == ctrl_info->previous_num_interrupts) {
2879 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
2880 dev_err(&ctrl_info->pci_dev->dev,
2881 "no heartbeat detected - last heartbeat count: %u\n",
2883 pqi_take_ctrl_offline(ctrl_info);
2887 ctrl_info->previous_num_interrupts = num_interrupts;
2890 ctrl_info->previous_heartbeat_count = heartbeat_count;
2891 mod_timer(&ctrl_info->heartbeat_timer,
2892 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
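/*
 * The heartbeat check above runs every PQI_HEARTBEAT_TIMER_INTERVAL (10
 * seconds).  The controller is declared dead only when both indicators have
 * stalled since the previous run: the driver's interrupt count and the
 * firmware heartbeat counter.  Otherwise the saved values are refreshed and
 * the timer is re-armed.
 */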
2895 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2897 if (!ctrl_info->heartbeat_counter)
		return;
2900 ctrl_info->previous_num_interrupts =
2901 atomic_read(&ctrl_info->num_interrupts);
2902 ctrl_info->previous_heartbeat_count =
2903 pqi_read_heartbeat_counter(ctrl_info);
2905 ctrl_info->heartbeat_timer.expires =
2906 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2907 add_timer(&ctrl_info->heartbeat_timer);
2910 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2912 del_timer_sync(&ctrl_info->heartbeat_timer);
2915 static inline int pqi_event_type_to_event_index(unsigned int event_type)
2919 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
2920 if (event_type == pqi_supported_event_types[index])
			return index;

	return -1;
2926 static inline bool pqi_is_supported_event(unsigned int event_type)
2928 return pqi_event_type_to_event_index(event_type) != -1;
2931 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2933 unsigned int num_events;
2936 struct pqi_event_queue *event_queue;
2937 struct pqi_event_response *response;
2938 struct pqi_event *event;
2941 event_queue = &ctrl_info->event_queue;
2943 oq_ci = event_queue->oq_ci_copy;
2946 oq_pi = *event_queue->oq_pi;
2951 response = event_queue->oq_element_array +
2952 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2955 event_index = pqi_event_type_to_event_index(response->event_type);
2957 if (event_index >= 0) {
2958 if (response->request_acknowlege) {
2959 event = &ctrl_info->events[event_index];
2960 event->pending = true;
2961 event->event_type = response->event_type;
2962 event->event_id = response->event_id;
2963 event->additional_event_id =
2964 response->additional_event_id;
2968 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2972 event_queue->oq_ci_copy = oq_ci;
2973 writel(oq_ci, event_queue->oq_ci);
2974 schedule_work(&ctrl_info->event_work);
2980 #define PQI_LEGACY_INTX_MASK 0x1
2982 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
	bool enable_intx)
2986 struct pqi_device_registers __iomem *pqi_registers;
2987 volatile void __iomem *register_addr;
2989 pqi_registers = ctrl_info->pqi_registers;
if (enable_intx)
2992 register_addr = &pqi_registers->legacy_intx_mask_clear;
else
2994 register_addr = &pqi_registers->legacy_intx_mask_set;
2996 intx_mask = readl(register_addr);
2997 intx_mask |= PQI_LEGACY_INTX_MASK;
2998 writel(intx_mask, register_addr);
3001 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3002 enum pqi_irq_mode new_mode)
3004 switch (ctrl_info->irq_mode) {
3010 pqi_configure_legacy_intx(ctrl_info, true);
3011 sis_enable_intx(ctrl_info);
3020 pqi_configure_legacy_intx(ctrl_info, false);
3021 sis_enable_msix(ctrl_info);
3026 pqi_configure_legacy_intx(ctrl_info, false);
3033 sis_enable_msix(ctrl_info);
3036 pqi_configure_legacy_intx(ctrl_info, true);
3037 sis_enable_intx(ctrl_info);
3045 ctrl_info->irq_mode = new_mode;
3048 #define PQI_LEGACY_INTX_PENDING 0x1
3050 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3055 switch (ctrl_info->irq_mode) {
3061 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
3062 if (intx_status & PQI_LEGACY_INTX_PENDING)
3076 static irqreturn_t pqi_irq_handler(int irq, void *data)
3078 struct pqi_ctrl_info *ctrl_info;
3079 struct pqi_queue_group *queue_group;
3080 unsigned int num_responses_handled;
queue_group = data;
3083 ctrl_info = queue_group->ctrl_info;
3085 if (!pqi_is_valid_irq(ctrl_info))
3088 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3090 if (irq == ctrl_info->event_irq)
3091 num_responses_handled += pqi_process_event_intr(ctrl_info);
3093 if (num_responses_handled)
3094 atomic_inc(&ctrl_info->num_interrupts);
3096 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3097 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3102 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3104 struct pci_dev *pci_dev = ctrl_info->pci_dev;
3108 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3110 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3111 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3112 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
3114 dev_err(&pci_dev->dev,
3115 "irq %u init failed with error %d\n",
3116 pci_irq_vector(pci_dev, i), rc);
3119 ctrl_info->num_msix_vectors_initialized++;
3125 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3129 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3130 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3131 &ctrl_info->queue_groups[i]);
3133 ctrl_info->num_msix_vectors_initialized = 0;
3136 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3138 int num_vectors_enabled;
3140 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3141 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3142 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3143 if (num_vectors_enabled < 0) {
3144 dev_err(&ctrl_info->pci_dev->dev,
3145 "MSI-X init failed with error %d\n",
3146 num_vectors_enabled);
3147 return num_vectors_enabled;
3150 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
3151 ctrl_info->irq_mode = IRQ_MODE_MSIX;
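/*
 * MSI-X vectors are requested in the range [PQI_MIN_MSIX_VECTORS,
 * num_queue_groups]; PCI_IRQ_AFFINITY asks the PCI core to spread them across
 * CPUs.  The controller may grant fewer vectors than requested, so
 * num_msix_vectors_enabled records what pci_alloc_irq_vectors() actually
 * returned.
 */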
3155 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3157 if (ctrl_info->num_msix_vectors_enabled) {
3158 pci_free_irq_vectors(ctrl_info->pci_dev);
3159 ctrl_info->num_msix_vectors_enabled = 0;
3163 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3166 size_t alloc_length;
3167 size_t element_array_length_per_iq;
3168 size_t element_array_length_per_oq;
3169 void *element_array;
3170 void *next_queue_index;
3171 void *aligned_pointer;
3172 unsigned int num_inbound_queues;
3173 unsigned int num_outbound_queues;
3174 unsigned int num_queue_indexes;
3175 struct pqi_queue_group *queue_group;
3177 element_array_length_per_iq =
3178 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3179 ctrl_info->num_elements_per_iq;
3180 element_array_length_per_oq =
3181 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3182 ctrl_info->num_elements_per_oq;
3183 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3184 num_outbound_queues = ctrl_info->num_queue_groups;
3185 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3187 aligned_pointer = NULL;
3189 for (i = 0; i < num_inbound_queues; i++) {
3190 aligned_pointer = PTR_ALIGN(aligned_pointer,
3191 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3192 aligned_pointer += element_array_length_per_iq;
3195 for (i = 0; i < num_outbound_queues; i++) {
3196 aligned_pointer = PTR_ALIGN(aligned_pointer,
3197 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3198 aligned_pointer += element_array_length_per_oq;
3201 aligned_pointer = PTR_ALIGN(aligned_pointer,
3202 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3203 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3204 PQI_EVENT_OQ_ELEMENT_LENGTH;
3206 for (i = 0; i < num_queue_indexes; i++) {
3207 aligned_pointer = PTR_ALIGN(aligned_pointer,
3208 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3209 aligned_pointer += sizeof(pqi_index_t);
3212 alloc_length = (size_t)aligned_pointer +
3213 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3215 alloc_length += PQI_EXTRA_SGL_MEMORY;
3217 ctrl_info->queue_memory_base =
3218 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3220 alloc_length, &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3222 if (!ctrl_info->queue_memory_base)
3225 ctrl_info->queue_memory_length = alloc_length;
3227 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3228 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3230 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3231 queue_group = &ctrl_info->queue_groups[i];
3232 queue_group->iq_element_array[RAID_PATH] = element_array;
3233 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3234 ctrl_info->queue_memory_base_dma_handle +
3235 (element_array - ctrl_info->queue_memory_base);
3236 element_array += element_array_length_per_iq;
3237 element_array = PTR_ALIGN(element_array,
3238 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3239 queue_group->iq_element_array[AIO_PATH] = element_array;
3240 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3241 ctrl_info->queue_memory_base_dma_handle +
3242 (element_array - ctrl_info->queue_memory_base);
3243 element_array += element_array_length_per_iq;
3244 element_array = PTR_ALIGN(element_array,
3245 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3248 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3249 queue_group = &ctrl_info->queue_groups[i];
3250 queue_group->oq_element_array = element_array;
3251 queue_group->oq_element_array_bus_addr =
3252 ctrl_info->queue_memory_base_dma_handle +
3253 (element_array - ctrl_info->queue_memory_base);
3254 element_array += element_array_length_per_oq;
3255 element_array = PTR_ALIGN(element_array,
3256 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3259 ctrl_info->event_queue.oq_element_array = element_array;
3260 ctrl_info->event_queue.oq_element_array_bus_addr =
3261 ctrl_info->queue_memory_base_dma_handle +
3262 (element_array - ctrl_info->queue_memory_base);
3263 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3264 PQI_EVENT_OQ_ELEMENT_LENGTH;
3266 next_queue_index = PTR_ALIGN(element_array,
3267 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3269 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3270 queue_group = &ctrl_info->queue_groups[i];
3271 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3272 queue_group->iq_ci_bus_addr[RAID_PATH] =
3273 ctrl_info->queue_memory_base_dma_handle +
3274 (next_queue_index - ctrl_info->queue_memory_base);
3275 next_queue_index += sizeof(pqi_index_t);
3276 next_queue_index = PTR_ALIGN(next_queue_index,
3277 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3278 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3279 queue_group->iq_ci_bus_addr[AIO_PATH] =
3280 ctrl_info->queue_memory_base_dma_handle +
3281 (next_queue_index - ctrl_info->queue_memory_base);
3282 next_queue_index += sizeof(pqi_index_t);
3283 next_queue_index = PTR_ALIGN(next_queue_index,
3284 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3285 queue_group->oq_pi = next_queue_index;
3286 queue_group->oq_pi_bus_addr =
3287 ctrl_info->queue_memory_base_dma_handle +
3288 (next_queue_index - ctrl_info->queue_memory_base);
3289 next_queue_index += sizeof(pqi_index_t);
3290 next_queue_index = PTR_ALIGN(next_queue_index,
3291 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3294 ctrl_info->event_queue.oq_pi = next_queue_index;
3295 ctrl_info->event_queue.oq_pi_bus_addr =
3296 ctrl_info->queue_memory_base_dma_handle +
3297 (next_queue_index - ctrl_info->queue_memory_base);
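/*
 * The single coherent allocation above is carved up in the same order it was
 * sized: per-group RAID and AIO inbound element arrays, per-group outbound
 * element arrays, the event queue element array, and finally the queue index
 * words (iq_ci, oq_pi), with element arrays aligned to
 * PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT and indexes to
 * PQI_OPERATIONAL_INDEX_ALIGNMENT.  The initial walk over a NULL base pointer
 * exists only to compute alloc_length.
 */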
3302 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3305 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3306 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3309 * Initialize the backpointers to the controller structure in
3310 * each operational queue group structure.
3312 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3313 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3316 * Assign IDs to all operational queues. Note that the IDs
3317 * assigned to operational IQs are independent of the IDs
3318 * assigned to operational OQs.
3320 ctrl_info->event_queue.oq_id = next_oq_id++;
3321 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3322 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3323 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3324 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3328 * Assign MSI-X table entry indexes to all queues. Note that the
3329 * interrupt for the event queue is shared with the first queue group.
3331 ctrl_info->event_queue.int_msg_num = 0;
3332 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3333 ctrl_info->queue_groups[i].int_msg_num = i;
3335 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3336 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3337 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3338 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3339 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3343 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3345 size_t alloc_length;
3346 struct pqi_admin_queues_aligned *admin_queues_aligned;
3347 struct pqi_admin_queues *admin_queues;
3349 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3350 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3352 ctrl_info->admin_queue_memory_base =
3353 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3355 alloc_length, &ctrl_info->admin_queue_memory_base_dma_handle, GFP_KERNEL);
3358 if (!ctrl_info->admin_queue_memory_base)
3361 ctrl_info->admin_queue_memory_length = alloc_length;
3363 admin_queues = &ctrl_info->admin_queues;
3364 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3365 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3366 admin_queues->iq_element_array =
3367 &admin_queues_aligned->iq_element_array;
3368 admin_queues->oq_element_array =
3369 &admin_queues_aligned->oq_element_array;
3370 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3371 admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3373 admin_queues->iq_element_array_bus_addr =
3374 ctrl_info->admin_queue_memory_base_dma_handle +
3375 (admin_queues->iq_element_array -
3376 ctrl_info->admin_queue_memory_base);
3377 admin_queues->oq_element_array_bus_addr =
3378 ctrl_info->admin_queue_memory_base_dma_handle +
3379 (admin_queues->oq_element_array -
3380 ctrl_info->admin_queue_memory_base);
3381 admin_queues->iq_ci_bus_addr =
3382 ctrl_info->admin_queue_memory_base_dma_handle +
3383 ((void *)admin_queues->iq_ci -
3384 ctrl_info->admin_queue_memory_base);
3385 admin_queues->oq_pi_bus_addr =
3386 ctrl_info->admin_queue_memory_base_dma_handle +
3387 ((void *)admin_queues->oq_pi -
3388 ctrl_info->admin_queue_memory_base);
3393 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3394 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3396 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3398 struct pqi_device_registers __iomem *pqi_registers;
3399 struct pqi_admin_queues *admin_queues;
3400 unsigned long timeout;
3404 pqi_registers = ctrl_info->pqi_registers;
3405 admin_queues = &ctrl_info->admin_queues;
3407 writeq((u64)admin_queues->iq_element_array_bus_addr,
3408 &pqi_registers->admin_iq_element_array_addr);
3409 writeq((u64)admin_queues->oq_element_array_bus_addr,
3410 &pqi_registers->admin_oq_element_array_addr);
3411 writeq((u64)admin_queues->iq_ci_bus_addr,
3412 &pqi_registers->admin_iq_ci_addr);
3413 writeq((u64)admin_queues->oq_pi_bus_addr,
3414 &pqi_registers->admin_oq_pi_addr);
3416 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3417 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3418 (admin_queues->int_msg_num << 16);
3419 writel(reg, &pqi_registers->admin_iq_num_elements);
3420 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3421 &pqi_registers->function_and_status_code);
3423 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3425 status = readb(&pqi_registers->function_and_status_code);
3426 if (status == PQI_STATUS_IDLE)
3428 if (time_after(jiffies, timeout))
3430 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3434 * The offset registers are not initialized to the correct
3435 * offsets until *after* the create admin queue pair command
3436 * completes successfully.
3438 admin_queues->iq_pi = ctrl_info->iomem_base +
3439 PQI_DEVICE_REGISTERS_OFFSET +
3440 readq(&pqi_registers->admin_iq_pi_offset);
3441 admin_queues->oq_ci = ctrl_info->iomem_base +
3442 PQI_DEVICE_REGISTERS_OFFSET +
3443 readq(&pqi_registers->admin_oq_ci_offset);
3448 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3449 struct pqi_general_admin_request *request)
3451 struct pqi_admin_queues *admin_queues;
3455 admin_queues = &ctrl_info->admin_queues;
3456 iq_pi = admin_queues->iq_pi_copy;
3458 next_element = admin_queues->iq_element_array +
3459 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3461 memcpy(next_element, request, sizeof(*request));
3463 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3464 admin_queues->iq_pi_copy = iq_pi;
3467 * This write notifies the controller that an IU is available to be
3470 writel(iq_pi, admin_queues->iq_pi);
3473 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
3475 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3476 struct pqi_general_admin_response *response)
3478 struct pqi_admin_queues *admin_queues;
3481 unsigned long timeout;
3483 admin_queues = &ctrl_info->admin_queues;
3484 oq_ci = admin_queues->oq_ci_copy;
3486 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
3489 oq_pi = *admin_queues->oq_pi;
3492 if (time_after(jiffies, timeout)) {
3493 dev_err(&ctrl_info->pci_dev->dev,
3494 "timed out waiting for admin response\n");
3497 if (!sis_is_firmware_running(ctrl_info))
3499 usleep_range(1000, 2000);
3502 memcpy(response, admin_queues->oq_element_array +
3503 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3505 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3506 admin_queues->oq_ci_copy = oq_ci;
3507 writel(oq_ci, admin_queues->oq_ci);
3512 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3513 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3514 struct pqi_io_request *io_request)
3516 struct pqi_io_request *next;
3521 unsigned long flags;
3522 unsigned int num_elements_needed;
3523 unsigned int num_elements_to_end_of_queue;
3525 struct pqi_iu_header *request;
3527 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3530 io_request->queue_group = queue_group;
3531 list_add_tail(&io_request->request_list_entry,
3532 &queue_group->request_list[path]);
3535 iq_pi = queue_group->iq_pi_copy[path];
3537 list_for_each_entry_safe(io_request, next,
3538 &queue_group->request_list[path], request_list_entry) {
3540 request = io_request->iu;
3542 iu_length = get_unaligned_le16(&request->iu_length) +
3543 PQI_REQUEST_HEADER_LENGTH;
3544 num_elements_needed =
3545 DIV_ROUND_UP(iu_length,
3546 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3548 iq_ci = *queue_group->iq_ci[path];
3550 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3551 ctrl_info->num_elements_per_iq))
3554 put_unaligned_le16(queue_group->oq_id,
3555 &request->response_queue_id);
3557 next_element = queue_group->iq_element_array[path] +
3558 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3560 num_elements_to_end_of_queue =
3561 ctrl_info->num_elements_per_iq - iq_pi;
3563 if (num_elements_needed <= num_elements_to_end_of_queue) {
3564 memcpy(next_element, request, iu_length);
} else {
3566 copy_count = num_elements_to_end_of_queue *
3567 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3568 memcpy(next_element, request, copy_count);
3569 memcpy(queue_group->iq_element_array[path],
3570 (u8 *)request + copy_count,
3571 iu_length - copy_count);
}
3574 iq_pi = (iq_pi + num_elements_needed) %
3575 ctrl_info->num_elements_per_iq;
3577 list_del(&io_request->request_list_entry);
3580 if (iq_pi != queue_group->iq_pi_copy[path]) {
3581 queue_group->iq_pi_copy[path] = iq_pi;
3583 * This write notifies the controller that one or more IUs are
3584 * available to be processed.
3586 writel(iq_pi, queue_group->iq_pi[path]);
3589 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
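/*
 * Submission model used above: new requests are appended to the path's
 * request_list under submit_lock, then as many queued IUs as fit are copied
 * into the inbound ring.  An IU that spans the end of the ring is copied in
 * two pieces (tail of the element array, then the beginning).  The producer
 * index doorbell is written once, and only if at least one IU was queued.
 */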
3592 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
3594 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3595 struct completion *wait)
3600 if (wait_for_completion_io_timeout(wait,
3601 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
3606 pqi_check_ctrl_health(ctrl_info);
3607 if (pqi_ctrl_offline(ctrl_info)) {
3616 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3619 struct completion *waiting = context;
3624 static int pqi_submit_raid_request_synchronous_with_io_request(
3625 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3626 unsigned long timeout_msecs)
3629 DECLARE_COMPLETION_ONSTACK(wait);
3631 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3632 io_request->context = &wait;
3634 pqi_start_io(ctrl_info,
3635 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3638 if (timeout_msecs == NO_TIMEOUT) {
3639 pqi_wait_for_completion_io(ctrl_info, &wait);
} else {
3641 if (!wait_for_completion_io_timeout(&wait,
3642 msecs_to_jiffies(timeout_msecs))) {
3643 dev_warn(&ctrl_info->pci_dev->dev,
3644 "command timed out\n");
3652 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3653 struct pqi_iu_header *request, unsigned int flags,
3654 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3657 struct pqi_io_request *io_request;
3658 unsigned long start_jiffies;
3659 unsigned long msecs_blocked;
3663 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3664 * are mutually exclusive.
3667 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3668 if (down_interruptible(&ctrl_info->sync_request_sem))
3669 return -ERESTARTSYS;
3671 if (timeout_msecs == NO_TIMEOUT) {
3672 down(&ctrl_info->sync_request_sem);
3674 start_jiffies = jiffies;
3675 if (down_timeout(&ctrl_info->sync_request_sem,
3676 msecs_to_jiffies(timeout_msecs)))
3679 jiffies_to_msecs(jiffies - start_jiffies);
3680 if (msecs_blocked >= timeout_msecs)
3682 timeout_msecs -= msecs_blocked;
3686 pqi_ctrl_busy(ctrl_info);
3687 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
3688 if (timeout_msecs == 0) {
3693 if (pqi_ctrl_offline(ctrl_info)) {
3698 io_request = pqi_alloc_io_request(ctrl_info);
3700 put_unaligned_le16(io_request->index,
3701 &(((struct pqi_raid_path_request *)request)->request_id));
3703 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3704 ((struct pqi_raid_path_request *)request)->error_index =
3705 ((struct pqi_raid_path_request *)request)->request_id;
3707 iu_length = get_unaligned_le16(&request->iu_length) +
3708 PQI_REQUEST_HEADER_LENGTH;
3709 memcpy(io_request->iu, request, iu_length);
3711 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3712 io_request, timeout_msecs);
if (error_info) {
3715 if (io_request->error_info)
3716 memcpy(error_info, io_request->error_info,
3717 sizeof(*error_info));
		else
3719 memset(error_info, 0, sizeof(*error_info));
3720 } else if (rc == 0 && io_request->error_info) {
3722 struct pqi_raid_error_info *raid_error_info;
3724 raid_error_info = io_request->error_info;
3725 scsi_status = raid_error_info->status;
3727 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3728 raid_error_info->data_out_result ==
3729 PQI_DATA_IN_OUT_UNDERFLOW)
3730 scsi_status = SAM_STAT_GOOD;
3732 if (scsi_status != SAM_STAT_GOOD)
			rc = -EIO;
	}
3736 pqi_free_io_request(io_request);
3739 pqi_ctrl_unbusy(ctrl_info);
3740 up(&ctrl_info->sync_request_sem);
3745 static int pqi_validate_admin_response(
3746 struct pqi_general_admin_response *response, u8 expected_function_code)
3748 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
		return -EINVAL;
3751 if (get_unaligned_le16(&response->header.iu_length) !=
3752 PQI_GENERAL_ADMIN_IU_LENGTH)
		return -EINVAL;
3755 if (response->function_code != expected_function_code)
		return -EINVAL;
3758 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
		return -EINVAL;
	return 0;
3764 static int pqi_submit_admin_request_synchronous(
3765 struct pqi_ctrl_info *ctrl_info,
3766 struct pqi_general_admin_request *request,
3767 struct pqi_general_admin_response *response)
3771 pqi_submit_admin_request(ctrl_info, request);
3773 rc = pqi_poll_for_admin_response(ctrl_info, response);
3776 rc = pqi_validate_admin_response(response,
3777 request->function_code);
3782 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3785 struct pqi_general_admin_request request;
3786 struct pqi_general_admin_response response;
3787 struct pqi_device_capability *capability;
3788 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3790 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3794 memset(&request, 0, sizeof(request));
3796 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3797 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3798 &request.header.iu_length);
3799 request.function_code =
3800 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3801 put_unaligned_le32(sizeof(*capability),
3802 &request.data.report_device_capability.buffer_length);
3804 rc = pqi_map_single(ctrl_info->pci_dev,
3805 &request.data.report_device_capability.sg_descriptor,
3806 capability, sizeof(*capability),
3807 PCI_DMA_FROMDEVICE);
3811 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3814 pqi_pci_unmap(ctrl_info->pci_dev,
3815 &request.data.report_device_capability.sg_descriptor, 1,
3816 PCI_DMA_FROMDEVICE);
3821 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3826 ctrl_info->max_inbound_queues =
3827 get_unaligned_le16(&capability->max_inbound_queues);
3828 ctrl_info->max_elements_per_iq =
3829 get_unaligned_le16(&capability->max_elements_per_iq);
3830 ctrl_info->max_iq_element_length =
3831 get_unaligned_le16(&capability->max_iq_element_length) * 16;
3833 ctrl_info->max_outbound_queues =
3834 get_unaligned_le16(&capability->max_outbound_queues);
3835 ctrl_info->max_elements_per_oq =
3836 get_unaligned_le16(&capability->max_elements_per_oq);
3837 ctrl_info->max_oq_element_length =
3838 get_unaligned_le16(&capability->max_oq_element_length) * 16;
3841 sop_iu_layer_descriptor =
3842 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3844 ctrl_info->max_inbound_iu_length_per_firmware =
3846 get_unaligned_le16(&sop_iu_layer_descriptor->max_inbound_iu_length);
3847 ctrl_info->inbound_spanning_supported =
3848 sop_iu_layer_descriptor->inbound_spanning_supported;
3849 ctrl_info->outbound_spanning_supported =
3850 sop_iu_layer_descriptor->outbound_spanning_supported;
3858 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3860 if (ctrl_info->max_iq_element_length <
3861 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3862 dev_err(&ctrl_info->pci_dev->dev,
3863 "max. inbound queue element length of %d is less than the required length of %d\n",
3864 ctrl_info->max_iq_element_length,
3865 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3869 if (ctrl_info->max_oq_element_length <
3870 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3871 dev_err(&ctrl_info->pci_dev->dev,
3872 "max. outbound queue element length of %d is less than the required length of %d\n",
3873 ctrl_info->max_oq_element_length,
3874 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3878 if (ctrl_info->max_inbound_iu_length_per_firmware <
3879 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3880 dev_err(&ctrl_info->pci_dev->dev,
3881 "max. inbound IU length of %u is less than the min. required length of %d\n",
3882 ctrl_info->max_inbound_iu_length_per_firmware,
3883 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3887 if (!ctrl_info->inbound_spanning_supported) {
3888 dev_err(&ctrl_info->pci_dev->dev,
3889 "the controller does not support inbound spanning\n");
3893 if (ctrl_info->outbound_spanning_supported) {
3894 dev_err(&ctrl_info->pci_dev->dev,
3895 "the controller supports outbound spanning but this driver does not\n");
3902 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3905 struct pqi_event_queue *event_queue;
3906 struct pqi_general_admin_request request;
3907 struct pqi_general_admin_response response;
3909 event_queue = &ctrl_info->event_queue;
3912 * Create OQ (Outbound Queue - device to host queue) to dedicate
3915 memset(&request, 0, sizeof(request));
3916 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3917 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3918 &request.header.iu_length);
3919 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3920 put_unaligned_le16(event_queue->oq_id,
3921 &request.data.create_operational_oq.queue_id);
3922 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3923 &request.data.create_operational_oq.element_array_addr);
3924 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3925 &request.data.create_operational_oq.pi_addr);
3926 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3927 &request.data.create_operational_oq.num_elements);
3928 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3929 &request.data.create_operational_oq.element_length);
3930 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3931 put_unaligned_le16(event_queue->int_msg_num,
3932 &request.data.create_operational_oq.int_msg_num);
3934 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3939 event_queue->oq_ci = ctrl_info->iomem_base +
3940 PQI_DEVICE_REGISTERS_OFFSET +
3942 get_unaligned_le64(&response.data.create_operational_oq.oq_ci_offset);
3947 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
3948 unsigned int group_number)
3951 struct pqi_queue_group *queue_group;
3952 struct pqi_general_admin_request request;
3953 struct pqi_general_admin_response response;
3955 queue_group = &ctrl_info->queue_groups[group_number];
3958 * Create IQ (Inbound Queue - host to device queue) for
3961 memset(&request, 0, sizeof(request));
3962 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3963 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3964 &request.header.iu_length);
3965 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3966 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3967 &request.data.create_operational_iq.queue_id);
3969 put_unaligned_le64((u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3970 &request.data.create_operational_iq.element_array_addr);
3971 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3972 &request.data.create_operational_iq.ci_addr);
3973 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3974 &request.data.create_operational_iq.num_elements);
3975 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3976 &request.data.create_operational_iq.element_length);
3977 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3979 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3982 dev_err(&ctrl_info->pci_dev->dev,
3983 "error creating inbound RAID queue\n");
3987 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3988 PQI_DEVICE_REGISTERS_OFFSET +
3990 get_unaligned_le64(&response.data.create_operational_iq.iq_pi_offset);
3993 * Create IQ (Inbound Queue - host to device queue) for
3994 * Advanced I/O (AIO) path.
3996 memset(&request, 0, sizeof(request));
3997 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3998 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3999 &request.header.iu_length);
4000 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4001 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4002 &request.data.create_operational_iq.queue_id);
4003 put_unaligned_le64((u64)queue_group->
4004 iq_element_array_bus_addr[AIO_PATH],
4005 &request.data.create_operational_iq.element_array_addr);
4006 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4007 &request.data.create_operational_iq.ci_addr);
4008 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4009 &request.data.create_operational_iq.num_elements);
4010 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4011 &request.data.create_operational_iq.element_length);
4012 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4014 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4017 dev_err(&ctrl_info->pci_dev->dev,
4018 "error creating inbound AIO queue\n");
4022 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4023 PQI_DEVICE_REGISTERS_OFFSET +
4025 get_unaligned_le64(&response.data.create_operational_iq.iq_pi_offset);
4028 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4029 * assumed to be for RAID path I/O unless we change the queue's
4032 memset(&request, 0, sizeof(request));
4033 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4034 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4035 &request.header.iu_length);
4036 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4037 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4038 &request.data.change_operational_iq_properties.queue_id);
4039 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4040 &request.data.change_operational_iq_properties.vendor_specific);
4042 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4045 dev_err(&ctrl_info->pci_dev->dev,
4046 "error changing queue property\n");
4051 * Create OQ (Outbound Queue - device to host queue).
4053 memset(&request, 0, sizeof(request));
4054 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4055 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4056 &request.header.iu_length);
4057 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4058 put_unaligned_le16(queue_group->oq_id,
4059 &request.data.create_operational_oq.queue_id);
4060 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4061 &request.data.create_operational_oq.element_array_addr);
4062 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4063 &request.data.create_operational_oq.pi_addr);
4064 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4065 &request.data.create_operational_oq.num_elements);
4066 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4067 &request.data.create_operational_oq.element_length);
4068 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4069 put_unaligned_le16(queue_group->int_msg_num,
4070 &request.data.create_operational_oq.int_msg_num);
4072 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4075 dev_err(&ctrl_info->pci_dev->dev,
4076 "error creating outbound queue\n");
4080 queue_group->oq_ci = ctrl_info->iomem_base +
4081 PQI_DEVICE_REGISTERS_OFFSET +
4083 get_unaligned_le64(&response.data.create_operational_oq.oq_ci_offset);
4088 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4093 rc = pqi_create_event_queue(ctrl_info);
4095 dev_err(&ctrl_info->pci_dev->dev,
4096 "error creating event queue\n");
4100 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4101 rc = pqi_create_queue_group(ctrl_info, i);
4103 dev_err(&ctrl_info->pci_dev->dev,
4104 "error creating queue group number %u/%u\n",
4105 i, ctrl_info->num_queue_groups);
4113 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4114 (offsetof(struct pqi_event_config, descriptors) + \
4115 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4117 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4122 struct pqi_event_config *event_config;
4123 struct pqi_event_descriptor *event_descriptor;
4124 struct pqi_general_management_request request;
4126 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4131 memset(&request, 0, sizeof(request));
4133 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4134 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4135 data.report_event_configuration.sg_descriptors[1]) -
4136 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4137 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4138 &request.data.report_event_configuration.buffer_length);
4140 rc = pqi_map_single(ctrl_info->pci_dev,
4141 request.data.report_event_configuration.sg_descriptors,
4142 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4143 PCI_DMA_FROMDEVICE);
4147 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4148 0, NULL, NO_TIMEOUT);
4150 pqi_pci_unmap(ctrl_info->pci_dev,
4151 request.data.report_event_configuration.sg_descriptors, 1,
4152 PCI_DMA_FROMDEVICE);
4157 for (i = 0; i < event_config->num_event_descriptors; i++) {
4158 event_descriptor = &event_config->descriptors[i];
4159 if (enable_events &&
4160 pqi_is_supported_event(event_descriptor->event_type))
4161 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4162 &event_descriptor->oq_id);
4164 put_unaligned_le16(0, &event_descriptor->oq_id);
4167 memset(&request, 0, sizeof(request));
4169 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4170 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4171 data.report_event_configuration.sg_descriptors[1]) -
4172 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4173 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4174 &request.data.report_event_configuration.buffer_length);
4176 rc = pqi_map_single(ctrl_info->pci_dev,
4177 request.data.report_event_configuration.sg_descriptors,
4178 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, PCI_DMA_TODEVICE);
4183 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL, NO_TIMEOUT);
4186 pqi_pci_unmap(ctrl_info->pci_dev,
4187 request.data.report_event_configuration.sg_descriptors, 1, PCI_DMA_TODEVICE);
4191 kfree(event_config);
4196 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4198 return pqi_configure_events(ctrl_info, true);
4201 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4203 return pqi_configure_events(ctrl_info, false);
4206 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4210 size_t sg_chain_buffer_length;
4211 struct pqi_io_request *io_request;
4213 if (!ctrl_info->io_request_pool)
4216 dev = &ctrl_info->pci_dev->dev;
4217 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4218 io_request = ctrl_info->io_request_pool;
4220 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4221 kfree(io_request->iu);
4222 if (!io_request->sg_chain_buffer)
4224 dma_free_coherent(dev, sg_chain_buffer_length,
4225 io_request->sg_chain_buffer,
4226 io_request->sg_chain_buffer_dma_handle);
4230 kfree(ctrl_info->io_request_pool);
4231 ctrl_info->io_request_pool = NULL;
4234 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4236 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4237 ctrl_info->error_buffer_length,
4238 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4240 if (!ctrl_info->error_buffer)
4246 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4249 void *sg_chain_buffer;
4250 size_t sg_chain_buffer_length;
4251 dma_addr_t sg_chain_buffer_dma_handle;
4253 struct pqi_io_request *io_request;
4255 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
4256 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4258 if (!ctrl_info->io_request_pool) {
4259 dev_err(&ctrl_info->pci_dev->dev,
4260 "failed to allocate I/O request pool\n");
4264 dev = &ctrl_info->pci_dev->dev;
4265 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4266 io_request = ctrl_info->io_request_pool;
4268 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4270 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4272 if (!io_request->iu) {
4273 dev_err(&ctrl_info->pci_dev->dev,
4274 "failed to allocate IU buffers\n");
4278 sg_chain_buffer = dma_alloc_coherent(dev,
4279 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4282 if (!sg_chain_buffer) {
4283 dev_err(&ctrl_info->pci_dev->dev,
4284 "failed to allocate PQI scatter-gather chain buffers\n");
4288 io_request->index = i;
4289 io_request->sg_chain_buffer = sg_chain_buffer;
4290 io_request->sg_chain_buffer_dma_handle =
4291 sg_chain_buffer_dma_handle;
4298 pqi_free_all_io_requests(ctrl_info);
4304 * Calculate required resources that are sized based on max. outstanding
4305 * requests and max. transfer size.
4308 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4310 u32 max_transfer_size;
4313 ctrl_info->scsi_ml_can_queue =
4314 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4315 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4317 ctrl_info->error_buffer_length =
4318 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4321 max_transfer_size = min(ctrl_info->max_transfer_size,
4322 PQI_MAX_TRANSFER_SIZE_KDUMP);
4324 max_transfer_size = min(ctrl_info->max_transfer_size,
4325 PQI_MAX_TRANSFER_SIZE);
4327 max_sg_entries = max_transfer_size / PAGE_SIZE;
4329 /* +1 to cover when the buffer is not page-aligned. */
	max_sg_entries++;
4332 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4334 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4336 ctrl_info->sg_chain_buffer_length =
4337 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4338 PQI_EXTRA_SGL_MEMORY;
4339 ctrl_info->sg_tablesize = max_sg_entries;
4340 ctrl_info->max_sectors = max_transfer_size / 512;
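/*
 * Illustrative arithmetic for the sizing above (hypothetical values, assuming
 * 4 KiB pages and a 1 MiB max_transfer_size; real values come from the
 * controller): max_sg_entries = 1048576 / 4096 + 1 = 257.  If the controller
 * allows that many SG entries, the usable transfer size becomes
 * (257 - 1) * 4096 = 1 MiB and max_sectors = 1048576 / 512 = 2048.
 */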
4343 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4345 int num_queue_groups;
4346 u16 num_elements_per_iq;
4347 u16 num_elements_per_oq;
4349 if (reset_devices) {
4350 num_queue_groups = 1;
4353 int max_queue_groups;
4355 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4356 ctrl_info->max_outbound_queues - 1);
4357 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4359 num_cpus = num_online_cpus();
4360 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4361 num_queue_groups = min(num_queue_groups, max_queue_groups);
4364 ctrl_info->num_queue_groups = num_queue_groups;
4365 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
4368 * Make sure that the max. inbound IU length is an even multiple
4369 * of our inbound element length.
4371 ctrl_info->max_inbound_iu_length =
4372 (ctrl_info->max_inbound_iu_length_per_firmware /
4373 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4374 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4376 num_elements_per_iq =
4377 (ctrl_info->max_inbound_iu_length /
4378 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4380 /* Add one because one element in each queue is unusable. */
4381 num_elements_per_iq++;
4383 num_elements_per_iq = min(num_elements_per_iq,
4384 ctrl_info->max_elements_per_iq);
4386 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4387 num_elements_per_oq = min(num_elements_per_oq,
4388 ctrl_info->max_elements_per_oq);
4390 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4391 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4393 ctrl_info->max_sg_per_iu =
4394 ((ctrl_info->max_inbound_iu_length -
4395 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4396 sizeof(struct pqi_sg_descriptor)) +
4397 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
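/*
 * In short: the firmware-reported max inbound IU length is rounded down to a
 * whole number of IQ elements, the per-IQ element count is that quotient plus
 * the one permanently unusable ring slot (then clamped to
 * max_elements_per_iq), and each OQ is sized at roughly twice the IQ depth.
 * max_sg_per_iu is the number of SG descriptors that fit in the IU beyond the
 * first element, plus PQI_MAX_EMBEDDED_SG_DESCRIPTORS.
 */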
4400 static inline void pqi_set_sg_descriptor(
4401 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4403 u64 address = (u64)sg_dma_address(sg);
4404 unsigned int length = sg_dma_len(sg);
4406 put_unaligned_le64(address, &sg_descriptor->address);
4407 put_unaligned_le32(length, &sg_descriptor->length);
4408 put_unaligned_le32(0, &sg_descriptor->flags);
4411 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4412 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4413 struct pqi_io_request *io_request)
4419 unsigned int num_sg_in_iu;
4420 unsigned int max_sg_per_iu;
4421 struct scatterlist *sg;
4422 struct pqi_sg_descriptor *sg_descriptor;
4424 sg_count = scsi_dma_map(scmd);
4428 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4429 PQI_REQUEST_HEADER_LENGTH;
4434 sg = scsi_sglist(scmd);
4435 sg_descriptor = request->sg_descriptors;
4436 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4442 pqi_set_sg_descriptor(sg_descriptor, sg);
4449 if (i == max_sg_per_iu) {
4450 put_unaligned_le64(
4451 (u64)io_request->sg_chain_buffer_dma_handle,
4452 &sg_descriptor->address);
4453 put_unaligned_le32((sg_count - num_sg_in_iu)
4454 * sizeof(*sg_descriptor),
4455 &sg_descriptor->length);
4456 put_unaligned_le32(CISS_SG_CHAIN,
4457 &sg_descriptor->flags);
4460 sg_descriptor = io_request->sg_chain_buffer;
4465 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4466 request->partial = chained;
4467 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4470 put_unaligned_le16(iu_length, &request->header.iu_length);
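/*
 * SG chaining in brief: descriptors are written into the request's
 * embedded sg_descriptors array until the embedded space is nearly
 * exhausted; the next slot then becomes a CISS_SG_CHAIN descriptor that
 * points at the per-request sg_chain_buffer (DMA-mapped when the I/O
 * request pool was built), the remaining entries are written into that
 * chain buffer, and the final descriptor is flagged CISS_SG_LAST.
 * request->partial records whether a chain was used, and iu_length grows
 * only by the descriptors that live inside the IU itself.
 */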
4475 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4476 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4477 struct pqi_io_request *io_request)
4483 unsigned int num_sg_in_iu;
4484 unsigned int max_sg_per_iu;
4485 struct scatterlist *sg;
4486 struct pqi_sg_descriptor *sg_descriptor;
4488 sg_count = scsi_dma_map(scmd);
4492 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4493 PQI_REQUEST_HEADER_LENGTH;
4499 sg = scsi_sglist(scmd);
4500 sg_descriptor = request->sg_descriptors;
4501 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4506 pqi_set_sg_descriptor(sg_descriptor, sg);
4513 if (i == max_sg_per_iu) {
4514 put_unaligned_le64(
4515 (u64)io_request->sg_chain_buffer_dma_handle,
4516 &sg_descriptor->address);
4517 put_unaligned_le32((sg_count - num_sg_in_iu)
4518 * sizeof(*sg_descriptor),
4519 &sg_descriptor->length);
4520 put_unaligned_le32(CISS_SG_CHAIN,
4521 &sg_descriptor->flags);
4524 sg_descriptor = io_request->sg_chain_buffer;
4529 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4530 request->partial = chained;
4531 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4534 put_unaligned_le16(iu_length, &request->header.iu_length);
4535 request->num_sg_descriptors = num_sg_in_iu;
4540 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4543 struct scsi_cmnd *scmd;
4545 scmd = io_request->scmd;
4546 pqi_free_io_request(io_request);
4547 scsi_dma_unmap(scmd);
4548 pqi_scsi_done(scmd);
4551 static int pqi_raid_submit_scsi_cmd_with_io_request(
4552 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
4553 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4554 struct pqi_queue_group *queue_group)
4558 struct pqi_raid_path_request *request;
4560 io_request->io_complete_callback = pqi_raid_io_complete;
4561 io_request->scmd = scmd;
4563 request = io_request->iu;
4564 memset(request, 0,
4565 offsetof(struct pqi_raid_path_request, sg_descriptors));
4567 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4568 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4569 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4570 put_unaligned_le16(io_request->index, &request->request_id);
4571 request->error_index = request->request_id;
4572 memcpy(request->lun_number, device->scsi3addr,
4573 sizeof(request->lun_number));
4575 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4576 memcpy(request->cdb, scmd->cmnd, cdb_length);
4578 switch (cdb_length) {
4579 case 6:
4580 case 10:
4581 case 12:
4582 case 16:
4583 /* No bytes in the Additional CDB bytes field */
4584 request->additional_cdb_bytes_usage =
4585 SOP_ADDITIONAL_CDB_BYTES_0;
4586 break;
4587 case 20:
4588 /* 4 bytes in the Additional CDB bytes field */
4589 request->additional_cdb_bytes_usage =
4590 SOP_ADDITIONAL_CDB_BYTES_4;
4591 break;
4592 case 24:
4593 /* 8 bytes in the Additional CDB bytes field */
4594 request->additional_cdb_bytes_usage =
4595 SOP_ADDITIONAL_CDB_BYTES_8;
4596 break;
4597 case 28:
4598 /* 12 bytes in the Additional CDB bytes field */
4599 request->additional_cdb_bytes_usage =
4600 SOP_ADDITIONAL_CDB_BYTES_12;
4601 break;
4602 case 32:
4603 default:
4604 /* 16 bytes in the Additional CDB bytes field */
4605 request->additional_cdb_bytes_usage =
4606 SOP_ADDITIONAL_CDB_BYTES_16;
4607 break;
4608 }
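/*
 * SOP encodes CDBs longer than the basic 16 bytes via the
 * additional_cdb_bytes_usage field set above: CDB lengths of 20, 24, 28
 * and 32 bytes map to 4, 8, 12 and 16 additional bytes respectively, and
 * the common 6/10/12/16-byte CDBs use no additional bytes.
 */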
4610 switch (scmd->sc_data_direction) {
4611 case DMA_TO_DEVICE:
4612 request->data_direction = SOP_READ_FLAG;
4613 break;
4614 case DMA_FROM_DEVICE:
4615 request->data_direction = SOP_WRITE_FLAG;
4616 break;
4617 case DMA_NONE:
4618 request->data_direction = SOP_NO_DIRECTION_FLAG;
4619 break;
4620 case DMA_BIDIRECTIONAL:
4621 request->data_direction = SOP_BIDIRECTIONAL;
4622 break;
4623 default:
4624 dev_err(&ctrl_info->pci_dev->dev,
4625 "unknown data direction: %d\n",
4626 scmd->sc_data_direction);
4627 break;
4628 }
4630 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4632 pqi_free_io_request(io_request);
4633 return SCSI_MLQUEUE_HOST_BUSY;
4636 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4641 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4642 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4643 struct pqi_queue_group *queue_group)
4645 struct pqi_io_request *io_request;
4647 io_request = pqi_alloc_io_request(ctrl_info);
4649 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
4650 device, scmd, queue_group);
4653 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
4655 if (!pqi_ctrl_blocked(ctrl_info))
4656 schedule_work(&ctrl_info->raid_bypass_retry_work);
4659 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
4661 struct scsi_cmnd *scmd;
4662 struct pqi_scsi_dev *device;
4663 struct pqi_ctrl_info *ctrl_info;
4665 if (!io_request->raid_bypass)
4668 scmd = io_request->scmd;
4669 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
4671 if (host_byte(scmd->result) == DID_NO_CONNECT)
4674 device = scmd->device->hostdata;
4675 if (pqi_device_offline(device))
4678 ctrl_info = shost_to_hba(scmd->device->host);
4679 if (pqi_ctrl_offline(ctrl_info))
4685 static inline void pqi_add_to_raid_bypass_retry_list(
4686 struct pqi_ctrl_info *ctrl_info,
4687 struct pqi_io_request *io_request, bool at_head)
4689 unsigned long flags;
4691 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4692 if (at_head)
4693 list_add(&io_request->request_list_entry,
4694 &ctrl_info->raid_bypass_retry_list);
4695 else
4696 list_add_tail(&io_request->request_list_entry,
4697 &ctrl_info->raid_bypass_retry_list);
4698 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4701 static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
4704 struct scsi_cmnd *scmd;
4706 scmd = io_request->scmd;
4707 pqi_free_io_request(io_request);
4708 pqi_scsi_done(scmd);
4711 static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
4713 struct scsi_cmnd *scmd;
4714 struct pqi_ctrl_info *ctrl_info;
4716 io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
4717 scmd = io_request->scmd;
4719 ctrl_info = shost_to_hba(scmd->device->host);
4721 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
4722 pqi_schedule_bypass_retry(ctrl_info);
4725 static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
4727 struct scsi_cmnd *scmd;
4728 struct pqi_scsi_dev *device;
4729 struct pqi_ctrl_info *ctrl_info;
4730 struct pqi_queue_group *queue_group;
4732 scmd = io_request->scmd;
4733 device = scmd->device->hostdata;
4734 if (pqi_device_in_reset(device)) {
4735 pqi_free_io_request(io_request);
4736 set_host_byte(scmd, DID_RESET);
4737 pqi_scsi_done(scmd);
4741 ctrl_info = shost_to_hba(scmd->device->host);
4742 queue_group = io_request->queue_group;
4744 pqi_reinit_io_request(io_request);
4746 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
4747 device, scmd, queue_group);
4750 static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
4751 struct pqi_ctrl_info *ctrl_info)
4753 unsigned long flags;
4754 struct pqi_io_request *io_request;
4756 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4757 io_request = list_first_entry_or_null(
4758 &ctrl_info->raid_bypass_retry_list,
4759 struct pqi_io_request, request_list_entry);
4760 if (io_request)
4761 list_del(&io_request->request_list_entry);
4762 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4767 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
4770 struct pqi_io_request *io_request;
4772 pqi_ctrl_busy(ctrl_info);
4775 if (pqi_ctrl_blocked(ctrl_info))
4777 io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
4780 rc = pqi_retry_raid_bypass(io_request);
4782 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
4783 true);
4784 pqi_schedule_bypass_retry(ctrl_info);
4789 pqi_ctrl_unbusy(ctrl_info);
4792 static void pqi_raid_bypass_retry_worker(struct work_struct *work)
4794 struct pqi_ctrl_info *ctrl_info;
4796 ctrl_info = container_of(work, struct pqi_ctrl_info,
4797 raid_bypass_retry_work);
4798 pqi_retry_raid_bypass_requests(ctrl_info);
4801 static void pqi_clear_all_queued_raid_bypass_retries(
4802 struct pqi_ctrl_info *ctrl_info)
4804 unsigned long flags;
4806 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4807 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
4808 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
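/*
 * RAID bypass retry machinery, in brief: when an accelerated (bypass) I/O
 * fails, pqi_raid_bypass_retry_needed() decides whether it is worth
 * retrying; if so the request is parked on raid_bypass_retry_list and
 * raid_bypass_retry_work later resubmits it through the normal RAID path
 * via pqi_retry_raid_bypass().  When the controller is taken offline the
 * list is simply reinitialized, dropping any queued retries.
 */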
4811 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4814 struct scsi_cmnd *scmd;
4816 scmd = io_request->scmd;
4817 scsi_dma_unmap(scmd);
4818 if (io_request->status == -EAGAIN)
4819 set_host_byte(scmd, DID_IMM_RETRY);
4820 else if (pqi_raid_bypass_retry_needed(io_request)) {
4821 pqi_queue_raid_bypass_retry(io_request);
4822 return;
4823 }
4824 pqi_free_io_request(io_request);
4825 pqi_scsi_done(scmd);
4828 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4829 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4830 struct pqi_queue_group *queue_group)
4832 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
4833 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
4836 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4837 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4838 unsigned int cdb_length, struct pqi_queue_group *queue_group,
4839 struct pqi_encryption_info *encryption_info, bool raid_bypass)
4842 struct pqi_io_request *io_request;
4843 struct pqi_aio_path_request *request;
4845 io_request = pqi_alloc_io_request(ctrl_info);
4846 io_request->io_complete_callback = pqi_aio_io_complete;
4847 io_request->scmd = scmd;
4848 io_request->raid_bypass = raid_bypass;
4850 request = io_request->iu;
4851 memset(request, 0,
4852 offsetof(struct pqi_raid_path_request, sg_descriptors));
4854 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4855 put_unaligned_le32(aio_handle, &request->nexus_id);
4856 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4857 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4858 put_unaligned_le16(io_request->index, &request->request_id);
4859 request->error_index = request->request_id;
4860 if (cdb_length > sizeof(request->cdb))
4861 cdb_length = sizeof(request->cdb);
4862 request->cdb_length = cdb_length;
4863 memcpy(request->cdb, cdb, cdb_length);
4865 switch (scmd->sc_data_direction) {
4866 case DMA_TO_DEVICE:
4867 request->data_direction = SOP_READ_FLAG;
4868 break;
4869 case DMA_FROM_DEVICE:
4870 request->data_direction = SOP_WRITE_FLAG;
4871 break;
4872 case DMA_NONE:
4873 request->data_direction = SOP_NO_DIRECTION_FLAG;
4874 break;
4875 case DMA_BIDIRECTIONAL:
4876 request->data_direction = SOP_BIDIRECTIONAL;
4877 break;
4878 default:
4879 dev_err(&ctrl_info->pci_dev->dev,
4880 "unknown data direction: %d\n",
4881 scmd->sc_data_direction);
4882 break;
4883 }
4885 if (encryption_info) {
4886 request->encryption_enable = true;
4887 put_unaligned_le16(encryption_info->data_encryption_key_index,
4888 &request->data_encryption_key_index);
4889 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4890 &request->encrypt_tweak_lower);
4891 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4892 &request->encrypt_tweak_upper);
4895 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4897 pqi_free_io_request(io_request);
4898 return SCSI_MLQUEUE_HOST_BUSY;
4901 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4906 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
4907 struct scsi_cmnd *scmd)
4911 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4912 if (hw_queue > ctrl_info->max_hw_queue_index)
4913 hw_queue = 0;
4915 return hw_queue;
4919 * This function gets called just before we hand the completed SCSI request
4920 * back to the SCSI midlayer (SML).
4923 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
4925 struct pqi_scsi_dev *device;
4927 device = scmd->device->hostdata;
4928 atomic_dec(&device->scsi_cmds_outstanding);
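/*
 * scsi_cmds_outstanding is incremented in pqi_scsi_queue_command() and
 * decremented here (or on queuecommand's early-error paths); the LUN
 * reset and pending-I/O wait paths poll it to know when a device has
 * gone quiet.
 */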
4931 static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4932 struct scsi_cmnd *scmd)
4935 struct pqi_ctrl_info *ctrl_info;
4936 struct pqi_scsi_dev *device;
4938 struct pqi_queue_group *queue_group;
4941 device = scmd->device->hostdata;
4942 ctrl_info = shost_to_hba(shost);
4944 atomic_inc(&device->scsi_cmds_outstanding);
4946 if (pqi_ctrl_offline(ctrl_info)) {
4947 set_host_byte(scmd, DID_NO_CONNECT);
4948 pqi_scsi_done(scmd);
4952 pqi_ctrl_busy(ctrl_info);
4953 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
4954 rc = SCSI_MLQUEUE_HOST_BUSY;
4959 * This is necessary because the SML doesn't zero out this field during
4960 * error recovery.
4962 scmd->result = 0;
4964 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
4965 queue_group = &ctrl_info->queue_groups[hw_queue];
4967 if (pqi_is_logical_device(device)) {
4968 raid_bypassed = false;
4969 if (device->raid_bypass_enabled &&
4970 !blk_rq_is_passthrough(scmd->request)) {
4971 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4972 scmd, queue_group);
4973 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
4974 raid_bypassed = true;
4977 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4978 queue_group);
4980 if (device->aio_enabled)
4981 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4982 queue_group);
4983 else
4984 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4985 queue_group);
4989 pqi_ctrl_unbusy(ctrl_info);
4991 atomic_dec(&device->scsi_cmds_outstanding);
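/*
 * Dispatch summary for queuecommand: an offline controller fails the
 * command immediately with DID_NO_CONNECT; a blocked controller or a
 * device in reset returns SCSI_MLQUEUE_HOST_BUSY so the midlayer retries
 * later.  Logical volumes try the RAID bypass (AIO) path first when it is
 * enabled and the request is not a passthrough, falling back to the RAID
 * path otherwise; physical devices use the AIO path whenever aio_enabled
 * is set.
 */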
4996 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
4997 struct pqi_queue_group *queue_group)
5000 unsigned long flags;
5003 for (path = 0; path < 2; path++) {
5005 spin_lock_irqsave(
5006 &queue_group->submit_lock[path], flags);
5008 list_empty(&queue_group->request_list[path]);
5009 spin_unlock_irqrestore(
5010 &queue_group->submit_lock[path], flags);
5013 pqi_check_ctrl_health(ctrl_info);
5014 if (pqi_ctrl_offline(ctrl_info))
5016 usleep_range(1000, 2000);
5023 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5028 struct pqi_queue_group *queue_group;
5032 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5033 queue_group = &ctrl_info->queue_groups[i];
5035 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5039 for (path = 0; path < 2; path++) {
5040 iq_pi = queue_group->iq_pi_copy[path];
5043 iq_ci = *queue_group->iq_ci[path];
5046 pqi_check_ctrl_health(ctrl_info);
5047 if (pqi_ctrl_offline(ctrl_info))
5049 usleep_range(1000, 2000);
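/*
 * Quiescing happens in two steps: first wait for the per-queue-group
 * submit lists to drain (requests accepted by the driver but not yet
 * written to an inbound queue), then wait for each inbound queue's
 * consumer index to catch up with the driver's producer index, i.e. for
 * the controller to have consumed everything already posted.  Both loops
 * give up early if the controller is found to be offline.
 */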
5057 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5058 struct pqi_scsi_dev *device)
5062 struct pqi_queue_group *queue_group;
5063 unsigned long flags;
5064 struct pqi_io_request *io_request;
5065 struct pqi_io_request *next;
5066 struct scsi_cmnd *scmd;
5067 struct pqi_scsi_dev *scsi_device;
5069 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5070 queue_group = &ctrl_info->queue_groups[i];
5072 for (path = 0; path < 2; path++) {
5073 spin_lock_irqsave(
5074 &queue_group->submit_lock[path], flags);
5076 list_for_each_entry_safe(io_request, next,
5077 &queue_group->request_list[path],
5078 request_list_entry) {
5079 scmd = io_request->scmd;
5083 scsi_device = scmd->device->hostdata;
5084 if (scsi_device != device)
5087 list_del(&io_request->request_list_entry);
5088 set_host_byte(scmd, DID_RESET);
5089 pqi_scsi_done(scmd);
5092 spin_unlock_irqrestore(
5093 &queue_group->submit_lock[path], flags);
5098 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5099 struct pqi_scsi_dev *device)
5101 while (atomic_read(&device->scsi_cmds_outstanding)) {
5102 pqi_check_ctrl_health(ctrl_info);
5103 if (pqi_ctrl_offline(ctrl_info))
5105 usleep_range(1000, 2000);
5111 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
5114 unsigned long flags;
5115 struct pqi_scsi_dev *device;
5120 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5121 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5122 scsi_device_list_entry) {
5123 if (atomic_read(&device->scsi_cmds_outstanding)) {
5128 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5134 pqi_check_ctrl_health(ctrl_info);
5135 if (pqi_ctrl_offline(ctrl_info))
5138 usleep_range(1000, 2000);
5144 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
5147 struct completion *waiting = context;
5152 #define PQI_LUN_RESET_TIMEOUT_SECS 10
5154 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5155 struct pqi_scsi_dev *device, struct completion *wait)
5160 if (wait_for_completion_io_timeout(wait,
5161 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
5166 pqi_check_ctrl_health(ctrl_info);
5167 if (pqi_ctrl_offline(ctrl_info)) {
5176 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
5177 struct pqi_scsi_dev *device)
5180 struct pqi_io_request *io_request;
5181 DECLARE_COMPLETION_ONSTACK(wait);
5182 struct pqi_task_management_request *request;
5184 io_request = pqi_alloc_io_request(ctrl_info);
5185 io_request->io_complete_callback = pqi_lun_reset_complete;
5186 io_request->context = &wait;
5188 request = io_request->iu;
5189 memset(request, 0, sizeof(*request));
5191 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5192 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5193 &request->header.iu_length);
5194 put_unaligned_le16(io_request->index, &request->request_id);
5195 memcpy(request->lun_number, device->scsi3addr,
5196 sizeof(request->lun_number));
5197 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5199 pqi_start_io(ctrl_info,
5200 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5201 io_request);
5203 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5205 rc = io_request->status;
5207 pqi_free_io_request(io_request);
5212 /* Performs a reset at the LUN level. */
5214 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5215 struct pqi_scsi_dev *device)
5219 rc = pqi_lun_reset(ctrl_info, device);
5221 rc = pqi_device_wait_for_pending_io(ctrl_info, device);
5223 return rc == 0 ? SUCCESS : FAILED;
5226 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5229 struct Scsi_Host *shost;
5230 struct pqi_ctrl_info *ctrl_info;
5231 struct pqi_scsi_dev *device;
5233 shost = scmd->device->host;
5234 ctrl_info = shost_to_hba(shost);
5235 device = scmd->device->hostdata;
5237 dev_err(&ctrl_info->pci_dev->dev,
5238 "resetting scsi %d:%d:%d:%d\n",
5239 shost->host_no, device->bus, device->target, device->lun);
5241 pqi_check_ctrl_health(ctrl_info);
5242 if (pqi_ctrl_offline(ctrl_info)) {
5247 mutex_lock(&ctrl_info->lun_reset_mutex);
5249 pqi_ctrl_block_requests(ctrl_info);
5250 pqi_ctrl_wait_until_quiesced(ctrl_info);
5251 pqi_fail_io_queued_for_device(ctrl_info, device);
5252 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5253 pqi_device_reset_start(device);
5254 pqi_ctrl_unblock_requests(ctrl_info);
5259 rc = pqi_device_reset(ctrl_info, device);
5261 pqi_device_reset_done(device);
5263 mutex_unlock(&ctrl_info->lun_reset_mutex);
5266 dev_err(&ctrl_info->pci_dev->dev,
5267 "reset of scsi %d:%d:%d:%d: %s\n",
5268 shost->host_no, device->bus, device->target, device->lun,
5269 rc == SUCCESS ? "SUCCESS" : "FAILED");
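/*
 * Device reset sequence: serialize on lun_reset_mutex, block new
 * requests, wait for the controller to quiesce, fail any commands still
 * sitting on the submit queues for this device with DID_RESET, make sure
 * the inbound queues are empty, then issue a SOP LUN reset
 * task-management request, wait for its completion (polling controller
 * health on PQI_LUN_RESET_TIMEOUT_SECS intervals), and finally wait for
 * the device's outstanding-command count to reach zero.
 */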
5274 static int pqi_slave_alloc(struct scsi_device *sdev)
5276 struct pqi_scsi_dev *device;
5277 unsigned long flags;
5278 struct pqi_ctrl_info *ctrl_info;
5279 struct scsi_target *starget;
5280 struct sas_rphy *rphy;
5282 ctrl_info = shost_to_hba(sdev->host);
5284 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5286 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5287 starget = scsi_target(sdev);
5288 rphy = target_to_rphy(starget);
5289 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5291 device->target = sdev_id(sdev);
5292 device->lun = sdev->lun;
5293 device->target_lun_valid = true;
5296 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5297 sdev_id(sdev), sdev->lun);
5301 sdev->hostdata = device;
5302 device->sdev = sdev;
5303 if (device->queue_depth) {
5304 device->advertised_queue_depth = device->queue_depth;
5305 scsi_change_queue_depth(sdev,
5306 device->advertised_queue_depth);
5310 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5315 static int pqi_map_queues(struct Scsi_Host *shost)
5317 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5319 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0);
5322 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5325 struct pci_dev *pci_dev;
5326 u32 subsystem_vendor;
5327 u32 subsystem_device;
5328 cciss_pci_info_struct pciinfo;
5333 pci_dev = ctrl_info->pci_dev;
5335 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5336 pciinfo.bus = pci_dev->bus->number;
5337 pciinfo.dev_fn = pci_dev->devfn;
5338 subsystem_vendor = pci_dev->subsystem_vendor;
5339 subsystem_device = pci_dev->subsystem_device;
5340 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5341 subsystem_vendor;
5343 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5349 static int pqi_getdrivver_ioctl(void __user *arg)
5356 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5357 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
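/*
 * The packed value decodes as major.minor.release-revision; for the
 * current DRIVER_VERSION 1.1.4-115 this works out to
 * (1 << 28) | (1 << 24) | (4 << 16) | 115 = 0x11040073.
 */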
5359 if (copy_to_user(arg, &version, sizeof(version)))
5365 struct ciss_error_info {
5368 size_t sense_data_length;
5371 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5372 struct ciss_error_info *ciss_error_info)
5374 int ciss_cmd_status;
5375 size_t sense_data_length;
5377 switch (pqi_error_info->data_out_result) {
5378 case PQI_DATA_IN_OUT_GOOD:
5379 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5381 case PQI_DATA_IN_OUT_UNDERFLOW:
5382 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5384 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5385 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5387 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5388 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5389 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5390 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5391 case PQI_DATA_IN_OUT_ERROR:
5392 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5394 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5395 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5396 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5397 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5398 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5399 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5400 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5401 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5402 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5403 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5404 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5406 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5407 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5409 case PQI_DATA_IN_OUT_ABORTED:
5410 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5412 case PQI_DATA_IN_OUT_TIMEOUT:
5413 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5416 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5420 sense_data_length =
5421 get_unaligned_le16(&pqi_error_info->sense_data_length);
5422 if (sense_data_length == 0)
5423 sense_data_length =
5424 get_unaligned_le16(&pqi_error_info->response_data_length);
5425 if (sense_data_length)
5426 if (sense_data_length > sizeof(pqi_error_info->data))
5427 sense_data_length = sizeof(pqi_error_info->data);
5429 ciss_error_info->scsi_status = pqi_error_info->status;
5430 ciss_error_info->command_status = ciss_cmd_status;
5431 ciss_error_info->sense_data_length = sense_data_length;
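/*
 * Note the fallback above: if the firmware reported no sense data, the
 * response data length (if any) is used instead, and the result is
 * clamped to the size of the error-info data buffer before it is copied
 * out to the CISS error fields.
 */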
5434 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5437 char *kernel_buffer = NULL;
5439 size_t sense_data_length;
5440 IOCTL_Command_struct iocommand;
5441 struct pqi_raid_path_request request;
5442 struct pqi_raid_error_info pqi_error_info;
5443 struct ciss_error_info ciss_error_info;
5445 if (pqi_ctrl_offline(ctrl_info))
5449 if (!capable(CAP_SYS_RAWIO))
5451 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5453 if (iocommand.buf_size < 1 &&
5454 iocommand.Request.Type.Direction != XFER_NONE)
5456 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5458 if (iocommand.Request.Type.Type != TYPE_CMD)
5461 switch (iocommand.Request.Type.Direction) {
5465 case XFER_READ | XFER_WRITE:
5471 if (iocommand.buf_size > 0) {
5472 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
5475 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5476 if (copy_from_user(kernel_buffer, iocommand.buf,
5477 iocommand.buf_size)) {
5482 memset(kernel_buffer, 0, iocommand.buf_size);
5486 memset(&request, 0, sizeof(request));
5488 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5489 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5490 PQI_REQUEST_HEADER_LENGTH;
5491 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
5492 sizeof(request.lun_number));
5493 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
5494 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5496 switch (iocommand.Request.Type.Direction) {
5498 request.data_direction = SOP_NO_DIRECTION_FLAG;
5501 request.data_direction = SOP_WRITE_FLAG;
5504 request.data_direction = SOP_READ_FLAG;
5506 case XFER_READ | XFER_WRITE:
5507 request.data_direction = SOP_BIDIRECTIONAL;
5511 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5513 if (iocommand.buf_size > 0) {
5514 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
5516 rc = pqi_map_single(ctrl_info->pci_dev,
5517 &request.sg_descriptors[0], kernel_buffer,
5518 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
5522 iu_length += sizeof(request.sg_descriptors[0]);
5525 put_unaligned_le16(iu_length, &request.header.iu_length);
5527 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
5528 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
5530 if (iocommand.buf_size > 0)
5531 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
5532 PCI_DMA_BIDIRECTIONAL);
5534 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
5537 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
5538 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
5539 iocommand.error_info.CommandStatus =
5540 ciss_error_info.command_status;
5541 sense_data_length = ciss_error_info.sense_data_length;
5542 if (sense_data_length) {
5543 if (sense_data_length >
5544 sizeof(iocommand.error_info.SenseInfo))
5545 sense_data_length =
5546 sizeof(iocommand.error_info.SenseInfo);
5547 memcpy(iocommand.error_info.SenseInfo,
5548 pqi_error_info.data, sense_data_length);
5549 iocommand.error_info.SenseLen = sense_data_length;
5553 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
5558 if (rc == 0 && iocommand.buf_size > 0 &&
5559 (iocommand.Request.Type.Direction & XFER_READ)) {
5560 if (copy_to_user(iocommand.buf, kernel_buffer,
5561 iocommand.buf_size)) {
5567 kfree(kernel_buffer);
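/*
 * CCISS_PASSTHRU recap: validate the user's IOCTL_Command_struct, bounce
 * the data buffer through a kernel allocation, build a single-SG
 * RAID-path request around the caller's CDB, submit it synchronously,
 * translate the PQI error information back into CISS error fields
 * (including sense data), and copy everything back to user space.
 */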
5572 static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5575 struct pqi_ctrl_info *ctrl_info;
5577 ctrl_info = shost_to_hba(sdev->host);
5580 case CCISS_DEREGDISK:
5581 case CCISS_REGNEWDISK:
5583 rc = pqi_scan_scsi_devices(ctrl_info);
5585 case CCISS_GETPCIINFO:
5586 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5588 case CCISS_GETDRIVVER:
5589 rc = pqi_getdrivver_ioctl(arg);
5591 case CCISS_PASSTHRU:
5592 rc = pqi_passthru_ioctl(ctrl_info, arg);
5602 static ssize_t pqi_version_show(struct device *dev,
5603 struct device_attribute *attr, char *buffer)
5606 struct Scsi_Host *shost;
5607 struct pqi_ctrl_info *ctrl_info;
5609 shost = class_to_shost(dev);
5610 ctrl_info = shost_to_hba(shost);
5612 count += snprintf(buffer + count, PAGE_SIZE - count,
5613 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5615 count += snprintf(buffer + count, PAGE_SIZE - count,
5616 "firmware: %s\n", ctrl_info->firmware_version);
5621 static ssize_t pqi_host_rescan_store(struct device *dev,
5622 struct device_attribute *attr, const char *buffer, size_t count)
5624 struct Scsi_Host *shost = class_to_shost(dev);
5626 pqi_scan_start(shost);
5631 static ssize_t pqi_lockup_action_show(struct device *dev,
5632 struct device_attribute *attr, char *buffer)
5637 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
5638 if (pqi_lockup_actions[i].action == pqi_lockup_action)
5639 count += snprintf(buffer + count, PAGE_SIZE - count,
5640 "[%s] ", pqi_lockup_actions[i].name);
5641 else
5642 count += snprintf(buffer + count, PAGE_SIZE - count,
5643 "%s ", pqi_lockup_actions[i].name);
5646 count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
5651 static ssize_t pqi_lockup_action_store(struct device *dev,
5652 struct device_attribute *attr, const char *buffer, size_t count)
5656 char action_name_buffer[32];
5658 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
5659 action_name = strstrip(action_name_buffer);
5661 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
5662 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
5663 pqi_lockup_action = pqi_lockup_actions[i].action;
5671 static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
5672 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
5673 static DEVICE_ATTR(lockup_action, 0644,
5674 pqi_lockup_action_show, pqi_lockup_action_store);
5676 static struct device_attribute *pqi_shost_attrs[] = {
5679 &dev_attr_lockup_action,
5683 static ssize_t pqi_sas_address_show(struct device *dev,
5684 struct device_attribute *attr, char *buffer)
5686 struct pqi_ctrl_info *ctrl_info;
5687 struct scsi_device *sdev;
5688 struct pqi_scsi_dev *device;
5689 unsigned long flags;
5692 sdev = to_scsi_device(dev);
5693 ctrl_info = shost_to_hba(sdev->host);
5695 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5697 device = sdev->hostdata;
5698 if (pqi_is_logical_device(device)) {
5699 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5703 sas_address = device->sas_address;
5705 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5707 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5710 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5711 struct device_attribute *attr, char *buffer)
5713 struct pqi_ctrl_info *ctrl_info;
5714 struct scsi_device *sdev;
5715 struct pqi_scsi_dev *device;
5716 unsigned long flags;
5718 sdev = to_scsi_device(dev);
5719 ctrl_info = shost_to_hba(sdev->host);
5721 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5723 device = sdev->hostdata;
5724 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
5728 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5733 static ssize_t pqi_raid_level_show(struct device *dev,
5734 struct device_attribute *attr, char *buffer)
5736 struct pqi_ctrl_info *ctrl_info;
5737 struct scsi_device *sdev;
5738 struct pqi_scsi_dev *device;
5739 unsigned long flags;
5742 sdev = to_scsi_device(dev);
5743 ctrl_info = shost_to_hba(sdev->host);
5745 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5747 device = sdev->hostdata;
5749 if (pqi_is_logical_device(device))
5750 raid_level = pqi_raid_level_to_string(device->raid_level);
5754 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5756 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
5759 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
5760 static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
5761 pqi_ssd_smart_path_enabled_show, NULL);
5762 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
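/*
 * The attributes above are exported through sysfs; with the standard SCSI
 * layout these appear (paths illustrative) under
 * /sys/class/scsi_host/hostN/ for the host attributes (version, rescan,
 * lockup_action) and under each device's scsi_device directory for
 * sas_address, ssd_smart_path_enabled and raid_level.
 */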
5764 static struct device_attribute *pqi_sdev_attrs[] = {
5765 &dev_attr_sas_address,
5766 &dev_attr_ssd_smart_path_enabled,
5767 &dev_attr_raid_level,
5771 static struct scsi_host_template pqi_driver_template = {
5772 .module = THIS_MODULE,
5773 .name = DRIVER_NAME_SHORT,
5774 .proc_name = DRIVER_NAME_SHORT,
5775 .queuecommand = pqi_scsi_queue_command,
5776 .scan_start = pqi_scan_start,
5777 .scan_finished = pqi_scan_finished,
5779 .use_clustering = ENABLE_CLUSTERING,
5780 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5782 .slave_alloc = pqi_slave_alloc,
5783 .map_queues = pqi_map_queues,
5784 .sdev_attrs = pqi_sdev_attrs,
5785 .shost_attrs = pqi_shost_attrs,
5788 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5791 struct Scsi_Host *shost;
5793 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5794 if (!shost) {
5795 dev_err(&ctrl_info->pci_dev->dev,
5796 "scsi_host_alloc failed for controller %u\n",
5797 ctrl_info->ctrl_id);
5802 shost->n_io_port = 0;
5803 shost->this_id = -1;
5804 shost->max_channel = PQI_MAX_BUS;
5805 shost->max_cmd_len = MAX_COMMAND_SIZE;
5806 shost->max_lun = ~0;
5808 shost->max_sectors = ctrl_info->max_sectors;
5809 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5810 shost->cmd_per_lun = shost->can_queue;
5811 shost->sg_tablesize = ctrl_info->sg_tablesize;
5812 shost->transportt = pqi_sas_transport_template;
5813 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
5814 shost->unique_id = shost->irq;
5815 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5816 shost->hostdata[0] = (unsigned long)ctrl_info;
5818 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5820 dev_err(&ctrl_info->pci_dev->dev,
5821 "scsi_add_host failed for controller %u\n",
5822 ctrl_info->ctrl_id);
5826 rc = pqi_add_sas_host(shost, ctrl_info);
5828 dev_err(&ctrl_info->pci_dev->dev,
5829 "add SAS host failed for controller %u\n",
5830 ctrl_info->ctrl_id);
5834 ctrl_info->scsi_host = shost;
5839 scsi_remove_host(shost);
5841 scsi_host_put(shost);
5846 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5848 struct Scsi_Host *shost;
5850 pqi_delete_sas_host(ctrl_info);
5852 shost = ctrl_info->scsi_host;
5856 scsi_remove_host(shost);
5857 scsi_host_put(shost);
5860 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
5863 struct pqi_device_registers __iomem *pqi_registers;
5864 unsigned long timeout;
5865 unsigned int timeout_msecs;
5866 union pqi_reset_register reset_reg;
5868 pqi_registers = ctrl_info->pqi_registers;
5869 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
5870 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
5873 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
5874 reset_reg.all_bits = readl(&pqi_registers->device_reset);
5875 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
5877 pqi_check_ctrl_health(ctrl_info);
5878 if (pqi_ctrl_offline(ctrl_info)) {
5882 if (time_after(jiffies, timeout)) {
5891 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5894 union pqi_reset_register reset_reg;
5896 if (ctrl_info->pqi_reset_quiesce_supported) {
5897 rc = sis_pqi_reset_quiesce(ctrl_info);
5899 dev_err(&ctrl_info->pci_dev->dev,
5900 "PQI reset failed during quiesce with error %d\n",
5906 reset_reg.all_bits = 0;
5907 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
5908 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
5910 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
5912 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
5914 dev_err(&ctrl_info->pci_dev->dev,
5915 "PQI reset failed with error %d\n", rc);
5920 static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5923 struct bmic_identify_controller *identify;
5925 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5929 rc = pqi_identify_controller(ctrl_info, identify);
5933 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5934 sizeof(identify->firmware_version));
5935 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5936 snprintf(ctrl_info->firmware_version +
5937 strlen(ctrl_info->firmware_version),
5938 sizeof(ctrl_info->firmware_version),
5939 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5947 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
5951 void __iomem *table_iomem_addr;
5952 struct pqi_config_table *config_table;
5953 struct pqi_config_table_section_header *section;
5955 table_length = ctrl_info->config_table_length;
5957 config_table = kmalloc(table_length, GFP_KERNEL);
5958 if (!config_table) {
5959 dev_err(&ctrl_info->pci_dev->dev,
5960 "failed to allocate memory for PQI configuration table\n");
5965 * Copy the config table contents from I/O memory space into the
5966 * kernel memory buffer allocated above.
5968 table_iomem_addr = ctrl_info->iomem_base +
5969 ctrl_info->config_table_offset;
5970 memcpy_fromio(config_table, table_iomem_addr, table_length);
5972 section_offset =
5973 get_unaligned_le32(&config_table->first_section_offset);
5975 while (section_offset) {
5976 section = (void *)config_table + section_offset;
5978 switch (get_unaligned_le16(&section->section_id)) {
5979 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
5980 if (pqi_disable_heartbeat)
5981 dev_warn(&ctrl_info->pci_dev->dev,
5982 "heartbeat disabled by module parameter\n");
5984 ctrl_info->heartbeat_counter =
5988 struct pqi_config_table_heartbeat,
5993 section_offset =
5994 get_unaligned_le16(&section->next_section_offset);
5997 kfree(config_table);
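/*
 * The config table is a simple chain of sections: first_section_offset
 * points at the first pqi_config_table_section_header, each header's
 * next_section_offset points at the following one, and an offset of zero
 * ends the walk.  Only the heartbeat section is consumed here; its
 * heartbeat_counter is remembered as a pointer into the ioremapped BAR so
 * the heartbeat timer can watch the firmware increment it.
 */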
6002 /* Switches the controller from PQI mode back into SIS mode. */
6004 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
6008 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
6009 rc = pqi_reset(ctrl_info);
6012 rc = sis_reenable_sis_mode(ctrl_info);
6014 dev_err(&ctrl_info->pci_dev->dev,
6015 "re-enabling SIS mode failed with error %d\n", rc);
6018 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
6024 * If the controller isn't already in SIS mode, this function forces it into
6025 * SIS mode.
6028 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
6030 if (!sis_is_firmware_running(ctrl_info))
6033 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
6036 if (sis_is_kernel_up(ctrl_info)) {
6037 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
6041 return pqi_revert_to_sis_mode(ctrl_info);
6044 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
6048 rc = pqi_force_sis_mode(ctrl_info);
6053 * Wait until the controller is ready to start accepting SIS
6054 * commands.
6056 rc = sis_wait_for_ctrl_ready(ctrl_info);
6061 * Get the controller properties. This allows us to determine
6062 * whether or not it supports PQI mode.
6064 rc = sis_get_ctrl_properties(ctrl_info);
6066 dev_err(&ctrl_info->pci_dev->dev,
6067 "error obtaining controller properties\n");
6071 rc = sis_get_pqi_capabilities(ctrl_info);
6073 dev_err(&ctrl_info->pci_dev->dev,
6074 "error obtaining controller capabilities\n");
6078 if (reset_devices) {
6079 if (ctrl_info->max_outstanding_requests >
6080 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
6081 ctrl_info->max_outstanding_requests =
6082 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
6084 if (ctrl_info->max_outstanding_requests >
6085 PQI_MAX_OUTSTANDING_REQUESTS)
6086 ctrl_info->max_outstanding_requests =
6087 PQI_MAX_OUTSTANDING_REQUESTS;
6090 pqi_calculate_io_resources(ctrl_info);
6092 rc = pqi_alloc_error_buffer(ctrl_info);
6094 dev_err(&ctrl_info->pci_dev->dev,
6095 "failed to allocate PQI error buffer\n");
6100 * If the function we are about to call succeeds, the
6101 * controller will transition from legacy SIS mode
6102 * into PQI mode.
6104 rc = sis_init_base_struct_addr(ctrl_info);
6106 dev_err(&ctrl_info->pci_dev->dev,
6107 "error initializing PQI mode\n");
6111 /* Wait for the controller to complete the SIS -> PQI transition. */
6112 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6114 dev_err(&ctrl_info->pci_dev->dev,
6115 "transition to PQI mode failed\n");
6119 /* From here on, we are running in PQI mode. */
6120 ctrl_info->pqi_mode_enabled = true;
6121 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6123 rc = pqi_process_config_table(ctrl_info);
6127 rc = pqi_alloc_admin_queues(ctrl_info);
6129 dev_err(&ctrl_info->pci_dev->dev,
6130 "failed to allocate admin queues\n");
6134 rc = pqi_create_admin_queues(ctrl_info);
6136 dev_err(&ctrl_info->pci_dev->dev,
6137 "error creating admin queues\n");
6141 rc = pqi_report_device_capability(ctrl_info);
6143 dev_err(&ctrl_info->pci_dev->dev,
6144 "obtaining device capability failed\n");
6148 rc = pqi_validate_device_capability(ctrl_info);
6152 pqi_calculate_queue_resources(ctrl_info);
6154 rc = pqi_enable_msix_interrupts(ctrl_info);
6158 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
6159 ctrl_info->max_msix_vectors =
6160 ctrl_info->num_msix_vectors_enabled;
6161 pqi_calculate_queue_resources(ctrl_info);
6164 rc = pqi_alloc_io_resources(ctrl_info);
6168 rc = pqi_alloc_operational_queues(ctrl_info);
6170 dev_err(&ctrl_info->pci_dev->dev,
6171 "failed to allocate operational queues\n");
6175 pqi_init_operational_queues(ctrl_info);
6177 rc = pqi_request_irqs(ctrl_info);
6181 rc = pqi_create_queues(ctrl_info);
6185 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6187 ctrl_info->controller_online = true;
6188 pqi_start_heartbeat_timer(ctrl_info);
6190 rc = pqi_enable_events(ctrl_info);
6192 dev_err(&ctrl_info->pci_dev->dev,
6193 "error enabling events\n");
6197 /* Register with the SCSI subsystem. */
6198 rc = pqi_register_scsi(ctrl_info);
6202 rc = pqi_get_ctrl_firmware_version(ctrl_info);
6204 dev_err(&ctrl_info->pci_dev->dev,
6205 "error obtaining firmware version\n");
6209 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6211 dev_err(&ctrl_info->pci_dev->dev,
6212 "error updating host wellness\n");
6216 pqi_schedule_update_time_worker(ctrl_info);
6218 pqi_scan_scsi_devices(ctrl_info);
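/*
 * Bring-up order matters here: force SIS mode, learn the controller's
 * limits over the SIS interface, size the I/O slots and error buffer,
 * hand the controller its base-structure address (which flips it into
 * PQI mode), create the admin queues, negotiate device capability, size
 * and create the operational queues and MSI-X vectors, and only then
 * register with the SCSI midlayer and kick off the first device scan.
 */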
6223 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
6226 struct pqi_admin_queues *admin_queues;
6227 struct pqi_event_queue *event_queue;
6229 admin_queues = &ctrl_info->admin_queues;
6230 admin_queues->iq_pi_copy = 0;
6231 admin_queues->oq_ci_copy = 0;
6232 *admin_queues->oq_pi = 0;
6234 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6235 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
6236 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
6237 ctrl_info->queue_groups[i].oq_ci_copy = 0;
6239 *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0;
6240 *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0;
6241 *ctrl_info->queue_groups[i].oq_pi = 0;
6244 event_queue = &ctrl_info->event_queue;
6245 *event_queue->oq_pi = 0;
6246 event_queue->oq_ci_copy = 0;
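/*
 * On resume the queue memory allocated at probe time is reused, but every
 * producer/consumer index (both the driver-side copies and the shared
 * in-memory ones) must be zeroed so that they match the freshly reset
 * controller's view before the queues are recreated.
 */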
6249 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
6253 rc = pqi_force_sis_mode(ctrl_info);
6258 * Wait until the controller is ready to start accepting SIS
6259 * commands.
6261 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
6266 * If the function we are about to call succeeds, the
6267 * controller will transition from legacy SIS mode
6268 * into PQI mode.
6270 rc = sis_init_base_struct_addr(ctrl_info);
6272 dev_err(&ctrl_info->pci_dev->dev,
6273 "error initializing PQI mode\n");
6277 /* Wait for the controller to complete the SIS -> PQI transition. */
6278 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6280 dev_err(&ctrl_info->pci_dev->dev,
6281 "transition to PQI mode failed\n");
6285 /* From here on, we are running in PQI mode. */
6286 ctrl_info->pqi_mode_enabled = true;
6287 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6289 pqi_reinit_queues(ctrl_info);
6291 rc = pqi_create_admin_queues(ctrl_info);
6293 dev_err(&ctrl_info->pci_dev->dev,
6294 "error creating admin queues\n");
6298 rc = pqi_create_queues(ctrl_info);
6302 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6304 ctrl_info->controller_online = true;
6305 pqi_start_heartbeat_timer(ctrl_info);
6306 pqi_ctrl_unblock_requests(ctrl_info);
6308 rc = pqi_enable_events(ctrl_info);
6310 dev_err(&ctrl_info->pci_dev->dev,
6311 "error enabling events\n");
6315 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6317 dev_err(&ctrl_info->pci_dev->dev,
6318 "error updating host wellness\n");
6322 pqi_schedule_update_time_worker(ctrl_info);
6324 pqi_scan_scsi_devices(ctrl_info);
6329 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
6332 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
6333 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
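/*
 * This adjusts the Completion Timeout Value field in the controller's
 * PCIe Device Control 2 register; the 0x6 encoding used by the caller
 * selects the 65 ms - 210 ms range, so the controller waits considerably
 * longer than the default before it gives up on a completion for a
 * request it has issued.
 */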
6336 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
6341 rc = pci_enable_device(ctrl_info->pci_dev);
6343 dev_err(&ctrl_info->pci_dev->dev,
6344 "failed to enable PCI device\n");
6348 if (sizeof(dma_addr_t) > 4)
6349 mask = DMA_BIT_MASK(64);
6350 else
6351 mask = DMA_BIT_MASK(32);
6353 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
6355 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
6356 goto disable_device;
6359 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
6361 dev_err(&ctrl_info->pci_dev->dev,
6362 "failed to obtain PCI resources\n");
6363 goto disable_device;
6366 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
6367 ctrl_info->pci_dev, 0),
6368 sizeof(struct pqi_ctrl_registers));
6369 if (!ctrl_info->iomem_base) {
6370 dev_err(&ctrl_info->pci_dev->dev,
6371 "failed to map memory for controller registers\n");
6373 goto release_regions;
6376 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
6378 /* Increase the PCIe completion timeout. */
6379 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
6380 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
6382 dev_err(&ctrl_info->pci_dev->dev,
6383 "failed to set PCIe completion timeout\n");
6384 goto release_regions;
6387 /* Enable bus mastering. */
6388 pci_set_master(ctrl_info->pci_dev);
6390 ctrl_info->registers = ctrl_info->iomem_base;
6391 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
6393 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
6398 pci_release_regions(ctrl_info->pci_dev);
6400 pci_disable_device(ctrl_info->pci_dev);
6405 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
6407 iounmap(ctrl_info->iomem_base);
6408 pci_release_regions(ctrl_info->pci_dev);
6409 if (pci_is_enabled(ctrl_info->pci_dev))
6410 pci_disable_device(ctrl_info->pci_dev);
6411 pci_set_drvdata(ctrl_info->pci_dev, NULL);
6414 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
6416 struct pqi_ctrl_info *ctrl_info;
6418 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
6419 GFP_KERNEL, numa_node);
6423 mutex_init(&ctrl_info->scan_mutex);
6424 mutex_init(&ctrl_info->lun_reset_mutex);
6426 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
6427 spin_lock_init(&ctrl_info->scsi_device_list_lock);
6429 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
6430 atomic_set(&ctrl_info->num_interrupts, 0);
6432 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
6433 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
6435 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
6436 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
6438 sema_init(&ctrl_info->sync_request_sem,
6439 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
6440 init_waitqueue_head(&ctrl_info->block_requests_wait);
6442 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
6443 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
6444 INIT_WORK(&ctrl_info->raid_bypass_retry_work,
6445 pqi_raid_bypass_retry_worker);
6447 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
6448 ctrl_info->irq_mode = IRQ_MODE_NONE;
6449 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
6454 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
6459 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
6461 pqi_free_irqs(ctrl_info);
6462 pqi_disable_msix_interrupts(ctrl_info);
6465 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
6467 pqi_stop_heartbeat_timer(ctrl_info);
6468 pqi_free_interrupts(ctrl_info);
6469 if (ctrl_info->queue_memory_base)
6470 dma_free_coherent(&ctrl_info->pci_dev->dev,
6471 ctrl_info->queue_memory_length,
6472 ctrl_info->queue_memory_base,
6473 ctrl_info->queue_memory_base_dma_handle);
6474 if (ctrl_info->admin_queue_memory_base)
6475 dma_free_coherent(&ctrl_info->pci_dev->dev,
6476 ctrl_info->admin_queue_memory_length,
6477 ctrl_info->admin_queue_memory_base,
6478 ctrl_info->admin_queue_memory_base_dma_handle);
6479 pqi_free_all_io_requests(ctrl_info);
6480 if (ctrl_info->error_buffer)
6481 dma_free_coherent(&ctrl_info->pci_dev->dev,
6482 ctrl_info->error_buffer_length,
6483 ctrl_info->error_buffer,
6484 ctrl_info->error_buffer_dma_handle);
6485 if (ctrl_info->iomem_base)
6486 pqi_cleanup_pci_init(ctrl_info);
6487 pqi_free_ctrl_info(ctrl_info);
6490 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
6492 pqi_cancel_rescan_worker(ctrl_info);
6493 pqi_cancel_update_time_worker(ctrl_info);
6494 pqi_remove_all_scsi_devices(ctrl_info);
6495 pqi_unregister_scsi(ctrl_info);
6496 if (ctrl_info->pqi_mode_enabled)
6497 pqi_revert_to_sis_mode(ctrl_info);
6498 pqi_free_ctrl_resources(ctrl_info);
6501 static void pqi_perform_lockup_action(void)
6503 switch (pqi_lockup_action) {
6505 panic("FATAL: Smart Family Controller lockup detected");
6508 emergency_restart();
6516 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
6517 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
6518 .status = SAM_STAT_CHECK_CONDITION,
6521 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
6524 struct pqi_io_request *io_request;
6525 struct scsi_cmnd *scmd;
6527 for (i = 0; i < ctrl_info->max_io_slots; i++) {
6528 io_request = &ctrl_info->io_request_pool[i];
6529 if (atomic_read(&io_request->refcount) == 0)
6532 scmd = io_request->scmd;
6534 set_host_byte(scmd, DID_NO_CONNECT);
6536 io_request->status = -ENXIO;
6537 io_request->error_info =
6538 &pqi_ctrl_offline_raid_error_info;
6541 io_request->io_complete_callback(io_request,
6542 io_request->context);
6546 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
6548 pqi_perform_lockup_action();
6549 pqi_stop_heartbeat_timer(ctrl_info);
6550 pqi_free_interrupts(ctrl_info);
6551 pqi_cancel_rescan_worker(ctrl_info);
6552 pqi_cancel_update_time_worker(ctrl_info);
6553 pqi_ctrl_wait_until_quiesced(ctrl_info);
6554 pqi_fail_all_outstanding_requests(ctrl_info);
6555 pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
6556 pqi_ctrl_unblock_requests(ctrl_info);
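/*
 * Taking a controller offline is split in two: pqi_take_ctrl_offline()
 * (below) runs in whatever context noticed the failure, marks the
 * controller offline, blocks new requests and (optionally) shuts the
 * firmware down, then defers the rest to a work item.  The deferred path
 * above performs the configured lockup action (none/reboot/panic), stops
 * the heartbeat timer and interrupts, fails every outstanding request
 * (DID_NO_CONNECT for SCSI commands, a synthetic hardware-error check
 * condition for internal requests), and finally unblocks requests so
 * anything that still arrives fails fast in queuecommand.
 */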
6559 static void pqi_ctrl_offline_worker(struct work_struct *work)
6561 struct pqi_ctrl_info *ctrl_info;
6563 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
6564 pqi_take_ctrl_offline_deferred(ctrl_info);
6567 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
6569 if (!ctrl_info->controller_online)
6572 ctrl_info->controller_online = false;
6573 ctrl_info->pqi_mode_enabled = false;
6574 pqi_ctrl_block_requests(ctrl_info);
6575 if (!pqi_disable_ctrl_shutdown)
6576 sis_shutdown_ctrl(ctrl_info);
6577 pci_disable_device(ctrl_info->pci_dev);
6578 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
6579 schedule_work(&ctrl_info->ctrl_offline_work);
6582 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
6583 const struct pci_device_id *id)
6585 char *ctrl_description;
6587 if (id->driver_data)
6588 ctrl_description = (char *)id->driver_data;
6590 ctrl_description = "Microsemi Smart Family Controller";
6592 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
6595 static int pqi_pci_probe(struct pci_dev *pci_dev,
6596 const struct pci_device_id *id)
6600 struct pqi_ctrl_info *ctrl_info;
6602 pqi_print_ctrl_info(pci_dev, id);
6604 if (pqi_disable_device_id_wildcards &&
6605 id->subvendor == PCI_ANY_ID &&
6606 id->subdevice == PCI_ANY_ID) {
6607 dev_warn(&pci_dev->dev,
6608 "controller not probed because device ID wildcards are disabled\n");
6612 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
6613 dev_warn(&pci_dev->dev,
6614 "controller device ID matched using wildcards\n");
6616 node = dev_to_node(&pci_dev->dev);
6617 if (node == NUMA_NO_NODE)
6618 set_dev_node(&pci_dev->dev, 0);
6620 ctrl_info = pqi_alloc_ctrl_info(node);
6622 dev_err(&pci_dev->dev,
6623 "failed to allocate controller info block\n");
6627 ctrl_info->pci_dev = pci_dev;
6629 rc = pqi_pci_init(ctrl_info);
6633 rc = pqi_ctrl_init(ctrl_info);
6640 pqi_remove_ctrl(ctrl_info);
6645 static void pqi_pci_remove(struct pci_dev *pci_dev)
6647 struct pqi_ctrl_info *ctrl_info;
6649 ctrl_info = pci_get_drvdata(pci_dev);
6653 pqi_remove_ctrl(ctrl_info);
6656 static void pqi_shutdown(struct pci_dev *pci_dev)
6659 struct pqi_ctrl_info *ctrl_info;
6661 ctrl_info = pci_get_drvdata(pci_dev);
6666 * Write all data in the controller's battery-backed cache to
6667 * flash.
6669 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
6670 pqi_reset(ctrl_info);
6675 dev_warn(&pci_dev->dev,
6676 "unable to flush controller cache\n");
6679 static void pqi_process_lockup_action_param(void)
6683 if (!pqi_lockup_action_param)
6686 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6687 if (strcmp(pqi_lockup_action_param,
6688 pqi_lockup_actions[i].name) == 0) {
6689 pqi_lockup_action = pqi_lockup_actions[i].action;
6694 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
6695 DRIVER_NAME_SHORT, pqi_lockup_action_param);
6698 static void pqi_process_module_params(void)
6700 pqi_process_lockup_action_param();
6703 static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
6705 struct pqi_ctrl_info *ctrl_info;
6707 ctrl_info = pci_get_drvdata(pci_dev);
6709 pqi_disable_events(ctrl_info);
6710 pqi_cancel_update_time_worker(ctrl_info);
6711 pqi_cancel_rescan_worker(ctrl_info);
6712 pqi_wait_until_scan_finished(ctrl_info);
6713 pqi_wait_until_lun_reset_finished(ctrl_info);
6714 pqi_flush_cache(ctrl_info, SUSPEND);
6715 pqi_ctrl_block_requests(ctrl_info);
6716 pqi_ctrl_wait_until_quiesced(ctrl_info);
6717 pqi_wait_until_inbound_queues_empty(ctrl_info);
6718 pqi_ctrl_wait_for_pending_io(ctrl_info);
6719 pqi_stop_heartbeat_timer(ctrl_info);
6721 if (state.event == PM_EVENT_FREEZE)
6724 pci_save_state(pci_dev);
6725 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
6727 ctrl_info->controller_online = false;
6728 ctrl_info->pqi_mode_enabled = false;
6733 static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
6736 struct pqi_ctrl_info *ctrl_info;
6738 ctrl_info = pci_get_drvdata(pci_dev);
6740 if (pci_dev->current_state != PCI_D0) {
6741 ctrl_info->max_hw_queue_index = 0;
6742 pqi_free_interrupts(ctrl_info);
6743 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
6744 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
6745 IRQF_SHARED, DRIVER_NAME_SHORT,
6746 &ctrl_info->queue_groups[0]);
6748 dev_err(&ctrl_info->pci_dev->dev,
6749 "irq %u init failed with error %d\n",
6753 pqi_start_heartbeat_timer(ctrl_info);
6754 pqi_ctrl_unblock_requests(ctrl_info);
6758 pci_set_power_state(pci_dev, PCI_D0);
6759 pci_restore_state(pci_dev);
6761 return pqi_ctrl_init_resume(ctrl_info);
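/*
 * Suspend stops everything that can generate new work (events, rescan and
 * time-update workers, scans, LUN resets), flushes the controller cache,
 * blocks and drains all I/O, and stops the heartbeat timer; unless the
 * system is only freezing, PCI state is saved and the device is put into
 * the power state chosen by the PM core.  Resume either falls back to a
 * single legacy INTx vector and simply unblocks requests, or restores D0
 * and re-runs the PQI initialization sequence via pqi_ctrl_init_resume(),
 * depending on the power state the device comes back in.
 */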

/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0110) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0608) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0800) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0801) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0802) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0803) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0804) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0805) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0806) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0807) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0900) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0901) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0902) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0903) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0904) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0905) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0906) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0907) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0908) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x090a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1200) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1201) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1202) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1280) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1281) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1282) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1300) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1301) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1302) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1303) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1380) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_DELL, 0x1fe0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0600) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0601) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0602) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0603) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0609) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0650) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0651) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0652) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0653) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0654) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0655) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0700) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0701) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x1001) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x1100) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x1101) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_ANY_ID, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
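
/* PCI driver entry points; suspend/resume are compiled in only when CONFIG_PM is enabled. */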
static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
	.suspend = pqi_suspend,
	.resume = pqi_resume,
#endif
};
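
/*
 * Module load: attach the SAS transport template, apply module parameters,
 * then register the PCI driver; the transport is released again if PCI
 * registration fails.
 */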
static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");
	pqi_sas_transport_template =
		sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;
	pqi_process_module_params();
	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);
	return rc;
}

static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);
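
/*
 * Compile-time layout checks: BUILD_BUG_ON() fails the build if any of the
 * on-the-wire SIS/PQI/BMIC structures drifts from the offsets and sizes the
 * firmware interface requires.
 */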
static void __attribute__((unused)) verify_structures(void)
{
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		pqi_registers) != 0x4000);
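
	/* common PQI IU header */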
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_type) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_length) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		work_area) != 0x6);
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
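
	/* AIO path error information buffer */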
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
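
	/* RAID path error information buffer */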
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_transferred) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
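
	/* PQI device registers (admin queue configuration and legacy INTx control) */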
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
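
	/* general admin request/response IUs (report capability, create/delete operational queues) */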
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
		data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
		data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
		data.delete_operational_queue) != 64 - 11);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
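
	/* RAID path request IU */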
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
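
	/* AIO path request IU */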
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
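
	/* I/O response and general management request IUs */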
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);

	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);
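
	/* IU layer descriptor and device capability buffers */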
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
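
	/* event configuration, event response, event acknowledge, and task management IUs */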
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));

	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
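
	/* BMIC identify controller and identify physical device buffers */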
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
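
	/* admin and operational queue sizing limits */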
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}