// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"
#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif
#define DRIVER_VERSION		"1.2.10-025"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		2
#define DRIVER_RELEASE		10
#define DRIVER_REVISION		25

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))
MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
	u32 bytes_requested);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_secs);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1
static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);
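
/*
 * Action taken when controller firmware is detected as locked up.  The
 * default (NONE) leaves the controller offline; the table below maps the
 * lockup_action module-parameter strings to these actions.
 */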
enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{ NONE, "none" },
	{ REBOOT, "reboot" },
	{ PANIC, "panic" },
};
static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};
static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first,
	"Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep,
	"Hide the virtual SEP for direct attached drives.");
static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff
static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}
static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_device_reset = true;
}

static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_device_reset;
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}
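
/*
 * Wait for the controller to stop blocking new requests, up to
 * timeout_msecs (or indefinitely if NO_TIMEOUT); returns the number of
 * milliseconds remaining from the original timeout.
 */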
static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}
static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = true;
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = false;
}

static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_ofa;
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	return device->in_remove && !ctrl_info->in_shutdown;
}

static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_shutdown = true;
}

static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_shutdown;
}
static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;
	if (pqi_ctrl_in_ofa(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_work_sync(&ctrl_info->event_work);
}
static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->soft_reset_status)
		return 0;

	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
	u8 clear)
{
	u8 status;

	if (!ctrl_info->soft_reset_status)
		return;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~clear;
	writeb(status, ctrl_info->soft_reset_status);
}
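
/*
 * Map a single contiguous buffer for DMA and describe it with one SG
 * descriptor; CISS_SG_LAST marks it as the only element in the list.
 */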
static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
		else
			cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		/* fall through */
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		/* fall through */
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}
static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}
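
/*
 * Grab a free request slot from the pool.  The scan is intentionally
 * racy: the refcount is what actually claims a slot, and
 * next_io_request_slot is only a hint for where to start looking.
 */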
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}
static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, scsi3addr, buffer,
		buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		error_info, timeout_msecs);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL, NO_TIMEOUT);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info, NO_TIMEOUT);
}
static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}
static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}
static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}
int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}
#define PQI_FETCH_PTRAID_DATA		(1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}
static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}
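
/*
 * The host wellness buffer is a tagged byte stream consumed by the
 * controller firmware: a "<HW>" start tag, a two-byte field tag ("DV"
 * for driver version, "TD" for time/date), a little-endian length, the
 * payload, then "DW" (don't write) and "ZZ" (end) tags.
 */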
#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()
static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()
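
/*
 * time[] is BCD-encoded: hour, minute, second, a reserved byte, month,
 * day of month, century, and year within the century.
 */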
static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}
static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
		buffer_length);
}
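
/*
 * Issue REPORT LUNS twice: once with a header-sized buffer to learn the
 * list length, then again with a right-sized buffer; if the list grew
 * between the two calls, re-size the buffer and retry.
 */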
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}
static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}
static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
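
/*
 * Derive bus/target/lun from the 8-byte CISS LUN address: the controller
 * itself, logical volumes (internal or external RAID), and bare physical
 * devices each land on a different fixed bus number.
 */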
static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}
static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}
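
/*
 * Sanity-check a RAID map returned by the firmware before trusting it
 * for bypass I/O: minimum size, the expected mirror counts for RAID-1
 * and RAID-1(ADM), and a non-zero row size for RAID-50/60 layouts.
 */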
static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map),
		0, NULL, NO_TIMEOUT);
	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {

		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size,
			0, NULL, NO_TIMEOUT);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"Requested %d bytes, received %d bytes",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}
static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS		4
#define RAID_BYPASS_CONFIGURED		0x1
#define RAID_BYPASS_ENABLED		0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0)
		device->raid_bypass_enabled = true;

out:
	kfree(buffer);
}
/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}
#define PQI_INQUIRY_PAGE0_RETRIES	3

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	unsigned int retries;

	if (device->is_expander_smp_device)
		return 0;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	for (retries = 0;;) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
			buffer, 64);
		if (rc == 0)
			break;
		if (pqi_is_logical_device(device) ||
			rc != PQI_CMD_STATUS_ABORTED ||
			++retries > PQI_INQUIRY_PAGE0_RETRIES)
			goto out;
	}

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}
static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->box_index = id_phys->box_index;
	device->phys_box_on_bus = id_phys->phys_box_on_bus;
	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}
static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}
static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}
static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

#define PQI_PENDING_IO_TIMEOUT_SECS	20

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	pqi_device_remove_start(device);

	rc = pqi_device_wait_for_pending_io(ctrl_info, device,
		PQI_PENDING_IO_TIMEOUT_SECS);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun,
			atomic_read(&device->scsi_cmds_outstanding));

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}
/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		if (device->bus == bus && device->target == target &&
			device->lun == lun)
			return device;

	return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}
enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find,
	struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
			device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}
static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return "Enclosure SMP ";

	return scsi_device_type(device->devtype);
}
#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx", device->sas_address);

	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		pqi_device_type(device),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}
/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->is_expander_smp_device =
		new_device->is_expander_smp_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	existing_device->box_index = new_device->box_index;
	existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
	existing_device->phy_connected_dev_type =
		new_device->phy_connected_dev_type;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_to_mirror = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;
	existing_device->raid_bypass_configured =
		new_device->raid_bypass_configured;
	existing_device->raid_bypass_enabled =
		new_device->raid_bypass_enabled;
	existing_device->device_offline = false;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}
static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}
static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return device->sas_port != NULL;

	return device->sdev != NULL;
}
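
/*
 * Reconcile the freshly discovered device list with the driver's
 * internal list: update matching entries, queue vanished devices for
 * removal, and queue genuinely new devices for exposure to the SCSI
 * midlayer.
 */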
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	LIST_HEAD(add_list);
	LIST_HEAD(delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock. That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	if (pqi_ctrl_in_ofa(ctrl_info))
		pqi_ctrl_ofa_done(ctrl_info);

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list,
		delete_list_entry) {
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		} else {
			pqi_dev_info(ctrl_info, "removed", device);
		}
		if (pqi_is_device_added(device))
			pqi_remove_device(ctrl_info, device);
		list_del(&device->delete_list_entry);
		pqi_free_device(device);
	}

	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev && device->queue_depth !=
			device->advertised_queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(device->sdev,
				device->advertised_queue_depth);
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (!pqi_is_device_added(device)) {
			pqi_dev_info(ctrl_info, "added", device);
			rc = pqi_add_device(ctrl_info, device);
			if (rc) {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				pqi_fixup_botched_add(ctrl_info, device);
			}
		}
	}
}
static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	bool is_supported;

	if (device->is_expander_smp_device)
		return true;

	is_supported = false;

	switch (device->devtype) {
	case TYPE_DISK:
	case TYPE_ZBC:
	case TYPE_TAPE:
	case TYPE_MEDIUM_CHANGER:
	case TYPE_ENCLOSURE:
		is_supported = true;
		break;
	case TYPE_RAID:
		/*
		 * Only support the HBA controller itself as a RAID
		 * controller.  If it's a RAID controller other than
		 * the HBA itself (an external RAID controller, for
		 * example), we don't support it.
		 */
		if (pqi_is_hba_lunid(device->scsi3addr))
			is_supported = true;
		break;
	}

	return is_supported;
}

static inline bool pqi_skip_device(u8 *scsi3addr)
{
	/* Ignore all masked devices. */
	if (MASKED_DEVICE(scsi3addr))
		return true;

	return false;
}

static inline void pqi_mask_device(u8 *scsi3addr)
{
	scsi3addr[3] |= 0xc0;
}
static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
{
	if (!device->is_physical_device)
		return false;

	if (device->is_expander_smp_device)
		return true;

	switch (device->devtype) {
	case TYPE_DISK:
	case TYPE_ZBC:
	case TYPE_ENCLOSURE:
		return true;
	}

	return false;
}

static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device ||
		!pqi_skip_device(device->scsi3addr);
}
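
/*
 * Rebuild the driver's view of attached devices: fetch the physical and
 * logical LUN lists from the firmware, interrogate each device, then
 * hand the result to pqi_update_device_list() for reconciliation.
 */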
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	int rc;
	LIST_HEAD(new_device_list_head);
	struct report_phys_lun_extended *physdev_list = NULL;
	struct report_log_lun_extended *logdev_list = NULL;
	struct report_phys_lun_extended_entry *phys_lun_ext_entry;
	struct report_log_lun_extended_entry *log_lun_ext_entry;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 num_physicals;
	u32 num_logicals;
	struct pqi_scsi_dev **new_device_list = NULL;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	unsigned int num_new_devices;
	unsigned int num_valid_devices;
	bool is_physical_device;
	u8 *scsi3addr;
	unsigned int physical_index;
	unsigned int logical_index;
	static char *out_of_memory_msg =
		"failed to allocate memory, device discovery stopped";

	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
	if (rc)
		goto out;

	if (physdev_list)
		num_physicals =
			get_unaligned_be32(&physdev_list->header.list_length)
				/ sizeof(physdev_list->lun_entries[0]);
	else
		num_physicals = 0;

	if (logdev_list)
		num_logicals =
			get_unaligned_be32(&logdev_list->header.list_length)
				/ sizeof(logdev_list->lun_entries[0]);
	else
		num_logicals = 0;

	if (num_physicals) {
		/*
		 * We need this buffer for calls to pqi_get_physical_disk_info()
		 * below.  We allocate it here instead of inside
		 * pqi_get_physical_disk_info() because it's a fairly large
		 * buffer.
		 */
		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
		if (!id_phys) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}

		if (pqi_hide_vsep) {
			for (i = num_physicals - 1; i >= 0; i--) {
				phys_lun_ext_entry =
					&physdev_list->lun_entries[i];
				if (CISS_GET_DRIVE_NUMBER(
					phys_lun_ext_entry->lunid) ==
						PQI_VSEP_CISS_BTL) {
					pqi_mask_device(
						phys_lun_ext_entry->lunid);
					break;
				}
			}
		}
	}

	num_new_devices = num_physicals + num_logicals;

	new_device_list = kmalloc_array(num_new_devices,
					sizeof(*new_device_list),
					GFP_KERNEL);
	if (!new_device_list) {
		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_new_devices; i++) {
		device = kzalloc(sizeof(*device), GFP_KERNEL);
		if (!device) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
		list_add_tail(&device->new_device_list_entry,
			&new_device_list_head);
	}

	device = NULL;
	num_valid_devices = 0;
	physical_index = 0;
	logical_index = 0;

	for (i = 0; i < num_new_devices; i++) {

		if ((!pqi_expose_ld_first && i < num_physicals) ||
			(pqi_expose_ld_first && i >= num_logicals)) {
			is_physical_device = true;
			phys_lun_ext_entry =
				&physdev_list->lun_entries[physical_index++];
			log_lun_ext_entry = NULL;
			scsi3addr = phys_lun_ext_entry->lunid;
		} else {
			is_physical_device = false;
			phys_lun_ext_entry = NULL;
			log_lun_ext_entry =
				&logdev_list->lun_entries[logical_index++];
			scsi3addr = log_lun_ext_entry->lunid;
		}

		if (is_physical_device && pqi_skip_device(scsi3addr))
			continue;

		if (device)
			device = list_next_entry(device, new_device_list_entry);
		else
			device = list_first_entry(&new_device_list_head,
				struct pqi_scsi_dev, new_device_list_entry);

		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
		device->is_physical_device = is_physical_device;
		if (is_physical_device) {
			if (phys_lun_ext_entry->device_type ==
				SA_DEVICE_TYPE_EXPANDER_SMP)
				device->is_expander_smp_device = true;
		} else {
			device->is_external_raid_device =
				pqi_is_external_raid_addr(scsi3addr);
		}

		/* Gather information about the device. */
		rc = pqi_get_device_info(ctrl_info, device);
		if (rc == -ENOMEM) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			goto out;
		}
		if (rc) {
			if (device->is_physical_device)
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping physical device %016llx\n",
					get_unaligned_be64(
						&phys_lun_ext_entry->wwid));
			else
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping logical device %08x%08x\n",
					*((u32 *)&device->scsi3addr),
					*((u32 *)&device->scsi3addr[4]));
			rc = 0;
			continue;
		}

		if (!pqi_is_supported_device(device))
			continue;

		pqi_assign_bus_target_lun(device);

		if (device->is_physical_device) {
			device->wwid = phys_lun_ext_entry->wwid;
			if ((phys_lun_ext_entry->device_flags &
				CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
				phys_lun_ext_entry->aio_handle) {
				device->aio_enabled = true;
				device->aio_handle =
					phys_lun_ext_entry->aio_handle;
			}
			pqi_get_physical_disk_info(ctrl_info, device, id_phys);
		} else {
			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
				sizeof(device->volume_id));
		}

		if (pqi_is_device_with_sas_address(device))
			device->sas_address = get_unaligned_be64(&device->wwid);

		new_device_list[num_valid_devices++] = device;
	}

	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);

out:
	list_for_each_entry_safe(device, next, &new_device_list_head,
		new_device_list_entry) {
		if (device->keep_device)
			continue;
		list_del(&device->new_device_list_entry);
		pqi_free_device(device);
	}

	kfree(new_device_list);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);

	return rc;
}
static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;
	struct pqi_scsi_dev *device;

	while (1) {
		spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

		device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
			struct pqi_scsi_dev, scsi_device_list_entry);
		if (device)
			list_del(&device->scsi_device_list_entry);

		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);

		if (!device)
			break;

		if (pqi_is_device_added(device))
			pqi_remove_device(ctrl_info, device);
		pqi_free_device(device);
	}
}
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int rc = 0;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	if (!mutex_trylock(&ctrl_info->scan_mutex)) {
		pqi_schedule_rescan_worker_delayed(ctrl_info);
		rc = -EINPROGRESS;
	} else {
		rc = pqi_update_scsi_devices(ctrl_info);
		if (rc)
			pqi_schedule_rescan_worker_delayed(ctrl_info);
		mutex_unlock(&ctrl_info->scan_mutex);
	}

	return rc;
}
static void pqi_scan_start(struct Scsi_Host *shost)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(shost);
	if (pqi_ctrl_in_ofa(ctrl_info))
		return;

	pqi_scan_scsi_devices(ctrl_info);
}
/* Returns TRUE if scan is finished. */

static int pqi_scan_finished(struct Scsi_Host *shost,
	unsigned long elapsed_time)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_priv(shost);

	return !mutex_is_locked(&ctrl_info->scan_mutex);
}
static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->scan_mutex);
	mutex_unlock(&ctrl_info->scan_mutex);
}

static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}

static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}
static inline void pqi_set_encryption_info(
	struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
	u64 first_block)
{
	u32 volume_blk_size;

	/*
	 * Set the encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is equal to the LBA.
	 * For other block sizes, tweak value is (LBA * block size) / 512.
	 */
	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;

	encryption_info->data_encryption_key_index =
		get_unaligned_le16(&raid_map->data_encryption_key_index);
	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
}
/*
 * Attempt to perform RAID bypass mapping for a logical volume I/O.
 */

#define PQI_RAID_BYPASS_INELIGIBLE	1

static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	struct raid_map *raid_map;
	bool is_write = false;
	u32 map_index;
	u64 first_block;
	u64 last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row;
	u64 last_row;
	u32 first_row_offset;
	u32 last_row_offset;
	u32 first_column;
	u32 last_column;
	u64 r0_first_row;
	u64 r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row;
	u64 r5or6_last_row;
	u32 r5or6_first_row_offset;
	u32 r5or6_last_row_offset;
	u32 r5or6_first_column;
	u32 r5or6_last_column;
	u16 data_disks_per_row;
	u32 total_disks_per_row;
	u16 layout_map_count;
	u32 stripesize;
	u16 strip_size;
	u32 first_group;
	u32 last_group;
	u32 current_group;
	u32 map_row;
	u32 aio_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_length;
	int offload_to_mirror;
	struct pqi_encryption_info *encryption_info_ptr;
	struct pqi_encryption_info encryption_info;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif

	/* Check for valid opcode, get LBA and block count. */
	switch (scmd->cmnd[0]) {
	case WRITE_6:
		is_write = true;
		/* fall through */
	case READ_6:
		first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
			(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
		block_cnt = (u32)scmd->cmnd[4];
		if (block_cnt == 0)
			block_cnt = 256;
		break;
	case WRITE_10:
		is_write = true;
		/* fall through */
	case READ_10:
		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case WRITE_12:
		is_write = true;
		/* fall through */
	case READ_12:
		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	case WRITE_16:
		is_write = true;
		/* fall through */
	case READ_16:
		first_block = get_unaligned_be64(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
		break;
	default:
		/* Process via normal I/O path. */
		return PQI_RAID_BYPASS_INELIGIBLE;
	}

	/* Check for write to non-RAID-0. */
	if (is_write && device->raid_level != SA_RAID_0)
		return PQI_RAID_BYPASS_INELIGIBLE;

	if (unlikely(block_cnt == 0))
		return PQI_RAID_BYPASS_INELIGIBLE;
2368 last_block = first_block + block_cnt - 1;
2369 raid_map = device->raid_map;
2371 /* Check for invalid block or wraparound. */
2372 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2373 last_block < first_block)
2374 return PQI_RAID_BYPASS_INELIGIBLE;
2376 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2377 strip_size = get_unaligned_le16(&raid_map->strip_size);
2378 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2380 /* Calculate stripe information for the request. */
2381 blocks_per_row = data_disks_per_row * strip_size;
2382 #if BITS_PER_LONG == 32
2383 tmpdiv = first_block;
2384 do_div(tmpdiv, blocks_per_row);
2386 tmpdiv = last_block;
2387 do_div(tmpdiv, blocks_per_row);
2389 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2390 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2391 tmpdiv = first_row_offset;
2392 do_div(tmpdiv, strip_size);
2393 first_column = tmpdiv;
2394 tmpdiv = last_row_offset;
2395 do_div(tmpdiv, strip_size);
2396 last_column = tmpdiv;
2398 first_row = first_block / blocks_per_row;
2399 last_row = last_block / blocks_per_row;
2400 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2401 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2402 first_column = first_row_offset / strip_size;
2403 last_column = last_row_offset / strip_size;
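/*
 * Worked example (hypothetical values): with strip_size = 128 and
 * data_disks_per_row = 3, blocks_per_row = 384.  A request starting
 * at first_block = 1000 falls in row 1000 / 384 = 2, at row offset
 * 1000 - (2 * 384) = 232, in column 232 / 128 = 1.
 */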
2406 /* If this isn't a single row/column then give to the controller. */
2407 if (first_row != last_row || first_column != last_column)
2408 return PQI_RAID_BYPASS_INELIGIBLE;
2410 /* Proceeding with driver mapping. */
2411 total_disks_per_row = data_disks_per_row +
2412 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2413 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2414 get_unaligned_le16(&raid_map->row_cnt);
2415 map_index = (map_row * total_disks_per_row) + first_column;
2418 if (device->raid_level == SA_RAID_1) {
2419 if (device->offload_to_mirror)
2420 map_index += data_disks_per_row;
2421 device->offload_to_mirror = !device->offload_to_mirror;
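/*
 * Reading of the RAID 1 case above: successive bypass requests
 * alternate between the primary copy (map_index) and its mirror
 * (map_index + data_disks_per_row), a simple round-robin spread
 * across the two copies.
 */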
2422 } else if (device->raid_level == SA_RAID_ADM) {
2425 * Handles N-way mirrors (R1-ADM) and R10 with # of drives divided by 3.
2428 offload_to_mirror = device->offload_to_mirror;
2429 if (offload_to_mirror == 0) {
2430 /* use physical disk in the first mirrored group. */
2431 map_index %= data_disks_per_row;
2435 * Determine mirror group that map_index indicates.
2438 current_group = map_index / data_disks_per_row;
2440 if (offload_to_mirror != current_group) {
2442 if (current_group < layout_map_count - 1) {
2444 * Select raid index from next group.
2447 map_index += data_disks_per_row;
2451 * Select raid index from first group.
2454 map_index %= data_disks_per_row;
2458 } while (offload_to_mirror != current_group);
2461 /* Set mirror group to use next time. */
2463 offload_to_mirror = (offload_to_mirror >= layout_map_count - 1) ?
2464 0 : offload_to_mirror + 1;
2465 WARN_ON(offload_to_mirror >= layout_map_count);
2466 device->offload_to_mirror = offload_to_mirror;
2468 * Avoid direct use of device->offload_to_mirror within this
2469 * function since multiple threads might simultaneously
2470 * increment it beyond the range of device->layout_map_count - 1.
2472 } else if ((device->raid_level == SA_RAID_5 ||
2473 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2475 /* Verify first and last block are in same RAID group */
2476 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2477 stripesize = r5or6_blocks_per_row * layout_map_count;
2478 #if BITS_PER_LONG == 32
2479 tmpdiv = first_block;
2480 first_group = do_div(tmpdiv, stripesize);
2481 tmpdiv = first_group;
2482 do_div(tmpdiv, r5or6_blocks_per_row);
2483 first_group = tmpdiv;
2484 tmpdiv = last_block;
2485 last_group = do_div(tmpdiv, stripesize);
2486 tmpdiv = last_group;
2487 do_div(tmpdiv, r5or6_blocks_per_row);
2488 last_group = tmpdiv;
2490 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2491 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2493 if (first_group != last_group)
2494 return PQI_RAID_BYPASS_INELIGIBLE;
2496 /* Verify request is in a single row of RAID 5/6 */
2497 #if BITS_PER_LONG == 32
2498 tmpdiv = first_block;
2499 do_div(tmpdiv, stripesize);
2500 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2501 tmpdiv = last_block;
2502 do_div(tmpdiv, stripesize);
2503 r5or6_last_row = r0_last_row = tmpdiv;
2505 first_row = r5or6_first_row = r0_first_row =
2506 first_block / stripesize;
2507 r5or6_last_row = r0_last_row = last_block / stripesize;
2509 if (r5or6_first_row != r5or6_last_row)
2510 return PQI_RAID_BYPASS_INELIGIBLE;
2512 /* Verify request is in a single column */
2513 #if BITS_PER_LONG == 32
2514 tmpdiv = first_block;
2515 first_row_offset = do_div(tmpdiv, stripesize);
2516 tmpdiv = first_row_offset;
2517 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2518 r5or6_first_row_offset = first_row_offset;
2519 tmpdiv = last_block;
2520 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2521 tmpdiv = r5or6_last_row_offset;
2522 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2523 tmpdiv = r5or6_first_row_offset;
2524 do_div(tmpdiv, strip_size);
2525 first_column = r5or6_first_column = tmpdiv;
2526 tmpdiv = r5or6_last_row_offset;
2527 do_div(tmpdiv, strip_size);
2528 r5or6_last_column = tmpdiv;
2530 first_row_offset = r5or6_first_row_offset =
2531 (u32)((first_block % stripesize) %
2532 r5or6_blocks_per_row);
2534 r5or6_last_row_offset =
2535 (u32)((last_block % stripesize) %
2536 r5or6_blocks_per_row);
2538 first_column = r5or6_first_row_offset / strip_size;
2539 r5or6_first_column = first_column;
2540 r5or6_last_column = r5or6_last_row_offset / strip_size;
2542 if (r5or6_first_column != r5or6_last_column)
2543 return PQI_RAID_BYPASS_INELIGIBLE;
2545 /* Request is eligible */
2547 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2548 get_unaligned_le16(&raid_map->row_cnt);
2550 map_index = (first_group *
2551 (get_unaligned_le16(&raid_map->row_cnt) *
2552 total_disks_per_row)) +
2553 (map_row * total_disks_per_row) + first_column;
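/*
 * Sketch of the indexing above (one plausible reading): each of the
 * layout_map_count groups occupies row_cnt * total_disks_per_row
 * consecutive disk_data[] entries, so the final map_index skips
 * first_group whole groups and then selects the row/column within
 * that group.
 */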
2556 aio_handle = raid_map->disk_data[map_index].aio_handle;
2557 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2558 first_row * strip_size +
2559 (first_row_offset - first_column * strip_size);
2560 disk_block_cnt = block_cnt;
2562 /* Handle differing logical/physical block sizes. */
2563 if (raid_map->phys_blk_shift) {
2564 disk_block <<= raid_map->phys_blk_shift;
2565 disk_block_cnt <<= raid_map->phys_blk_shift;
2568 if (unlikely(disk_block_cnt > 0xffff))
2569 return PQI_RAID_BYPASS_INELIGIBLE;
2571 /* Build the new CDB for the physical disk I/O. */
2572 if (disk_block > 0xffffffff) {
2573 cdb[0] = is_write ? WRITE_16 : READ_16;
2575 put_unaligned_be64(disk_block, &cdb[2]);
2576 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2581 cdb[0] = is_write ? WRITE_10 : READ_10;
2583 put_unaligned_be32((u32)disk_block, &cdb[2]);
2585 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
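/*
 * Note on the CDB split above: READ/WRITE(10) carries only a 32-bit
 * LBA and a 16-bit transfer length, so a disk_block above 0xffffffff
 * forces the 16-byte CDB form, and a disk_block_cnt above 0xffff was
 * already rejected as bypass-ineligible.
 */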
2590 if (get_unaligned_le16(&raid_map->flags) &
2591 RAID_MAP_ENCRYPTION_ENABLED) {
2592 pqi_set_encryption_info(&encryption_info, raid_map, first_block);
2594 encryption_info_ptr = &encryption_info;
2596 encryption_info_ptr = NULL;
2599 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2600 cdb, cdb_length, queue_group, encryption_info_ptr, true);
2603 #define PQI_STATUS_IDLE 0x0
2605 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2606 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2608 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2609 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2610 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2611 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2612 #define PQI_DEVICE_STATE_ERROR 0x4
2614 #define PQI_MODE_READY_TIMEOUT_SECS 30
2615 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2617 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2619 struct pqi_device_registers __iomem *pqi_registers;
2620 unsigned long timeout;
2624 pqi_registers = ctrl_info->pqi_registers;
2625 timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
2628 signature = readq(&pqi_registers->signature);
2629 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2630 sizeof(signature)) == 0)
2632 if (time_after(jiffies, timeout)) {
2633 dev_err(&ctrl_info->pci_dev->dev,
2634 "timed out waiting for PQI signature\n");
2637 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2641 status = readb(&pqi_registers->function_and_status_code);
2642 if (status == PQI_STATUS_IDLE)
2644 if (time_after(jiffies, timeout)) {
2645 dev_err(&ctrl_info->pci_dev->dev,
2646 "timed out waiting for PQI IDLE\n");
2649 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2653 if (readl(&pqi_registers->device_status) ==
2654 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2656 if (time_after(jiffies, timeout)) {
2657 dev_err(&ctrl_info->pci_dev->dev,
2658 "timed out waiting for PQI all registers ready\n");
2661 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
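/*
 * Summary of the wait above: three polling phases share one deadline -
 * first the PQI signature, then a function/status code of
 * PQI_STATUS_IDLE, then a device status of
 * PQI_DEVICE_STATE_ALL_REGISTERS_READY - each sampled every
 * PQI_MODE_READY_POLL_INTERVAL_MSECS.
 */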
2667 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2669 struct pqi_scsi_dev *device;
2671 device = io_request->scmd->device->hostdata;
2672 device->raid_bypass_enabled = false;
2673 device->aio_enabled = false;
2676 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
2678 struct pqi_ctrl_info *ctrl_info;
2679 struct pqi_scsi_dev *device;
2681 device = sdev->hostdata;
2682 if (device->device_offline)
2685 device->device_offline = true;
2686 ctrl_info = shost_to_hba(sdev->host);
2687 pqi_schedule_rescan_worker(ctrl_info);
2688 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
2689 path, ctrl_info->scsi_host->host_no, device->bus,
2690 device->target, device->lun);
2693 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2697 struct scsi_cmnd *scmd;
2698 struct pqi_raid_error_info *error_info;
2699 size_t sense_data_length;
2702 struct scsi_sense_hdr sshdr;
2704 scmd = io_request->scmd;
2708 error_info = io_request->error_info;
2709 scsi_status = error_info->status;
2712 switch (error_info->data_out_result) {
2713 case PQI_DATA_IN_OUT_GOOD:
2715 case PQI_DATA_IN_OUT_UNDERFLOW:
2717 xfer_count = get_unaligned_le32(&error_info->data_out_transferred);
2718 residual_count = scsi_bufflen(scmd) - xfer_count;
2719 scsi_set_resid(scmd, residual_count);
2720 if (xfer_count < scmd->underflow)
2721 host_byte = DID_SOFT_ERROR;
2723 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2724 case PQI_DATA_IN_OUT_ABORTED:
2725 host_byte = DID_ABORT;
2727 case PQI_DATA_IN_OUT_TIMEOUT:
2728 host_byte = DID_TIME_OUT;
2730 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2731 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2732 case PQI_DATA_IN_OUT_BUFFER_ERROR:
2733 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2734 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2735 case PQI_DATA_IN_OUT_ERROR:
2736 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2737 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2738 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2739 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2740 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2741 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2742 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2743 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2744 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2745 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2747 host_byte = DID_ERROR;
2751 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2752 if (sense_data_length == 0)
2754 sense_data_length = get_unaligned_le16(&error_info->response_data_length);
2755 if (sense_data_length) {
2756 if (sense_data_length > sizeof(error_info->data))
2757 sense_data_length = sizeof(error_info->data);
2759 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2760 scsi_normalize_sense(error_info->data,
2761 sense_data_length, &sshdr) &&
2762 sshdr.sense_key == HARDWARE_ERROR &&
2763 sshdr.asc == 0x3e) {
2764 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
2765 struct pqi_scsi_dev *device = scmd->device->hostdata;
2767 switch (sshdr.ascq) {
2768 case 0x1: /* LOGICAL UNIT FAILURE */
2769 if (printk_ratelimit())
2770 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
2771 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2772 pqi_take_device_offline(scmd->device, "RAID");
2773 host_byte = DID_NO_CONNECT;
2776 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
2777 if (printk_ratelimit())
2778 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
2779 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2784 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2785 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2786 memcpy(scmd->sense_buffer, error_info->data, sense_data_length);
2790 scmd->result = scsi_status;
2791 set_host_byte(scmd, host_byte);
2794 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2798 struct scsi_cmnd *scmd;
2799 struct pqi_aio_error_info *error_info;
2800 size_t sense_data_length;
2803 bool device_offline;
2805 scmd = io_request->scmd;
2806 error_info = io_request->error_info;
2808 sense_data_length = 0;
2809 device_offline = false;
2811 switch (error_info->service_response) {
2812 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2813 scsi_status = error_info->status;
2815 case PQI_AIO_SERV_RESPONSE_FAILURE:
2816 switch (error_info->status) {
2817 case PQI_AIO_STATUS_IO_ABORTED:
2818 scsi_status = SAM_STAT_TASK_ABORTED;
2820 case PQI_AIO_STATUS_UNDERRUN:
2821 scsi_status = SAM_STAT_GOOD;
2822 residual_count = get_unaligned_le32(
2823 &error_info->residual_count);
2824 scsi_set_resid(scmd, residual_count);
2825 xfer_count = scsi_bufflen(scmd) - residual_count;
2826 if (xfer_count < scmd->underflow)
2827 host_byte = DID_SOFT_ERROR;
2829 case PQI_AIO_STATUS_OVERRUN:
2830 scsi_status = SAM_STAT_GOOD;
2832 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2833 pqi_aio_path_disabled(io_request);
2834 scsi_status = SAM_STAT_GOOD;
2835 io_request->status = -EAGAIN;
2837 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2838 case PQI_AIO_STATUS_INVALID_DEVICE:
2839 if (!io_request->raid_bypass) {
2840 device_offline = true;
2841 pqi_take_device_offline(scmd->device, "AIO");
2842 host_byte = DID_NO_CONNECT;
2844 scsi_status = SAM_STAT_CHECK_CONDITION;
2846 case PQI_AIO_STATUS_IO_ERROR:
2848 scsi_status = SAM_STAT_CHECK_CONDITION;
2852 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2853 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2854 scsi_status = SAM_STAT_GOOD;
2856 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2857 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2859 scsi_status = SAM_STAT_CHECK_CONDITION;
2863 if (error_info->data_present) {
2865 sense_data_length = get_unaligned_le16(&error_info->data_length);
2866 if (sense_data_length) {
2867 if (sense_data_length > sizeof(error_info->data))
2868 sense_data_length = sizeof(error_info->data);
2869 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2870 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2871 memcpy(scmd->sense_buffer, error_info->data, sense_data_length);
2876 if (device_offline && sense_data_length == 0)
2877 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1);
2880 scmd->result = scsi_status;
2881 set_host_byte(scmd, host_byte);
2884 static void pqi_process_io_error(unsigned int iu_type,
2885 struct pqi_io_request *io_request)
2888 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2889 pqi_process_raid_io_error(io_request);
2891 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2892 pqi_process_aio_io_error(io_request);
2897 static int pqi_interpret_task_management_response(
2898 struct pqi_task_management_response *response)
2902 switch (response->response_code) {
2903 case SOP_TMF_COMPLETE:
2904 case SOP_TMF_FUNCTION_SUCCEEDED:
2907 case SOP_TMF_REJECTED:
2918 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2919 struct pqi_queue_group *queue_group)
2921 unsigned int num_responses;
2924 struct pqi_io_request *io_request;
2925 struct pqi_io_response *response;
2929 oq_ci = queue_group->oq_ci_copy;
2932 oq_pi = readl(queue_group->oq_pi);
2937 response = queue_group->oq_element_array +
2938 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2940 request_id = get_unaligned_le16(&response->request_id);
2941 WARN_ON(request_id >= ctrl_info->max_io_slots);
2943 io_request = &ctrl_info->io_request_pool[request_id];
2944 WARN_ON(atomic_read(&io_request->refcount) == 0);
2946 switch (response->header.iu_type) {
2947 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2948 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2949 if (io_request->scmd)
2950 io_request->scmd->result = 0;
2952 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2954 case PQI_RESPONSE_IU_VENDOR_GENERAL:
2955 io_request->status =
2957 get_unaligned_le16(&((struct pqi_vendor_general_response *)response)->status);
2960 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2961 io_request->status =
2962 pqi_interpret_task_management_response(
2965 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2966 pqi_aio_path_disabled(io_request);
2967 io_request->status = -EAGAIN;
2969 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2970 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2971 io_request->error_info = ctrl_info->error_buffer +
2972 (get_unaligned_le16(&response->error_index) *
2973 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2974 pqi_process_io_error(response->header.iu_type, io_request);
2978 dev_err(&ctrl_info->pci_dev->dev,
2979 "unexpected IU type: 0x%x\n",
2980 response->header.iu_type);
2984 io_request->io_complete_callback(io_request,
2985 io_request->context);
2988 * Note that the I/O request structure CANNOT BE TOUCHED after
2989 * returning from the I/O completion callback!
2992 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2995 if (num_responses) {
2996 queue_group->oq_ci_copy = oq_ci;
2997 writel(oq_ci, queue_group->oq_ci);
3000 return num_responses;
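/*
 * Queue protocol note: the controller advances oq_pi as it posts
 * responses; the loop above consumes up to that point and then
 * publishes the new oq_ci with a single writel(), which both frees
 * the consumed elements and serves as the acknowledgement.
 */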
3003 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3004 unsigned int ci, unsigned int elements_in_queue)
3006 unsigned int num_elements_used;
3009 num_elements_used = pi - ci;
3011 num_elements_used = elements_in_queue - ci + pi;
3013 return elements_in_queue - num_elements_used - 1;
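/*
 * Worked example (hypothetical values): for a 16-element queue with
 * pi = 5 and ci = 2, 3 elements are in use and 16 - 3 - 1 = 12 are
 * free.  One slot is deliberately left unused so that pi == ci always
 * means "empty" and can never be confused with "full".
 */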
3016 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3017 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3021 unsigned long flags;
3023 struct pqi_queue_group *queue_group;
3025 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3026 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3029 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3031 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3032 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3034 if (pqi_num_elements_free(iq_pi, iq_ci,
3035 ctrl_info->num_elements_per_iq))
3038 spin_unlock_irqrestore(
3039 &queue_group->submit_lock[RAID_PATH], flags);
3041 if (pqi_ctrl_offline(ctrl_info))
3045 next_element = queue_group->iq_element_array[RAID_PATH] +
3046 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3048 memcpy(next_element, iu, iu_length);
3050 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3051 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3054 * This write notifies the controller that an IU is available to be processed.
3057 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3059 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3062 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3063 struct pqi_event *event)
3065 struct pqi_event_acknowledge_request request;
3067 memset(&request, 0, sizeof(request));
3069 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3070 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3071 &request.header.iu_length);
3072 request.event_type = event->event_type;
3073 request.event_id = event->event_id;
3074 request.additional_event_id = event->additional_event_id;
3076 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3079 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3080 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3082 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3083 struct pqi_ctrl_info *ctrl_info)
3085 unsigned long timeout;
3088 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
3091 status = pqi_read_soft_reset_status(ctrl_info);
3092 if (status & PQI_SOFT_RESET_INITIATE)
3093 return RESET_INITIATE_DRIVER;
3095 if (status & PQI_SOFT_RESET_ABORT)
3098 if (time_after(jiffies, timeout)) {
3099 dev_err(&ctrl_info->pci_dev->dev,
3100 "timed out waiting for soft reset status\n");
3101 return RESET_TIMEDOUT;
3104 if (!sis_is_firmware_running(ctrl_info))
3105 return RESET_NORESPONSE;
3107 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3111 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
3112 enum pqi_soft_reset_status reset_status)
3116 switch (reset_status) {
3117 case RESET_INITIATE_DRIVER:
3118 case RESET_TIMEDOUT:
3119 dev_info(&ctrl_info->pci_dev->dev,
3120 "resetting controller %u\n", ctrl_info->ctrl_id);
3121 sis_soft_reset(ctrl_info);
3123 case RESET_INITIATE_FIRMWARE:
3124 rc = pqi_ofa_ctrl_restart(ctrl_info);
3125 pqi_ofa_free_host_buffer(ctrl_info);
3126 dev_info(&ctrl_info->pci_dev->dev,
3127 "Online Firmware Activation for controller %u: %s\n",
3128 ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
3131 pqi_ofa_ctrl_unquiesce(ctrl_info);
3132 dev_info(&ctrl_info->pci_dev->dev,
3133 "Online Firmware Activation for controller %u: %s\n",
3134 ctrl_info->ctrl_id, "ABORTED");
3136 case RESET_NORESPONSE:
3137 pqi_ofa_free_host_buffer(ctrl_info);
3138 pqi_take_ctrl_offline(ctrl_info);
3143 static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3144 struct pqi_event *event)
3147 enum pqi_soft_reset_status status;
3149 event_id = get_unaligned_le16(&event->event_id);
3151 mutex_lock(&ctrl_info->ofa_mutex);
3153 if (event_id == PQI_EVENT_OFA_QUIESCE) {
3154 dev_info(&ctrl_info->pci_dev->dev,
3155 "Received Online Firmware Activation quiesce event for controller %u\n",
3156 ctrl_info->ctrl_id);
3157 pqi_ofa_ctrl_quiesce(ctrl_info);
3158 pqi_acknowledge_event(ctrl_info, event);
3159 if (ctrl_info->soft_reset_handshake_supported) {
3160 status = pqi_poll_for_soft_reset_status(ctrl_info);
3161 pqi_process_soft_reset(ctrl_info, status);
3163 pqi_process_soft_reset(ctrl_info,
3164 RESET_INITIATE_FIRMWARE);
3167 } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3168 pqi_acknowledge_event(ctrl_info, event);
3169 pqi_ofa_setup_host_buffer(ctrl_info,
3170 le32_to_cpu(event->ofa_bytes_requested));
3171 pqi_ofa_host_memory_update(ctrl_info);
3172 } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3173 pqi_ofa_free_host_buffer(ctrl_info);
3174 pqi_acknowledge_event(ctrl_info, event);
3175 dev_info(&ctrl_info->pci_dev->dev,
3176 "Online Firmware Activation(%u) cancel reason : %u\n",
3177 ctrl_info->ctrl_id, event->ofa_cancel_reason);
3180 mutex_unlock(&ctrl_info->ofa_mutex);
3183 static void pqi_event_worker(struct work_struct *work)
3186 struct pqi_ctrl_info *ctrl_info;
3187 struct pqi_event *event;
3189 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3191 pqi_ctrl_busy(ctrl_info);
3192 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
3193 if (pqi_ctrl_offline(ctrl_info))
3196 pqi_schedule_rescan_worker_delayed(ctrl_info);
3198 event = ctrl_info->events;
3199 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3200 if (event->pending) {
3201 event->pending = false;
3202 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3203 pqi_ctrl_unbusy(ctrl_info);
3204 pqi_ofa_process_event(ctrl_info, event);
3207 pqi_acknowledge_event(ctrl_info, event);
3213 pqi_ctrl_unbusy(ctrl_info);
3216 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
3218 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3221 u32 heartbeat_count;
3222 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
3225 pqi_check_ctrl_health(ctrl_info);
3226 if (pqi_ctrl_offline(ctrl_info))
3229 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3230 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3232 if (num_interrupts == ctrl_info->previous_num_interrupts) {
3233 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3234 dev_err(&ctrl_info->pci_dev->dev,
3235 "no heartbeat detected - last heartbeat count: %u\n",
3237 pqi_take_ctrl_offline(ctrl_info);
3241 ctrl_info->previous_num_interrupts = num_interrupts;
3244 ctrl_info->previous_heartbeat_count = heartbeat_count;
3245 mod_timer(&ctrl_info->heartbeat_timer,
3246 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
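/*
 * Liveness rule implemented above: the controller is taken offline
 * only when *both* signals are flat across one
 * PQI_HEARTBEAT_TIMER_INTERVAL - no new interrupts and no movement in
 * the firmware heartbeat counter - so an idle but healthy controller
 * is never declared dead.
 */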
3249 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3251 if (!ctrl_info->heartbeat_counter)
3254 ctrl_info->previous_num_interrupts =
3255 atomic_read(&ctrl_info->num_interrupts);
3256 ctrl_info->previous_heartbeat_count =
3257 pqi_read_heartbeat_counter(ctrl_info);
3259 ctrl_info->heartbeat_timer.expires =
3260 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3261 add_timer(&ctrl_info->heartbeat_timer);
3264 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3266 del_timer_sync(&ctrl_info->heartbeat_timer);
3269 static inline int pqi_event_type_to_event_index(unsigned int event_type)
3273 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
3274 if (event_type == pqi_supported_event_types[index])
3280 static inline bool pqi_is_supported_event(unsigned int event_type)
3282 return pqi_event_type_to_event_index(event_type) != -1;
3285 static void pqi_ofa_capture_event_payload(struct pqi_event *event,
3286 struct pqi_event_response *response)
3290 event_id = get_unaligned_le16(&event->event_id);
3292 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3293 if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3294 event->ofa_bytes_requested =
3295 response->data.ofa_memory_allocation.bytes_requested;
3296 } else if (event_id == PQI_EVENT_OFA_CANCELLED) {
3297 event->ofa_cancel_reason =
3298 response->data.ofa_cancelled.reason;
3303 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3305 unsigned int num_events;
3308 struct pqi_event_queue *event_queue;
3309 struct pqi_event_response *response;
3310 struct pqi_event *event;
3313 event_queue = &ctrl_info->event_queue;
3315 oq_ci = event_queue->oq_ci_copy;
3318 oq_pi = readl(event_queue->oq_pi);
3323 response = event_queue->oq_element_array +
3324 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3327 event_index = pqi_event_type_to_event_index(response->event_type);
3329 if (event_index >= 0) {
3330 if (response->request_acknowlege) {
3331 event = &ctrl_info->events[event_index];
3332 event->pending = true;
3333 event->event_type = response->event_type;
3334 event->event_id = response->event_id;
3335 event->additional_event_id =
3336 response->additional_event_id;
3337 pqi_ofa_capture_event_payload(event, response);
3341 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3345 event_queue->oq_ci_copy = oq_ci;
3346 writel(oq_ci, event_queue->oq_ci);
3347 schedule_work(&ctrl_info->event_work);
3353 #define PQI_LEGACY_INTX_MASK 0x1
3355 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
3359 struct pqi_device_registers __iomem *pqi_registers;
3360 volatile void __iomem *register_addr;
3362 pqi_registers = ctrl_info->pqi_registers;
3365 register_addr = &pqi_registers->legacy_intx_mask_clear;
3367 register_addr = &pqi_registers->legacy_intx_mask_set;
3369 intx_mask = readl(register_addr);
3370 intx_mask |= PQI_LEGACY_INTX_MASK;
3371 writel(intx_mask, register_addr);
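/*
 * Reading of the register access above: legacy INTx masking appears
 * to use a set/clear register pair, so enabling interrupts writes the
 * mask bit to legacy_intx_mask_clear while disabling writes it to
 * legacy_intx_mask_set; the read-modify-write preserves any other
 * bits in the register.
 */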
3374 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3375 enum pqi_irq_mode new_mode)
3377 switch (ctrl_info->irq_mode) {
3383 pqi_configure_legacy_intx(ctrl_info, true);
3384 sis_enable_intx(ctrl_info);
3393 pqi_configure_legacy_intx(ctrl_info, false);
3394 sis_enable_msix(ctrl_info);
3399 pqi_configure_legacy_intx(ctrl_info, false);
3406 sis_enable_msix(ctrl_info);
3409 pqi_configure_legacy_intx(ctrl_info, true);
3410 sis_enable_intx(ctrl_info);
3418 ctrl_info->irq_mode = new_mode;
3421 #define PQI_LEGACY_INTX_PENDING 0x1
3423 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3428 switch (ctrl_info->irq_mode) {
3434 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
3435 if (intx_status & PQI_LEGACY_INTX_PENDING)
3449 static irqreturn_t pqi_irq_handler(int irq, void *data)
3451 struct pqi_ctrl_info *ctrl_info;
3452 struct pqi_queue_group *queue_group;
3453 unsigned int num_responses_handled;
3455 queue_group = data;
3456 ctrl_info = queue_group->ctrl_info;
3458 if (!pqi_is_valid_irq(ctrl_info))
3461 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3463 if (irq == ctrl_info->event_irq)
3464 num_responses_handled += pqi_process_event_intr(ctrl_info);
3466 if (num_responses_handled)
3467 atomic_inc(&ctrl_info->num_interrupts);
3469 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3470 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3475 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3477 struct pci_dev *pci_dev = ctrl_info->pci_dev;
3481 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3483 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3484 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3485 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
3487 dev_err(&pci_dev->dev,
3488 "irq %u init failed with error %d\n",
3489 pci_irq_vector(pci_dev, i), rc);
3492 ctrl_info->num_msix_vectors_initialized++;
3498 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3502 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3503 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3504 &ctrl_info->queue_groups[i]);
3506 ctrl_info->num_msix_vectors_initialized = 0;
3509 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3511 int num_vectors_enabled;
3513 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3514 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3515 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3516 if (num_vectors_enabled < 0) {
3517 dev_err(&ctrl_info->pci_dev->dev,
3518 "MSI-X init failed with error %d\n",
3519 num_vectors_enabled);
3520 return num_vectors_enabled;
3523 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
3524 ctrl_info->irq_mode = IRQ_MODE_MSIX;
3528 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3530 if (ctrl_info->num_msix_vectors_enabled) {
3531 pci_free_irq_vectors(ctrl_info->pci_dev);
3532 ctrl_info->num_msix_vectors_enabled = 0;
3536 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3539 size_t alloc_length;
3540 size_t element_array_length_per_iq;
3541 size_t element_array_length_per_oq;
3542 void *element_array;
3543 void __iomem *next_queue_index;
3544 void *aligned_pointer;
3545 unsigned int num_inbound_queues;
3546 unsigned int num_outbound_queues;
3547 unsigned int num_queue_indexes;
3548 struct pqi_queue_group *queue_group;
3550 element_array_length_per_iq =
3551 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3552 ctrl_info->num_elements_per_iq;
3553 element_array_length_per_oq =
3554 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3555 ctrl_info->num_elements_per_oq;
3556 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3557 num_outbound_queues = ctrl_info->num_queue_groups;
3558 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3560 aligned_pointer = NULL;
3562 for (i = 0; i < num_inbound_queues; i++) {
3563 aligned_pointer = PTR_ALIGN(aligned_pointer,
3564 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3565 aligned_pointer += element_array_length_per_iq;
3568 for (i = 0; i < num_outbound_queues; i++) {
3569 aligned_pointer = PTR_ALIGN(aligned_pointer,
3570 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3571 aligned_pointer += element_array_length_per_oq;
3574 aligned_pointer = PTR_ALIGN(aligned_pointer,
3575 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3576 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3577 PQI_EVENT_OQ_ELEMENT_LENGTH;
3579 for (i = 0; i < num_queue_indexes; i++) {
3580 aligned_pointer = PTR_ALIGN(aligned_pointer,
3581 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3582 aligned_pointer += sizeof(pqi_index_t);
3585 alloc_length = (size_t)aligned_pointer +
3586 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3588 alloc_length += PQI_EXTRA_SGL_MEMORY;
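/*
 * Sizing idiom used above: the loops "dry run" the queue layout from
 * a NULL base pointer, applying the same PTR_ALIGN() steps as the
 * real carve-up below, so the final pointer value equals the
 * worst-case length needed for a single coherent DMA allocation.
 */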
3590 ctrl_info->queue_memory_base =
3591 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3592 &ctrl_info->queue_memory_base_dma_handle,
3595 if (!ctrl_info->queue_memory_base)
3598 ctrl_info->queue_memory_length = alloc_length;
3600 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3601 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3603 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3604 queue_group = &ctrl_info->queue_groups[i];
3605 queue_group->iq_element_array[RAID_PATH] = element_array;
3606 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3607 ctrl_info->queue_memory_base_dma_handle +
3608 (element_array - ctrl_info->queue_memory_base);
3609 element_array += element_array_length_per_iq;
3610 element_array = PTR_ALIGN(element_array,
3611 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3612 queue_group->iq_element_array[AIO_PATH] = element_array;
3613 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3614 ctrl_info->queue_memory_base_dma_handle +
3615 (element_array - ctrl_info->queue_memory_base);
3616 element_array += element_array_length_per_iq;
3617 element_array = PTR_ALIGN(element_array,
3618 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3621 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3622 queue_group = &ctrl_info->queue_groups[i];
3623 queue_group->oq_element_array = element_array;
3624 queue_group->oq_element_array_bus_addr =
3625 ctrl_info->queue_memory_base_dma_handle +
3626 (element_array - ctrl_info->queue_memory_base);
3627 element_array += element_array_length_per_oq;
3628 element_array = PTR_ALIGN(element_array,
3629 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3632 ctrl_info->event_queue.oq_element_array = element_array;
3633 ctrl_info->event_queue.oq_element_array_bus_addr =
3634 ctrl_info->queue_memory_base_dma_handle +
3635 (element_array - ctrl_info->queue_memory_base);
3636 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3637 PQI_EVENT_OQ_ELEMENT_LENGTH;
3639 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
3640 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3642 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3643 queue_group = &ctrl_info->queue_groups[i];
3644 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3645 queue_group->iq_ci_bus_addr[RAID_PATH] =
3646 ctrl_info->queue_memory_base_dma_handle +
3648 (next_queue_index - (void __iomem *)ctrl_info->queue_memory_base);
3649 next_queue_index += sizeof(pqi_index_t);
3650 next_queue_index = PTR_ALIGN(next_queue_index,
3651 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3652 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3653 queue_group->iq_ci_bus_addr[AIO_PATH] =
3654 ctrl_info->queue_memory_base_dma_handle +
3656 (next_queue_index - (void __iomem *)ctrl_info->queue_memory_base);
3657 next_queue_index += sizeof(pqi_index_t);
3658 next_queue_index = PTR_ALIGN(next_queue_index,
3659 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3660 queue_group->oq_pi = next_queue_index;
3661 queue_group->oq_pi_bus_addr =
3662 ctrl_info->queue_memory_base_dma_handle +
3664 (next_queue_index - (void __iomem *)ctrl_info->queue_memory_base);
3665 next_queue_index += sizeof(pqi_index_t);
3666 next_queue_index = PTR_ALIGN(next_queue_index,
3667 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3670 ctrl_info->event_queue.oq_pi = next_queue_index;
3671 ctrl_info->event_queue.oq_pi_bus_addr =
3672 ctrl_info->queue_memory_base_dma_handle +
3674 (next_queue_index - (void __iomem *)ctrl_info->queue_memory_base);
3679 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3682 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3683 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3686 * Initialize the backpointers to the controller structure in
3687 * each operational queue group structure.
3689 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3690 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3693 * Assign IDs to all operational queues. Note that the IDs
3694 * assigned to operational IQs are independent of the IDs
3695 * assigned to operational OQs.
3697 ctrl_info->event_queue.oq_id = next_oq_id++;
3698 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3699 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3700 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3701 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3705 * Assign MSI-X table entry indexes to all queues. Note that the
3706 * interrupt for the event queue is shared with the first queue group.
3708 ctrl_info->event_queue.int_msg_num = 0;
3709 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3710 ctrl_info->queue_groups[i].int_msg_num = i;
3712 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3713 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3714 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3715 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3716 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3720 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3722 size_t alloc_length;
3723 struct pqi_admin_queues_aligned *admin_queues_aligned;
3724 struct pqi_admin_queues *admin_queues;
3726 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3727 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3729 ctrl_info->admin_queue_memory_base =
3730 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3731 &ctrl_info->admin_queue_memory_base_dma_handle,
3734 if (!ctrl_info->admin_queue_memory_base)
3737 ctrl_info->admin_queue_memory_length = alloc_length;
3739 admin_queues = &ctrl_info->admin_queues;
3740 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3741 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3742 admin_queues->iq_element_array =
3743 &admin_queues_aligned->iq_element_array;
3744 admin_queues->oq_element_array =
3745 &admin_queues_aligned->oq_element_array;
3746 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3747 admin_queues->oq_pi =
3748 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
3750 admin_queues->iq_element_array_bus_addr =
3751 ctrl_info->admin_queue_memory_base_dma_handle +
3752 (admin_queues->iq_element_array -
3753 ctrl_info->admin_queue_memory_base);
3754 admin_queues->oq_element_array_bus_addr =
3755 ctrl_info->admin_queue_memory_base_dma_handle +
3756 (admin_queues->oq_element_array -
3757 ctrl_info->admin_queue_memory_base);
3758 admin_queues->iq_ci_bus_addr =
3759 ctrl_info->admin_queue_memory_base_dma_handle +
3760 ((void *)admin_queues->iq_ci -
3761 ctrl_info->admin_queue_memory_base);
3762 admin_queues->oq_pi_bus_addr =
3763 ctrl_info->admin_queue_memory_base_dma_handle +
3764 ((void __iomem *)admin_queues->oq_pi -
3765 (void __iomem *)ctrl_info->admin_queue_memory_base);
3770 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
3771 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3773 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3775 struct pqi_device_registers __iomem *pqi_registers;
3776 struct pqi_admin_queues *admin_queues;
3777 unsigned long timeout;
3781 pqi_registers = ctrl_info->pqi_registers;
3782 admin_queues = &ctrl_info->admin_queues;
3784 writeq((u64)admin_queues->iq_element_array_bus_addr,
3785 &pqi_registers->admin_iq_element_array_addr);
3786 writeq((u64)admin_queues->oq_element_array_bus_addr,
3787 &pqi_registers->admin_oq_element_array_addr);
3788 writeq((u64)admin_queues->iq_ci_bus_addr,
3789 &pqi_registers->admin_iq_ci_addr);
3790 writeq((u64)admin_queues->oq_pi_bus_addr,
3791 &pqi_registers->admin_oq_pi_addr);
3793 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3794 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
3795 (admin_queues->int_msg_num << 16);
3796 writel(reg, &pqi_registers->admin_iq_num_elements);
3797 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3798 &pqi_registers->function_and_status_code);
3800 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3802 status = readb(&pqi_registers->function_and_status_code);
3803 if (status == PQI_STATUS_IDLE)
3805 if (time_after(jiffies, timeout))
3807 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3811 * The offset registers are not initialized to the correct
3812 * offsets until *after* the create admin queue pair command
3813 * completes successfully.
3815 admin_queues->iq_pi = ctrl_info->iomem_base +
3816 PQI_DEVICE_REGISTERS_OFFSET +
3817 readq(&pqi_registers->admin_iq_pi_offset);
3818 admin_queues->oq_ci = ctrl_info->iomem_base +
3819 PQI_DEVICE_REGISTERS_OFFSET +
3820 readq(&pqi_registers->admin_oq_ci_offset);
3825 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3826 struct pqi_general_admin_request *request)
3828 struct pqi_admin_queues *admin_queues;
3832 admin_queues = &ctrl_info->admin_queues;
3833 iq_pi = admin_queues->iq_pi_copy;
3835 next_element = admin_queues->iq_element_array +
3836 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3838 memcpy(next_element, request, sizeof(*request));
3840 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3841 admin_queues->iq_pi_copy = iq_pi;
3844 * This write notifies the controller that an IU is available to be processed.
3847 writel(iq_pi, admin_queues->iq_pi);
3850 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
3852 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3853 struct pqi_general_admin_response *response)
3855 struct pqi_admin_queues *admin_queues;
3858 unsigned long timeout;
3860 admin_queues = &ctrl_info->admin_queues;
3861 oq_ci = admin_queues->oq_ci_copy;
3863 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
3866 oq_pi = readl(admin_queues->oq_pi);
3869 if (time_after(jiffies, timeout)) {
3870 dev_err(&ctrl_info->pci_dev->dev,
3871 "timed out waiting for admin response\n");
3874 if (!sis_is_firmware_running(ctrl_info))
3876 usleep_range(1000, 2000);
3879 memcpy(response, admin_queues->oq_element_array +
3880 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3882 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3883 admin_queues->oq_ci_copy = oq_ci;
3884 writel(oq_ci, admin_queues->oq_ci);
3889 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3890 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3891 struct pqi_io_request *io_request)
3893 struct pqi_io_request *next;
3898 unsigned long flags;
3899 unsigned int num_elements_needed;
3900 unsigned int num_elements_to_end_of_queue;
3902 struct pqi_iu_header *request;
3904 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3907 io_request->queue_group = queue_group;
3908 list_add_tail(&io_request->request_list_entry,
3909 &queue_group->request_list[path]);
3912 iq_pi = queue_group->iq_pi_copy[path];
3914 list_for_each_entry_safe(io_request, next,
3915 &queue_group->request_list[path], request_list_entry) {
3917 request = io_request->iu;
3919 iu_length = get_unaligned_le16(&request->iu_length) +
3920 PQI_REQUEST_HEADER_LENGTH;
3921 num_elements_needed =
3922 DIV_ROUND_UP(iu_length,
3923 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3925 iq_ci = readl(queue_group->iq_ci[path]);
3927 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3928 ctrl_info->num_elements_per_iq))
3931 put_unaligned_le16(queue_group->oq_id,
3932 &request->response_queue_id);
3934 next_element = queue_group->iq_element_array[path] +
3935 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3937 num_elements_to_end_of_queue =
3938 ctrl_info->num_elements_per_iq - iq_pi;
3940 if (num_elements_needed <= num_elements_to_end_of_queue) {
3941 memcpy(next_element, request, iu_length);
3943 copy_count = num_elements_to_end_of_queue *
3944 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3945 memcpy(next_element, request, copy_count);
3946 memcpy(queue_group->iq_element_array[path],
3947 (u8 *)request + copy_count,
3948 iu_length - copy_count);
3951 iq_pi = (iq_pi + num_elements_needed) %
3952 ctrl_info->num_elements_per_iq;
3954 list_del(&io_request->request_list_entry);
3957 if (iq_pi != queue_group->iq_pi_copy[path]) {
3958 queue_group->iq_pi_copy[path] = iq_pi;
3960 * This write notifies the controller that one or more IUs are
3961 * available to be processed.
3963 writel(iq_pi, queue_group->iq_pi[path]);
3966 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
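/*
 * Wrap-around note for the copy above: when an IU needs more elements
 * than remain before the end of the element array, the memcpy() is
 * split in two - the tail of the array first, the remainder at
 * element 0 - which is why iq_pi always advances modulo
 * num_elements_per_iq.
 */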
3969 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
3971 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3972 struct completion *wait)
3977 if (wait_for_completion_io_timeout(wait,
3978 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
3983 pqi_check_ctrl_health(ctrl_info);
3984 if (pqi_ctrl_offline(ctrl_info)) {
3993 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3996 struct completion *waiting = context;
4001 static int pqi_process_raid_io_error_synchronous(
4002 struct pqi_raid_error_info *error_info)
4006 switch (error_info->data_out_result) {
4007 case PQI_DATA_IN_OUT_GOOD:
4008 if (error_info->status == SAM_STAT_GOOD)
4011 case PQI_DATA_IN_OUT_UNDERFLOW:
4012 if (error_info->status == SAM_STAT_GOOD ||
4013 error_info->status == SAM_STAT_CHECK_CONDITION)
4016 case PQI_DATA_IN_OUT_ABORTED:
4017 rc = PQI_CMD_STATUS_ABORTED;
4024 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4025 struct pqi_iu_header *request, unsigned int flags,
4026 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
4029 struct pqi_io_request *io_request;
4030 unsigned long start_jiffies;
4031 unsigned long msecs_blocked;
4033 DECLARE_COMPLETION_ONSTACK(wait);
4036 * Note that PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value are
4037 * mutually exclusive.
4040 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4041 if (down_interruptible(&ctrl_info->sync_request_sem))
4042 return -ERESTARTSYS;
4044 if (timeout_msecs == NO_TIMEOUT) {
4045 down(&ctrl_info->sync_request_sem);
4047 start_jiffies = jiffies;
4048 if (down_timeout(&ctrl_info->sync_request_sem,
4049 msecs_to_jiffies(timeout_msecs)))
4052 msecs_blocked = jiffies_to_msecs(jiffies - start_jiffies);
4053 if (msecs_blocked >= timeout_msecs) {
4057 timeout_msecs -= msecs_blocked;
4061 pqi_ctrl_busy(ctrl_info);
4062 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
4063 if (timeout_msecs == 0) {
4064 pqi_ctrl_unbusy(ctrl_info);
4069 if (pqi_ctrl_offline(ctrl_info)) {
4070 pqi_ctrl_unbusy(ctrl_info);
4075 atomic_inc(&ctrl_info->sync_cmds_outstanding);
4077 io_request = pqi_alloc_io_request(ctrl_info);
4079 put_unaligned_le16(io_request->index,
4080 &(((struct pqi_raid_path_request *)request)->request_id));
4082 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4083 ((struct pqi_raid_path_request *)request)->error_index =
4084 ((struct pqi_raid_path_request *)request)->request_id;
4086 iu_length = get_unaligned_le16(&request->iu_length) +
4087 PQI_REQUEST_HEADER_LENGTH;
4088 memcpy(io_request->iu, request, iu_length);
4090 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4091 io_request->context = &wait;
4093 pqi_start_io(ctrl_info,
4094 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4097 pqi_ctrl_unbusy(ctrl_info);
4099 if (timeout_msecs == NO_TIMEOUT) {
4100 pqi_wait_for_completion_io(ctrl_info, &wait);
4102 if (!wait_for_completion_io_timeout(&wait,
4103 msecs_to_jiffies(timeout_msecs))) {
4104 dev_warn(&ctrl_info->pci_dev->dev,
4105 "command timed out\n");
4111 if (io_request->error_info)
4112 memcpy(error_info, io_request->error_info,
4113 sizeof(*error_info));
4115 memset(error_info, 0, sizeof(*error_info));
4116 } else if (rc == 0 && io_request->error_info) {
4117 rc = pqi_process_raid_io_error_synchronous(
4118 io_request->error_info);
4121 pqi_free_io_request(io_request);
4123 atomic_dec(&ctrl_info->sync_cmds_outstanding);
4125 up(&ctrl_info->sync_request_sem);
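/*
 * Timeout accounting note: time spent blocked on sync_request_sem is
 * subtracted from timeout_msecs before the request is ever issued, so
 * the caller's deadline bounds queueing plus execution rather than
 * restarting at submission time.
 */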
4130 static int pqi_validate_admin_response(
4131 struct pqi_general_admin_response *response, u8 expected_function_code)
4133 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4136 if (get_unaligned_le16(&response->header.iu_length) !=
4137 PQI_GENERAL_ADMIN_IU_LENGTH)
4140 if (response->function_code != expected_function_code)
4143 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4149 static int pqi_submit_admin_request_synchronous(
4150 struct pqi_ctrl_info *ctrl_info,
4151 struct pqi_general_admin_request *request,
4152 struct pqi_general_admin_response *response)
4156 pqi_submit_admin_request(ctrl_info, request);
4158 rc = pqi_poll_for_admin_response(ctrl_info, response);
4161 rc = pqi_validate_admin_response(response,
4162 request->function_code);
4167 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4170 struct pqi_general_admin_request request;
4171 struct pqi_general_admin_response response;
4172 struct pqi_device_capability *capability;
4173 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4175 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4179 memset(&request, 0, sizeof(request));
4181 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4182 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4183 &request.header.iu_length);
4184 request.function_code =
4185 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4186 put_unaligned_le32(sizeof(*capability),
4187 &request.data.report_device_capability.buffer_length);
4189 rc = pqi_map_single(ctrl_info->pci_dev,
4190 &request.data.report_device_capability.sg_descriptor,
4191 capability, sizeof(*capability),
4196 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4199 pqi_pci_unmap(ctrl_info->pci_dev,
4200 &request.data.report_device_capability.sg_descriptor, 1,
4206 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4211 ctrl_info->max_inbound_queues =
4212 get_unaligned_le16(&capability->max_inbound_queues);
4213 ctrl_info->max_elements_per_iq =
4214 get_unaligned_le16(&capability->max_elements_per_iq);
4215 ctrl_info->max_iq_element_length =
4216 get_unaligned_le16(&capability->max_iq_element_length) * 16;
4218 ctrl_info->max_outbound_queues =
4219 get_unaligned_le16(&capability->max_outbound_queues);
4220 ctrl_info->max_elements_per_oq =
4221 get_unaligned_le16(&capability->max_elements_per_oq);
4222 ctrl_info->max_oq_element_length =
4223 get_unaligned_le16(&capability->max_oq_element_length) * 16;
4226 sop_iu_layer_descriptor =
4227 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4229 ctrl_info->max_inbound_iu_length_per_firmware =
4231 get_unaligned_le16(&sop_iu_layer_descriptor->max_inbound_iu_length);
4232 ctrl_info->inbound_spanning_supported =
4233 sop_iu_layer_descriptor->inbound_spanning_supported;
4234 ctrl_info->outbound_spanning_supported =
4235 sop_iu_layer_descriptor->outbound_spanning_supported;
4243 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4245 if (ctrl_info->max_iq_element_length <
4246 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4247 dev_err(&ctrl_info->pci_dev->dev,
4248 "max. inbound queue element length of %d is less than the required length of %d\n",
4249 ctrl_info->max_iq_element_length,
4250 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4254 if (ctrl_info->max_oq_element_length <
4255 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4256 dev_err(&ctrl_info->pci_dev->dev,
4257 "max. outbound queue element length of %d is less than the required length of %d\n",
4258 ctrl_info->max_oq_element_length,
4259 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4263 if (ctrl_info->max_inbound_iu_length_per_firmware <
4264 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4265 dev_err(&ctrl_info->pci_dev->dev,
4266 "max. inbound IU length of %u is less than the min. required length of %d\n",
4267 ctrl_info->max_inbound_iu_length_per_firmware,
4268 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4272 if (!ctrl_info->inbound_spanning_supported) {
4273 dev_err(&ctrl_info->pci_dev->dev,
4274 "the controller does not support inbound spanning\n");
4278 if (ctrl_info->outbound_spanning_supported) {
4279 dev_err(&ctrl_info->pci_dev->dev,
4280 "the controller supports outbound spanning but this driver does not\n");
4287 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4290 struct pqi_event_queue *event_queue;
4291 struct pqi_general_admin_request request;
4292 struct pqi_general_admin_response response;
4294 event_queue = &ctrl_info->event_queue;
4297 * Create OQ (Outbound Queue - device to host queue) to dedicate to events.
4300 memset(&request, 0, sizeof(request));
4301 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4302 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4303 &request.header.iu_length);
4304 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4305 put_unaligned_le16(event_queue->oq_id,
4306 &request.data.create_operational_oq.queue_id);
4307 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4308 &request.data.create_operational_oq.element_array_addr);
4309 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4310 &request.data.create_operational_oq.pi_addr);
4311 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4312 &request.data.create_operational_oq.num_elements);
4313 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4314 &request.data.create_operational_oq.element_length);
4315 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4316 put_unaligned_le16(event_queue->int_msg_num,
4317 &request.data.create_operational_oq.int_msg_num);
4319 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4324 event_queue->oq_ci = ctrl_info->iomem_base +
4325 PQI_DEVICE_REGISTERS_OFFSET +
4327 get_unaligned_le64(&response.data.create_operational_oq.oq_ci_offset);
4332 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4333 unsigned int group_number)
4336 struct pqi_queue_group *queue_group;
4337 struct pqi_general_admin_request request;
4338 struct pqi_general_admin_response response;
4340 queue_group = &ctrl_info->queue_groups[group_number];
4343 * Create IQ (Inbound Queue - host to device queue) for the RAID path.
4346 memset(&request, 0, sizeof(request));
4347 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4348 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4349 &request.header.iu_length);
4350 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4351 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4352 &request.data.create_operational_iq.queue_id);
4354 put_unaligned_le64((u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4355 &request.data.create_operational_iq.element_array_addr);
4356 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4357 &request.data.create_operational_iq.ci_addr);
4358 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4359 &request.data.create_operational_iq.num_elements);
4360 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4361 &request.data.create_operational_iq.element_length);
4362 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4364 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4367 dev_err(&ctrl_info->pci_dev->dev,
4368 "error creating inbound RAID queue\n");
4372 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4373 PQI_DEVICE_REGISTERS_OFFSET +
4375 get_unaligned_le64(&response.data.create_operational_iq.iq_pi_offset);
4378 * Create IQ (Inbound Queue - host to device queue) for
4379 * Advanced I/O (AIO) path.
4381 memset(&request, 0, sizeof(request));
4382 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4383 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4384 &request.header.iu_length);
4385 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4386 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4387 &request.data.create_operational_iq.queue_id);
4388 put_unaligned_le64((u64)queue_group->
4389 iq_element_array_bus_addr[AIO_PATH],
4390 &request.data.create_operational_iq.element_array_addr);
4391 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4392 &request.data.create_operational_iq.ci_addr);
4393 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4394 &request.data.create_operational_iq.num_elements);
4395 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4396 &request.data.create_operational_iq.element_length);
4397 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4399 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4400 &response);
4401 if (rc) {
4402 dev_err(&ctrl_info->pci_dev->dev,
4403 "error creating inbound AIO queue\n");
4404 return rc;
4405 }
4407 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4408 PQI_DEVICE_REGISTERS_OFFSET +
4409 get_unaligned_le64(
4410 &response.data.create_operational_iq.iq_pi_offset);
4412 /*
4413 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4414 * assumed to be for RAID path I/O unless we change the queue's
4415 * property.
4416 */
4417 memset(&request, 0, sizeof(request));
4418 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4419 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4420 &request.header.iu_length);
4421 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4422 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4423 &request.data.change_operational_iq_properties.queue_id);
4424 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4425 &request.data.change_operational_iq_properties.vendor_specific);
4427 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4428 &response);
4429 if (rc) {
4430 dev_err(&ctrl_info->pci_dev->dev,
4431 "error changing queue property\n");
4432 return rc;
4433 }
4435 /*
4436 * Create OQ (Outbound Queue - device to host queue).
4437 */
4438 memset(&request, 0, sizeof(request));
4439 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4440 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4441 &request.header.iu_length);
4442 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4443 put_unaligned_le16(queue_group->oq_id,
4444 &request.data.create_operational_oq.queue_id);
4445 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4446 &request.data.create_operational_oq.element_array_addr);
4447 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4448 &request.data.create_operational_oq.pi_addr);
4449 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4450 &request.data.create_operational_oq.num_elements);
4451 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4452 &request.data.create_operational_oq.element_length);
4453 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4454 put_unaligned_le16(queue_group->int_msg_num,
4455 &request.data.create_operational_oq.int_msg_num);
4457 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4458 &response);
4459 if (rc) {
4460 dev_err(&ctrl_info->pci_dev->dev,
4461 "error creating outbound queue\n");
4462 return rc;
4463 }
4465 queue_group->oq_ci = ctrl_info->iomem_base +
4466 PQI_DEVICE_REGISTERS_OFFSET +
4467 get_unaligned_le64(
4468 &response.data.create_operational_oq.oq_ci_offset);
4470 return 0;
4471 }
4473 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4474 {
4475 int rc;
4476 unsigned int i;
4478 rc = pqi_create_event_queue(ctrl_info);
4479 if (rc) {
4480 dev_err(&ctrl_info->pci_dev->dev,
4481 "error creating event queue\n");
4482 return rc;
4483 }
4485 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4486 rc = pqi_create_queue_group(ctrl_info, i);
4487 if (rc) {
4488 dev_err(&ctrl_info->pci_dev->dev,
4489 "error creating queue group number %u/%u\n",
4490 i, ctrl_info->num_queue_groups);
4491 return rc;
4492 }
4493 }
4495 return 0;
4496 }
4498 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4499 (offsetof(struct pqi_event_config, descriptors) + \
4500 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
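/*
 * The report/set event-configuration buffer must be large enough for the
 * fixed pqi_event_config header plus the maximum number of event
 * descriptors, which is what the macro above computes via offsetof().
 */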
4502 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4503 bool enable_events)
4504 {
4505 int rc;
4506 unsigned int i;
4507 struct pqi_event_config *event_config;
4508 struct pqi_event_descriptor *event_descriptor;
4509 struct pqi_general_management_request request;
4511 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4512 GFP_KERNEL);
4513 if (!event_config)
4514 return -ENOMEM;
4516 memset(&request, 0, sizeof(request));
4518 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4519 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4520 data.report_event_configuration.sg_descriptors[1]) -
4521 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4522 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4523 &request.data.report_event_configuration.buffer_length);
4525 rc = pqi_map_single(ctrl_info->pci_dev,
4526 request.data.report_event_configuration.sg_descriptors,
4527 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4528 DMA_FROM_DEVICE);
4529 if (rc)
4530 goto out;
4532 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4533 0, NULL, NO_TIMEOUT);
4535 pqi_pci_unmap(ctrl_info->pci_dev,
4536 request.data.report_event_configuration.sg_descriptors, 1,
4537 DMA_FROM_DEVICE);
4539 if (rc)
4540 goto out;
4542 for (i = 0; i < event_config->num_event_descriptors; i++) {
4543 event_descriptor = &event_config->descriptors[i];
4544 if (enable_events &&
4545 pqi_is_supported_event(event_descriptor->event_type))
4546 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4547 &event_descriptor->oq_id);
4548 else
4549 put_unaligned_le16(0, &event_descriptor->oq_id);
4550 }
4552 memset(&request, 0, sizeof(request));
4554 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4555 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4556 data.report_event_configuration.sg_descriptors[1]) -
4557 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4558 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4559 &request.data.report_event_configuration.buffer_length);
4561 rc = pqi_map_single(ctrl_info->pci_dev,
4562 request.data.report_event_configuration.sg_descriptors,
4563 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4564 DMA_TO_DEVICE);
4565 if (rc)
4566 goto out;
4568 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4569 NULL, NO_TIMEOUT);
4571 pqi_pci_unmap(ctrl_info->pci_dev,
4572 request.data.report_event_configuration.sg_descriptors, 1,
4573 DMA_TO_DEVICE);
4575 out:
4576 kfree(event_config);
4578 return rc;
4579 }
4581 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4582 {
4583 return pqi_configure_events(ctrl_info, true);
4584 }
4586 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4587 {
4588 return pqi_configure_events(ctrl_info, false);
4589 }
4591 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4592 {
4593 unsigned int i;
4594 struct device *dev;
4595 size_t sg_chain_buffer_length;
4596 struct pqi_io_request *io_request;
4598 if (!ctrl_info->io_request_pool)
4599 return;
4601 dev = &ctrl_info->pci_dev->dev;
4602 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4603 io_request = ctrl_info->io_request_pool;
4605 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4606 kfree(io_request->iu);
4607 if (!io_request->sg_chain_buffer)
4608 break;
4609 dma_free_coherent(dev, sg_chain_buffer_length,
4610 io_request->sg_chain_buffer,
4611 io_request->sg_chain_buffer_dma_handle);
4612 io_request++;
4613 }
4615 kfree(ctrl_info->io_request_pool);
4616 ctrl_info->io_request_pool = NULL;
4617 }
4619 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4620 {
4622 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
4623 ctrl_info->error_buffer_length,
4624 &ctrl_info->error_buffer_dma_handle,
4625 GFP_KERNEL);
4626 if (!ctrl_info->error_buffer)
4627 return -ENOMEM;
4629 return 0;
4630 }
4632 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4633 {
4634 unsigned int i;
4635 void *sg_chain_buffer;
4636 size_t sg_chain_buffer_length;
4637 dma_addr_t sg_chain_buffer_dma_handle;
4638 struct device *dev;
4639 struct pqi_io_request *io_request;
4641 ctrl_info->io_request_pool =
4642 kcalloc(ctrl_info->max_io_slots,
4643 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4645 if (!ctrl_info->io_request_pool) {
4646 dev_err(&ctrl_info->pci_dev->dev,
4647 "failed to allocate I/O request pool\n");
4651 dev = &ctrl_info->pci_dev->dev;
4652 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4653 io_request = ctrl_info->io_request_pool;
4655 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4656 io_request->iu =
4657 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4659 if (!io_request->iu) {
4660 dev_err(&ctrl_info->pci_dev->dev,
4661 "failed to allocate IU buffers\n");
4665 sg_chain_buffer = dma_alloc_coherent(dev,
4666 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4667 GFP_KERNEL);
4669 if (!sg_chain_buffer) {
4670 dev_err(&ctrl_info->pci_dev->dev,
4671 "failed to allocate PQI scatter-gather chain buffers\n");
4675 io_request->index = i;
4676 io_request->sg_chain_buffer = sg_chain_buffer;
4677 io_request->sg_chain_buffer_dma_handle =
4678 sg_chain_buffer_dma_handle;
4679 io_request++;
4680 }
4682 return 0;
4684 error:
4685 pqi_free_all_io_requests(ctrl_info);
4687 return -ENOMEM;
4688 }
4690 /*
4691 * Calculate required resources that are sized based on max. outstanding
4692 * requests and max. transfer size.
4693 */
4695 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4696 {
4697 u32 max_transfer_size;
4698 u32 max_sg_entries;
4700 ctrl_info->scsi_ml_can_queue =
4701 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4702 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4704 ctrl_info->error_buffer_length =
4705 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4707 if (reset_devices)
4708 max_transfer_size = min(ctrl_info->max_transfer_size,
4709 PQI_MAX_TRANSFER_SIZE_KDUMP);
4710 else
4711 max_transfer_size = min(ctrl_info->max_transfer_size,
4712 PQI_MAX_TRANSFER_SIZE);
4714 max_sg_entries = max_transfer_size / PAGE_SIZE;
4716 /* +1 to cover when the buffer is not page-aligned. */
4717 max_sg_entries++;
4719 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4721 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4723 ctrl_info->sg_chain_buffer_length =
4724 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4725 PQI_EXTRA_SGL_MEMORY;
4726 ctrl_info->sg_tablesize = max_sg_entries;
4727 ctrl_info->max_sectors = max_transfer_size / 512;
4728 }
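/*
 * Worked example (illustrative only, assuming 4 KiB pages and a
 * controller that allows at least 257 SG entries): with a 1 MiB max
 * transfer size, max_sg_entries = (1048576 / 4096) + 1 = 257; the usable
 * transfer size then becomes (257 - 1) * 4096 = 1 MiB, so max_sectors =
 * 1048576 / 512 = 2048.
 */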
4730 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4731 {
4732 int num_queue_groups;
4733 u16 num_elements_per_iq;
4734 u16 num_elements_per_oq;
4736 if (reset_devices) {
4737 num_queue_groups = 1;
4738 } else {
4739 int num_cpus;
4740 int max_queue_groups;
4742 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4743 ctrl_info->max_outbound_queues - 1);
4744 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4746 num_cpus = num_online_cpus();
4747 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4748 num_queue_groups = min(num_queue_groups, max_queue_groups);
4749 }
4751 ctrl_info->num_queue_groups = num_queue_groups;
4752 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
4754 /*
4755 * Make sure that the max. inbound IU length is an even multiple
4756 * of our inbound element length.
4757 */
4758 ctrl_info->max_inbound_iu_length =
4759 (ctrl_info->max_inbound_iu_length_per_firmware /
4760 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4761 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4763 num_elements_per_iq =
4764 (ctrl_info->max_inbound_iu_length /
4765 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4767 /* Add one because one element in each queue is unusable. */
4768 num_elements_per_iq++;
4770 num_elements_per_iq = min(num_elements_per_iq,
4771 ctrl_info->max_elements_per_iq);
4773 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4774 num_elements_per_oq = min(num_elements_per_oq,
4775 ctrl_info->max_elements_per_oq);
4777 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4778 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4780 ctrl_info->max_sg_per_iu =
4781 ((ctrl_info->max_inbound_iu_length -
4782 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4783 sizeof(struct pqi_sg_descriptor)) +
4784 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4785 }
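/*
 * Illustrative sizing example (the input values are assumptions, not
 * taken from any particular controller): if the firmware reports a max
 * inbound IU length of 1044 bytes and PQI_OPERATIONAL_IQ_ELEMENT_LENGTH
 * is 128, the IU length is rounded down to 1024, one IU then spans 8
 * elements, and num_elements_per_iq starts at 8 + 1 = 9 before being
 * clamped to the controller limit.
 */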
4787 static inline void pqi_set_sg_descriptor(
4788 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4789 {
4790 u64 address = (u64)sg_dma_address(sg);
4791 unsigned int length = sg_dma_len(sg);
4793 put_unaligned_le64(address, &sg_descriptor->address);
4794 put_unaligned_le32(length, &sg_descriptor->length);
4795 put_unaligned_le32(0, &sg_descriptor->flags);
4796 }
4798 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4799 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4800 struct pqi_io_request *io_request)
4801 {
4802 int i;
4803 u16 iu_length;
4804 int sg_count;
4805 bool chained;
4806 unsigned int num_sg_in_iu;
4807 unsigned int max_sg_per_iu;
4808 struct scatterlist *sg;
4809 struct pqi_sg_descriptor *sg_descriptor;
4811 sg_count = scsi_dma_map(scmd);
4812 if (sg_count < 0)
4813 return sg_count;
4815 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4816 PQI_REQUEST_HEADER_LENGTH;
4818 if (sg_count == 0)
4819 goto out;
4821 sg = scsi_sglist(scmd);
4822 sg_descriptor = request->sg_descriptors;
4823 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4824 chained = false;
4825 num_sg_in_iu = 0;
4826 i = 0;
4828 while (1) {
4829 pqi_set_sg_descriptor(sg_descriptor, sg);
4830 if (!chained)
4831 num_sg_in_iu++;
4832 i++;
4833 if (i == sg_count)
4834 break;
4835 sg_descriptor++;
4836 if (i == max_sg_per_iu) {
4837 put_unaligned_le64(
4838 (u64)io_request->sg_chain_buffer_dma_handle,
4839 &sg_descriptor->address);
4840 put_unaligned_le32((sg_count - num_sg_in_iu)
4841 * sizeof(*sg_descriptor),
4842 &sg_descriptor->length);
4843 put_unaligned_le32(CISS_SG_CHAIN,
4844 &sg_descriptor->flags);
4845 chained = true;
4846 num_sg_in_iu++;
4847 sg_descriptor = io_request->sg_chain_buffer;
4848 }
4849 sg = sg_next(sg);
4850 }
4852 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4853 request->partial = chained;
4854 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4856 out:
4857 put_unaligned_le16(iu_length, &request->header.iu_length);
4859 return 0;
4860 }
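/*
 * SG chaining in the builders above and below: once the embedded
 * descriptor slots in the IU are exhausted (max_sg_per_iu - 1 data
 * entries), the last embedded slot is rewritten as a CISS_SG_CHAIN
 * descriptor that points at the request's pre-allocated DMA chain
 * buffer, and filling continues there. The final descriptor is always
 * flagged CISS_SG_LAST.
 */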
4862 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4863 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4864 struct pqi_io_request *io_request)
4865 {
4866 int i;
4867 u16 iu_length;
4868 int sg_count;
4869 bool chained;
4870 unsigned int num_sg_in_iu;
4871 unsigned int max_sg_per_iu;
4872 struct scatterlist *sg;
4873 struct pqi_sg_descriptor *sg_descriptor;
4875 sg_count = scsi_dma_map(scmd);
4876 if (sg_count < 0)
4877 return sg_count;
4879 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4880 PQI_REQUEST_HEADER_LENGTH;
4881 num_sg_in_iu = 0;
4883 if (sg_count == 0)
4884 goto out;
4886 sg = scsi_sglist(scmd);
4887 sg_descriptor = request->sg_descriptors;
4888 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4889 chained = false;
4890 i = 0;
4892 while (1) {
4893 pqi_set_sg_descriptor(sg_descriptor, sg);
4894 if (!chained)
4895 num_sg_in_iu++;
4896 i++;
4897 if (i == sg_count)
4898 break;
4899 sg_descriptor++;
4900 if (i == max_sg_per_iu) {
4901 put_unaligned_le64(
4902 (u64)io_request->sg_chain_buffer_dma_handle,
4903 &sg_descriptor->address);
4904 put_unaligned_le32((sg_count - num_sg_in_iu)
4905 * sizeof(*sg_descriptor),
4906 &sg_descriptor->length);
4907 put_unaligned_le32(CISS_SG_CHAIN,
4908 &sg_descriptor->flags);
4909 chained = true;
4910 num_sg_in_iu++;
4911 sg_descriptor = io_request->sg_chain_buffer;
4912 }
4913 sg = sg_next(sg);
4914 }
4916 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4917 request->partial = chained;
4918 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4920 out:
4921 put_unaligned_le16(iu_length, &request->header.iu_length);
4922 request->num_sg_descriptors = num_sg_in_iu;
4924 return 0;
4925 }
4927 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4928 void *context)
4929 {
4930 struct scsi_cmnd *scmd;
4932 scmd = io_request->scmd;
4933 pqi_free_io_request(io_request);
4934 scsi_dma_unmap(scmd);
4935 pqi_scsi_done(scmd);
4936 }
4938 static int pqi_raid_submit_scsi_cmd_with_io_request(
4939 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
4940 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4941 struct pqi_queue_group *queue_group)
4942 {
4943 int rc;
4944 size_t cdb_length;
4945 struct pqi_raid_path_request *request;
4947 io_request->io_complete_callback = pqi_raid_io_complete;
4948 io_request->scmd = scmd;
4950 request = io_request->iu;
4951 memset(request, 0,
4952 offsetof(struct pqi_raid_path_request, sg_descriptors));
4954 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4955 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4956 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4957 put_unaligned_le16(io_request->index, &request->request_id);
4958 request->error_index = request->request_id;
4959 memcpy(request->lun_number, device->scsi3addr,
4960 sizeof(request->lun_number));
4962 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4963 memcpy(request->cdb, scmd->cmnd, cdb_length);
4965 switch (cdb_length) {
4966 case 6:
4967 case 10:
4968 case 12:
4969 case 16:
4970 /* No bytes in the Additional CDB bytes field */
4971 request->additional_cdb_bytes_usage =
4972 SOP_ADDITIONAL_CDB_BYTES_0;
4973 break;
4974 case 20:
4975 /* 4 bytes in the Additional cdb field */
4976 request->additional_cdb_bytes_usage =
4977 SOP_ADDITIONAL_CDB_BYTES_4;
4978 break;
4979 case 24:
4980 /* 8 bytes in the Additional cdb field */
4981 request->additional_cdb_bytes_usage =
4982 SOP_ADDITIONAL_CDB_BYTES_8;
4983 break;
4984 case 28:
4985 /* 12 bytes in the Additional cdb field */
4986 request->additional_cdb_bytes_usage =
4987 SOP_ADDITIONAL_CDB_BYTES_12;
4988 break;
4989 case 32:
4990 default:
4991 /* 16 bytes in the Additional cdb field */
4992 request->additional_cdb_bytes_usage =
4993 SOP_ADDITIONAL_CDB_BYTES_16;
4994 break;
4995 }
4997 switch (scmd->sc_data_direction) {
4998 case DMA_TO_DEVICE:
4999 request->data_direction = SOP_READ_FLAG;
5000 break;
5001 case DMA_FROM_DEVICE:
5002 request->data_direction = SOP_WRITE_FLAG;
5003 break;
5004 case DMA_NONE:
5005 request->data_direction = SOP_NO_DIRECTION_FLAG;
5006 break;
5007 case DMA_BIDIRECTIONAL:
5008 request->data_direction = SOP_BIDIRECTIONAL;
5009 break;
5010 default:
5011 dev_err(&ctrl_info->pci_dev->dev,
5012 "unknown data direction: %d\n",
5013 scmd->sc_data_direction);
5014 break;
5015 }
5017 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5018 if (rc) {
5019 pqi_free_io_request(io_request);
5020 return SCSI_MLQUEUE_HOST_BUSY;
5021 }
5023 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5025 return 0;
5026 }
5028 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5029 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5030 struct pqi_queue_group *queue_group)
5031 {
5032 struct pqi_io_request *io_request;
5034 io_request = pqi_alloc_io_request(ctrl_info);
5036 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5037 device, scmd, queue_group);
5038 }
5040 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
5041 {
5042 if (!pqi_ctrl_blocked(ctrl_info))
5043 schedule_work(&ctrl_info->raid_bypass_retry_work);
5044 }
5046 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5047 {
5048 struct scsi_cmnd *scmd;
5049 struct pqi_scsi_dev *device;
5050 struct pqi_ctrl_info *ctrl_info;
5052 if (!io_request->raid_bypass)
5053 return false;
5055 scmd = io_request->scmd;
5056 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5057 return false;
5058 if (host_byte(scmd->result) == DID_NO_CONNECT)
5059 return false;
5061 device = scmd->device->hostdata;
5062 if (pqi_device_offline(device))
5063 return false;
5065 ctrl_info = shost_to_hba(scmd->device->host);
5066 if (pqi_ctrl_offline(ctrl_info))
5067 return false;
5069 return true;
5070 }
5072 static inline void pqi_add_to_raid_bypass_retry_list(
5073 struct pqi_ctrl_info *ctrl_info,
5074 struct pqi_io_request *io_request, bool at_head)
5075 {
5076 unsigned long flags;
5078 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5079 if (at_head)
5080 list_add(&io_request->request_list_entry,
5081 &ctrl_info->raid_bypass_retry_list);
5082 else
5083 list_add_tail(&io_request->request_list_entry,
5084 &ctrl_info->raid_bypass_retry_list);
5085 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5086 }
5088 static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
5089 void *context)
5090 {
5091 struct scsi_cmnd *scmd;
5093 scmd = io_request->scmd;
5094 pqi_free_io_request(io_request);
5095 pqi_scsi_done(scmd);
5096 }
5098 static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
5099 {
5100 struct scsi_cmnd *scmd;
5101 struct pqi_ctrl_info *ctrl_info;
5103 io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
5104 scmd = io_request->scmd;
5105 set_host_byte(scmd, DID_IMM_RETRY);
5106 ctrl_info = shost_to_hba(scmd->device->host);
5108 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
5109 pqi_schedule_bypass_retry(ctrl_info);
5110 }
5112 static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
5113 {
5114 struct scsi_cmnd *scmd;
5115 struct pqi_scsi_dev *device;
5116 struct pqi_ctrl_info *ctrl_info;
5117 struct pqi_queue_group *queue_group;
5119 scmd = io_request->scmd;
5120 device = scmd->device->hostdata;
5121 if (pqi_device_in_reset(device)) {
5122 pqi_free_io_request(io_request);
5123 set_host_byte(scmd, DID_RESET);
5124 pqi_scsi_done(scmd);
5125 return 0;
5126 }
5128 ctrl_info = shost_to_hba(scmd->device->host);
5129 queue_group = io_request->queue_group;
5131 pqi_reinit_io_request(io_request);
5133 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5134 device, scmd, queue_group);
5135 }
5137 static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
5138 struct pqi_ctrl_info *ctrl_info)
5139 {
5140 unsigned long flags;
5141 struct pqi_io_request *io_request;
5143 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5144 io_request = list_first_entry_or_null(
5145 &ctrl_info->raid_bypass_retry_list,
5146 struct pqi_io_request, request_list_entry);
5147 if (io_request)
5148 list_del(&io_request->request_list_entry);
5149 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5151 return io_request;
5152 }
5154 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
5155 {
5156 int rc;
5157 struct pqi_io_request *io_request;
5159 pqi_ctrl_busy(ctrl_info);
5161 while (1) {
5162 if (pqi_ctrl_blocked(ctrl_info))
5163 break;
5164 io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
5165 if (!io_request)
5166 break;
5167 rc = pqi_retry_raid_bypass(io_request);
5168 if (rc) {
5169 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
5170 true);
5171 pqi_schedule_bypass_retry(ctrl_info);
5172 break;
5173 }
5174 }
5176 pqi_ctrl_unbusy(ctrl_info);
5177 }
5179 static void pqi_raid_bypass_retry_worker(struct work_struct *work)
5180 {
5181 struct pqi_ctrl_info *ctrl_info;
5183 ctrl_info = container_of(work, struct pqi_ctrl_info,
5184 raid_bypass_retry_work);
5185 pqi_retry_raid_bypass_requests(ctrl_info);
5186 }
5188 static void pqi_clear_all_queued_raid_bypass_retries(
5189 struct pqi_ctrl_info *ctrl_info)
5190 {
5191 unsigned long flags;
5193 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5194 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
5195 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5196 }
5198 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5199 void *context)
5200 {
5201 struct scsi_cmnd *scmd;
5203 scmd = io_request->scmd;
5204 scsi_dma_unmap(scmd);
5205 if (io_request->status == -EAGAIN)
5206 set_host_byte(scmd, DID_IMM_RETRY);
5207 else if (pqi_raid_bypass_retry_needed(io_request)) {
5208 pqi_queue_raid_bypass_retry(io_request);
5209 return;
5210 }
5211 pqi_free_io_request(io_request);
5212 pqi_scsi_done(scmd);
5213 }
5215 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5216 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5217 struct pqi_queue_group *queue_group)
5218 {
5219 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5220 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
5221 }
5223 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5224 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5225 unsigned int cdb_length, struct pqi_queue_group *queue_group,
5226 struct pqi_encryption_info *encryption_info, bool raid_bypass)
5227 {
5228 int rc;
5229 struct pqi_io_request *io_request;
5230 struct pqi_aio_path_request *request;
5232 io_request = pqi_alloc_io_request(ctrl_info);
5233 io_request->io_complete_callback = pqi_aio_io_complete;
5234 io_request->scmd = scmd;
5235 io_request->raid_bypass = raid_bypass;
5237 request = io_request->iu;
5238 memset(request, 0,
5239 offsetof(struct pqi_raid_path_request, sg_descriptors));
5241 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5242 put_unaligned_le32(aio_handle, &request->nexus_id);
5243 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5244 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5245 put_unaligned_le16(io_request->index, &request->request_id);
5246 request->error_index = request->request_id;
5247 if (cdb_length > sizeof(request->cdb))
5248 cdb_length = sizeof(request->cdb);
5249 request->cdb_length = cdb_length;
5250 memcpy(request->cdb, cdb, cdb_length);
5252 switch (scmd->sc_data_direction) {
5253 case DMA_TO_DEVICE:
5254 request->data_direction = SOP_READ_FLAG;
5255 break;
5256 case DMA_FROM_DEVICE:
5257 request->data_direction = SOP_WRITE_FLAG;
5258 break;
5259 case DMA_NONE:
5260 request->data_direction = SOP_NO_DIRECTION_FLAG;
5261 break;
5262 case DMA_BIDIRECTIONAL:
5263 request->data_direction = SOP_BIDIRECTIONAL;
5264 break;
5265 default:
5266 dev_err(&ctrl_info->pci_dev->dev,
5267 "unknown data direction: %d\n",
5268 scmd->sc_data_direction);
5269 break;
5270 }
5272 if (encryption_info) {
5273 request->encryption_enable = true;
5274 put_unaligned_le16(encryption_info->data_encryption_key_index,
5275 &request->data_encryption_key_index);
5276 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5277 &request->encrypt_tweak_lower);
5278 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5279 &request->encrypt_tweak_upper);
5280 }
5282 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5283 if (rc) {
5284 pqi_free_io_request(io_request);
5285 return SCSI_MLQUEUE_HOST_BUSY;
5286 }
5288 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5290 return 0;
5291 }
5293 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5294 struct scsi_cmnd *scmd)
5295 {
5296 u16 hw_queue;
5298 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
5299 if (hw_queue > ctrl_info->max_hw_queue_index)
5300 hw_queue = 0;
5302 return hw_queue;
5303 }
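/*
 * blk-mq encodes the hardware queue index in the upper bits of the
 * request's unique tag; blk_mq_unique_tag_to_hwq() extracts it. The
 * bounds check above falls back to queue group 0 rather than indexing
 * past ctrl_info->queue_groups[].
 */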
5305 /*
5306 * This function gets called just before we hand the completed SCSI request
5307 * back to the SML.
5308 */
5310 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5311 {
5312 struct pqi_scsi_dev *device;
5314 if (!scmd->device) {
5315 set_host_byte(scmd, DID_NO_CONNECT);
5316 return;
5317 }
5319 device = scmd->device->hostdata;
5320 if (!device) {
5321 set_host_byte(scmd, DID_NO_CONNECT);
5322 return;
5323 }
5325 atomic_dec(&device->scsi_cmds_outstanding);
5326 }
5328 static int pqi_scsi_queue_command(struct Scsi_Host *shost,
5329 struct scsi_cmnd *scmd)
5330 {
5331 int rc;
5332 struct pqi_ctrl_info *ctrl_info;
5333 struct pqi_scsi_dev *device;
5334 u16 hw_queue;
5335 struct pqi_queue_group *queue_group;
5336 bool raid_bypassed;
5338 device = scmd->device->hostdata;
5339 ctrl_info = shost_to_hba(shost);
5341 if (!device) {
5342 set_host_byte(scmd, DID_NO_CONNECT);
5343 pqi_scsi_done(scmd);
5344 return 0;
5345 }
5347 atomic_inc(&device->scsi_cmds_outstanding);
5349 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
5350 device)) {
5351 set_host_byte(scmd, DID_NO_CONNECT);
5352 pqi_scsi_done(scmd);
5353 return 0;
5354 }
5356 pqi_ctrl_busy(ctrl_info);
5357 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
5358 pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
5359 rc = SCSI_MLQUEUE_HOST_BUSY;
5360 goto out;
5361 }
5363 /*
5364 * This is necessary because the SML doesn't zero out this field during
5365 * error recovery.
5366 */
5367 scmd->result = 0;
5369 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5370 queue_group = &ctrl_info->queue_groups[hw_queue];
5372 if (pqi_is_logical_device(device)) {
5373 raid_bypassed = false;
5374 if (device->raid_bypass_enabled &&
5375 !blk_rq_is_passthrough(scmd->request)) {
5376 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
5377 scmd, queue_group);
5378 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
5379 raid_bypassed = true;
5380 }
5381 if (!raid_bypassed)
5382 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5383 queue_group);
5384 } else {
5385 if (device->aio_enabled)
5386 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
5387 queue_group);
5388 else
5389 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5390 queue_group);
5391 }
5393 out:
5394 pqi_ctrl_unbusy(ctrl_info);
5395 if (rc)
5396 atomic_dec(&device->scsi_cmds_outstanding);
5398 return rc;
5399 }
5401 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
5402 struct pqi_queue_group *queue_group)
5403 {
5404 unsigned int path;
5405 unsigned long flags;
5406 bool list_is_empty;
5408 for (path = 0; path < 2; path++) {
5409 while (1) {
5410 spin_lock_irqsave(
5411 &queue_group->submit_lock[path], flags);
5412 list_is_empty =
5413 list_empty(&queue_group->request_list[path]);
5414 spin_unlock_irqrestore(
5415 &queue_group->submit_lock[path], flags);
5416 if (list_is_empty)
5417 break;
5418 pqi_check_ctrl_health(ctrl_info);
5419 if (pqi_ctrl_offline(ctrl_info))
5420 return -ENXIO;
5421 usleep_range(1000, 2000);
5422 }
5423 }
5425 return 0;
5426 }
5428 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5429 {
5430 int rc;
5431 unsigned int i;
5432 unsigned int path;
5433 struct pqi_queue_group *queue_group;
5434 pqi_index_t iq_pi;
5435 pqi_index_t iq_ci;
5437 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5438 queue_group = &ctrl_info->queue_groups[i];
5440 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5441 if (rc)
5442 return rc;
5444 for (path = 0; path < 2; path++) {
5445 iq_pi = queue_group->iq_pi_copy[path];
5447 while (1) {
5448 iq_ci = readl(queue_group->iq_ci[path]);
5449 if (iq_ci == iq_pi)
5450 break;
5451 pqi_check_ctrl_health(ctrl_info);
5452 if (pqi_ctrl_offline(ctrl_info))
5453 return -ENXIO;
5454 usleep_range(1000, 2000);
5455 }
5456 }
5457 }
5459 return 0;
5460 }
5462 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5463 struct pqi_scsi_dev *device)
5464 {
5465 unsigned int i;
5466 unsigned int path;
5467 struct pqi_queue_group *queue_group;
5468 unsigned long flags;
5469 struct pqi_io_request *io_request;
5470 struct pqi_io_request *next;
5471 struct scsi_cmnd *scmd;
5472 struct pqi_scsi_dev *scsi_device;
5474 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5475 queue_group = &ctrl_info->queue_groups[i];
5477 for (path = 0; path < 2; path++) {
5478 spin_lock_irqsave(
5479 &queue_group->submit_lock[path], flags);
5481 list_for_each_entry_safe(io_request, next,
5482 &queue_group->request_list[path],
5483 request_list_entry) {
5484 scmd = io_request->scmd;
5485 if (!scmd)
5486 continue;
5488 scsi_device = scmd->device->hostdata;
5489 if (scsi_device != device)
5490 continue;
5492 list_del(&io_request->request_list_entry);
5493 set_host_byte(scmd, DID_RESET);
5494 pqi_scsi_done(scmd);
5495 }
5497 spin_unlock_irqrestore(
5498 &queue_group->submit_lock[path], flags);
5499 }
5500 }
5501 }
5503 static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
5504 {
5505 unsigned int i;
5506 unsigned int path;
5507 struct pqi_queue_group *queue_group;
5508 unsigned long flags;
5509 struct pqi_io_request *io_request;
5510 struct pqi_io_request *next;
5511 struct scsi_cmnd *scmd;
5513 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5514 queue_group = &ctrl_info->queue_groups[i];
5516 for (path = 0; path < 2; path++) {
5517 spin_lock_irqsave(&queue_group->submit_lock[path],
5518 flags);
5520 list_for_each_entry_safe(io_request, next,
5521 &queue_group->request_list[path],
5522 request_list_entry) {
5524 scmd = io_request->scmd;
5525 if (!scmd)
5526 continue;
5528 list_del(&io_request->request_list_entry);
5529 set_host_byte(scmd, DID_RESET);
5530 pqi_scsi_done(scmd);
5531 }
5533 spin_unlock_irqrestore(
5534 &queue_group->submit_lock[path], flags);
5535 }
5536 }
5537 }
5539 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5540 struct pqi_scsi_dev *device, unsigned long timeout_secs)
5541 {
5542 unsigned long timeout;
5544 timeout = (timeout_secs * PQI_HZ) + jiffies;
5546 while (atomic_read(&device->scsi_cmds_outstanding)) {
5547 pqi_check_ctrl_health(ctrl_info);
5548 if (pqi_ctrl_offline(ctrl_info))
5549 return -ENXIO;
5550 if (timeout_secs != NO_TIMEOUT) {
5551 if (time_after(jiffies, timeout)) {
5552 dev_err(&ctrl_info->pci_dev->dev,
5553 "timed out waiting for pending IO\n");
5554 return -ETIMEDOUT;
5555 }
5556 }
5557 usleep_range(1000, 2000);
5558 }
5560 return 0;
5561 }
5563 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5564 unsigned long timeout_secs)
5565 {
5566 bool io_pending;
5567 unsigned long flags;
5568 unsigned long timeout;
5569 struct pqi_scsi_dev *device;
5571 timeout = (timeout_secs * PQI_HZ) + jiffies;
5572 while (1) {
5573 io_pending = false;
5575 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5575 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5576 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5577 scsi_device_list_entry) {
5578 if (atomic_read(&device->scsi_cmds_outstanding)) {
5579 io_pending = true;
5580 break;
5581 }
5582 }
5583 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5584 flags);
5586 if (!io_pending)
5587 break;
5589 pqi_check_ctrl_health(ctrl_info);
5590 if (pqi_ctrl_offline(ctrl_info))
5591 return -ENXIO;
5593 if (timeout_secs != NO_TIMEOUT) {
5594 if (time_after(jiffies, timeout)) {
5595 dev_err(&ctrl_info->pci_dev->dev,
5596 "timed out waiting for pending IO\n");
5600 usleep_range(1000, 2000);
5606 static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
5607 {
5608 while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
5609 pqi_check_ctrl_health(ctrl_info);
5610 if (pqi_ctrl_offline(ctrl_info))
5611 return -ENXIO;
5612 usleep_range(1000, 2000);
5613 }
5615 return 0;
5616 }
5618 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
5619 void *context)
5620 {
5621 struct completion *waiting = context;
5623 complete(waiting);
5624 }
5626 #define PQI_LUN_RESET_TIMEOUT_SECS 30
5627 #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
5629 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5630 struct pqi_scsi_dev *device, struct completion *wait)
5631 {
5632 int rc;
5634 while (1) {
5635 if (wait_for_completion_io_timeout(wait,
5636 PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
5637 rc = 0;
5638 break;
5639 }
5641 pqi_check_ctrl_health(ctrl_info);
5642 if (pqi_ctrl_offline(ctrl_info)) {
5643 rc = -ENXIO;
5644 break;
5645 }
5646 }
5648 return rc;
5649 }
5651 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
5652 struct pqi_scsi_dev *device)
5653 {
5654 int rc;
5655 struct pqi_io_request *io_request;
5656 DECLARE_COMPLETION_ONSTACK(wait);
5657 struct pqi_task_management_request *request;
5659 io_request = pqi_alloc_io_request(ctrl_info);
5660 io_request->io_complete_callback = pqi_lun_reset_complete;
5661 io_request->context = &wait;
5663 request = io_request->iu;
5664 memset(request, 0, sizeof(*request));
5666 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5667 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5668 &request->header.iu_length);
5669 put_unaligned_le16(io_request->index, &request->request_id);
5670 memcpy(request->lun_number, device->scsi3addr,
5671 sizeof(request->lun_number));
5672 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5673 if (ctrl_info->tmf_iu_timeout_supported)
5674 put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS,
5675 &request->timeout);
5677 pqi_start_io(ctrl_info,
5678 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5679 io_request);
5681 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5682 if (rc == 0)
5683 rc = io_request->status;
5685 pqi_free_io_request(io_request);
5687 return rc;
5688 }
5690 /* Performs a reset at the LUN level. */
5692 #define PQI_LUN_RESET_RETRIES 3
5693 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
5694 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120
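/*
 * Worst-case timing sketch derived from the constants above: up to
 * 1 + PQI_LUN_RESET_RETRIES = 4 reset attempts separated by 10-second
 * sleeps (roughly 30 s of retry delay), followed by a wait of up to
 * 120 s for outstanding I/O if the reset itself failed.
 */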
5696 static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5697 struct pqi_scsi_dev *device)
5698 {
5699 int rc;
5700 unsigned int retries;
5701 unsigned long timeout_secs;
5703 for (retries = 0;;) {
5704 rc = pqi_lun_reset(ctrl_info, device);
5705 if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
5706 break;
5707 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
5708 }
5710 timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;
5712 rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
5714 return rc == 0 ? SUCCESS : FAILED;
5715 }
5717 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5718 struct pqi_scsi_dev *device)
5719 {
5720 int rc;
5722 mutex_lock(&ctrl_info->lun_reset_mutex);
5724 pqi_ctrl_block_requests(ctrl_info);
5725 pqi_ctrl_wait_until_quiesced(ctrl_info);
5726 pqi_fail_io_queued_for_device(ctrl_info, device);
5727 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5728 pqi_device_reset_start(device);
5729 pqi_ctrl_unblock_requests(ctrl_info);
5731 if (rc)
5732 rc = FAILED;
5733 else
5734 rc = _pqi_device_reset(ctrl_info, device);
5736 pqi_device_reset_done(device);
5738 mutex_unlock(&ctrl_info->lun_reset_mutex);
5740 return rc;
5741 }
5743 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5744 {
5745 int rc;
5746 struct Scsi_Host *shost;
5747 struct pqi_ctrl_info *ctrl_info;
5748 struct pqi_scsi_dev *device;
5750 shost = scmd->device->host;
5751 ctrl_info = shost_to_hba(shost);
5752 device = scmd->device->hostdata;
5754 dev_err(&ctrl_info->pci_dev->dev,
5755 "resetting scsi %d:%d:%d:%d\n",
5756 shost->host_no, device->bus, device->target, device->lun);
5758 pqi_check_ctrl_health(ctrl_info);
5759 if (pqi_ctrl_offline(ctrl_info) ||
5760 pqi_device_reset_blocked(ctrl_info)) {
5761 rc = FAILED;
5762 goto out;
5763 }
5765 pqi_wait_until_ofa_finished(ctrl_info);
5767 atomic_inc(&ctrl_info->sync_cmds_outstanding);
5768 rc = pqi_device_reset(ctrl_info, device);
5769 atomic_dec(&ctrl_info->sync_cmds_outstanding);
5771 out:
5772 dev_err(&ctrl_info->pci_dev->dev,
5773 "reset of scsi %d:%d:%d:%d: %s\n",
5774 shost->host_no, device->bus, device->target, device->lun,
5775 rc == SUCCESS ? "SUCCESS" : "FAILED");
5780 static int pqi_slave_alloc(struct scsi_device *sdev)
5781 {
5782 struct pqi_scsi_dev *device;
5783 unsigned long flags;
5784 struct pqi_ctrl_info *ctrl_info;
5785 struct scsi_target *starget;
5786 struct sas_rphy *rphy;
5788 ctrl_info = shost_to_hba(sdev->host);
5790 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5792 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5793 starget = scsi_target(sdev);
5794 rphy = target_to_rphy(starget);
5795 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5796 if (device) {
5797 device->target = sdev_id(sdev);
5798 device->lun = sdev->lun;
5799 device->target_lun_valid = true;
5800 }
5801 } else {
5802 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5803 sdev_id(sdev), sdev->lun);
5804 }
5806 if (device) {
5807 sdev->hostdata = device;
5808 device->sdev = sdev;
5809 if (device->queue_depth) {
5810 device->advertised_queue_depth = device->queue_depth;
5811 scsi_change_queue_depth(sdev,
5812 device->advertised_queue_depth);
5813 }
5814 if (pqi_is_logical_device(device))
5815 pqi_disable_write_same(sdev);
5816 else
5817 sdev->allow_restart = 1;
5818 }
5820 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5822 return 0;
5823 }
5825 static int pqi_map_queues(struct Scsi_Host *shost)
5826 {
5827 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5829 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
5830 ctrl_info->pci_dev, 0);
5831 }
5833 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5834 void __user *arg)
5835 {
5836 struct pci_dev *pci_dev;
5837 u32 subsystem_vendor;
5838 u32 subsystem_device;
5839 cciss_pci_info_struct pciinfo;
5841 if (!arg)
5842 return -EINVAL;
5844 pci_dev = ctrl_info->pci_dev;
5846 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5847 pciinfo.bus = pci_dev->bus->number;
5848 pciinfo.dev_fn = pci_dev->devfn;
5849 subsystem_vendor = pci_dev->subsystem_vendor;
5850 subsystem_device = pci_dev->subsystem_device;
5851 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5852 subsystem_vendor;
5854 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5855 return -EFAULT;
5857 return 0;
5858 }
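/*
 * board_id packs the PCI subsystem IDs into one 32-bit value:
 * (subsystem_device << 16) | subsystem_vendor. As a purely illustrative
 * example, subsystem_device 0x028f with subsystem_vendor 0x9005 yields a
 * board_id of 0x028f9005.
 */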
5860 static int pqi_getdrivver_ioctl(void __user *arg)
5861 {
5862 u32 version;
5864 if (!arg)
5865 return -EINVAL;
5867 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5868 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5870 if (copy_to_user(arg, &version, sizeof(version)))
5871 return -EFAULT;
5873 return 0;
5874 }
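/*
 * The packed driver version reported to CCISS_GETDRIVVER is
 * (major << 28) | (minor << 24) | (release << 16) | revision.
 * For this driver (1.2.10-025) that works out to
 * (1 << 28) | (2 << 24) | (10 << 16) | 25 = 0x120a0019.
 */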
5876 struct ciss_error_info {
5877 u8 scsi_status;
5878 int command_status;
5879 size_t sense_data_length;
5880 };
5882 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5883 struct ciss_error_info *ciss_error_info)
5884 {
5885 int ciss_cmd_status;
5886 size_t sense_data_length;
5888 switch (pqi_error_info->data_out_result) {
5889 case PQI_DATA_IN_OUT_GOOD:
5890 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5891 break;
5892 case PQI_DATA_IN_OUT_UNDERFLOW:
5893 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5894 break;
5895 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5896 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5897 break;
5898 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5899 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5900 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5901 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5902 case PQI_DATA_IN_OUT_ERROR:
5903 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5904 break;
5905 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5906 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5907 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5908 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5909 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5910 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5911 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5912 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5913 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5914 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5915 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5916 break;
5917 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5918 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5919 break;
5920 case PQI_DATA_IN_OUT_ABORTED:
5921 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5922 break;
5923 case PQI_DATA_IN_OUT_TIMEOUT:
5924 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5925 break;
5926 default:
5927 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5928 break;
5929 }
5931 sense_data_length =
5932 get_unaligned_le16(&pqi_error_info->sense_data_length);
5933 if (sense_data_length == 0)
5934 sense_data_length =
5935 get_unaligned_le16(&pqi_error_info->response_data_length);
5936 if (sense_data_length)
5937 if (sense_data_length > sizeof(pqi_error_info->data))
5938 sense_data_length = sizeof(pqi_error_info->data);
5940 ciss_error_info->scsi_status = pqi_error_info->status;
5941 ciss_error_info->command_status = ciss_cmd_status;
5942 ciss_error_info->sense_data_length = sense_data_length;
5943 }
5945 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5946 {
5947 int rc;
5948 char *kernel_buffer = NULL;
5949 u16 iu_length;
5950 size_t sense_data_length;
5951 IOCTL_Command_struct iocommand;
5952 struct pqi_raid_path_request request;
5953 struct pqi_raid_error_info pqi_error_info;
5954 struct ciss_error_info ciss_error_info;
5956 if (pqi_ctrl_offline(ctrl_info))
5957 return -ENXIO;
5958 if (!arg)
5959 return -EINVAL;
5960 if (!capable(CAP_SYS_RAWIO))
5961 return -EPERM;
5962 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5963 return -EFAULT;
5964 if (iocommand.buf_size < 1 &&
5965 iocommand.Request.Type.Direction != XFER_NONE)
5966 return -EINVAL;
5967 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5968 return -EINVAL;
5969 if (iocommand.Request.Type.Type != TYPE_CMD)
5970 return -EINVAL;
5972 switch (iocommand.Request.Type.Direction) {
5973 case XFER_NONE:
5974 case XFER_WRITE:
5975 case XFER_READ:
5976 case XFER_READ | XFER_WRITE:
5977 break;
5978 default:
5979 return -EINVAL;
5980 }
5982 if (iocommand.buf_size > 0) {
5983 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
5984 if (!kernel_buffer)
5985 return -ENOMEM;
5986 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5987 if (copy_from_user(kernel_buffer, iocommand.buf,
5988 iocommand.buf_size)) {
5989 rc = -EFAULT;
5990 goto out;
5991 }
5992 } else {
5993 memset(kernel_buffer, 0, iocommand.buf_size);
5994 }
5995 }
5997 memset(&request, 0, sizeof(request));
5999 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6000 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6001 PQI_REQUEST_HEADER_LENGTH;
6002 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6003 sizeof(request.lun_number));
6004 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6005 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6007 switch (iocommand.Request.Type.Direction) {
6008 case XFER_NONE:
6009 request.data_direction = SOP_NO_DIRECTION_FLAG;
6010 break;
6011 case XFER_WRITE:
6012 request.data_direction = SOP_WRITE_FLAG;
6013 break;
6014 case XFER_READ:
6015 request.data_direction = SOP_READ_FLAG;
6016 break;
6017 case XFER_READ | XFER_WRITE:
6018 request.data_direction = SOP_BIDIRECTIONAL;
6019 break;
6020 }
6022 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6024 if (iocommand.buf_size > 0) {
6025 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6027 rc = pqi_map_single(ctrl_info->pci_dev,
6028 &request.sg_descriptors[0], kernel_buffer,
6029 iocommand.buf_size, DMA_BIDIRECTIONAL);
6030 if (rc)
6031 goto out;
6033 iu_length += sizeof(request.sg_descriptors[0]);
6034 }
6036 put_unaligned_le16(iu_length, &request.header.iu_length);
6038 if (ctrl_info->raid_iu_timeout_supported)
6039 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6041 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6042 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
6044 if (iocommand.buf_size > 0)
6045 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6046 DMA_BIDIRECTIONAL);
6048 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6050 if (rc == 0) {
6051 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6052 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6053 iocommand.error_info.CommandStatus =
6054 ciss_error_info.command_status;
6055 sense_data_length = ciss_error_info.sense_data_length;
6056 if (sense_data_length) {
6057 if (sense_data_length >
6058 sizeof(iocommand.error_info.SenseInfo))
6059 sense_data_length =
6060 sizeof(iocommand.error_info.SenseInfo);
6061 memcpy(iocommand.error_info.SenseInfo,
6062 pqi_error_info.data, sense_data_length);
6063 iocommand.error_info.SenseLen = sense_data_length;
6064 }
6065 }
6067 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6068 rc = -EFAULT;
6069 goto out;
6070 }
6072 if (rc == 0 && iocommand.buf_size > 0 &&
6073 (iocommand.Request.Type.Direction & XFER_READ)) {
6074 if (copy_to_user(iocommand.buf, kernel_buffer,
6075 iocommand.buf_size)) {
6076 rc = -EFAULT;
6077 }
6078 }
6080 out:
6081 kfree(kernel_buffer);
6083 return rc;
6084 }
6086 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6087 void __user *arg)
6088 {
6089 int rc;
6090 struct pqi_ctrl_info *ctrl_info;
6092 ctrl_info = shost_to_hba(sdev->host);
6094 if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
6095 return -EBUSY;
6097 switch (cmd) {
6098 case CCISS_DEREGDISK:
6099 case CCISS_REGNEWDISK:
6100 case CCISS_REGNEWD:
6101 rc = pqi_scan_scsi_devices(ctrl_info);
6102 break;
6103 case CCISS_GETPCIINFO:
6104 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6105 break;
6106 case CCISS_GETDRIVVER:
6107 rc = pqi_getdrivver_ioctl(arg);
6108 break;
6109 case CCISS_PASSTHRU:
6110 rc = pqi_passthru_ioctl(ctrl_info, arg);
6111 break;
6112 default:
6113 rc = -EINVAL;
6114 break;
6115 }
6117 return rc;
6118 }
6120 static ssize_t pqi_firmware_version_show(struct device *dev,
6121 struct device_attribute *attr, char *buffer)
6122 {
6123 struct Scsi_Host *shost;
6124 struct pqi_ctrl_info *ctrl_info;
6126 shost = class_to_shost(dev);
6127 ctrl_info = shost_to_hba(shost);
6129 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6132 static ssize_t pqi_driver_version_show(struct device *dev,
6133 struct device_attribute *attr, char *buffer)
6134 {
6135 return snprintf(buffer, PAGE_SIZE, "%s\n",
6136 DRIVER_VERSION BUILD_TIMESTAMP);
6137 }
6139 static ssize_t pqi_serial_number_show(struct device *dev,
6140 struct device_attribute *attr, char *buffer)
6141 {
6142 struct Scsi_Host *shost;
6143 struct pqi_ctrl_info *ctrl_info;
6145 shost = class_to_shost(dev);
6146 ctrl_info = shost_to_hba(shost);
6148 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6151 static ssize_t pqi_model_show(struct device *dev,
6152 struct device_attribute *attr, char *buffer)
6153 {
6154 struct Scsi_Host *shost;
6155 struct pqi_ctrl_info *ctrl_info;
6157 shost = class_to_shost(dev);
6158 ctrl_info = shost_to_hba(shost);
6160 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6163 static ssize_t pqi_vendor_show(struct device *dev,
6164 struct device_attribute *attr, char *buffer)
6165 {
6166 struct Scsi_Host *shost;
6167 struct pqi_ctrl_info *ctrl_info;
6169 shost = class_to_shost(dev);
6170 ctrl_info = shost_to_hba(shost);
6172 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6175 static ssize_t pqi_host_rescan_store(struct device *dev,
6176 struct device_attribute *attr, const char *buffer, size_t count)
6177 {
6178 struct Scsi_Host *shost = class_to_shost(dev);
6180 pqi_scan_start(shost);
6182 return count;
6183 }
6185 static ssize_t pqi_lockup_action_show(struct device *dev,
6186 struct device_attribute *attr, char *buffer)
6187 {
6188 int count = 0;
6189 unsigned int i;
6191 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6192 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6193 count += scnprintf(buffer + count, PAGE_SIZE - count,
6194 "[%s] ", pqi_lockup_actions[i].name);
6196 count += scnprintf(buffer + count, PAGE_SIZE - count,
6197 "%s ", pqi_lockup_actions[i].name);
6198 }
6200 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6202 return count;
6203 }
6205 static ssize_t pqi_lockup_action_store(struct device *dev,
6206 struct device_attribute *attr, const char *buffer, size_t count)
6207 {
6208 unsigned int i;
6209 char *action_name;
6210 char action_name_buffer[32];
6212 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6213 action_name = strstrip(action_name_buffer);
6215 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6216 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6217 pqi_lockup_action = pqi_lockup_actions[i].action;
6218 return count;
6219 }
6220 }
6222 return -EINVAL;
6223 }
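/*
 * Typical sysfs usage of the attribute pair above (hypothetical shell
 * session; the accepted action names are whatever entries populate
 * pqi_lockup_actions[], e.g. none/reboot/panic in the mainline driver):
 *
 *   # cat /sys/class/scsi_host/host0/lockup_action
 *   [none] reboot panic
 *   # echo panic > /sys/class/scsi_host/host0/lockup_action
 */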
6225 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6226 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6227 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6228 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6229 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
6230 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6231 static DEVICE_ATTR(lockup_action, 0644,
6232 pqi_lockup_action_show, pqi_lockup_action_store);
6234 static struct device_attribute *pqi_shost_attrs[] = {
6235 &dev_attr_driver_version,
6236 &dev_attr_firmware_version,
6237 &dev_attr_model,
6238 &dev_attr_serial_number,
6239 &dev_attr_vendor,
6240 &dev_attr_rescan,
6241 &dev_attr_lockup_action,
6242 NULL
6243 };
6245 static ssize_t pqi_unique_id_show(struct device *dev,
6246 struct device_attribute *attr, char *buffer)
6247 {
6248 struct pqi_ctrl_info *ctrl_info;
6249 struct scsi_device *sdev;
6250 struct pqi_scsi_dev *device;
6251 unsigned long flags;
6252 u8 unique_id[16];
6254 sdev = to_scsi_device(dev);
6255 ctrl_info = shost_to_hba(sdev->host);
6257 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6259 device = sdev->hostdata;
6260 if (!device) {
6261 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6262 flags);
6263 return -ENODEV;
6264 }
6266 if (device->is_physical_device) {
6267 memset(unique_id, 0, 8);
6268 memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
6269 } else {
6270 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
6271 }
6273 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6275 return snprintf(buffer, PAGE_SIZE,
6276 "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
6277 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
6278 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
6279 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
6280 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
6281 }
6283 static ssize_t pqi_lunid_show(struct device *dev,
6284 struct device_attribute *attr, char *buffer)
6285 {
6286 struct pqi_ctrl_info *ctrl_info;
6287 struct scsi_device *sdev;
6288 struct pqi_scsi_dev *device;
6289 unsigned long flags;
6290 u8 lunid[8];
6292 sdev = to_scsi_device(dev);
6293 ctrl_info = shost_to_hba(sdev->host);
6295 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6297 device = sdev->hostdata;
6298 if (!device) {
6299 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6300 flags);
6301 return -ENODEV;
6302 }
6304 memcpy(lunid, device->scsi3addr, sizeof(lunid));
6306 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6308 return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
6313 static ssize_t pqi_path_info_show(struct device *dev,
6314 struct device_attribute *attr, char *buf)
6315 {
6316 struct pqi_ctrl_info *ctrl_info;
6317 struct scsi_device *sdev;
6318 struct pqi_scsi_dev *device;
6319 unsigned long flags;
6320 int i;
6321 int output_len = 0;
6322 u8 box;
6323 u8 bay;
6324 u8 path_map_index = 0;
6325 char *active;
6326 u8 phys_connector[2];
6328 sdev = to_scsi_device(dev);
6329 ctrl_info = shost_to_hba(sdev->host);
6331 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6333 device = sdev->hostdata;
6334 if (!device) {
6335 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6336 flags);
6337 return -ENODEV;
6338 }
6340 bay = device->bay;
6341 for (i = 0; i < MAX_PATHS; i++) {
6342 path_map_index = 1 << i;
6343 if (i == device->active_path_index)
6344 active = "Active";
6345 else if (device->path_map & path_map_index)
6346 active = "Inactive";
6347 else
6348 continue;
6350 output_len += scnprintf(buf + output_len,
6351 PAGE_SIZE - output_len,
6352 "[%d:%d:%d:%d] %20.20s ",
6353 ctrl_info->scsi_host->host_no,
6354 device->bus, device->target,
6355 device->lun,
6356 scsi_device_type(device->devtype));
6358 if (device->devtype == TYPE_RAID ||
6359 pqi_is_logical_device(device))
6360 goto end_buffer;
6362 memcpy(&phys_connector, &device->phys_connector[i],
6363 sizeof(phys_connector));
6364 if (phys_connector[0] < '0')
6365 phys_connector[0] = '0';
6366 if (phys_connector[1] < '0')
6367 phys_connector[1] = '0';
6369 output_len += scnprintf(buf + output_len,
6370 PAGE_SIZE - output_len,
6371 "PORT: %.2s ", phys_connector);
6373 box = device->box[i];
6374 if (box != 0 && box != 0xFF)
6375 output_len += scnprintf(buf + output_len,
6376 PAGE_SIZE - output_len,
6377 "BOX: %hhu ", box);
6379 if ((device->devtype == TYPE_DISK ||
6380 device->devtype == TYPE_ZBC) &&
6381 pqi_expose_device(device))
6382 output_len += scnprintf(buf + output_len,
6383 PAGE_SIZE - output_len,
6384 "BAY: %hhu ", bay);
6386 end_buffer:
6387 output_len += scnprintf(buf + output_len,
6388 PAGE_SIZE - output_len,
6389 "%s\n", active);
6390 }
6392 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6394 return output_len;
6395 }
6397 static ssize_t pqi_sas_address_show(struct device *dev,
6398 struct device_attribute *attr, char *buffer)
6399 {
6400 struct pqi_ctrl_info *ctrl_info;
6401 struct scsi_device *sdev;
6402 struct pqi_scsi_dev *device;
6403 unsigned long flags;
6404 u64 sas_address;
6406 sdev = to_scsi_device(dev);
6407 ctrl_info = shost_to_hba(sdev->host);
6409 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6411 device = sdev->hostdata;
6412 if (pqi_is_logical_device(device)) {
6413 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
6414 flags);
6415 return -ENODEV;
6416 }
6418 sas_address = device->sas_address;
6420 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6422 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
6425 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
6426 struct device_attribute *attr, char *buffer)
6427 {
6428 struct pqi_ctrl_info *ctrl_info;
6429 struct scsi_device *sdev;
6430 struct pqi_scsi_dev *device;
6431 unsigned long flags;
6433 sdev = to_scsi_device(dev);
6434 ctrl_info = shost_to_hba(sdev->host);
6436 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6438 device = sdev->hostdata;
6439 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
6440 buffer[1] = '\n';
6441 buffer[2] = '\0';
6443 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6445 return 2;
6446 }
6448 static ssize_t pqi_raid_level_show(struct device *dev,
6449 struct device_attribute *attr, char *buffer)
6450 {
6451 struct pqi_ctrl_info *ctrl_info;
6452 struct scsi_device *sdev;
6453 struct pqi_scsi_dev *device;
6454 unsigned long flags;
6455 char *raid_level;
6457 sdev = to_scsi_device(dev);
6458 ctrl_info = shost_to_hba(sdev->host);
6460 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6462 device = sdev->hostdata;
6464 if (pqi_is_logical_device(device))
6465 raid_level = pqi_raid_level_to_string(device->raid_level);
6466 else
6467 raid_level = "N/A";
6469 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6471 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
6474 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
6475 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
6476 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
6477 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
6478 static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
6479 pqi_ssd_smart_path_enabled_show, NULL);
6480 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
6482 static struct device_attribute *pqi_sdev_attrs[] = {
6483 &dev_attr_lunid,
6484 &dev_attr_unique_id,
6485 &dev_attr_path_info,
6486 &dev_attr_sas_address,
6487 &dev_attr_ssd_smart_path_enabled,
6488 &dev_attr_raid_level,
6489 NULL
6490 };
6492 static struct scsi_host_template pqi_driver_template = {
6493 .module = THIS_MODULE,
6494 .name = DRIVER_NAME_SHORT,
6495 .proc_name = DRIVER_NAME_SHORT,
6496 .queuecommand = pqi_scsi_queue_command,
6497 .scan_start = pqi_scan_start,
6498 .scan_finished = pqi_scan_finished,
6499 .this_id = -1,
6500 .eh_device_reset_handler = pqi_eh_device_reset_handler,
6501 .ioctl = pqi_ioctl,
6502 .slave_alloc = pqi_slave_alloc,
6503 .map_queues = pqi_map_queues,
6504 .sdev_attrs = pqi_sdev_attrs,
6505 .shost_attrs = pqi_shost_attrs,
6506 };
6508 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
6509 {
6510 int rc;
6511 struct Scsi_Host *shost;
6513 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
6514 if (!shost) {
6515 dev_err(&ctrl_info->pci_dev->dev,
6516 "scsi_host_alloc failed for controller %u\n",
6517 ctrl_info->ctrl_id);
6518 return -ENOMEM;
6519 }
6521 shost->io_port = 0;
6522 shost->n_io_port = 0;
6523 shost->this_id = -1;
6524 shost->max_channel = PQI_MAX_BUS;
6525 shost->max_cmd_len = MAX_COMMAND_SIZE;
6526 shost->max_lun = ~0;
6527 shost->max_id = ~0;
6528 shost->max_sectors = ctrl_info->max_sectors;
6529 shost->can_queue = ctrl_info->scsi_ml_can_queue;
6530 shost->cmd_per_lun = shost->can_queue;
6531 shost->sg_tablesize = ctrl_info->sg_tablesize;
6532 shost->transportt = pqi_sas_transport_template;
6533 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
6534 shost->unique_id = shost->irq;
6535 shost->nr_hw_queues = ctrl_info->num_queue_groups;
6536 shost->hostdata[0] = (unsigned long)ctrl_info;
6538 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
6539 if (rc) {
6540 dev_err(&ctrl_info->pci_dev->dev,
6541 "scsi_add_host failed for controller %u\n",
6542 ctrl_info->ctrl_id);
6543 goto free_host;
6544 }
6546 rc = pqi_add_sas_host(shost, ctrl_info);
6547 if (rc) {
6548 dev_err(&ctrl_info->pci_dev->dev,
6549 "add SAS host failed for controller %u\n",
6550 ctrl_info->ctrl_id);
6551 goto remove_host;
6552 }
6554 ctrl_info->scsi_host = shost;
6556 return 0;
6558 remove_host:
6559 scsi_remove_host(shost);
6560 free_host:
6561 scsi_host_put(shost);
6563 return rc;
6564 }
6566 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
6567 {
6568 struct Scsi_Host *shost;
6570 pqi_delete_sas_host(ctrl_info);
6572 shost = ctrl_info->scsi_host;
6573 if (!shost)
6574 return;
6576 scsi_remove_host(shost);
6577 scsi_host_put(shost);
6578 }
6580 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
6581 {
6582 int rc = 0;
6583 struct pqi_device_registers __iomem *pqi_registers;
6584 unsigned long timeout;
6585 unsigned int timeout_msecs;
6586 union pqi_reset_register reset_reg;
6588 pqi_registers = ctrl_info->pqi_registers;
6589 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
6590 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
6592 while (1) {
6593 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
6594 reset_reg.all_bits = readl(&pqi_registers->device_reset);
6595 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
6596 break;
6597 pqi_check_ctrl_health(ctrl_info);
6598 if (pqi_ctrl_offline(ctrl_info)) {
6599 rc = -ENXIO;
6600 break;
6601 }
6602 if (time_after(jiffies, timeout)) {
6603 rc = -ETIMEDOUT;
6604 break;
6605 }
6606 }
6608 return rc;
6609 }
6611 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
6612 {
6613 int rc;
6614 union pqi_reset_register reset_reg;
6616 if (ctrl_info->pqi_reset_quiesce_supported) {
6617 rc = sis_pqi_reset_quiesce(ctrl_info);
6618 if (rc) {
6619 dev_err(&ctrl_info->pci_dev->dev,
6620 "PQI reset failed during quiesce with error %d\n",
6621 rc);
6622 return rc;
6623 }
6624 }
6626 reset_reg.all_bits = 0;
6627 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
6628 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
6630 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
6632 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
6633 if (rc)
6634 dev_err(&ctrl_info->pci_dev->dev,
6635 "PQI reset failed with error %d\n", rc);
6640 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
6641 {
6642 int rc;
6643 struct bmic_sense_subsystem_info *sense_info;
6645 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
6646 if (!sense_info)
6647 return -ENOMEM;
6649 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
6650 if (rc)
6651 goto out;
6653 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
6654 sizeof(sense_info->ctrl_serial_number));
6655 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
6657 out:
6658 kfree(sense_info);
6660 return rc;
6661 }
6663 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
6664 {
6665 int rc;
6666 struct bmic_identify_controller *identify;
6668 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
6669 if (!identify)
6670 return -ENOMEM;
6672 rc = pqi_identify_controller(ctrl_info, identify);
6673 if (rc)
6674 goto out;
6676 memcpy(ctrl_info->firmware_version, identify->firmware_version,
6677 sizeof(identify->firmware_version));
6678 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
6679 snprintf(ctrl_info->firmware_version +
6680 strlen(ctrl_info->firmware_version),
6681 sizeof(ctrl_info->firmware_version),
6682 "-%u", get_unaligned_le16(&identify->firmware_build_number));
6684 memcpy(ctrl_info->model, identify->product_id,
6685 sizeof(identify->product_id));
6686 ctrl_info->model[sizeof(identify->product_id)] = '\0';
6688 memcpy(ctrl_info->vendor, identify->vendor_id,
6689 sizeof(identify->vendor_id));
6690 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
6692 out:
6693 kfree(identify);
6695 return rc;
6696 }
6698 struct pqi_config_table_section_info {
6699 struct pqi_ctrl_info *ctrl_info;
6700 void *section;
6701 u32 section_offset;
6702 void __iomem *section_iomem_addr;
6703 };
6705 static inline bool pqi_is_firmware_feature_supported(
6706 struct pqi_config_table_firmware_features *firmware_features,
6707 unsigned int bit_position)
6708 {
6709 unsigned int byte_index;
6711 byte_index = bit_position / BITS_PER_BYTE;
6713 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
6714 return false;
6716 return firmware_features->features_supported[byte_index] &
6717 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6718 }
6720 static inline bool pqi_is_firmware_feature_enabled(
6721 struct pqi_config_table_firmware_features *firmware_features,
6722 void __iomem *firmware_features_iomem_addr,
6723 unsigned int bit_position)
6724 {
6725 unsigned int byte_index;
6726 u8 __iomem *features_enabled_iomem_addr;
6728 byte_index = (bit_position / BITS_PER_BYTE) +
6729 (le16_to_cpu(firmware_features->num_elements) * 2);
6731 features_enabled_iomem_addr = firmware_features_iomem_addr +
6732 offsetof(struct pqi_config_table_firmware_features,
6733 features_supported) + byte_index;
6735 return *((__force u8 *)features_enabled_iomem_addr) &
6736 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6739 static inline void pqi_request_firmware_feature(
6740 struct pqi_config_table_firmware_features *firmware_features,
6741 unsigned int bit_position)
6743 unsigned int byte_index;
6745 byte_index = (bit_position / BITS_PER_BYTE) +
6746 le16_to_cpu(firmware_features->num_elements);
6748 firmware_features->features_supported[byte_index] |=
6749 (1 << (bit_position % BITS_PER_BYTE));
6750 }
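/*
 * Tell the controller that the host has changed the given inclusive
 * range of config table sections so the firmware can act on the new
 * contents.
 */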
6752 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
6753 u16 first_section, u16 last_section)
6755 struct pqi_vendor_general_request request;
6757 memset(&request, 0, sizeof(request));
6759 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
6760 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
6761 &request.header.iu_length);
6762 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
6763 &request.function_code);
6764 put_unaligned_le16(first_section,
6765 &request.data.config_table_update.first_section);
6766 put_unaligned_le16(last_section,
6767 &request.data.config_table_update.last_section);
6769 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6770 0, NULL, NO_TIMEOUT);
6773 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
6774 struct pqi_config_table_firmware_features *firmware_features,
6775 void __iomem *firmware_features_iomem_addr)
6777 void *features_requested;
6778 void __iomem *features_requested_iomem_addr;
6780 features_requested = firmware_features->features_supported +
6781 le16_to_cpu(firmware_features->num_elements);
6783 features_requested_iomem_addr = firmware_features_iomem_addr +
6784 (features_requested - (void *)firmware_features);
6786 memcpy_toio(features_requested_iomem_addr, features_requested,
6787 le16_to_cpu(firmware_features->num_elements));
6789 return pqi_config_table_update(ctrl_info,
6790 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
6791 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
6794 struct pqi_firmware_feature {
6795 char *feature_name;
6796 unsigned int feature_bit;
6797 bool supported;
6798 bool enabled;
6799 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
6800 struct pqi_firmware_feature *firmware_feature);
6801 };
6803 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
6804 struct pqi_firmware_feature *firmware_feature)
6806 if (!firmware_feature->supported) {
6807 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
6808 firmware_feature->feature_name);
6809 return;
6810 }
6812 if (firmware_feature->enabled) {
6813 dev_info(&ctrl_info->pci_dev->dev,
6814 "%s enabled\n", firmware_feature->feature_name);
6815 return;
6816 }
6818 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
6819 firmware_feature->feature_name);
6822 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
6823 struct pqi_firmware_feature *firmware_feature)
6825 switch (firmware_feature->feature_bit) {
6826 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
6827 ctrl_info->soft_reset_handshake_supported =
6828 firmware_feature->enabled;
6829 break;
6830 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
6831 ctrl_info->raid_iu_timeout_supported =
6832 firmware_feature->enabled;
6833 break;
6834 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
6835 ctrl_info->tmf_iu_timeout_supported =
6836 firmware_feature->enabled;
6837 break;
6838 }
6840 pqi_firmware_feature_status(ctrl_info, firmware_feature);
6841 }
6843 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
6844 struct pqi_firmware_feature *firmware_feature)
6846 if (firmware_feature->feature_status)
6847 firmware_feature->feature_status(ctrl_info, firmware_feature);
6850 static DEFINE_MUTEX(pqi_firmware_features_mutex);
6852 static struct pqi_firmware_feature pqi_firmware_features[] = {
6853 {
6854 .feature_name = "Online Firmware Activation",
6855 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
6856 .feature_status = pqi_firmware_feature_status,
6857 },
6858 {
6859 .feature_name = "Serial Management Protocol",
6860 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
6861 .feature_status = pqi_firmware_feature_status,
6862 },
6863 {
6864 .feature_name = "New Soft Reset Handshake",
6865 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
6866 .feature_status = pqi_ctrl_update_feature_flags,
6867 },
6868 {
6869 .feature_name = "RAID IU Timeout",
6870 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
6871 .feature_status = pqi_ctrl_update_feature_flags,
6872 },
6873 {
6874 .feature_name = "TMF IU Timeout",
6875 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
6876 .feature_status = pqi_ctrl_update_feature_flags,
6877 },
6878 };
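/*
 * Three passes over the feature table: mark which features the
 * firmware supports (reporting unsupported ones), request every
 * supported feature, then record and report which features the
 * firmware actually enabled.
 */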
6880 static void pqi_process_firmware_features(
6881 struct pqi_config_table_section_info *section_info)
6884 struct pqi_ctrl_info *ctrl_info;
6885 struct pqi_config_table_firmware_features *firmware_features;
6886 void __iomem *firmware_features_iomem_addr;
6888 unsigned int num_features_supported;
6890 ctrl_info = section_info->ctrl_info;
6891 firmware_features = section_info->section;
6892 firmware_features_iomem_addr = section_info->section_iomem_addr;
6894 for (i = 0, num_features_supported = 0;
6895 i < ARRAY_SIZE(pqi_firmware_features); i++) {
6896 if (pqi_is_firmware_feature_supported(firmware_features,
6897 pqi_firmware_features[i].feature_bit)) {
6898 pqi_firmware_features[i].supported = true;
6899 num_features_supported++;
6900 } else {
6901 pqi_firmware_feature_update(ctrl_info,
6902 &pqi_firmware_features[i]);
6903 }
6904 }
6906 if (num_features_supported == 0)
6907 return;
6909 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6910 if (!pqi_firmware_features[i].supported)
6911 continue;
6912 pqi_request_firmware_feature(firmware_features,
6913 pqi_firmware_features[i].feature_bit);
6914 }
6916 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
6917 firmware_features_iomem_addr);
6918 if (rc) {
6919 dev_err(&ctrl_info->pci_dev->dev,
6920 "failed to enable firmware features in PQI configuration table\n");
6921 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6922 if (!pqi_firmware_features[i].supported)
6923 continue;
6924 pqi_firmware_feature_update(ctrl_info,
6925 &pqi_firmware_features[i]);
6926 }
6927 return;
6928 }
6930 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6931 if (!pqi_firmware_features[i].supported)
6932 continue;
6933 if (pqi_is_firmware_feature_enabled(firmware_features,
6934 firmware_features_iomem_addr,
6935 pqi_firmware_features[i].feature_bit)) {
6936 pqi_firmware_features[i].enabled = true;
6938 pqi_firmware_feature_update(ctrl_info,
6939 &pqi_firmware_features[i]);
6940 }
6941 }
6943 static void pqi_init_firmware_features(void)
6947 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6948 pqi_firmware_features[i].supported = false;
6949 pqi_firmware_features[i].enabled = false;
6953 static void pqi_process_firmware_features_section(
6954 struct pqi_config_table_section_info *section_info)
6956 mutex_lock(&pqi_firmware_features_mutex);
6957 pqi_init_firmware_features();
6958 pqi_process_firmware_features(section_info);
6959 mutex_unlock(&pqi_firmware_features_mutex);
6960 }
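/*
 * Walk the PQI config table: copy it out of BAR memory, then follow
 * the section headers via first_section_offset/next_section_offset,
 * handling the firmware features, heartbeat, and soft reset sections.
 */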
6962 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
6966 void __iomem *table_iomem_addr;
6967 struct pqi_config_table *config_table;
6968 struct pqi_config_table_section_header *section;
6969 struct pqi_config_table_section_info section_info;
6971 table_length = ctrl_info->config_table_length;
6972 if (table_length == 0)
6973 return 0;
6975 config_table = kmalloc(table_length, GFP_KERNEL);
6976 if (!config_table) {
6977 dev_err(&ctrl_info->pci_dev->dev,
6978 "failed to allocate memory for PQI configuration table\n");
6979 return -ENOMEM;
6980 }
6982 /*
6983 * Copy the config table contents from I/O memory space into the
6984 * kernel memory space.
6985 */
6986 table_iomem_addr = ctrl_info->iomem_base +
6987 ctrl_info->config_table_offset;
6988 memcpy_fromio(config_table, table_iomem_addr, table_length);
6990 section_info.ctrl_info = ctrl_info;
6991 section_offset =
6992 get_unaligned_le32(&config_table->first_section_offset);
6994 while (section_offset) {
6995 section = (void *)config_table + section_offset;
6997 section_info.section = section;
6998 section_info.section_offset = section_offset;
6999 section_info.section_iomem_addr =
7000 table_iomem_addr + section_offset;
7002 switch (get_unaligned_le16(&section->section_id)) {
7003 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
7004 pqi_process_firmware_features_section(&section_info);
7005 break;
7006 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
7007 if (pqi_disable_heartbeat)
7008 dev_warn(&ctrl_info->pci_dev->dev,
7009 "heartbeat disabled by module parameter\n");
7011 ctrl_info->heartbeat_counter =
7015 struct pqi_config_table_heartbeat,
7018 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
7019 ctrl_info->soft_reset_status = table_iomem_addr +
7021 section_offset +
7022 offsetof(struct pqi_config_table_soft_reset,
7023 soft_reset_status);
7024 break;
7027 section_offset =
7028 get_unaligned_le16(&section->next_section_offset);
7029 }
7031 kfree(config_table);
7036 /* Switches the controller from PQI mode back into SIS mode. */
7038 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
7042 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
7043 rc = pqi_reset(ctrl_info);
7046 rc = sis_reenable_sis_mode(ctrl_info);
7048 dev_err(&ctrl_info->pci_dev->dev,
7049 "re-enabling SIS mode failed with error %d\n", rc);
7052 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7057 /*
7058 * If the controller isn't already in SIS mode, this function forces it into
7059 * SIS mode.
7060 */
7062 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
7064 if (!sis_is_firmware_running(ctrl_info))
7065 return 0;
7067 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
7068 return 0;
7070 if (sis_is_kernel_up(ctrl_info)) {
7071 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7072 return 0;
7073 }
7075 return pqi_revert_to_sis_mode(ctrl_info);
7078 #define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
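/*
 * One-time controller bring-up.  In a kdump kernel (reset_devices) the
 * controller is soft reset first and its outstanding request count is
 * capped, since the crashed kernel may have left queues posted.
 */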
7080 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
7084 if (reset_devices) {
7085 sis_soft_reset(ctrl_info);
7086 msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
7088 rc = pqi_force_sis_mode(ctrl_info);
7093 /*
7094 * Wait until the controller is ready to start accepting SIS
7095 * commands.
7096 */
7097 rc = sis_wait_for_ctrl_ready(ctrl_info);
7101 /*
7102 * Get the controller properties. This allows us to determine
7103 * whether or not it supports PQI mode.
7104 */
7105 rc = sis_get_ctrl_properties(ctrl_info);
7107 dev_err(&ctrl_info->pci_dev->dev,
7108 "error obtaining controller properties\n");
7112 rc = sis_get_pqi_capabilities(ctrl_info);
7114 dev_err(&ctrl_info->pci_dev->dev,
7115 "error obtaining controller capabilities\n");
7119 if (reset_devices) {
7120 if (ctrl_info->max_outstanding_requests >
7121 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
7122 ctrl_info->max_outstanding_requests =
7123 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
7125 if (ctrl_info->max_outstanding_requests >
7126 PQI_MAX_OUTSTANDING_REQUESTS)
7127 ctrl_info->max_outstanding_requests =
7128 PQI_MAX_OUTSTANDING_REQUESTS;
7131 pqi_calculate_io_resources(ctrl_info);
7133 rc = pqi_alloc_error_buffer(ctrl_info);
7135 dev_err(&ctrl_info->pci_dev->dev,
7136 "failed to allocate PQI error buffer\n");
7140 /*
7141 * If the function we are about to call succeeds, the
7142 * controller will transition from legacy SIS mode
7143 * into PQI mode.
7144 */
7145 rc = sis_init_base_struct_addr(ctrl_info);
7147 dev_err(&ctrl_info->pci_dev->dev,
7148 "error initializing PQI mode\n");
7152 /* Wait for the controller to complete the SIS -> PQI transition. */
7153 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7155 dev_err(&ctrl_info->pci_dev->dev,
7156 "transition to PQI mode failed\n");
7160 /* From here on, we are running in PQI mode. */
7161 ctrl_info->pqi_mode_enabled = true;
7162 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7164 rc = pqi_alloc_admin_queues(ctrl_info);
7166 dev_err(&ctrl_info->pci_dev->dev,
7167 "failed to allocate admin queues\n");
7171 rc = pqi_create_admin_queues(ctrl_info);
7173 dev_err(&ctrl_info->pci_dev->dev,
7174 "error creating admin queues\n");
7178 rc = pqi_report_device_capability(ctrl_info);
7180 dev_err(&ctrl_info->pci_dev->dev,
7181 "obtaining device capability failed\n");
7185 rc = pqi_validate_device_capability(ctrl_info);
7189 pqi_calculate_queue_resources(ctrl_info);
7191 rc = pqi_enable_msix_interrupts(ctrl_info);
7195 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
7196 ctrl_info->max_msix_vectors =
7197 ctrl_info->num_msix_vectors_enabled;
7198 pqi_calculate_queue_resources(ctrl_info);
7201 rc = pqi_alloc_io_resources(ctrl_info);
7205 rc = pqi_alloc_operational_queues(ctrl_info);
7207 dev_err(&ctrl_info->pci_dev->dev,
7208 "failed to allocate operational queues\n");
7212 pqi_init_operational_queues(ctrl_info);
7214 rc = pqi_request_irqs(ctrl_info);
7218 rc = pqi_create_queues(ctrl_info);
7222 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7224 ctrl_info->controller_online = true;
7226 rc = pqi_process_config_table(ctrl_info);
7230 pqi_start_heartbeat_timer(ctrl_info);
7232 rc = pqi_enable_events(ctrl_info);
7234 dev_err(&ctrl_info->pci_dev->dev,
7235 "error enabling events\n");
7239 /* Register with the SCSI subsystem. */
7240 rc = pqi_register_scsi(ctrl_info);
7244 rc = pqi_get_ctrl_product_details(ctrl_info);
7246 dev_err(&ctrl_info->pci_dev->dev,
7247 "error obtaining product details\n");
7251 rc = pqi_get_ctrl_serial_number(ctrl_info);
7253 dev_err(&ctrl_info->pci_dev->dev,
7254 "error obtaining ctrl serial number\n");
7258 rc = pqi_set_diag_rescan(ctrl_info);
7260 dev_err(&ctrl_info->pci_dev->dev,
7261 "error enabling multi-lun rescan\n");
7265 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7267 dev_err(&ctrl_info->pci_dev->dev,
7268 "error updating host wellness\n");
7272 pqi_schedule_update_time_worker(ctrl_info);
7274 pqi_scan_scsi_devices(ctrl_info);
7276 return 0;
7277 }
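/*
 * Reset all host-side producer/consumer index copies and the
 * corresponding doorbell registers to zero so the admin, operational,
 * and event queues can be recreated after a resume or OFA restart.
 */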
7279 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
7282 struct pqi_admin_queues *admin_queues;
7283 struct pqi_event_queue *event_queue;
7285 admin_queues = &ctrl_info->admin_queues;
7286 admin_queues->iq_pi_copy = 0;
7287 admin_queues->oq_ci_copy = 0;
7288 writel(0, admin_queues->oq_pi);
7290 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
7291 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
7292 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
7293 ctrl_info->queue_groups[i].oq_ci_copy = 0;
7295 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
7296 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
7297 writel(0, ctrl_info->queue_groups[i].oq_pi);
7300 event_queue = &ctrl_info->event_queue;
7301 writel(0, event_queue->oq_pi);
7302 event_queue->oq_ci_copy = 0;
7305 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
7309 rc = pqi_force_sis_mode(ctrl_info);
7313 /*
7314 * Wait until the controller is ready to start accepting SIS
7315 * commands.
7316 */
7317 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
7321 /*
7322 * Get the controller properties. This allows us to determine
7323 * whether or not it supports PQI mode.
7324 */
7325 rc = sis_get_ctrl_properties(ctrl_info);
7327 dev_err(&ctrl_info->pci_dev->dev,
7328 "error obtaining controller properties\n");
7332 rc = sis_get_pqi_capabilities(ctrl_info);
7334 dev_err(&ctrl_info->pci_dev->dev,
7335 "error obtaining controller capabilities\n");
7339 /*
7340 * If the function we are about to call succeeds, the
7341 * controller will transition from legacy SIS mode
7342 * into PQI mode.
7343 */
7344 rc = sis_init_base_struct_addr(ctrl_info);
7346 dev_err(&ctrl_info->pci_dev->dev,
7347 "error initializing PQI mode\n");
7351 /* Wait for the controller to complete the SIS -> PQI transition. */
7352 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7354 dev_err(&ctrl_info->pci_dev->dev,
7355 "transition to PQI mode failed\n");
7359 /* From here on, we are running in PQI mode. */
7360 ctrl_info->pqi_mode_enabled = true;
7361 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7363 pqi_reinit_queues(ctrl_info);
7365 rc = pqi_create_admin_queues(ctrl_info);
7367 dev_err(&ctrl_info->pci_dev->dev,
7368 "error creating admin queues\n");
7372 rc = pqi_create_queues(ctrl_info);
7376 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7378 ctrl_info->controller_online = true;
7379 pqi_ctrl_unblock_requests(ctrl_info);
7381 rc = pqi_process_config_table(ctrl_info);
7385 pqi_start_heartbeat_timer(ctrl_info);
7387 rc = pqi_enable_events(ctrl_info);
7389 dev_err(&ctrl_info->pci_dev->dev,
7390 "error enabling events\n");
7394 rc = pqi_get_ctrl_product_details(ctrl_info);
7396 dev_err(&ctrl_info->pci_dev->dev,
7397 "error obtaining product details\n");
7401 rc = pqi_set_diag_rescan(ctrl_info);
7403 dev_err(&ctrl_info->pci_dev->dev,
7404 "error enabling multi-lun rescan\n");
7408 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7410 dev_err(&ctrl_info->pci_dev->dev,
7411 "error updating host wellness\n");
7415 pqi_schedule_update_time_worker(ctrl_info);
7417 pqi_scan_scsi_devices(ctrl_info);
7422 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
7423 u16 timeout)
7427 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
7428 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
7430 return pcibios_err_to_errno(rc);
7433 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
7438 rc = pci_enable_device(ctrl_info->pci_dev);
7440 dev_err(&ctrl_info->pci_dev->dev,
7441 "failed to enable PCI device\n");
7445 if (sizeof(dma_addr_t) > 4)
7446 mask = DMA_BIT_MASK(64);
7447 else
7448 mask = DMA_BIT_MASK(32);
7450 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
7452 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
7453 goto disable_device;
7456 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
7458 dev_err(&ctrl_info->pci_dev->dev,
7459 "failed to obtain PCI resources\n");
7460 goto disable_device;
7463 ctrl_info->iomem_base = ioremap(pci_resource_start(
7464 ctrl_info->pci_dev, 0),
7465 sizeof(struct pqi_ctrl_registers));
7466 if (!ctrl_info->iomem_base) {
7467 dev_err(&ctrl_info->pci_dev->dev,
7468 "failed to map memory for controller registers\n");
7469 rc = -ENOMEM;
7470 goto release_regions;
7473 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
7475 /* Increase the PCIe completion timeout. */
7476 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
7477 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
7479 dev_err(&ctrl_info->pci_dev->dev,
7480 "failed to set PCIe completion timeout\n");
7481 goto release_regions;
7484 /* Enable bus mastering. */
7485 pci_set_master(ctrl_info->pci_dev);
7487 ctrl_info->registers = ctrl_info->iomem_base;
7488 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
7490 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
7492 return 0;
7494 release_regions:
7495 pci_release_regions(ctrl_info->pci_dev);
7496 disable_device:
7497 pci_disable_device(ctrl_info->pci_dev);
7499 return rc;
7502 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
7504 iounmap(ctrl_info->iomem_base);
7505 pci_release_regions(ctrl_info->pci_dev);
7506 if (pci_is_enabled(ctrl_info->pci_dev))
7507 pci_disable_device(ctrl_info->pci_dev);
7508 pci_set_drvdata(ctrl_info->pci_dev, NULL);
7511 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
7513 struct pqi_ctrl_info *ctrl_info;
7515 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
7516 GFP_KERNEL, numa_node);
7517 if (!ctrl_info)
7518 return NULL;
7520 mutex_init(&ctrl_info->scan_mutex);
7521 mutex_init(&ctrl_info->lun_reset_mutex);
7522 mutex_init(&ctrl_info->ofa_mutex);
7524 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
7525 spin_lock_init(&ctrl_info->scsi_device_list_lock);
7527 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
7528 atomic_set(&ctrl_info->num_interrupts, 0);
7529 atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
7531 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
7532 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
7534 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
7535 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
7537 sema_init(&ctrl_info->sync_request_sem,
7538 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
7539 init_waitqueue_head(&ctrl_info->block_requests_wait);
7541 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
7542 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
7543 INIT_WORK(&ctrl_info->raid_bypass_retry_work,
7544 pqi_raid_bypass_retry_worker);
7546 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
7547 ctrl_info->irq_mode = IRQ_MODE_NONE;
7548 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
7550 return ctrl_info;
7551 }
7553 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
7554 {
7555 kfree(ctrl_info);
7556 }
7558 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
7560 pqi_free_irqs(ctrl_info);
7561 pqi_disable_msix_interrupts(ctrl_info);
7564 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
7566 pqi_stop_heartbeat_timer(ctrl_info);
7567 pqi_free_interrupts(ctrl_info);
7568 if (ctrl_info->queue_memory_base)
7569 dma_free_coherent(&ctrl_info->pci_dev->dev,
7570 ctrl_info->queue_memory_length,
7571 ctrl_info->queue_memory_base,
7572 ctrl_info->queue_memory_base_dma_handle);
7573 if (ctrl_info->admin_queue_memory_base)
7574 dma_free_coherent(&ctrl_info->pci_dev->dev,
7575 ctrl_info->admin_queue_memory_length,
7576 ctrl_info->admin_queue_memory_base,
7577 ctrl_info->admin_queue_memory_base_dma_handle);
7578 pqi_free_all_io_requests(ctrl_info);
7579 if (ctrl_info->error_buffer)
7580 dma_free_coherent(&ctrl_info->pci_dev->dev,
7581 ctrl_info->error_buffer_length,
7582 ctrl_info->error_buffer,
7583 ctrl_info->error_buffer_dma_handle);
7584 if (ctrl_info->iomem_base)
7585 pqi_cleanup_pci_init(ctrl_info);
7586 pqi_free_ctrl_info(ctrl_info);
7589 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
7591 pqi_cancel_rescan_worker(ctrl_info);
7592 pqi_cancel_update_time_worker(ctrl_info);
7593 pqi_remove_all_scsi_devices(ctrl_info);
7594 pqi_unregister_scsi(ctrl_info);
7595 if (ctrl_info->pqi_mode_enabled)
7596 pqi_revert_to_sis_mode(ctrl_info);
7597 pqi_free_ctrl_resources(ctrl_info);
7598 }
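/*
 * Online Firmware Activation (OFA): quiesce drains all I/O and drops
 * the controller back to SIS mode so the firmware can be activated;
 * unquiesce reverses the process and rescans.
 */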
7600 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
7602 pqi_cancel_update_time_worker(ctrl_info);
7603 pqi_cancel_rescan_worker(ctrl_info);
7604 pqi_wait_until_lun_reset_finished(ctrl_info);
7605 pqi_wait_until_scan_finished(ctrl_info);
7606 pqi_ctrl_ofa_start(ctrl_info);
7607 pqi_ctrl_block_requests(ctrl_info);
7608 pqi_ctrl_wait_until_quiesced(ctrl_info);
7609 pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
7610 pqi_fail_io_queued_for_all_devices(ctrl_info);
7611 pqi_wait_until_inbound_queues_empty(ctrl_info);
7612 pqi_stop_heartbeat_timer(ctrl_info);
7613 ctrl_info->pqi_mode_enabled = false;
7614 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7617 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
7619 pqi_ofa_free_host_buffer(ctrl_info);
7620 ctrl_info->pqi_mode_enabled = true;
7621 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7622 ctrl_info->controller_online = true;
7623 pqi_ctrl_unblock_requests(ctrl_info);
7624 pqi_start_heartbeat_timer(ctrl_info);
7625 pqi_schedule_update_time_worker(ctrl_info);
7626 pqi_clear_soft_reset_status(ctrl_info,
7627 PQI_SOFT_RESET_ABORT);
7628 pqi_scan_scsi_devices(ctrl_info);
7631 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
7632 u32 total_size, u32 chunk_size)
7637 struct pqi_sg_descriptor *mem_descriptor = NULL;
7639 struct pqi_ofa_memory *ofap;
7641 dev = &ctrl_info->pci_dev->dev;
7643 sg_count = (total_size + chunk_size - 1);
7644 sg_count /= chunk_size;
7646 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7648 if (sg_count * chunk_size < total_size)
7649 goto out;
7651 ctrl_info->pqi_ofa_chunk_virt_addr =
7652 kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
7653 if (!ctrl_info->pqi_ofa_chunk_virt_addr)
7654 goto out;
7656 for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
7657 dma_addr_t dma_handle;
7659 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
7660 dma_alloc_coherent(dev, chunk_size, &dma_handle,
7663 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
7664 break;
7666 mem_descriptor = &ofap->sg_descriptor[i];
7667 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
7668 put_unaligned_le32(chunk_size, &mem_descriptor->length);
7669 }
7671 if (!size || size < total_size)
7672 goto out_free_chunks;
7674 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
7675 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
7676 put_unaligned_le32(size, &ofap->bytes_allocated);
7678 return 0;
7680 out_free_chunks:
7681 while (--i >= 0) {
7682 mem_descriptor = &ofap->sg_descriptor[i];
7683 dma_free_coherent(dev, chunk_size,
7684 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7685 get_unaligned_le64(&mem_descriptor->address));
7686 }
7687 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7689 out:
7690 put_unaligned_le32(0, &ofap->bytes_allocated);
7691 return -ENOMEM;
7692 }
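/*
 * Build the OFA host buffer by trying progressively smaller chunk
 * sizes, halving each time, until the request fits in at most
 * PQI_OFA_MAX_SG_DESCRIPTORS coherent DMA allocations.
 */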
7694 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
7700 total_size = le32_to_cpu(
7701 ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
7702 min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
7704 for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
7705 if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
7706 return 0;
7708 return -ENOMEM;
7709 }
7711 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
7712 u32 bytes_requested)
7714 struct pqi_ofa_memory *pqi_ofa_memory;
7717 dev = &ctrl_info->pci_dev->dev;
7718 pqi_ofa_memory = dma_alloc_coherent(dev,
7719 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
7720 &ctrl_info->pqi_ofa_mem_dma_handle,
7721 GFP_KERNEL);
7723 if (!pqi_ofa_memory)
7724 return;
7726 put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
7727 memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
7728 sizeof(pqi_ofa_memory->signature));
7729 pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
7731 ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
7733 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
7734 dev_err(dev, "Failed to allocate host buffer of size = %u",
7741 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
7744 struct pqi_sg_descriptor *mem_descriptor;
7745 struct pqi_ofa_memory *ofap;
7747 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7752 if (!ofap->bytes_allocated)
7753 goto out;
7755 mem_descriptor = ofap->sg_descriptor;
7757 for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
7758 i++) {
7759 dma_free_coherent(&ctrl_info->pci_dev->dev,
7760 get_unaligned_le32(&mem_descriptor[i].length),
7761 ctrl_info->pqi_ofa_chunk_virt_addr[i],
7762 get_unaligned_le64(&mem_descriptor[i].address));
7763 }
7764 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
7766 out:
7767 dma_free_coherent(&ctrl_info->pci_dev->dev,
7768 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
7769 ctrl_info->pqi_ofa_mem_dma_handle);
7770 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
7771 }
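/*
 * Hand the SG-described OFA host buffer to the controller with a
 * vendor general IU; buffer_length covers the descriptor header plus
 * one SG descriptor per allocated chunk.
 */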
7773 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
7775 struct pqi_vendor_general_request request;
7777 struct pqi_ofa_memory *ofap;
7779 memset(&request, 0, sizeof(request));
7781 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
7783 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7784 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7785 &request.header.iu_length);
7786 put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
7787 &request.function_code);
7790 size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
7791 get_unaligned_le16(&ofap->num_memory_descriptors) *
7792 sizeof(struct pqi_sg_descriptor);
7794 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
7795 &request.data.ofa_memory_allocation.buffer_address);
7796 put_unaligned_le32(size,
7797 &request.data.ofa_memory_allocation.buffer_length);
7801 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
7802 0, NULL, NO_TIMEOUT);
7805 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
7807 msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
7808 return pqi_ctrl_init_resume(ctrl_info);
7811 static void pqi_perform_lockup_action(void)
7813 switch (pqi_lockup_action) {
7814 case PANIC:
7815 panic("FATAL: Smart Family Controller lockup detected");
7817 case REBOOT:
7818 emergency_restart();
7819 break;
7826 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
7827 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
7828 .status = SAM_STAT_CHECK_CONDITION,
7829 };
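/*
 * Fail every request still held in the I/O request pool: SCSI commands
 * complete with DID_NO_CONNECT, internal requests with -ENXIO plus the
 * canned check-condition error info above.
 */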
7831 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
7834 struct pqi_io_request *io_request;
7835 struct scsi_cmnd *scmd;
7837 for (i = 0; i < ctrl_info->max_io_slots; i++) {
7838 io_request = &ctrl_info->io_request_pool[i];
7839 if (atomic_read(&io_request->refcount) == 0)
7840 continue;
7842 scmd = io_request->scmd;
7843 if (scmd) {
7844 set_host_byte(scmd, DID_NO_CONNECT);
7845 } else {
7846 io_request->status = -ENXIO;
7847 io_request->error_info =
7848 &pqi_ctrl_offline_raid_error_info;
7849 }
7851 io_request->io_complete_callback(io_request,
7852 io_request->context);
7856 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
7858 pqi_perform_lockup_action();
7859 pqi_stop_heartbeat_timer(ctrl_info);
7860 pqi_free_interrupts(ctrl_info);
7861 pqi_cancel_rescan_worker(ctrl_info);
7862 pqi_cancel_update_time_worker(ctrl_info);
7863 pqi_ctrl_wait_until_quiesced(ctrl_info);
7864 pqi_fail_all_outstanding_requests(ctrl_info);
7865 pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
7866 pqi_ctrl_unblock_requests(ctrl_info);
7869 static void pqi_ctrl_offline_worker(struct work_struct *work)
7871 struct pqi_ctrl_info *ctrl_info;
7873 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
7874 pqi_take_ctrl_offline_deferred(ctrl_info);
7875 }
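/*
 * Taking a controller offline is split in two: the synchronous part
 * below blocks new requests and shuts the controller down, while the
 * deferred worker above performs the lockup action and fails
 * everything still outstanding.
 */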
7877 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
7879 if (!ctrl_info->controller_online)
7880 return;
7882 ctrl_info->controller_online = false;
7883 ctrl_info->pqi_mode_enabled = false;
7884 pqi_ctrl_block_requests(ctrl_info);
7885 if (!pqi_disable_ctrl_shutdown)
7886 sis_shutdown_ctrl(ctrl_info);
7887 pci_disable_device(ctrl_info->pci_dev);
7888 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
7889 schedule_work(&ctrl_info->ctrl_offline_work);
7892 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
7893 const struct pci_device_id *id)
7895 char *ctrl_description;
7897 if (id->driver_data)
7898 ctrl_description = (char *)id->driver_data;
7900 ctrl_description = "Microsemi Smart Family Controller";
7902 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
7905 static int pqi_pci_probe(struct pci_dev *pci_dev,
7906 const struct pci_device_id *id)
7910 struct pqi_ctrl_info *ctrl_info;
7912 pqi_print_ctrl_info(pci_dev, id);
7914 if (pqi_disable_device_id_wildcards &&
7915 id->subvendor == PCI_ANY_ID &&
7916 id->subdevice == PCI_ANY_ID) {
7917 dev_warn(&pci_dev->dev,
7918 "controller not probed because device ID wildcards are disabled\n");
7919 return -ENODEV;
7920 }
7922 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
7923 dev_warn(&pci_dev->dev,
7924 "controller device ID matched using wildcards\n");
7926 node = dev_to_node(&pci_dev->dev);
7927 if (node == NUMA_NO_NODE) {
7928 cp_node = cpu_to_node(0);
7929 if (cp_node == NUMA_NO_NODE)
7930 cp_node = 0;
7931 set_dev_node(&pci_dev->dev, cp_node);
7934 ctrl_info = pqi_alloc_ctrl_info(node);
7935 if (!ctrl_info) {
7936 dev_err(&pci_dev->dev,
7937 "failed to allocate controller info block\n");
7938 return -ENOMEM;
7939 }
7941 ctrl_info->pci_dev = pci_dev;
7943 rc = pqi_pci_init(ctrl_info);
7947 rc = pqi_ctrl_init(ctrl_info);
7954 pqi_remove_ctrl(ctrl_info);
7959 static void pqi_pci_remove(struct pci_dev *pci_dev)
7961 struct pqi_ctrl_info *ctrl_info;
7963 ctrl_info = pci_get_drvdata(pci_dev);
7964 if (!ctrl_info)
7965 return;
7967 ctrl_info->in_shutdown = true;
7969 pqi_remove_ctrl(ctrl_info);
7970 }
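/*
 * Called at the end of an orderly shutdown, when no request should be
 * in flight; a busy slot therefore always trips one of the two
 * WARN_ON()s below, identifying whether it held a SCSI command or a
 * driver-internal request.
 */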
7972 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
7975 struct pqi_io_request *io_request;
7976 struct scsi_cmnd *scmd;
7978 for (i = 0; i < ctrl_info->max_io_slots; i++) {
7979 io_request = &ctrl_info->io_request_pool[i];
7980 if (atomic_read(&io_request->refcount) == 0)
7981 continue;
7982 scmd = io_request->scmd;
7983 WARN_ON(scmd != NULL); /* IO command from SML */
7984 WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
7988 static void pqi_shutdown(struct pci_dev *pci_dev)
7991 struct pqi_ctrl_info *ctrl_info;
7993 ctrl_info = pci_get_drvdata(pci_dev);
7994 if (!ctrl_info) {
7995 dev_err(&pci_dev->dev,
7996 "cache could not be flushed\n");
7997 return;
7998 }
8000 pqi_disable_events(ctrl_info);
8001 pqi_wait_until_ofa_finished(ctrl_info);
8002 pqi_cancel_update_time_worker(ctrl_info);
8003 pqi_cancel_rescan_worker(ctrl_info);
8004 pqi_cancel_event_worker(ctrl_info);
8006 pqi_ctrl_shutdown_start(ctrl_info);
8007 pqi_ctrl_wait_until_quiesced(ctrl_info);
8009 rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
8011 dev_err(&pci_dev->dev,
8012 "wait for pending I/O failed\n");
8016 pqi_ctrl_block_device_reset(ctrl_info);
8017 pqi_wait_until_lun_reset_finished(ctrl_info);
8019 /*
8020 * Write all data in the controller's battery-backed cache to
8021 * storage.
8022 */
8023 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
8025 dev_err(&pci_dev->dev,
8026 "unable to flush controller cache\n");
8028 pqi_ctrl_block_requests(ctrl_info);
8030 rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
8032 dev_err(&pci_dev->dev,
8033 "wait for pending sync cmds failed\n");
8037 pqi_crash_if_pending_command(ctrl_info);
8038 pqi_reset(ctrl_info);
8041 static void pqi_process_lockup_action_param(void)
8045 if (!pqi_lockup_action_param)
8046 return;
8048 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
8049 if (strcmp(pqi_lockup_action_param,
8050 pqi_lockup_actions[i].name) == 0) {
8051 pqi_lockup_action = pqi_lockup_actions[i].action;
8052 return;
8053 }
8054 }
8056 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
8057 DRIVER_NAME_SHORT, pqi_lockup_action_param);
8060 static void pqi_process_module_params(void)
8062 pqi_process_lockup_action_param();
8065 static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
8067 struct pqi_ctrl_info *ctrl_info;
8069 ctrl_info = pci_get_drvdata(pci_dev);
8071 pqi_disable_events(ctrl_info);
8072 pqi_cancel_update_time_worker(ctrl_info);
8073 pqi_cancel_rescan_worker(ctrl_info);
8074 pqi_wait_until_scan_finished(ctrl_info);
8075 pqi_wait_until_lun_reset_finished(ctrl_info);
8076 pqi_wait_until_ofa_finished(ctrl_info);
8077 pqi_flush_cache(ctrl_info, SUSPEND);
8078 pqi_ctrl_block_requests(ctrl_info);
8079 pqi_ctrl_wait_until_quiesced(ctrl_info);
8080 pqi_wait_until_inbound_queues_empty(ctrl_info);
8081 pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
8082 pqi_stop_heartbeat_timer(ctrl_info);
8084 if (state.event == PM_EVENT_FREEZE)
8085 return 0;
8087 pci_save_state(pci_dev);
8088 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
8090 ctrl_info->controller_online = false;
8091 ctrl_info->pqi_mode_enabled = false;
8096 static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
8099 struct pqi_ctrl_info *ctrl_info;
8101 ctrl_info = pci_get_drvdata(pci_dev);
8103 if (pci_dev->current_state != PCI_D0) {
8104 ctrl_info->max_hw_queue_index = 0;
8105 pqi_free_interrupts(ctrl_info);
8106 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
8107 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
8108 IRQF_SHARED, DRIVER_NAME_SHORT,
8109 &ctrl_info->queue_groups[0]);
8111 dev_err(&ctrl_info->pci_dev->dev,
8112 "irq %u init failed with error %d\n",
8116 pqi_start_heartbeat_timer(ctrl_info);
8117 pqi_ctrl_unblock_requests(ctrl_info);
8121 pci_set_power_state(pci_dev, PCI_D0);
8122 pci_restore_state(pci_dev);
8124 return pqi_ctrl_init_resume(ctrl_info);
8127 /* Define the PCI IDs for the controllers that we support. */
8128 static const struct pci_device_id pqi_pci_id_table[] = {
8130 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8134 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8138 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8142 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8146 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8150 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8154 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8158 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8162 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8166 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8170 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8174 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8178 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8182 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8186 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8190 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8194 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8198 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8202 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8206 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8210 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8214 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8218 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8222 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8226 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8230 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8234 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8238 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8242 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8246 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8250 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8254 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8255 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
8258 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8259 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
8262 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8263 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
8266 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8267 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
8270 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8271 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
8274 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8275 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
8278 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8279 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
8282 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8283 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
8286 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8287 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
8290 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8291 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
8294 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8295 PCI_VENDOR_ID_ADAPTEC2, 0x0808)
8298 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8299 PCI_VENDOR_ID_ADAPTEC2, 0x0809)
8302 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8303 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
8306 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8307 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
8310 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8311 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
8314 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8315 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
8318 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8319 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
8322 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8323 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
8326 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8327 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
8330 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8331 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
8334 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8335 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
8338 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8339 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
8342 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8343 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
8346 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8347 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
8350 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8351 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
8354 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8355 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
8358 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8359 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
8362 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8363 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
8366 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8367 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
8370 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8371 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
8374 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8375 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
8378 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8379 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
8382 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8383 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
8386 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8387 PCI_VENDOR_ID_ADVANTECH, 0x8312)
8390 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8391 PCI_VENDOR_ID_DELL, 0x1fe0)
8394 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8395 PCI_VENDOR_ID_HP, 0x0600)
8398 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8399 PCI_VENDOR_ID_HP, 0x0601)
8402 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8403 PCI_VENDOR_ID_HP, 0x0602)
8406 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8407 PCI_VENDOR_ID_HP, 0x0603)
8410 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8411 PCI_VENDOR_ID_HP, 0x0609)
8414 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8415 PCI_VENDOR_ID_HP, 0x0650)
8418 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8419 PCI_VENDOR_ID_HP, 0x0651)
8422 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8423 PCI_VENDOR_ID_HP, 0x0652)
8426 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8427 PCI_VENDOR_ID_HP, 0x0653)
8430 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8431 PCI_VENDOR_ID_HP, 0x0654)
8434 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8435 PCI_VENDOR_ID_HP, 0x0655)
8438 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8439 PCI_VENDOR_ID_HP, 0x0700)
8442 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8443 PCI_VENDOR_ID_HP, 0x0701)
8446 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8447 PCI_VENDOR_ID_HP, 0x1001)
8450 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8451 PCI_VENDOR_ID_HP, 0x1100)
8454 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8455 PCI_VENDOR_ID_HP, 0x1101)
8458 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8462 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8466 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8470 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8474 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8475 PCI_VENDOR_ID_GIGABYTE, 0x1000)
8478 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8479 PCI_ANY_ID, PCI_ANY_ID)
8484 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
8486 static struct pci_driver pqi_pci_driver = {
8487 .name = DRIVER_NAME_SHORT,
8488 .id_table = pqi_pci_id_table,
8489 .probe = pqi_pci_probe,
8490 .remove = pqi_pci_remove,
8491 .shutdown = pqi_shutdown,
8492 #if defined(CONFIG_PM)
8493 .suspend = pqi_suspend,
8494 .resume = pqi_resume,
8495 #endif
8496 };
8498 static int __init pqi_init(void)
8502 pr_info(DRIVER_NAME "\n");
8504 pqi_sas_transport_template =
8505 sas_attach_transport(&pqi_sas_transport_functions);
8506 if (!pqi_sas_transport_template)
8507 return -ENODEV;
8509 pqi_process_module_params();
8511 rc = pci_register_driver(&pqi_pci_driver);
8512 if (rc)
8513 sas_release_transport(pqi_sas_transport_template);
8515 return rc;
8516 }
8518 static void __exit pqi_cleanup(void)
8520 pci_unregister_driver(&pqi_pci_driver);
8521 sas_release_transport(pqi_sas_transport_template);
8524 module_init(pqi_init);
8525 module_exit(pqi_cleanup);
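/*
 * Compile-time checks that the driver's structure layouts match the
 * register offsets and IU formats mandated by the PQI specification.
 */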
8527 static void __attribute__((unused)) verify_structures(void)
8529 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8530 sis_host_to_ctrl_doorbell) != 0x20);
8531 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8532 sis_interrupt_mask) != 0x34);
8533 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8534 sis_ctrl_to_host_doorbell) != 0x9c);
8535 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8536 sis_ctrl_to_host_doorbell_clear) != 0xa0);
8537 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8538 sis_driver_scratch) != 0xb0);
8539 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8540 sis_firmware_status) != 0xbc);
8541 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8542 sis_mailbox) != 0x1000);
8543 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8544 pqi_registers) != 0x4000);
8546 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8548 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8550 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8551 response_queue_id) != 0x4);
8552 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8554 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
8556 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8558 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8559 service_response) != 0x1);
8560 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8561 data_present) != 0x2);
8562 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8564 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8565 residual_count) != 0x4);
8566 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8567 data_length) != 0x8);
8568 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8570 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8572 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
8574 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8575 data_in_result) != 0x0);
8576 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8577 data_out_result) != 0x1);
8578 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8580 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8582 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8583 status_qualifier) != 0x6);
8584 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8585 sense_data_length) != 0x8);
8586 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8587 response_data_length) != 0xa);
8588 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8589 data_in_transferred) != 0xc);
8590 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8591 data_out_transferred) != 0x10);
8592 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8594 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
8596 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8598 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8599 function_and_status_code) != 0x8);
8600 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8601 max_admin_iq_elements) != 0x10);
8602 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8603 max_admin_oq_elements) != 0x11);
8604 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8605 admin_iq_element_length) != 0x12);
8606 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8607 admin_oq_element_length) != 0x13);
8608 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8609 max_reset_timeout) != 0x14);
8610 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8611 legacy_intx_status) != 0x18);
8612 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8613 legacy_intx_mask_set) != 0x1c);
8614 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8615 legacy_intx_mask_clear) != 0x20);
8616 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8617 device_status) != 0x40);
8618 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8619 admin_iq_pi_offset) != 0x48);
8620 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8621 admin_oq_ci_offset) != 0x50);
8622 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8623 admin_iq_element_array_addr) != 0x58);
8624 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8625 admin_oq_element_array_addr) != 0x60);
8626 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8627 admin_iq_ci_addr) != 0x68);
8628 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8629 admin_oq_pi_addr) != 0x70);
8630 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8631 admin_iq_num_elements) != 0x78);
8632 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8633 admin_oq_num_elements) != 0x79);
8634 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8635 admin_queue_int_msg_num) != 0x7a);
8636 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8637 device_error) != 0x80);
8638 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8639 error_details) != 0x88);
8640 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8641 device_reset) != 0x90);
8642 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8643 power_action) != 0x94);
8644 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
8646 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8647 header.iu_type) != 0);
8648 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8649 header.iu_length) != 2);
8650 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8651 header.work_area) != 6);
8652 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8654 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8655 function_code) != 10);
8656 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8657 data.report_device_capability.buffer_length) != 44);
8658 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8659 data.report_device_capability.sg_descriptor) != 48);
8660 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8661 data.create_operational_iq.queue_id) != 12);
8662 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8663 data.create_operational_iq.element_array_addr) != 16);
8664 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8665 data.create_operational_iq.ci_addr) != 24);
8666 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8667 data.create_operational_iq.num_elements) != 32);
8668 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8669 data.create_operational_iq.element_length) != 34);
8670 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8671 data.create_operational_iq.queue_protocol) != 36);
8672 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8673 data.create_operational_oq.queue_id) != 12);
8674 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8675 data.create_operational_oq.element_array_addr) != 16);
8676 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8677 data.create_operational_oq.pi_addr) != 24);
8678 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8679 data.create_operational_oq.num_elements) != 32);
8680 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8681 data.create_operational_oq.element_length) != 34);
8682 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8683 data.create_operational_oq.queue_protocol) != 36);
8684 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8685 data.create_operational_oq.int_msg_num) != 40);
8686 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8687 data.create_operational_oq.coalescing_count) != 42);
8688 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8689 data.create_operational_oq.min_coalescing_time) != 44);
8690 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8691 data.create_operational_oq.max_coalescing_time) != 48);
8692 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8693 data.delete_operational_queue.queue_id) != 12);
8694 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
8695 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
8696 data.create_operational_iq) != 64 - 11);
8697 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
8698 data.create_operational_oq) != 64 - 11);
8699 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
8700 data.delete_operational_queue) != 64 - 11);
8702 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8703 header.iu_type) != 0);
8704 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8705 header.iu_length) != 2);
8706 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8707 header.work_area) != 6);
8708 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8710 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8711 function_code) != 10);
8712 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8714 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8715 data.create_operational_iq.status_descriptor) != 12);
8716 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8717 data.create_operational_iq.iq_pi_offset) != 16);
8718 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8719 data.create_operational_oq.status_descriptor) != 12);
8720 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8721 data.create_operational_oq.oq_ci_offset) != 16);
8722 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
8724 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8725 header.iu_type) != 0);
8726 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8727 header.iu_length) != 2);
8728 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8729 header.response_queue_id) != 4);
8730 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8731 header.work_area) != 6);
8732 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8734 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8736 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8737 buffer_length) != 12);
8738 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8740 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8741 protocol_specific) != 24);
8742 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8743 error_index) != 27);
8744 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8746 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8748 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8749 sg_descriptors) != 64);
8750 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
8751 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
8753 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8754 header.iu_type) != 0);
8755 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8756 header.iu_length) != 2);
8757 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8758 header.response_queue_id) != 4);
8759 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8760 header.work_area) != 6);
8761 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8763 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8765 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8766 buffer_length) != 16);
8767 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8768 data_encryption_key_index) != 22);
8769 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8770 encrypt_tweak_lower) != 24);
8771 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8772 encrypt_tweak_upper) != 28);
8773 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8775 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8776 error_index) != 48);
8777 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8778 num_sg_descriptors) != 50);
8779 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8781 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8783 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8784 sg_descriptors) != 64);
8785 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
8786 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
8788 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8789 header.iu_type) != 0);
8790 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8791 header.iu_length) != 2);
8792 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8794 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8795 error_index) != 10);
8797 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8798 header.iu_type) != 0);
8799 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8800 header.iu_length) != 2);
8801 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8802 header.response_queue_id) != 4);
8803 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8805 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8806 data.report_event_configuration.buffer_length) != 12);
8807 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8808 data.report_event_configuration.sg_descriptors) != 16);
8809 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8810 data.set_event_configuration.global_event_oq_id) != 10);
8811 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8812 data.set_event_configuration.buffer_length) != 12);
8813 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8814 data.set_event_configuration.sg_descriptors) != 16);
8816 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
8817 max_inbound_iu_length) != 6);
8818 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
8819 max_outbound_iu_length) != 14);
8820 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
8822 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8824 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8825 iq_arbitration_priority_support_bitmask) != 8);
8826 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8827 maximum_aw_a) != 9);
8828 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8829 maximum_aw_b) != 10);
8830 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8831 maximum_aw_c) != 11);
8832 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8833 max_inbound_queues) != 16);
8834 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8835 max_elements_per_iq) != 18);
8836 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8837 max_iq_element_length) != 24);
8838 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8839 min_iq_element_length) != 26);
8840 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8841 max_outbound_queues) != 30);
8842 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8843 max_elements_per_oq) != 32);
8844 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8845 intr_coalescing_time_granularity) != 34);
8846 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8847 max_oq_element_length) != 36);
8848 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8849 min_oq_element_length) != 38);
8850 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8851 iu_layer_descriptors) != 64);
8852 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
8854 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
8856 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
8858 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
8860 BUILD_BUG_ON(offsetof(struct pqi_event_config,
8861 num_event_descriptors) != 2);
8862 BUILD_BUG_ON(offsetof(struct pqi_event_config,
8865 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
8866 ARRAY_SIZE(pqi_supported_event_types));
8868 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8869 header.iu_type) != 0);
8870 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8871 header.iu_length) != 2);
8872 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8874 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8876 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8877 additional_event_id) != 12);
8878 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8880 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
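
	/* verify the field offsets in the task management request IU */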
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		timeout) != 14);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
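
	/* verify the field offsets in the task management response IU */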
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
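
	/* verify the field offsets in the BMIC identify controller buffer */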
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);
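
	/* verify the field offsets in the BMIC identify physical device buffer */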
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
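
	/* sanity-check the admin and operational queue element counts and lengths */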
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
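
	/* reserved I/O slots must not consume every outstanding-request slot */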
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);