// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/pci.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/sched.h>
18 #include <linux/rtc.h>
19 #include <linux/bcd.h>
20 #include <linux/reboot.h>
21 #include <linux/cciss_ioctl.h>
22 #include <linux/blk-mq-pci.h>
23 #include <scsi/scsi_host.h>
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_device.h>
26 #include <scsi/scsi_eh.h>
27 #include <scsi/scsi_transport_sas.h>
28 #include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"
#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif
36 #define DRIVER_VERSION "1.2.16-012"
37 #define DRIVER_MAJOR 1
38 #define DRIVER_MINOR 2
39 #define DRIVER_RELEASE 16
40 #define DRIVER_REVISION 12
42 #define DRIVER_NAME "Microsemi PQI Driver (v" \
43 DRIVER_VERSION BUILD_TIMESTAMP ")"
44 #define DRIVER_NAME_SHORT "smartpqi"
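/* room for 12 extra SG descriptors per I/O request (exact use is outside this excerpt) */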
46 #define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))
48 MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
51 MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
52 MODULE_VERSION(DRIVER_VERSION);
53 MODULE_LICENSE("GPL");
55 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
56 static void pqi_ctrl_offline_worker(struct work_struct *work);
57 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
58 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
59 static void pqi_scan_start(struct Scsi_Host *shost);
60 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
61 struct pqi_queue_group *queue_group, enum pqi_io_path path,
62 struct pqi_io_request *io_request);
63 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
64 struct pqi_iu_header *request, unsigned int flags,
65 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
66 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
67 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
68 unsigned int cdb_length, struct pqi_queue_group *queue_group,
69 struct pqi_encryption_info *encryption_info, bool raid_bypass);
70 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
71 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
72 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
73 struct pqi_scsi_dev_raid_map_data *rmd);
74 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
75 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
76 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
77 struct pqi_scsi_dev_raid_map_data *rmd);
78 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
79 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
80 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
81 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
83 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
84 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
85 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
86 struct pqi_scsi_dev *device, unsigned long timeout_secs);
88 /* for flags argument to pqi_submit_raid_request_synchronous() */
89 #define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
91 static struct scsi_transport_template *pqi_sas_transport_template;
93 static atomic_t pqi_controller_count = ATOMIC_INIT(0);
enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{ NONE, "none" },
	{ REBOOT, "reboot" },
	{ PANIC, "panic" },
};
121 static unsigned int pqi_supported_event_types[] = {
122 PQI_EVENT_TYPE_HOTPLUG,
123 PQI_EVENT_TYPE_HARDWARE,
124 PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};
131 static int pqi_disable_device_id_wildcards;
132 module_param_named(disable_device_id_wildcards,
133 pqi_disable_device_id_wildcards, int, 0644);
134 MODULE_PARM_DESC(disable_device_id_wildcards,
135 "Disable device ID wildcards.");
137 static int pqi_disable_heartbeat;
138 module_param_named(disable_heartbeat,
139 pqi_disable_heartbeat, int, 0644);
140 MODULE_PARM_DESC(disable_heartbeat,
141 "Disable heartbeat.");
143 static int pqi_disable_ctrl_shutdown;
144 module_param_named(disable_ctrl_shutdown,
145 pqi_disable_ctrl_shutdown, int, 0644);
146 MODULE_PARM_DESC(disable_ctrl_shutdown,
147 "Disable controller shutdown when controller locked up.");
149 static char *pqi_lockup_action_param;
150 module_param_named(lockup_action,
151 pqi_lockup_action_param, charp, 0644);
152 MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
153 "\t\tSupported: none, reboot, panic\n"
154 "\t\tDefault: none");
156 static int pqi_expose_ld_first;
157 module_param_named(expose_ld_first,
158 pqi_expose_ld_first, int, 0644);
159 MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");
161 static int pqi_hide_vsep;
162 module_param_named(hide_vsep,
163 pqi_hide_vsep, int, 0644);
164 MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
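/* example usage: modprobe smartpqi lockup_action=reboot hide_vsep=1 */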
static char *raid_levels[] = {
	"RAID-0", "RAID-4", "RAID-1(1+0)", "RAID-5",
	"RAID-5+1", "RAID-6", "RAID-1(Triple)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}
186 #define SA_RAID_1 2 /* also used for RAID 10 */
187 #define SA_RAID_5 3 /* also used for RAID 50 */
189 #define SA_RAID_6 5 /* also used for RAID 60 */
190 #define SA_RAID_TRIPLE 6 /* also used for RAID 1+0 Triple */
191 #define SA_RAID_MAX SA_RAID_TRIPLE
192 #define SA_RAID_UNKNOWN 0xff
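/* SA_RAID_* values double as indices into raid_levels[] above. */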
194 static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
196 pqi_prep_for_scsi_done(scmd);
197 scmd->scsi_done(scmd);
200 static inline void pqi_disable_write_same(struct scsi_device *sdev)
202 sdev->no_write_same = 1;
205 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
207 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
210 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
212 return !device->is_physical_device;
215 static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
217 return scsi3addr[2] != 0;
220 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
222 return !ctrl_info->controller_online;
225 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
227 if (ctrl_info->controller_online)
228 if (!sis_is_firmware_running(ctrl_info))
229 pqi_take_ctrl_offline(ctrl_info);
232 static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
234 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
237 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
239 return sis_read_driver_scratch(ctrl_info);
242 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
243 enum pqi_ctrl_mode mode)
245 sis_write_driver_scratch(ctrl_info, mode);
248 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
250 ctrl_info->block_device_reset = true;
253 static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
255 return ctrl_info->block_device_reset;
258 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
260 return ctrl_info->block_requests;
263 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
265 ctrl_info->block_requests = true;
266 scsi_block_requests(ctrl_info->scsi_host);
269 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
271 ctrl_info->block_requests = false;
272 wake_up_all(&ctrl_info->block_requests_wait);
273 pqi_retry_raid_bypass_requests(ctrl_info);
274 scsi_unblock_requests(ctrl_info->scsi_host);
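/*
 * Wait up to timeout_msecs for the controller to stop blocking new requests;
 * returns how many milliseconds of the caller's timeout remain.
 */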
277 static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
278 unsigned long timeout_msecs)
280 unsigned long remaining_msecs;
282 if (!pqi_ctrl_blocked(ctrl_info))
283 return timeout_msecs;
285 atomic_inc(&ctrl_info->num_blocked_threads);
287 if (timeout_msecs == NO_TIMEOUT) {
288 wait_event(ctrl_info->block_requests_wait,
289 !pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}
301 atomic_dec(&ctrl_info->num_blocked_threads);
303 return remaining_msecs;
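/*
 * Busy-wait until the number of threads actively using the controller drops
 * to the number of threads blocked waiting on it, i.e. all I/O issuers have
 * quiesced.
 */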
306 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
308 while (atomic_read(&ctrl_info->num_busy_threads) >
309 atomic_read(&ctrl_info->num_blocked_threads))
310 usleep_range(1000, 2000);
313 static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
315 return device->device_offline;
318 static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
320 device->in_reset = true;
323 static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
325 device->in_reset = false;
328 static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
330 return device->in_reset;
333 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
335 ctrl_info->in_ofa = true;
338 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
340 ctrl_info->in_ofa = false;
343 static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
345 return ctrl_info->in_ofa;
348 static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
350 device->in_remove = true;
353 static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
355 return device->in_remove;
358 static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
360 ctrl_info->in_shutdown = true;
363 static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
365 return ctrl_info->in_shutdown;
368 static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
371 if (pqi_ctrl_offline(ctrl_info))
373 if (pqi_ctrl_in_ofa(ctrl_info))
376 schedule_delayed_work(&ctrl_info->rescan_work, delay);
379 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
381 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
384 #define PQI_RESCAN_WORK_DELAY (10 * PQI_HZ)
386 static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
388 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
391 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
393 cancel_delayed_work_sync(&ctrl_info->rescan_work);
396 static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
398 cancel_work_sync(&ctrl_info->event_work);
static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->soft_reset_status)
		return 0;

	return readb(ctrl_info->soft_reset_status);
}
static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info, u8 clear)
{
	u8 status;

	if (!ctrl_info->soft_reset_status)
		return;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~clear;
	writeb(status, ctrl_info->soft_reset_status);
}
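/*
 * DMA-map a single contiguous buffer and describe it in one SG descriptor.
 * Returns 0 on success (or for a zero-length/DMA_NONE request) and -ENOMEM
 * if the mapping fails.
 */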
429 static int pqi_map_single(struct pci_dev *pci_dev,
430 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}
450 static void pqi_pci_unmap(struct pci_dev *pci_dev,
451 struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}
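/*
 * Build a RAID-path (CISS/BMIC pass-through) request: fill in the IU header,
 * LUN, CDB and data direction for the given command, then DMA-map the data
 * buffer into the request's first SG descriptor.
 */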
466 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
467 struct pqi_raid_path_request *request, u8 cmd,
468 u8 *scsi3addr, void *buffer, size_t buffer_length,
469 u16 vpd_page, enum dma_data_direction *dir)
472 size_t cdb_length = buffer_length;
474 memset(request, 0, sizeof(*request));
476 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
477 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
478 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
479 &request->header.iu_length);
480 put_unaligned_le32(buffer_length, &request->buffer_length);
481 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
482 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
483 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
489 request->data_direction = SOP_READ_FLAG;
491 if (vpd_page & VPD_PAGE) {
493 cdb[2] = (u8)vpd_page;
495 cdb[4] = (u8)cdb_length;
497 case CISS_REPORT_LOG:
498 case CISS_REPORT_PHYS:
499 request->data_direction = SOP_READ_FLAG;
501 if (cmd == CISS_REPORT_PHYS)
502 cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
504 cdb[1] = ctrl_info->ciss_report_log_flags;
505 put_unaligned_be32(cdb_length, &cdb[6]);
507 case CISS_GET_RAID_MAP:
508 request->data_direction = SOP_READ_FLAG;
510 cdb[1] = CISS_GET_RAID_MAP;
511 put_unaligned_be32(cdb_length, &cdb[6]);
514 request->data_direction = SOP_WRITE_FLAG;
516 cdb[6] = BMIC_FLUSH_CACHE;
517 put_unaligned_be16(cdb_length, &cdb[7]);
519 case BMIC_SENSE_DIAG_OPTIONS:
522 case BMIC_IDENTIFY_CONTROLLER:
523 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
524 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
525 case BMIC_SENSE_FEATURE:
526 request->data_direction = SOP_READ_FLAG;
529 put_unaligned_be16(cdb_length, &cdb[7]);
531 case BMIC_SET_DIAG_OPTIONS:
534 case BMIC_WRITE_HOST_WELLNESS:
535 request->data_direction = SOP_WRITE_FLAG;
538 put_unaligned_be16(cdb_length, &cdb[7]);
540 case BMIC_CSMI_PASSTHRU:
541 request->data_direction = SOP_BIDIRECTIONAL;
543 cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
545 put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}
571 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
573 io_request->scmd = NULL;
574 io_request->status = 0;
575 io_request->error_info = NULL;
576 io_request->raid_bypass = false;
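/*
 * Claim a slot in the pre-allocated I/O request pool.  The next-slot hint is
 * intentionally racy; correctness comes from the atomic refcount taken on
 * each candidate slot.
 */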
579 static struct pqi_io_request *pqi_alloc_io_request(
580 struct pqi_ctrl_info *ctrl_info)
582 struct pqi_io_request *io_request;
583 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}
601 static void pqi_free_io_request(struct pqi_io_request *io_request)
603 atomic_dec(&io_request->refcount);
606 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
607 u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
608 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
611 struct pqi_raid_path_request request;
612 enum dma_data_direction dir;
614 rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
615 buffer, buffer_length, vpd_page, &dir);
619 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
620 error_info, timeout_msecs);
622 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
627 /* helper functions for pqi_send_scsi_raid_request */
629 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
630 u8 cmd, void *buffer, size_t buffer_length)
632 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
633 buffer, buffer_length, 0, NULL, NO_TIMEOUT);
636 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
637 u8 cmd, void *buffer, size_t buffer_length,
638 struct pqi_raid_error_info *error_info)
640 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
641 buffer, buffer_length, 0, error_info, NO_TIMEOUT);
644 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
645 struct bmic_identify_controller *buffer)
647 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
648 buffer, sizeof(*buffer));
651 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
652 struct bmic_sense_subsystem_info *sense_info)
654 return pqi_send_ctrl_raid_request(ctrl_info,
655 BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
656 sizeof(*sense_info));
659 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
660 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
662 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
663 buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
666 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
667 struct pqi_scsi_dev *device,
668 struct bmic_identify_physical_device *buffer, size_t buffer_length)
671 enum dma_data_direction dir;
672 u16 bmic_device_index;
673 struct pqi_raid_path_request request;
675 rc = pqi_build_raid_path_request(ctrl_info, &request,
676 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
677 buffer_length, 0, &dir);
681 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
682 request.cdb[2] = (u8)bmic_device_index;
683 request.cdb[9] = (u8)(bmic_device_index >> 8);
685 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
686 0, NULL, NO_TIMEOUT);
688 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
693 static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
697 bytes = get_unaligned_le16(limit);
708 struct bmic_sense_feature_buffer {
709 struct bmic_sense_feature_buffer_header header;
710 struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
715 #define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \
716 offsetofend(struct bmic_sense_feature_buffer, \
717 aio_subpage.max_write_raid_1_10_3drive)
719 #define MINIMUM_AIO_SUBPAGE_LENGTH \
720 (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
721 max_write_raid_1_10_3drive) - \
722 sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
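/*
 * Query the BMIC Sense Feature I/O page, AIO subpage, to learn the
 * controller's per-request transfer limits for encrypted I/O and for
 * RAID write bypass.
 */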
724 static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
727 enum dma_data_direction dir;
728 struct pqi_raid_path_request request;
729 struct bmic_sense_feature_buffer *buffer;
731 buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
735 rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
736 buffer, sizeof(*buffer), 0, &dir);
740 request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
741 request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
743 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL, NO_TIMEOUT);
745 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
750 if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
751 buffer->header.subpage_code !=
752 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
753 get_unaligned_le16(&buffer->header.buffer_length) <
754 MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
755 buffer->aio_subpage.header.page_code !=
756 BMIC_SENSE_FEATURE_IO_PAGE ||
757 buffer->aio_subpage.header.subpage_code !=
758 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
759 get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
760 MINIMUM_AIO_SUBPAGE_LENGTH) {
764 ctrl_info->max_transfer_encrypted_sas_sata =
765 pqi_aio_limit_to_bytes(
766 &buffer->aio_subpage.max_transfer_encrypted_sas_sata);
768 ctrl_info->max_transfer_encrypted_nvme =
769 pqi_aio_limit_to_bytes(
770 &buffer->aio_subpage.max_transfer_encrypted_nvme);
772 ctrl_info->max_write_raid_5_6 =
773 pqi_aio_limit_to_bytes(
774 &buffer->aio_subpage.max_write_raid_5_6);
776 ctrl_info->max_write_raid_1_10_2drive =
777 pqi_aio_limit_to_bytes(
778 &buffer->aio_subpage.max_write_raid_1_10_2drive);
780 ctrl_info->max_write_raid_1_10_3drive =
781 pqi_aio_limit_to_bytes(
782 &buffer->aio_subpage.max_write_raid_1_10_3drive);
790 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
791 enum bmic_flush_cache_shutdown_event shutdown_event)
794 struct bmic_flush_cache *flush_cache;
797 * Don't bother trying to flush the cache if the controller is
800 if (pqi_ctrl_offline(ctrl_info))
803 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
807 flush_cache->shutdown_event = shutdown_event;
809 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
810 sizeof(*flush_cache));
817 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
818 struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
819 struct pqi_raid_error_info *error_info)
821 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
822 buffer, buffer_length, error_info);
825 #define PQI_FETCH_PTRAID_DATA (1 << 31)
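/*
 * Read the controller's diag options, OR in the PQI_FETCH_PTRAID_DATA flag,
 * and write the options back.
 */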
827 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
830 struct bmic_diag_options *diag;
832 diag = kzalloc(sizeof(*diag), GFP_KERNEL);
836 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
837 diag, sizeof(*diag));
841 diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
843 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
852 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
853 void *buffer, size_t buffer_length)
855 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
856 buffer, buffer_length);
861 struct bmic_host_wellness_driver_version {
863 u8 driver_version_tag[2];
864 __le16 driver_version_length;
865 char driver_version[32];
866 u8 dont_write_tag[2];
872 static int pqi_write_driver_version_to_host_wellness(
873 struct pqi_ctrl_info *ctrl_info)
876 struct bmic_host_wellness_driver_version *buffer;
877 size_t buffer_length;
879 buffer_length = sizeof(*buffer);
881 buffer = kmalloc(buffer_length, GFP_KERNEL);
885 buffer->start_tag[0] = '<';
886 buffer->start_tag[1] = 'H';
887 buffer->start_tag[2] = 'W';
888 buffer->start_tag[3] = '>';
889 buffer->driver_version_tag[0] = 'D';
890 buffer->driver_version_tag[1] = 'V';
891 put_unaligned_le16(sizeof(buffer->driver_version),
892 &buffer->driver_version_length);
893 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
894 sizeof(buffer->driver_version) - 1);
895 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
896 buffer->dont_write_tag[0] = 'D';
897 buffer->dont_write_tag[1] = 'W';
898 buffer->end_tag[0] = 'Z';
899 buffer->end_tag[1] = 'Z';
901 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
910 struct bmic_host_wellness_time {
915 u8 dont_write_tag[2];
921 static int pqi_write_current_time_to_host_wellness(
922 struct pqi_ctrl_info *ctrl_info)
925 struct bmic_host_wellness_time *buffer;
926 size_t buffer_length;
931 buffer_length = sizeof(*buffer);
933 buffer = kmalloc(buffer_length, GFP_KERNEL);
937 buffer->start_tag[0] = '<';
938 buffer->start_tag[1] = 'H';
939 buffer->start_tag[2] = 'W';
940 buffer->start_tag[3] = '>';
941 buffer->time_tag[0] = 'T';
942 buffer->time_tag[1] = 'D';
943 put_unaligned_le16(sizeof(buffer->time),
944 &buffer->time_length);
946 local_time = ktime_get_real_seconds();
947 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
948 year = tm.tm_year + 1900;
950 buffer->time[0] = bin2bcd(tm.tm_hour);
951 buffer->time[1] = bin2bcd(tm.tm_min);
952 buffer->time[2] = bin2bcd(tm.tm_sec);
954 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
955 buffer->time[5] = bin2bcd(tm.tm_mday);
956 buffer->time[6] = bin2bcd(year / 100);
957 buffer->time[7] = bin2bcd(year % 100);
959 buffer->dont_write_tag[0] = 'D';
960 buffer->dont_write_tag[1] = 'W';
961 buffer->end_tag[0] = 'Z';
962 buffer->end_tag[1] = 'Z';
964 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
971 #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * PQI_HZ)
973 static void pqi_update_time_worker(struct work_struct *work)
976 struct pqi_ctrl_info *ctrl_info;
978 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
981 if (pqi_ctrl_offline(ctrl_info))
984 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
986 dev_warn(&ctrl_info->pci_dev->dev,
987 "error updating time on controller\n");
989 schedule_delayed_work(&ctrl_info->update_time_work,
990 PQI_UPDATE_TIME_WORK_INTERVAL);
993 static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
995 schedule_delayed_work(&ctrl_info->update_time_work, 0);
998 static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
1000 cancel_delayed_work_sync(&ctrl_info->update_time_work);
1003 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
1004 size_t buffer_length)
1006 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
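/*
 * Issue CISS_REPORT_PHYS/CISS_REPORT_LOG twice: first with a header-sized
 * buffer to learn the list length, then with a buffer big enough for the
 * whole list, retrying if the list grew in between.
 */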
1009 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
1012 size_t lun_list_length;
1013 size_t lun_data_length;
1014 size_t new_lun_list_length;
1015 void *lun_data = NULL;
1016 struct report_lun_header *report_lun_header;
1018 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
1019 if (!report_lun_header) {
1024 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
1028 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
1031 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
1033 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
1039 if (lun_list_length == 0) {
1040 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
1044 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
1048 new_lun_list_length =
1049 get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);
1051 if (new_lun_list_length > lun_list_length) {
1052 lun_list_length = new_lun_list_length;
1058 kfree(report_lun_header);
1070 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
1072 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, buffer);
1075 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
1077 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
1080 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
1081 struct report_phys_lun_extended **physdev_list,
1082 struct report_log_lun_extended **logdev_list)
1085 size_t logdev_list_length;
1086 size_t logdev_data_length;
1087 struct report_log_lun_extended *internal_logdev_list;
1088 struct report_log_lun_extended *logdev_data;
1089 struct report_lun_header report_lun_header;
1091 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
1093 dev_err(&ctrl_info->pci_dev->dev,
1094 "report physical LUNs failed\n");
1096 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
1098 dev_err(&ctrl_info->pci_dev->dev,
1099 "report logical LUNs failed\n");
1102 * Tack the controller itself onto the end of the logical device list.
1105 logdev_data = *logdev_list;
1108 logdev_list_length =
1109 get_unaligned_be32(&logdev_data->header.list_length);
1111 memset(&report_lun_header, 0, sizeof(report_lun_header));
1113 (struct report_log_lun_extended *)&report_lun_header;
1114 logdev_list_length = 0;
1117 logdev_data_length = sizeof(struct report_lun_header) +
1120 internal_logdev_list = kmalloc(logdev_data_length +
1121 sizeof(struct report_log_lun_extended), GFP_KERNEL);
1122 if (!internal_logdev_list) {
1123 kfree(*logdev_list);
1124 *logdev_list = NULL;
1128 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
1129 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
1130 sizeof(struct report_log_lun_extended_entry));
1131 put_unaligned_be32(logdev_list_length +
1132 sizeof(struct report_log_lun_extended_entry),
1133 &internal_logdev_list->header.list_length);
1135 kfree(*logdev_list);
1136 *logdev_list = internal_logdev_list;
1141 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
1142 int bus, int target, int lun)
1145 device->target = target;
1149 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
1157 scsi3addr = device->scsi3addr;
1158 lunid = get_unaligned_le32(scsi3addr);
1160 if (pqi_is_hba_lunid(scsi3addr)) {
1161 /* The specified device is the controller. */
1162 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
1163 device->target_lun_valid = true;
1167 if (pqi_is_logical_device(device)) {
1168 if (device->is_external_raid_device) {
1169 bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
1170 target = (lunid >> 16) & 0x3fff;
1173 bus = PQI_RAID_VOLUME_BUS;
1175 lun = lunid & 0x3fff;
1177 pqi_set_bus_target_lun(device, bus, target, lun);
1178 device->target_lun_valid = true;
1183 * Defer target and LUN assignment for non-controller physical devices
1184 * because the SAS transport layer will make these assignments later.
1186 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
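/*
 * Read the device-geometry VPD page to determine the volume's RAID level;
 * any failure leaves the level as SA_RAID_UNKNOWN.
 */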
1189 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
1190 struct pqi_scsi_dev *device)
1196 raid_level = SA_RAID_UNKNOWN;
1198 buffer = kmalloc(64, GFP_KERNEL);
1200 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1201 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
1203 raid_level = buffer[8];
1204 if (raid_level > SA_RAID_MAX)
1205 raid_level = SA_RAID_UNKNOWN;
1210 device->raid_level = raid_level;
1213 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1214 struct pqi_scsi_dev *device, struct raid_map *raid_map)
1218 u32 r5or6_blocks_per_row;
1220 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1222 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1223 err_msg = "RAID map too small";
1227 if (device->raid_level == SA_RAID_1) {
1228 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1229 err_msg = "invalid RAID-1 map";
1232 } else if (device->raid_level == SA_RAID_TRIPLE) {
1233 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
1234 err_msg = "invalid RAID-1(Triple) map";
1237 } else if ((device->raid_level == SA_RAID_5 ||
1238 device->raid_level == SA_RAID_6) &&
1239 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1241 r5or6_blocks_per_row =
1242 get_unaligned_le16(&raid_map->strip_size) *
1243 get_unaligned_le16(&raid_map->data_disks_per_row);
1244 if (r5or6_blocks_per_row == 0) {
1245 err_msg = "invalid RAID-5 or RAID-6 map";
1253 dev_warn(&ctrl_info->pci_dev->dev,
1254 "logical device %08x%08x %s\n",
1255 *((u32 *)&device->scsi3addr),
1256 *((u32 *)&device->scsi3addr[4]), err_msg);
1261 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1262 struct pqi_scsi_dev *device)
1266 struct raid_map *raid_map;
1268 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1272 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1273 device->scsi3addr, raid_map, sizeof(*raid_map),
1274 0, NULL, NO_TIMEOUT);
1279 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1281 if (raid_map_size > sizeof(*raid_map)) {
1285 raid_map = kmalloc(raid_map_size, GFP_KERNEL);
1289 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1290 device->scsi3addr, raid_map, raid_map_size,
1291 0, NULL, NO_TIMEOUT);
1295 if (get_unaligned_le32(&raid_map->structure_size)
1297 dev_warn(&ctrl_info->pci_dev->dev,
1298 "requested %u bytes, received %u bytes\n",
1300 get_unaligned_le32(&raid_map->structure_size));
1305 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1309 device->raid_map = raid_map;
1319 static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
1320 struct pqi_scsi_dev *device)
1322 if (!ctrl_info->lv_drive_type_mix_valid) {
1323 device->max_transfer_encrypted = ~0;
1327 switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
1328 case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
1329 case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
1330 case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
1331 case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
1332 case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
1333 case LV_DRIVE_TYPE_MIX_SAS_ONLY:
1334 case LV_DRIVE_TYPE_MIX_SATA_ONLY:
1335 device->max_transfer_encrypted =
1336 ctrl_info->max_transfer_encrypted_sas_sata;
1338 case LV_DRIVE_TYPE_MIX_NVME_ONLY:
1339 device->max_transfer_encrypted =
1340 ctrl_info->max_transfer_encrypted_nvme;
1342 case LV_DRIVE_TYPE_MIX_UNKNOWN:
1343 case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
1345 device->max_transfer_encrypted =
1346 min(ctrl_info->max_transfer_encrypted_sas_sata,
1347 ctrl_info->max_transfer_encrypted_nvme);
1352 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
1353 struct pqi_scsi_dev *device)
1359 buffer = kmalloc(64, GFP_KERNEL);
1363 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1364 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
1368 #define RAID_BYPASS_STATUS 4
1369 #define RAID_BYPASS_CONFIGURED 0x1
1370 #define RAID_BYPASS_ENABLED 0x2
1372 bypass_status = buffer[RAID_BYPASS_STATUS];
1373 device->raid_bypass_configured =
1374 (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1375 if (device->raid_bypass_configured &&
1376 (bypass_status & RAID_BYPASS_ENABLED) &&
1377 pqi_get_raid_map(ctrl_info, device) == 0) {
1378 device->raid_bypass_enabled = true;
1379 if (get_unaligned_le16(&device->raid_map->flags) &
1380 RAID_MAP_ENCRYPTION_ENABLED)
1381 pqi_set_max_transfer_encrypted(ctrl_info, device);
1389 * Use vendor-specific VPD to determine online/offline status of a volume.
1392 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1393 struct pqi_scsi_dev *device)
1397 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1398 bool volume_offline = true;
1400 struct ciss_vpd_logical_volume_status *vpd;
1402 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1406 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1407 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1411 if (vpd->page_code != CISS_VPD_LV_STATUS)
1414 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1415 volume_status) + vpd->page_length;
1416 if (page_length < sizeof(*vpd))
1419 volume_status = vpd->volume_status;
1420 volume_flags = get_unaligned_be32(&vpd->flags);
1421 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1426 device->volume_status = volume_status;
1427 device->volume_offline = volume_offline;
1430 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
1431 struct pqi_scsi_dev *device,
1432 struct bmic_identify_physical_device *id_phys)
1436 memset(id_phys, 0, sizeof(*id_phys));
1438 rc = pqi_identify_physical_device(ctrl_info, device,
1439 id_phys, sizeof(*id_phys));
1441 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1445 scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
1446 scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
1448 memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
1449 memcpy(device->model, &id_phys->model[8], sizeof(device->model));
1451 device->box_index = id_phys->box_index;
1452 device->phys_box_on_bus = id_phys->phys_box_on_bus;
1453 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
1454 device->queue_depth =
1455 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1456 device->active_path_index = id_phys->active_path_number;
1457 device->path_map = id_phys->redundant_path_present_map;
1458 memcpy(&device->box,
1459 &id_phys->alternate_paths_phys_box_on_port,
1460 sizeof(device->box));
1461 memcpy(&device->phys_connector,
1462 &id_phys->alternate_paths_phys_connector,
1463 sizeof(device->phys_connector));
1464 device->bay = id_phys->phys_bay_in_box;
1469 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
1470 struct pqi_scsi_dev *device)
1475 buffer = kmalloc(64, GFP_KERNEL);
1479 /* Send an inquiry to the device to see what it is. */
1480 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1484 scsi_sanitize_inquiry_string(&buffer[8], 8);
1485 scsi_sanitize_inquiry_string(&buffer[16], 16);
1487 device->devtype = buffer[0] & 0x1f;
1488 memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1489 memcpy(device->model, &buffer[16], sizeof(device->model));
1491 if (device->devtype == TYPE_DISK) {
1492 if (device->is_external_raid_device) {
1493 device->raid_level = SA_RAID_UNKNOWN;
1494 device->volume_status = CISS_LV_OK;
1495 device->volume_offline = false;
1497 pqi_get_raid_level(ctrl_info, device);
1498 pqi_get_raid_bypass_status(ctrl_info, device);
1499 pqi_get_volume_status(ctrl_info, device);
1509 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1510 struct pqi_scsi_dev *device,
1511 struct bmic_identify_physical_device *id_phys)
1515 if (device->is_expander_smp_device)
1518 if (pqi_is_logical_device(device))
1519 rc = pqi_get_logical_device_info(ctrl_info, device);
1521 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
1526 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1527 struct pqi_scsi_dev *device)
1530 static const char unknown_state_str[] =
1531 "Volume is in an unknown state (%u)";
1532 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1534 switch (device->volume_status) {
1536 status = "Volume online";
1538 case CISS_LV_FAILED:
1539 status = "Volume failed";
1541 case CISS_LV_NOT_CONFIGURED:
1542 status = "Volume not configured";
1544 case CISS_LV_DEGRADED:
1545 status = "Volume degraded";
1547 case CISS_LV_READY_FOR_RECOVERY:
1548 status = "Volume ready for recovery operation";
1550 case CISS_LV_UNDERGOING_RECOVERY:
1551 status = "Volume undergoing recovery";
1553 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1554 status = "Wrong physical drive was replaced";
1556 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1557 status = "A physical drive not properly connected";
1559 case CISS_LV_HARDWARE_OVERHEATING:
1560 status = "Hardware is overheating";
1562 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1563 status = "Hardware has overheated";
1565 case CISS_LV_UNDERGOING_EXPANSION:
1566 status = "Volume undergoing expansion";
1568 case CISS_LV_NOT_AVAILABLE:
1569 status = "Volume waiting for transforming volume";
1571 case CISS_LV_QUEUED_FOR_EXPANSION:
1572 status = "Volume queued for expansion";
1574 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1575 status = "Volume disabled due to SCSI ID conflict";
1577 case CISS_LV_EJECTED:
1578 status = "Volume has been ejected";
1580 case CISS_LV_UNDERGOING_ERASE:
1581 status = "Volume undergoing background erase";
1583 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1584 status = "Volume ready for predictive spare rebuild";
1586 case CISS_LV_UNDERGOING_RPI:
1587 status = "Volume undergoing rapid parity initialization";
1589 case CISS_LV_PENDING_RPI:
1590 status = "Volume queued for rapid parity initialization";
1592 case CISS_LV_ENCRYPTED_NO_KEY:
1593 status = "Encrypted volume inaccessible - key not present";
1595 case CISS_LV_UNDERGOING_ENCRYPTION:
1596 status = "Volume undergoing encryption process";
1598 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1599 status = "Volume undergoing encryption re-keying process";
1601 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1602 status = "Volume encrypted but encryption is disabled";
1604 case CISS_LV_PENDING_ENCRYPTION:
1605 status = "Volume pending migration to encrypted state";
1607 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1608 status = "Volume pending encryption rekeying";
1610 case CISS_LV_NOT_SUPPORTED:
1611 status = "Volume not supported on this controller";
1613 case CISS_LV_STATUS_UNAVAILABLE:
1614 status = "Volume status not available";
1617 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1618 unknown_state_str, device->volume_status);
1619 status = unknown_state_buffer;
1623 dev_info(&ctrl_info->pci_dev->dev,
1624 "scsi %d:%d:%d:%d %s\n",
1625 ctrl_info->scsi_host->host_no,
1626 device->bus, device->target, device->lun, status);
1629 static void pqi_rescan_worker(struct work_struct *work)
1631 struct pqi_ctrl_info *ctrl_info;
1633 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1636 pqi_scan_scsi_devices(ctrl_info);
1639 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1640 struct pqi_scsi_dev *device)
1644 if (pqi_is_logical_device(device))
1645 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1646 device->target, device->lun);
1648 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1653 #define PQI_PENDING_IO_TIMEOUT_SECS 20
1655 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
1659 pqi_device_remove_start(device);
1661 rc = pqi_device_wait_for_pending_io(ctrl_info, device, PQI_PENDING_IO_TIMEOUT_SECS);
1663 dev_err(&ctrl_info->pci_dev->dev,
1664 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
1665 ctrl_info->scsi_host->host_no, device->bus,
1666 device->target, device->lun,
1667 atomic_read(&device->scsi_cmds_outstanding));
1669 if (pqi_is_logical_device(device))
1670 scsi_remove_device(device->sdev);
1672 pqi_remove_sas_device(device);
1675 /* Assumes the SCSI device list lock is held. */
1677 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1678 int bus, int target, int lun)
1680 struct pqi_scsi_dev *device;
1682 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1683 if (device->bus == bus && device->target == target && device->lun == lun)
1689 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
1691 if (dev1->is_physical_device != dev2->is_physical_device)
1694 if (dev1->is_physical_device)
1695 return dev1->wwid == dev2->wwid;
1697 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};
1706 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1707 struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
1709 struct pqi_scsi_dev *device;
1711 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1712 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
1713 *matching_device = device;
1714 if (pqi_device_equal(device_to_find, device)) {
1715 if (device_to_find->volume_offline)
1716 return DEVICE_CHANGED;
1719 return DEVICE_CHANGED;
1723 return DEVICE_NOT_FOUND;
1726 static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1728 if (device->is_expander_smp_device)
1729 return "Enclosure SMP ";
1731 return scsi_device_type(device->devtype);
1734 #define PQI_DEV_INFO_BUFFER_LENGTH 128
1736 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1737 char *action, struct pqi_scsi_dev *device)
1740 char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1742 count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
1743 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1745 if (device->target_lun_valid)
1746 count += scnprintf(buffer + count,
1747 PQI_DEV_INFO_BUFFER_LENGTH - count,
1752 count += scnprintf(buffer + count,
1753 PQI_DEV_INFO_BUFFER_LENGTH - count,
1756 if (pqi_is_logical_device(device))
1757 count += scnprintf(buffer + count,
1758 PQI_DEV_INFO_BUFFER_LENGTH - count,
1760 *((u32 *)&device->scsi3addr),
1761 *((u32 *)&device->scsi3addr[4]));
1763 count += scnprintf(buffer + count,
1764 PQI_DEV_INFO_BUFFER_LENGTH - count,
1765 " %016llx", device->sas_address);
1767 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
1769 pqi_device_type(device),
1773 if (pqi_is_logical_device(device)) {
1774 if (device->devtype == TYPE_DISK)
1775 count += scnprintf(buffer + count,
1776 PQI_DEV_INFO_BUFFER_LENGTH - count,
1777 "SSDSmartPathCap%c En%c %-12s",
1778 device->raid_bypass_configured ? '+' : '-',
1779 device->raid_bypass_enabled ? '+' : '-',
1780 pqi_raid_level_to_string(device->raid_level));
1782 count += scnprintf(buffer + count,
1783 PQI_DEV_INFO_BUFFER_LENGTH - count,
1784 "AIO%c", device->aio_enabled ? '+' : '-');
1785 if (device->devtype == TYPE_DISK ||
1786 device->devtype == TYPE_ZBC)
1787 count += scnprintf(buffer + count,
1788 PQI_DEV_INFO_BUFFER_LENGTH - count,
1789 " qd=%-6d", device->queue_depth);
1792 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
1795 /* Assumes the SCSI device list lock is held. */
1797 static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1798 struct pqi_scsi_dev *new_device)
1800 existing_device->devtype = new_device->devtype;
1801 existing_device->device_type = new_device->device_type;
1802 existing_device->bus = new_device->bus;
1803 if (new_device->target_lun_valid) {
1804 existing_device->target = new_device->target;
1805 existing_device->lun = new_device->lun;
1806 existing_device->target_lun_valid = true;
1809 if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION ||
1810 existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) &&
1811 new_device->volume_status == CISS_LV_OK)
1812 existing_device->rescan = true;
1814 /* By definition, the scsi3addr and wwid fields are already the same. */
1816 existing_device->is_physical_device = new_device->is_physical_device;
1817 existing_device->is_external_raid_device =
1818 new_device->is_external_raid_device;
1819 existing_device->is_expander_smp_device =
1820 new_device->is_expander_smp_device;
1821 existing_device->aio_enabled = new_device->aio_enabled;
1822 memcpy(existing_device->vendor, new_device->vendor,
1823 sizeof(existing_device->vendor));
1824 memcpy(existing_device->model, new_device->model,
1825 sizeof(existing_device->model));
1826 existing_device->sas_address = new_device->sas_address;
1827 existing_device->raid_level = new_device->raid_level;
1828 existing_device->queue_depth = new_device->queue_depth;
1829 existing_device->aio_handle = new_device->aio_handle;
1830 existing_device->volume_status = new_device->volume_status;
1831 existing_device->active_path_index = new_device->active_path_index;
1832 existing_device->path_map = new_device->path_map;
1833 existing_device->bay = new_device->bay;
1834 existing_device->box_index = new_device->box_index;
1835 existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
1836 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
1837 memcpy(existing_device->box, new_device->box,
1838 sizeof(existing_device->box));
1839 memcpy(existing_device->phys_connector, new_device->phys_connector,
1840 sizeof(existing_device->phys_connector));
1841 existing_device->next_bypass_group = 0;
1842 kfree(existing_device->raid_map);
1843 existing_device->raid_map = new_device->raid_map;
1844 existing_device->raid_bypass_configured =
1845 new_device->raid_bypass_configured;
1846 existing_device->raid_bypass_enabled =
1847 new_device->raid_bypass_enabled;
1848 existing_device->device_offline = false;
1850 /* To prevent this from being freed later. */
1851 new_device->raid_map = NULL;
1854 static inline void pqi_free_device(struct pqi_scsi_dev *device)
1857 kfree(device->raid_map);
1863 * Called when exposing a new device to the OS fails in order to re-adjust
1864 * our internal SCSI device list to match the SCSI ML's view.
1867 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1868 struct pqi_scsi_dev *device)
1870 unsigned long flags;
1872 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1873 list_del(&device->scsi_device_list_entry);
1874 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1876 /* Allow the device structure to be freed later. */
1877 device->keep_device = false;
1880 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
1882 if (device->is_expander_smp_device)
1883 return device->sas_port != NULL;
1885 return device->sdev != NULL;
1888 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1889 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1893 unsigned long flags;
1894 enum pqi_find_result find_result;
1895 struct pqi_scsi_dev *device;
1896 struct pqi_scsi_dev *next;
1897 struct pqi_scsi_dev *matching_device;
1898 LIST_HEAD(add_list);
1899 LIST_HEAD(delete_list);
1902 * The idea here is to do as little work as possible while holding the
1903 * spinlock. That's why we go to great pains to defer anything other
1904 * than updating the internal device list until after we release the
1908 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1910 /* Assume that all devices in the existing list have gone away. */
1911 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1912 device->device_gone = true;
1914 for (i = 0; i < num_new_devices; i++) {
1915 device = new_device_list[i];
1917 find_result = pqi_scsi_find_entry(ctrl_info, device,
1920 switch (find_result) {
1923 * The newly found device is already in the existing
1926 device->new_device = false;
1927 matching_device->device_gone = false;
1928 pqi_scsi_update_device(matching_device, device);
1930 case DEVICE_NOT_FOUND:
1932 * The newly found device is NOT in the existing device
1935 device->new_device = true;
1937 case DEVICE_CHANGED:
1939 * The original device has gone away and we need to add
1942 device->new_device = true;
1947 /* Process all devices that have gone away. */
1948 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1949 scsi_device_list_entry) {
1950 if (device->device_gone) {
1951 list_del_init(&device->scsi_device_list_entry);
1952 list_add_tail(&device->delete_list_entry, &delete_list);
1956 /* Process all new devices. */
1957 for (i = 0; i < num_new_devices; i++) {
1958 device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
1963 list_add_tail(&device->scsi_device_list_entry,
1964 &ctrl_info->scsi_device_list);
1965 list_add_tail(&device->add_list_entry, &add_list);
1966 /* To prevent this device structure from being freed later. */
1967 device->keep_device = true;
1970 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1972 if (pqi_ctrl_in_ofa(ctrl_info))
1973 pqi_ctrl_ofa_done(ctrl_info);
1975 /* Remove all devices that have gone away. */
1976 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
1977 if (device->volume_offline) {
1978 pqi_dev_info(ctrl_info, "offline", device);
1979 pqi_show_volume_status(ctrl_info, device);
1981 list_del(&device->delete_list_entry);
1982 if (pqi_is_device_added(device)) {
1983 pqi_remove_device(ctrl_info, device);
1985 if (!device->volume_offline)
1986 pqi_dev_info(ctrl_info, "removed", device);
1987 pqi_free_device(device);
1992 * Notify the SCSI ML if the queue depth of any existing device has
1995 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1996 if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
1997 device->advertised_queue_depth = device->queue_depth;
1998 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
1999 if (device->rescan) {
2000 scsi_rescan_device(&device->sdev->sdev_gendev);
2001 device->rescan = false;
2006 /* Expose any new devices. */
2007 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
2008 if (!pqi_is_device_added(device)) {
2009 rc = pqi_add_device(ctrl_info, device);
2011 pqi_dev_info(ctrl_info, "added", device);
2013 dev_warn(&ctrl_info->pci_dev->dev,
2014 "scsi %d:%d:%d:%d addition failed, device not added\n",
2015 ctrl_info->scsi_host->host_no,
2016 device->bus, device->target,
2018 pqi_fixup_botched_add(ctrl_info, device);
2024 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
2027 * Only support the HBA controller itself as a RAID
2028 * controller. If it's a RAID controller other than
2029 * the HBA itself (an external RAID controller, for
2030 * example), we don't support it.
2032 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2033 !pqi_is_hba_lunid(device->scsi3addr))
2039 static inline bool pqi_skip_device(u8 *scsi3addr)
2041 /* Ignore all masked devices. */
2042 if (MASKED_DEVICE(scsi3addr))
2048 static inline void pqi_mask_device(u8 *scsi3addr)
2050 scsi3addr[3] |= 0xc0;
2053 static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
2055 switch (device->device_type) {
2056 case SA_DEVICE_TYPE_SAS:
2057 case SA_DEVICE_TYPE_EXPANDER_SMP:
2058 case SA_DEVICE_TYPE_SES:
2065 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2067 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
2070 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2074 LIST_HEAD(new_device_list_head);
2075 struct report_phys_lun_extended *physdev_list = NULL;
2076 struct report_log_lun_extended *logdev_list = NULL;
2077 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
2078 struct report_log_lun_extended_entry *log_lun_ext_entry;
2079 struct bmic_identify_physical_device *id_phys = NULL;
2082 struct pqi_scsi_dev **new_device_list = NULL;
2083 struct pqi_scsi_dev *device;
2084 struct pqi_scsi_dev *next;
2085 unsigned int num_new_devices;
2086 unsigned int num_valid_devices;
2087 bool is_physical_device;
2089 unsigned int physical_index;
2090 unsigned int logical_index;
2091 static char *out_of_memory_msg =
2092 "failed to allocate memory, device discovery stopped";
2094 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2100 get_unaligned_be32(&physdev_list->header.list_length)
2101 / sizeof(physdev_list->lun_entries[0]);
2107 get_unaligned_be32(&logdev_list->header.list_length)
2108 / sizeof(logdev_list->lun_entries[0]);
2112 if (num_physicals) {
		 * We need this buffer for calls to pqi_get_physical_device_info()
		 * below. We allocate it here instead of inside
		 * pqi_get_physical_device_info() because it's a fairly large
		 * buffer.
		 */
2119 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2121 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2127 if (pqi_hide_vsep) {
2128 for (i = num_physicals - 1; i >= 0; i--) {
2129 phys_lun_ext_entry =
2130 &physdev_list->lun_entries[i];
2131 if (CISS_GET_DRIVE_NUMBER(phys_lun_ext_entry->lunid) == PQI_VSEP_CISS_BTL) {
2132 pqi_mask_device(phys_lun_ext_entry->lunid);
2140 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2141 ctrl_info->lv_drive_type_mix_valid = true;
2143 num_new_devices = num_physicals + num_logicals;
2145 new_device_list = kmalloc_array(num_new_devices,
2146 sizeof(*new_device_list),
2148 if (!new_device_list) {
2149 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2154 for (i = 0; i < num_new_devices; i++) {
2155 device = kzalloc(sizeof(*device), GFP_KERNEL);
2157 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2162 list_add_tail(&device->new_device_list_entry,
2163 &new_device_list_head);
2167 num_valid_devices = 0;
2171 for (i = 0; i < num_new_devices; i++) {
2173 if ((!pqi_expose_ld_first && i < num_physicals) ||
2174 (pqi_expose_ld_first && i >= num_logicals)) {
2175 is_physical_device = true;
2176 phys_lun_ext_entry =
2177 &physdev_list->lun_entries[physical_index++];
2178 log_lun_ext_entry = NULL;
2179 scsi3addr = phys_lun_ext_entry->lunid;
2181 is_physical_device = false;
2182 phys_lun_ext_entry = NULL;
2184 &logdev_list->lun_entries[logical_index++];
2185 scsi3addr = log_lun_ext_entry->lunid;
2188 if (is_physical_device && pqi_skip_device(scsi3addr))
2192 device = list_next_entry(device, new_device_list_entry);
2194 device = list_first_entry(&new_device_list_head,
2195 struct pqi_scsi_dev, new_device_list_entry);
2197 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2198 device->is_physical_device = is_physical_device;
2199 if (is_physical_device) {
2200 device->device_type = phys_lun_ext_entry->device_type;
2201 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2202 device->is_expander_smp_device = true;
2204 device->is_external_raid_device =
2205 pqi_is_external_raid_addr(scsi3addr);
2208 if (!pqi_is_supported_device(device))
2211 /* Gather information about the device. */
2212 rc = pqi_get_device_info(ctrl_info, device, id_phys);
2213 if (rc == -ENOMEM) {
2214 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2219 if (device->is_physical_device)
2220 dev_warn(&ctrl_info->pci_dev->dev,
2221 "obtaining device info failed, skipping physical device %016llx\n",
2222 get_unaligned_be64(&phys_lun_ext_entry->wwid));
2224 dev_warn(&ctrl_info->pci_dev->dev,
2225 "obtaining device info failed, skipping logical device %08x%08x\n",
2226 *((u32 *)&device->scsi3addr),
2227 *((u32 *)&device->scsi3addr[4]));
2232 pqi_assign_bus_target_lun(device);
2234 if (device->is_physical_device) {
2235 device->wwid = phys_lun_ext_entry->wwid;
2236 if ((phys_lun_ext_entry->device_flags &
2237 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
2238 phys_lun_ext_entry->aio_handle) {
2239 device->aio_enabled = true;
2240 device->aio_handle =
2241 phys_lun_ext_entry->aio_handle;
2244 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
2245 sizeof(device->volume_id));
2248 if (pqi_is_device_with_sas_address(device))
2249 device->sas_address = get_unaligned_be64(&device->wwid);
2251 new_device_list[num_valid_devices++] = device;
2254 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2257 list_for_each_entry_safe(device, next, &new_device_list_head,
2258 new_device_list_entry) {
2259 if (device->keep_device)
2261 list_del(&device->new_device_list_entry);
2262 pqi_free_device(device);
2265 kfree(new_device_list);
2266 kfree(physdev_list);
2273 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2277 if (pqi_ctrl_offline(ctrl_info))
2280 if (!mutex_trylock(&ctrl_info->scan_mutex)) {
2281 pqi_schedule_rescan_worker_delayed(ctrl_info);
2284 rc = pqi_update_scsi_devices(ctrl_info);
2286 pqi_schedule_rescan_worker_delayed(ctrl_info);
2287 mutex_unlock(&ctrl_info->scan_mutex);
2293 static void pqi_scan_start(struct Scsi_Host *shost)
2295 struct pqi_ctrl_info *ctrl_info;
2297 ctrl_info = shost_to_hba(shost);
2298 if (pqi_ctrl_in_ofa(ctrl_info))
2301 pqi_scan_scsi_devices(ctrl_info);
2304 /* Returns TRUE if scan is finished. */
2306 static int pqi_scan_finished(struct Scsi_Host *shost,
2307 unsigned long elapsed_time)
2309 struct pqi_ctrl_info *ctrl_info;
2311 ctrl_info = shost_priv(shost);
2313 return !mutex_is_locked(&ctrl_info->scan_mutex);
2316 static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
2318 mutex_lock(&ctrl_info->scan_mutex);
2319 mutex_unlock(&ctrl_info->scan_mutex);
2322 static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
2324 mutex_lock(&ctrl_info->lun_reset_mutex);
2325 mutex_unlock(&ctrl_info->lun_reset_mutex);
2328 static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
2330 mutex_lock(&ctrl_info->ofa_mutex);
2331 mutex_unlock(&ctrl_info->ofa_mutex);
2334 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2335 struct raid_map *raid_map, u64 first_block)
2337 u32 volume_blk_size;
2340 * Set the encryption tweak values based on logical block address.
2341 * If the block size is 512, the tweak value is equal to the LBA.
2342 * For other block sizes, tweak value is (LBA * block size) / 512.
2344 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2345 if (volume_blk_size != 512)
2346 first_block = (first_block * volume_blk_size) / 512;
2348 encryption_info->data_encryption_key_index =
2349 get_unaligned_le16(&raid_map->data_encryption_key_index);
2350 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2351 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
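/*
 * For example, with a 4096-byte volume block size an I/O starting at
 * LBA 100 yields a tweak of (100 * 4096) / 512 = 800, so
 * encrypt_tweak_lower becomes 800 and encrypt_tweak_upper becomes 0;
 * with 512-byte blocks the tweak is simply the LBA itself.
 */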
2355 * Attempt to perform RAID bypass mapping for a logical volume I/O.
2358 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2359 struct pqi_scsi_dev_raid_map_data *rmd)
2361 bool is_supported = true;
2363 switch (rmd->raid_level) {
2367 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2368 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2369 is_supported = false;
2371 case SA_RAID_TRIPLE:
2372 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2373 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2374 is_supported = false;
2377 if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2378 rmd->data_length > ctrl_info->max_write_raid_5_6))
2379 is_supported = false;
2382 if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2383 rmd->data_length > ctrl_info->max_write_raid_5_6))
2384 is_supported = false;
2387 is_supported = false;
2391 return is_supported;
2394 #define PQI_RAID_BYPASS_INELIGIBLE 1
2396 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
2397 struct pqi_scsi_dev_raid_map_data *rmd)
2399 /* Check for valid opcode, get LBA and block count. */
2400 switch (scmd->cmnd[0]) {
2402 rmd->is_write = true;
2405 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2406 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2407 rmd->block_cnt = (u32)scmd->cmnd[4];
2408 if (rmd->block_cnt == 0)
2409 rmd->block_cnt = 256;
2412 rmd->is_write = true;
2415 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2416 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2419 rmd->is_write = true;
2422 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2423 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2426 rmd->is_write = true;
2429 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2430 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2433 /* Process via normal I/O path. */
2434 return PQI_RAID_BYPASS_INELIGIBLE;
2437 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
2442 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
2443 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2445 #if BITS_PER_LONG == 32
2449 rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2451 /* Check for invalid block or wraparound. */
2452 if (rmd->last_block >=
2453 get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2454 rmd->last_block < rmd->first_block)
2455 return PQI_RAID_BYPASS_INELIGIBLE;
2457 rmd->data_disks_per_row =
2458 get_unaligned_le16(&raid_map->data_disks_per_row);
2459 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2460 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2462 /* Calculate stripe information for the request. */
2463 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2464 #if BITS_PER_LONG == 32
2465 tmpdiv = rmd->first_block;
2466 do_div(tmpdiv, rmd->blocks_per_row);
2467 rmd->first_row = tmpdiv;
2468 tmpdiv = rmd->last_block;
2469 do_div(tmpdiv, rmd->blocks_per_row);
2470 rmd->last_row = tmpdiv;
2471 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2472 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2473 tmpdiv = rmd->first_row_offset;
2474 do_div(tmpdiv, rmd->strip_size);
2475 rmd->first_column = tmpdiv;
2476 tmpdiv = rmd->last_row_offset;
2477 do_div(tmpdiv, rmd->strip_size);
2478 rmd->last_column = tmpdiv;
2480 rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2481 rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2482 rmd->first_row_offset = (u32)(rmd->first_block -
2483 (rmd->first_row * rmd->blocks_per_row));
2484 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
2485 rmd->blocks_per_row));
2486 rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2487 rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2490 /* If this isn't a single row/column then give to the controller. */
2491 if (rmd->first_row != rmd->last_row ||
2492 rmd->first_column != rmd->last_column)
2493 return PQI_RAID_BYPASS_INELIGIBLE;
2495 /* Proceeding with driver mapping. */
2496 rmd->total_disks_per_row = rmd->data_disks_per_row +
2497 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2498 rmd->map_row = ((u32)(rmd->first_row >>
2499 raid_map->parity_rotation_shift)) %
2500 get_unaligned_le16(&raid_map->row_cnt);
2501 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
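/*
 * Worked example of the stripe math above: with strip_size = 128 and
 * data_disks_per_row = 4, blocks_per_row is 512, so an I/O whose
 * first_block is 1000 lands in first_row 1000 / 512 = 1, at
 * first_row_offset 1000 - 512 = 488 and first_column 488 / 128 = 3.
 * A request whose last block falls in a different row or column is
 * handed back to the normal RAID path, as checked above.
 */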
2507 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
2508 struct raid_map *raid_map)
2510 #if BITS_PER_LONG == 32
2514 /* Verify first and last block are in same RAID group. */
2515 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
2516 #if BITS_PER_LONG == 32
2517 tmpdiv = rmd->first_block;
2518 rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2519 tmpdiv = rmd->first_group;
2520 do_div(tmpdiv, rmd->blocks_per_row);
2521 rmd->first_group = tmpdiv;
2522 tmpdiv = rmd->last_block;
2523 rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2524 tmpdiv = rmd->last_group;
2525 do_div(tmpdiv, rmd->blocks_per_row);
2526 rmd->last_group = tmpdiv;
2528 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2529 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
2531 if (rmd->first_group != rmd->last_group)
2532 return PQI_RAID_BYPASS_INELIGIBLE;
2534 /* Verify request is in a single row of RAID 5/6. */
2535 #if BITS_PER_LONG == 32
2536 tmpdiv = rmd->first_block;
2537 do_div(tmpdiv, rmd->stripesize);
2538 rmd->first_row = tmpdiv;
2539 rmd->r5or6_first_row = tmpdiv;
2540 tmpdiv = rmd->last_block;
2541 do_div(tmpdiv, rmd->stripesize);
2542 rmd->r5or6_last_row = tmpdiv;
2544 rmd->first_row = rmd->r5or6_first_row =
2545 rmd->first_block / rmd->stripesize;
2546 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
2548 if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2549 return PQI_RAID_BYPASS_INELIGIBLE;
2551 /* Verify request is in a single column. */
2552 #if BITS_PER_LONG == 32
2553 tmpdiv = rmd->first_block;
2554 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2555 tmpdiv = rmd->first_row_offset;
2556 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2557 rmd->r5or6_first_row_offset = rmd->first_row_offset;
2558 tmpdiv = rmd->last_block;
2559 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2560 tmpdiv = rmd->r5or6_last_row_offset;
2561 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2562 tmpdiv = rmd->r5or6_first_row_offset;
2563 do_div(tmpdiv, rmd->strip_size);
2564 rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2565 tmpdiv = rmd->r5or6_last_row_offset;
2566 do_div(tmpdiv, rmd->strip_size);
2567 rmd->r5or6_last_column = tmpdiv;
2569 rmd->first_row_offset = rmd->r5or6_first_row_offset =
2570 (u32)((rmd->first_block % rmd->stripesize) %
2571 rmd->blocks_per_row);
2573 rmd->r5or6_last_row_offset =
2574 (u32)((rmd->last_block % rmd->stripesize) %
2575 rmd->blocks_per_row);
2578 rmd->first_column = rmd->r5or6_first_row_offset / rmd->strip_size;
2579 rmd->r5or6_first_column = rmd->first_column;
2580 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2582 if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2583 return PQI_RAID_BYPASS_INELIGIBLE;
2585 /* Request is eligible. */
2587 rmd->map_row = ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2588 get_unaligned_le16(&raid_map->row_cnt);
2590 rmd->map_index = (rmd->first_group *
2591 (get_unaligned_le16(&raid_map->row_cnt) *
2592 rmd->total_disks_per_row)) +
2593 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
2595 if (rmd->is_write) {
2599 * p_parity_it_nexus and q_parity_it_nexus are the IT nexus handles of
2600 * the parity entries inside the device's raid_map.
2602 * A device's RAID map size is bounded by the number of RAID disks squared.
2604 * The device's RAID map size is checked during device initialization.
2607 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2608 index *= rmd->total_disks_per_row;
2609 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2611 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2612 if (rmd->raid_level == SA_RAID_6) {
2613 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2614 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2616 if (rmd->blocks_per_row == 0)
2617 return PQI_RAID_BYPASS_INELIGIBLE;
2618 #if BITS_PER_LONG == 32
2619 tmpdiv = rmd->first_block;
2620 do_div(tmpdiv, rmd->blocks_per_row);
2623 rmd->row = rmd->first_block / rmd->blocks_per_row;
2630 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2632 /* Build the new CDB for the physical disk I/O. */
2633 if (rmd->disk_block > 0xffffffff) {
2634 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2636 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2637 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2640 rmd->cdb_length = 16;
2642 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2644 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2646 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2648 rmd->cdb_length = 10;
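/*
 * disk_block values that no longer fit in 32 bits force the 16-byte
 * READ(16)/WRITE(16) CDB with a 64-bit LBA; everything else uses the
 * 10-byte CDB, whose 16-bit transfer length is why the bypass path
 * rejects any disk_block_cnt larger than 0xffff before building the CDB.
 */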
2652 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
2653 struct pqi_scsi_dev_raid_map_data *rmd)
2658 group = rmd->map_index / rmd->data_disks_per_row;
2660 index = rmd->map_index - (group * rmd->data_disks_per_row);
2661 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2662 index += rmd->data_disks_per_row;
2663 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2664 if (rmd->layout_map_count > 2) {
2665 index += rmd->data_disks_per_row;
2666 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2669 rmd->num_it_nexus_entries = rmd->layout_map_count;
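/*
 * For RAID 1/Triple writes every mirror leg needs its own IT nexus:
 * map_index is reduced to a column within the first data row, and the
 * same column in each following row supplies the second (and, for
 * triple mirroring, third) handle, so num_it_nexus_entries ends up
 * equal to layout_map_count (2 or 3).
 */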
2672 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2673 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2674 struct pqi_queue_group *queue_group)
2677 struct raid_map *raid_map;
2679 u32 next_bypass_group;
2680 struct pqi_encryption_info *encryption_info_ptr;
2681 struct pqi_encryption_info encryption_info;
2682 struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2684 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2686 return PQI_RAID_BYPASS_INELIGIBLE;
2688 rmd.raid_level = device->raid_level;
2690 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
2691 return PQI_RAID_BYPASS_INELIGIBLE;
2693 if (unlikely(rmd.block_cnt == 0))
2694 return PQI_RAID_BYPASS_INELIGIBLE;
2696 raid_map = device->raid_map;
2698 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2700 return PQI_RAID_BYPASS_INELIGIBLE;
2702 if (device->raid_level == SA_RAID_1 ||
2703 device->raid_level == SA_RAID_TRIPLE) {
2705 pqi_calc_aio_r1_nexus(raid_map, &rmd);
2707 group = device->next_bypass_group;
2708 next_bypass_group = group + 1;
2709 if (next_bypass_group >= rmd.layout_map_count)
2710 next_bypass_group = 0;
2711 device->next_bypass_group = next_bypass_group;
2712 rmd.map_index += group * rmd.data_disks_per_row;
2714 } else if ((device->raid_level == SA_RAID_5 ||
2715 device->raid_level == SA_RAID_6) &&
2716 (rmd.layout_map_count > 1 || rmd.is_write)) {
2717 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
2719 return PQI_RAID_BYPASS_INELIGIBLE;
2722 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
2723 return PQI_RAID_BYPASS_INELIGIBLE;
2725 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
2726 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2727 rmd.first_row * rmd.strip_size +
2728 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
2729 rmd.disk_block_cnt = rmd.block_cnt;
2731 /* Handle differing logical/physical block sizes. */
2732 if (raid_map->phys_blk_shift) {
2733 rmd.disk_block <<= raid_map->phys_blk_shift;
2734 rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
2737 if (unlikely(rmd.disk_block_cnt > 0xffff))
2738 return PQI_RAID_BYPASS_INELIGIBLE;
2740 pqi_set_aio_cdb(&rmd);
2742 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
2743 if (rmd.data_length > device->max_transfer_encrypted)
2744 return PQI_RAID_BYPASS_INELIGIBLE;
2745 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
2746 encryption_info_ptr = &encryption_info;
2748 encryption_info_ptr = NULL;
2752 switch (device->raid_level) {
2754 case SA_RAID_TRIPLE:
2755 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
2756 encryption_info_ptr, device, &rmd);
2759 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
2760 encryption_info_ptr, device, &rmd);
2764 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
2765 rmd.cdb, rmd.cdb_length, queue_group,
2766 encryption_info_ptr, true);
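/*
 * For RAID 1/Triple reads, next_bypass_group cycles through the mirror
 * copies (0 .. layout_map_count - 1) and map_index is offset by one
 * data row per group, which presumably spreads successive bypass reads
 * across the mirror legs; writes instead take the dedicated RAID 1 and
 * RAID 5/6 write submission paths above.
 */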
2769 #define PQI_STATUS_IDLE 0x0
2771 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2772 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2774 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2775 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2776 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2777 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2778 #define PQI_DEVICE_STATE_ERROR 0x4
2780 #define PQI_MODE_READY_TIMEOUT_SECS 30
2781 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2783 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2785 struct pqi_device_registers __iomem *pqi_registers;
2786 unsigned long timeout;
2790 pqi_registers = ctrl_info->pqi_registers;
2791 timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
2794 signature = readq(&pqi_registers->signature);
2795 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2796 sizeof(signature)) == 0)
2798 if (time_after(jiffies, timeout)) {
2799 dev_err(&ctrl_info->pci_dev->dev,
2800 "timed out waiting for PQI signature\n");
2803 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2807 status = readb(&pqi_registers->function_and_status_code);
2808 if (status == PQI_STATUS_IDLE)
2810 if (time_after(jiffies, timeout)) {
2811 dev_err(&ctrl_info->pci_dev->dev,
2812 "timed out waiting for PQI IDLE\n");
2815 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2819 if (readl(&pqi_registers->device_status) ==
2820 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2822 if (time_after(jiffies, timeout)) {
2823 dev_err(&ctrl_info->pci_dev->dev,
2824 "timed out waiting for PQI all registers ready\n");
2827 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
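/*
 * The PQI-mode wait runs three polls against a single 30-second
 * deadline: first for the PQI signature to appear, then for the
 * function-and-status code to reach IDLE, and finally for the device
 * status to report ALL_REGISTERS_READY, sleeping
 * PQI_MODE_READY_POLL_INTERVAL_MSECS between reads.
 */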
2833 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2835 struct pqi_scsi_dev *device;
2837 device = io_request->scmd->device->hostdata;
2838 device->raid_bypass_enabled = false;
2839 device->aio_enabled = false;
2842 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
2844 struct pqi_ctrl_info *ctrl_info;
2845 struct pqi_scsi_dev *device;
2847 device = sdev->hostdata;
2848 if (device->device_offline)
2851 device->device_offline = true;
2852 ctrl_info = shost_to_hba(sdev->host);
2853 pqi_schedule_rescan_worker(ctrl_info);
2854 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
2855 path, ctrl_info->scsi_host->host_no, device->bus,
2856 device->target, device->lun);
2859 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2863 struct scsi_cmnd *scmd;
2864 struct pqi_raid_error_info *error_info;
2865 size_t sense_data_length;
2868 struct scsi_sense_hdr sshdr;
2870 scmd = io_request->scmd;
2874 error_info = io_request->error_info;
2875 scsi_status = error_info->status;
2878 switch (error_info->data_out_result) {
2879 case PQI_DATA_IN_OUT_GOOD:
2881 case PQI_DATA_IN_OUT_UNDERFLOW:
2883 xfer_count = get_unaligned_le32(&error_info->data_out_transferred);
2884 residual_count = scsi_bufflen(scmd) - xfer_count;
2885 scsi_set_resid(scmd, residual_count);
2886 if (xfer_count < scmd->underflow)
2887 host_byte = DID_SOFT_ERROR;
2889 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2890 case PQI_DATA_IN_OUT_ABORTED:
2891 host_byte = DID_ABORT;
2893 case PQI_DATA_IN_OUT_TIMEOUT:
2894 host_byte = DID_TIME_OUT;
2896 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2897 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2898 case PQI_DATA_IN_OUT_BUFFER_ERROR:
2899 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2900 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2901 case PQI_DATA_IN_OUT_ERROR:
2902 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2903 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2904 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2905 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2906 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2907 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2908 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2909 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2910 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2911 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2913 host_byte = DID_ERROR;
2917 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2918 if (sense_data_length == 0)
2920 sense_data_length = get_unaligned_le16(&error_info->response_data_length);
2921 if (sense_data_length) {
2922 if (sense_data_length > sizeof(error_info->data))
2923 sense_data_length = sizeof(error_info->data);
2925 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2926 scsi_normalize_sense(error_info->data,
2927 sense_data_length, &sshdr) &&
2928 sshdr.sense_key == HARDWARE_ERROR &&
2929 sshdr.asc == 0x3e) {
2930 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
2931 struct pqi_scsi_dev *device = scmd->device->hostdata;
2933 switch (sshdr.ascq) {
2934 case 0x1: /* LOGICAL UNIT FAILURE */
2935 if (printk_ratelimit())
2936 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
2937 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2938 pqi_take_device_offline(scmd->device, "RAID");
2939 host_byte = DID_NO_CONNECT;
2942 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
2943 if (printk_ratelimit())
2944 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
2945 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2950 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2951 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2952 memcpy(scmd->sense_buffer, error_info->data,
2956 scmd->result = scsi_status;
2957 set_host_byte(scmd, host_byte);
2960 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2964 struct scsi_cmnd *scmd;
2965 struct pqi_aio_error_info *error_info;
2966 size_t sense_data_length;
2969 bool device_offline;
2971 scmd = io_request->scmd;
2972 error_info = io_request->error_info;
2974 sense_data_length = 0;
2975 device_offline = false;
2977 switch (error_info->service_response) {
2978 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2979 scsi_status = error_info->status;
2981 case PQI_AIO_SERV_RESPONSE_FAILURE:
2982 switch (error_info->status) {
2983 case PQI_AIO_STATUS_IO_ABORTED:
2984 scsi_status = SAM_STAT_TASK_ABORTED;
2986 case PQI_AIO_STATUS_UNDERRUN:
2987 scsi_status = SAM_STAT_GOOD;
2988 residual_count = get_unaligned_le32(
2989 &error_info->residual_count);
2990 scsi_set_resid(scmd, residual_count);
2991 xfer_count = scsi_bufflen(scmd) - residual_count;
2992 if (xfer_count < scmd->underflow)
2993 host_byte = DID_SOFT_ERROR;
2995 case PQI_AIO_STATUS_OVERRUN:
2996 scsi_status = SAM_STAT_GOOD;
2998 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2999 pqi_aio_path_disabled(io_request);
3000 scsi_status = SAM_STAT_GOOD;
3001 io_request->status = -EAGAIN;
3003 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3004 case PQI_AIO_STATUS_INVALID_DEVICE:
3005 if (!io_request->raid_bypass) {
3006 device_offline = true;
3007 pqi_take_device_offline(scmd->device, "AIO");
3008 host_byte = DID_NO_CONNECT;
3010 scsi_status = SAM_STAT_CHECK_CONDITION;
3012 case PQI_AIO_STATUS_IO_ERROR:
3014 scsi_status = SAM_STAT_CHECK_CONDITION;
3018 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3019 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3020 scsi_status = SAM_STAT_GOOD;
3022 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3023 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3025 scsi_status = SAM_STAT_CHECK_CONDITION;
3029 if (error_info->data_present) {
3031 sense_data_length = get_unaligned_le16(&error_info->data_length);
3032 if (sense_data_length) {
3033 if (sense_data_length > sizeof(error_info->data))
3034 sense_data_length = sizeof(error_info->data);
3035 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3036 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3037 memcpy(scmd->sense_buffer, error_info->data,
3042 if (device_offline && sense_data_length == 0)
3043 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
3046 scmd->result = scsi_status;
3047 set_host_byte(scmd, host_byte);
3050 static void pqi_process_io_error(unsigned int iu_type,
3051 struct pqi_io_request *io_request)
3054 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3055 pqi_process_raid_io_error(io_request);
3057 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3058 pqi_process_aio_io_error(io_request);
3063 static int pqi_interpret_task_management_response(
3064 struct pqi_task_management_response *response)
3068 switch (response->response_code) {
3069 case SOP_TMF_COMPLETE:
3070 case SOP_TMF_FUNCTION_SUCCEEDED:
3073 case SOP_TMF_REJECTED:
3084 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
3086 pqi_take_ctrl_offline(ctrl_info);
3089 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
3094 struct pqi_io_request *io_request;
3095 struct pqi_io_response *response;
3099 oq_ci = queue_group->oq_ci_copy;
3102 oq_pi = readl(queue_group->oq_pi);
3103 if (oq_pi >= ctrl_info->num_elements_per_oq) {
3104 pqi_invalid_response(ctrl_info);
3105 dev_err(&ctrl_info->pci_dev->dev,
3106 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3107 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3114 response = queue_group->oq_element_array +
3115 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3117 request_id = get_unaligned_le16(&response->request_id);
3118 if (request_id >= ctrl_info->max_io_slots) {
3119 pqi_invalid_response(ctrl_info);
3120 dev_err(&ctrl_info->pci_dev->dev,
3121 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
3122 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3126 io_request = &ctrl_info->io_request_pool[request_id];
3127 if (atomic_read(&io_request->refcount) == 0) {
3128 pqi_invalid_response(ctrl_info);
3129 dev_err(&ctrl_info->pci_dev->dev,
3130 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
3131 request_id, oq_pi, oq_ci);
3135 switch (response->header.iu_type) {
3136 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3137 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
3138 if (io_request->scmd)
3139 io_request->scmd->result = 0;
3141 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3143 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3144 io_request->status =
3146 get_unaligned_le16(&((struct pqi_vendor_general_response *)response)->status);
3148 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3149 io_request->status =
3150 pqi_interpret_task_management_response((void *)response);
3153 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3154 pqi_aio_path_disabled(io_request);
3155 io_request->status = -EAGAIN;
3157 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3158 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3159 io_request->error_info = ctrl_info->error_buffer +
3160 (get_unaligned_le16(&response->error_index) *
3161 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3162 pqi_process_io_error(response->header.iu_type, io_request);
3165 pqi_invalid_response(ctrl_info);
3166 dev_err(&ctrl_info->pci_dev->dev,
3167 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
3168 response->header.iu_type, oq_pi, oq_ci);
3172 io_request->io_complete_callback(io_request, io_request->context);
3175 * Note that the I/O request structure CANNOT BE TOUCHED after
3176 * returning from the I/O completion callback!
3178 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3181 if (num_responses) {
3182 queue_group->oq_ci_copy = oq_ci;
3183 writel(oq_ci, queue_group->oq_ci);
3186 return num_responses;
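/*
 * The consumer index is written back to the controller only once per
 * interrupt, after every posted response has been drained; the count
 * returned here also tells the interrupt handler whether any work was
 * actually done.
 */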
3189 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3190 unsigned int ci, unsigned int elements_in_queue)
3192 unsigned int num_elements_used;
3195 num_elements_used = (pi >= ci) ? pi - ci :
3197 elements_in_queue - ci + pi;
3199 return elements_in_queue - num_elements_used - 1;
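/*
 * Example, assuming a 16-element queue: pi = 3 and ci = 10 means the
 * producer has wrapped, so 16 - 10 + 3 = 9 elements are in use and
 * 16 - 9 - 1 = 6 are free; one slot is always left unused, presumably
 * so a completely full queue can be told apart from an empty one.
 */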
3202 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3203 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3207 unsigned long flags;
3209 struct pqi_queue_group *queue_group;
3211 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3212 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3215 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3217 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3218 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3220 if (pqi_num_elements_free(iq_pi, iq_ci,
3221 ctrl_info->num_elements_per_iq))
3224 spin_unlock_irqrestore(
3225 &queue_group->submit_lock[RAID_PATH], flags);
3227 if (pqi_ctrl_offline(ctrl_info))
3231 next_element = queue_group->iq_element_array[RAID_PATH] +
3232 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3234 memcpy(next_element, iu, iu_length);
3236 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3237 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3240 * This write notifies the controller that an IU is available to be processed.
3243 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3245 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3248 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3249 struct pqi_event *event)
3251 struct pqi_event_acknowledge_request request;
3253 memset(&request, 0, sizeof(request));
3255 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3256 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3257 &request.header.iu_length);
3258 request.event_type = event->event_type;
3259 request.event_id = event->event_id;
3260 request.additional_event_id = event->additional_event_id;
3262 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3265 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3266 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3268 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3269 struct pqi_ctrl_info *ctrl_info)
3272 unsigned long timeout;
3274 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
3277 status = pqi_read_soft_reset_status(ctrl_info);
3278 if (status & PQI_SOFT_RESET_INITIATE)
3279 return RESET_INITIATE_DRIVER;
3281 if (status & PQI_SOFT_RESET_ABORT)
3284 if (time_after(jiffies, timeout)) {
3285 dev_err(&ctrl_info->pci_dev->dev,
3286 "timed out waiting for soft reset status\n");
3287 return RESET_TIMEDOUT;
3290 if (!sis_is_firmware_running(ctrl_info))
3291 return RESET_NORESPONSE;
3293 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3297 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
3298 enum pqi_soft_reset_status reset_status)
3302 switch (reset_status) {
3303 case RESET_INITIATE_DRIVER:
3304 case RESET_TIMEDOUT:
3305 dev_info(&ctrl_info->pci_dev->dev,
3306 "resetting controller %u\n", ctrl_info->ctrl_id);
3307 sis_soft_reset(ctrl_info);
3309 case RESET_INITIATE_FIRMWARE:
3310 rc = pqi_ofa_ctrl_restart(ctrl_info);
3311 pqi_ofa_free_host_buffer(ctrl_info);
3312 dev_info(&ctrl_info->pci_dev->dev,
3313 "Online Firmware Activation for controller %u: %s\n",
3314 ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
3317 pqi_ofa_ctrl_unquiesce(ctrl_info);
3318 dev_info(&ctrl_info->pci_dev->dev,
3319 "Online Firmware Activation for controller %u: %s\n",
3320 ctrl_info->ctrl_id, "ABORTED");
3322 case RESET_NORESPONSE:
3323 pqi_ofa_free_host_buffer(ctrl_info);
3324 pqi_take_ctrl_offline(ctrl_info);
3329 static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3330 struct pqi_event *event)
3333 enum pqi_soft_reset_status status;
3335 event_id = get_unaligned_le16(&event->event_id);
3337 mutex_lock(&ctrl_info->ofa_mutex);
3339 if (event_id == PQI_EVENT_OFA_QUIESCE) {
3340 dev_info(&ctrl_info->pci_dev->dev,
3341 "Received Online Firmware Activation quiesce event for controller %u\n",
3342 ctrl_info->ctrl_id);
3343 pqi_ofa_ctrl_quiesce(ctrl_info);
3344 pqi_acknowledge_event(ctrl_info, event);
3345 if (ctrl_info->soft_reset_handshake_supported) {
3346 status = pqi_poll_for_soft_reset_status(ctrl_info);
3347 pqi_process_soft_reset(ctrl_info, status);
3349 pqi_process_soft_reset(ctrl_info,
3350 RESET_INITIATE_FIRMWARE);
3353 } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3354 pqi_acknowledge_event(ctrl_info, event);
3355 pqi_ofa_setup_host_buffer(ctrl_info,
3356 le32_to_cpu(event->ofa_bytes_requested));
3357 pqi_ofa_host_memory_update(ctrl_info);
3358 } else if (event_id == PQI_EVENT_OFA_CANCELED) {
3359 pqi_ofa_free_host_buffer(ctrl_info);
3360 pqi_acknowledge_event(ctrl_info, event);
3361 dev_info(&ctrl_info->pci_dev->dev,
3362 "Online Firmware Activation(%u) cancel reason : %u\n",
3363 ctrl_info->ctrl_id, event->ofa_cancel_reason);
3366 mutex_unlock(&ctrl_info->ofa_mutex);
3369 static void pqi_event_worker(struct work_struct *work)
3372 struct pqi_ctrl_info *ctrl_info;
3373 struct pqi_event *event;
3375 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3377 pqi_ctrl_busy(ctrl_info);
3378 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
3379 if (pqi_ctrl_offline(ctrl_info))
3382 pqi_schedule_rescan_worker_delayed(ctrl_info);
3384 event = ctrl_info->events;
3385 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3386 if (event->pending) {
3387 event->pending = false;
3388 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3389 pqi_ctrl_unbusy(ctrl_info);
3390 pqi_ofa_process_event(ctrl_info, event);
3393 pqi_acknowledge_event(ctrl_info, event);
3399 pqi_ctrl_unbusy(ctrl_info);
3402 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
3404 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3407 u32 heartbeat_count;
3408 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
3410 pqi_check_ctrl_health(ctrl_info);
3411 if (pqi_ctrl_offline(ctrl_info))
3414 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3415 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3417 if (num_interrupts == ctrl_info->previous_num_interrupts) {
3418 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3419 dev_err(&ctrl_info->pci_dev->dev,
3420 "no heartbeat detected - last heartbeat count: %u\n",
3422 pqi_take_ctrl_offline(ctrl_info);
3426 ctrl_info->previous_num_interrupts = num_interrupts;
3429 ctrl_info->previous_heartbeat_count = heartbeat_count;
3430 mod_timer(&ctrl_info->heartbeat_timer,
3431 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
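/*
 * The controller is declared unresponsive only when both liveness
 * indicators (the MSI-X interrupt count and the firmware heartbeat
 * counter) are unchanged across one full 10-second interval; if either
 * has advanced, the snapshots are refreshed and the timer is re-armed.
 */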
3434 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3436 if (!ctrl_info->heartbeat_counter)
3439 ctrl_info->previous_num_interrupts =
3440 atomic_read(&ctrl_info->num_interrupts);
3441 ctrl_info->previous_heartbeat_count =
3442 pqi_read_heartbeat_counter(ctrl_info);
3444 ctrl_info->heartbeat_timer.expires =
3445 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3446 add_timer(&ctrl_info->heartbeat_timer);
3449 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3451 del_timer_sync(&ctrl_info->heartbeat_timer);
3454 static inline int pqi_event_type_to_event_index(unsigned int event_type)
3458 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
3459 if (event_type == pqi_supported_event_types[index])
3465 static inline bool pqi_is_supported_event(unsigned int event_type)
3467 return pqi_event_type_to_event_index(event_type) != -1;
3470 static void pqi_ofa_capture_event_payload(struct pqi_event *event,
3471 struct pqi_event_response *response)
3475 event_id = get_unaligned_le16(&event->event_id);
3477 if (event->event_type == PQI_EVENT_TYPE_OFA) {
3478 if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
3479 event->ofa_bytes_requested =
3480 response->data.ofa_memory_allocation.bytes_requested;
3481 } else if (event_id == PQI_EVENT_OFA_CANCELED) {
3482 event->ofa_cancel_reason =
3483 response->data.ofa_cancelled.reason;
3488 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3493 struct pqi_event_queue *event_queue;
3494 struct pqi_event_response *response;
3495 struct pqi_event *event;
3498 event_queue = &ctrl_info->event_queue;
3500 oq_ci = event_queue->oq_ci_copy;
3503 oq_pi = readl(event_queue->oq_pi);
3504 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3505 pqi_invalid_response(ctrl_info);
3506 dev_err(&ctrl_info->pci_dev->dev,
3507 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3508 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3516 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3518 event_index = pqi_event_type_to_event_index(response->event_type);
3520 if (event_index >= 0 && response->request_acknowledge) {
3521 event = &ctrl_info->events[event_index];
3522 event->pending = true;
3523 event->event_type = response->event_type;
3524 event->event_id = response->event_id;
3525 event->additional_event_id = response->additional_event_id;
3526 if (event->event_type == PQI_EVENT_TYPE_OFA)
3527 pqi_ofa_capture_event_payload(event, response);
3530 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3534 event_queue->oq_ci_copy = oq_ci;
3535 writel(oq_ci, event_queue->oq_ci);
3536 schedule_work(&ctrl_info->event_work);
3542 #define PQI_LEGACY_INTX_MASK 0x1
3544 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
3547 struct pqi_device_registers __iomem *pqi_registers;
3548 volatile void __iomem *register_addr;
3550 pqi_registers = ctrl_info->pqi_registers;
3553 register_addr = &pqi_registers->legacy_intx_mask_clear;
3555 register_addr = &pqi_registers->legacy_intx_mask_set;
3557 intx_mask = readl(register_addr);
3558 intx_mask |= PQI_LEGACY_INTX_MASK;
3559 writel(intx_mask, register_addr);
3562 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3563 enum pqi_irq_mode new_mode)
3565 switch (ctrl_info->irq_mode) {
3571 pqi_configure_legacy_intx(ctrl_info, true);
3572 sis_enable_intx(ctrl_info);
3581 pqi_configure_legacy_intx(ctrl_info, false);
3582 sis_enable_msix(ctrl_info);
3587 pqi_configure_legacy_intx(ctrl_info, false);
3594 sis_enable_msix(ctrl_info);
3597 pqi_configure_legacy_intx(ctrl_info, true);
3598 sis_enable_intx(ctrl_info);
3606 ctrl_info->irq_mode = new_mode;
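/*
 * Roughly: moving toward legacy INTx unmasks the legacy interrupt
 * source and asks the SIS firmware to signal INTx, moving toward MSI-X
 * masks INTx and re-enables MSI-X, and a transition to the same mode
 * leaves the registers untouched; irq_mode is recorded last in all
 * cases.
 */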
3609 #define PQI_LEGACY_INTX_PENDING 0x1
3611 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3616 switch (ctrl_info->irq_mode) {
3621 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
3622 if (intx_status & PQI_LEGACY_INTX_PENDING)
3636 static irqreturn_t pqi_irq_handler(int irq, void *data)
3638 struct pqi_ctrl_info *ctrl_info;
3639 struct pqi_queue_group *queue_group;
3640 int num_io_responses_handled;
3641 int num_events_handled;
3644 ctrl_info = queue_group->ctrl_info;
3646 if (!pqi_is_valid_irq(ctrl_info))
3649 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3650 if (num_io_responses_handled < 0)
3653 if (irq == ctrl_info->event_irq) {
3654 num_events_handled = pqi_process_event_intr(ctrl_info);
3655 if (num_events_handled < 0)
3658 num_events_handled = 0;
3661 if (num_io_responses_handled + num_events_handled > 0)
3662 atomic_inc(&ctrl_info->num_interrupts);
3664 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3665 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3671 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3673 struct pci_dev *pci_dev = ctrl_info->pci_dev;
3677 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3679 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
3680 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
3681 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
3683 dev_err(&pci_dev->dev,
3684 "irq %u init failed with error %d\n",
3685 pci_irq_vector(pci_dev, i), rc);
3688 ctrl_info->num_msix_vectors_initialized++;
3694 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3698 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3699 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3700 &ctrl_info->queue_groups[i]);
3702 ctrl_info->num_msix_vectors_initialized = 0;
3705 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3707 int num_vectors_enabled;
3709 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3710 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3711 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3712 if (num_vectors_enabled < 0) {
3713 dev_err(&ctrl_info->pci_dev->dev,
3714 "MSI-X init failed with error %d\n",
3715 num_vectors_enabled);
3716 return num_vectors_enabled;
3719 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
3720 ctrl_info->irq_mode = IRQ_MODE_MSIX;
3724 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3726 if (ctrl_info->num_msix_vectors_enabled) {
3727 pci_free_irq_vectors(ctrl_info->pci_dev);
3728 ctrl_info->num_msix_vectors_enabled = 0;
3732 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3735 size_t alloc_length;
3736 size_t element_array_length_per_iq;
3737 size_t element_array_length_per_oq;
3738 void *element_array;
3739 void __iomem *next_queue_index;
3740 void *aligned_pointer;
3741 unsigned int num_inbound_queues;
3742 unsigned int num_outbound_queues;
3743 unsigned int num_queue_indexes;
3744 struct pqi_queue_group *queue_group;
3746 element_array_length_per_iq =
3747 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3748 ctrl_info->num_elements_per_iq;
3749 element_array_length_per_oq =
3750 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3751 ctrl_info->num_elements_per_oq;
3752 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3753 num_outbound_queues = ctrl_info->num_queue_groups;
3754 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3756 aligned_pointer = NULL;
3758 for (i = 0; i < num_inbound_queues; i++) {
3759 aligned_pointer = PTR_ALIGN(aligned_pointer,
3760 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3761 aligned_pointer += element_array_length_per_iq;
3764 for (i = 0; i < num_outbound_queues; i++) {
3765 aligned_pointer = PTR_ALIGN(aligned_pointer,
3766 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3767 aligned_pointer += element_array_length_per_oq;
3770 aligned_pointer = PTR_ALIGN(aligned_pointer,
3771 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3772 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3773 PQI_EVENT_OQ_ELEMENT_LENGTH;
3775 for (i = 0; i < num_queue_indexes; i++) {
3776 aligned_pointer = PTR_ALIGN(aligned_pointer,
3777 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3778 aligned_pointer += sizeof(pqi_index_t);
3781 alloc_length = (size_t)aligned_pointer +
3782 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3784 alloc_length += PQI_EXTRA_SGL_MEMORY;
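/*
 * alloc_length is derived by dry-running the carve-up that follows:
 * aligned_pointer starts at NULL and is stepped through every element
 * array and queue index with the same PTR_ALIGN calls, so its final
 * value is the worst-case size; one extra alignment's worth of padding
 * plus PQI_EXTRA_SGL_MEMORY is then added before the single
 * dma_alloc_coherent() below.
 */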
3786 ctrl_info->queue_memory_base =
3787 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3788 &ctrl_info->queue_memory_base_dma_handle,
3791 if (!ctrl_info->queue_memory_base)
3794 ctrl_info->queue_memory_length = alloc_length;
3796 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3797 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3799 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3800 queue_group = &ctrl_info->queue_groups[i];
3801 queue_group->iq_element_array[RAID_PATH] = element_array;
3802 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3803 ctrl_info->queue_memory_base_dma_handle +
3804 (element_array - ctrl_info->queue_memory_base);
3805 element_array += element_array_length_per_iq;
3806 element_array = PTR_ALIGN(element_array,
3807 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3808 queue_group->iq_element_array[AIO_PATH] = element_array;
3809 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3810 ctrl_info->queue_memory_base_dma_handle +
3811 (element_array - ctrl_info->queue_memory_base);
3812 element_array += element_array_length_per_iq;
3813 element_array = PTR_ALIGN(element_array,
3814 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3817 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3818 queue_group = &ctrl_info->queue_groups[i];
3819 queue_group->oq_element_array = element_array;
3820 queue_group->oq_element_array_bus_addr =
3821 ctrl_info->queue_memory_base_dma_handle +
3822 (element_array - ctrl_info->queue_memory_base);
3823 element_array += element_array_length_per_oq;
3824 element_array = PTR_ALIGN(element_array,
3825 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3828 ctrl_info->event_queue.oq_element_array = element_array;
3829 ctrl_info->event_queue.oq_element_array_bus_addr =
3830 ctrl_info->queue_memory_base_dma_handle +
3831 (element_array - ctrl_info->queue_memory_base);
3832 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3833 PQI_EVENT_OQ_ELEMENT_LENGTH;
3835 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
3836 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3838 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3839 queue_group = &ctrl_info->queue_groups[i];
3840 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3841 queue_group->iq_ci_bus_addr[RAID_PATH] =
3842 ctrl_info->queue_memory_base_dma_handle +
3844 (next_queue_index - (void __iomem *)ctrl_info->queue_memory_base);
3845 next_queue_index += sizeof(pqi_index_t);
3846 next_queue_index = PTR_ALIGN(next_queue_index,
3847 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3848 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3849 queue_group->iq_ci_bus_addr[AIO_PATH] =
3850 ctrl_info->queue_memory_base_dma_handle +
3852 (next_queue_index - (void __iomem *)ctrl_info->queue_memory_base);
3853 next_queue_index += sizeof(pqi_index_t);
3854 next_queue_index = PTR_ALIGN(next_queue_index,
3855 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3856 queue_group->oq_pi = next_queue_index;
3857 queue_group->oq_pi_bus_addr =
3858 ctrl_info->queue_memory_base_dma_handle +
3860 (next_queue_index - (void __iomem *)ctrl_info->queue_memory_base);
3861 next_queue_index += sizeof(pqi_index_t);
3862 next_queue_index = PTR_ALIGN(next_queue_index,
3863 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3866 ctrl_info->event_queue.oq_pi = next_queue_index;
3867 ctrl_info->event_queue.oq_pi_bus_addr =
3868 ctrl_info->queue_memory_base_dma_handle +
3870 (next_queue_index - (void __iomem *)ctrl_info->queue_memory_base);
3875 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3878 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3879 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3882 * Initialize the backpointers to the controller structure in
3883 * each operational queue group structure.
3885 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3886 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3889 * Assign IDs to all operational queues. Note that the IDs
3890 * assigned to operational IQs are independent of the IDs
3891 * assigned to operational OQs.
3893 ctrl_info->event_queue.oq_id = next_oq_id++;
3894 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3895 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3896 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3897 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3901 * Assign MSI-X table entry indexes to all queues. Note that the
3902 * interrupt for the event queue is shared with the first queue group.
3904 ctrl_info->event_queue.int_msg_num = 0;
3905 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3906 ctrl_info->queue_groups[i].int_msg_num = i;
3908 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3909 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3910 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3911 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3912 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3916 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3918 size_t alloc_length;
3919 struct pqi_admin_queues_aligned *admin_queues_aligned;
3920 struct pqi_admin_queues *admin_queues;
3922 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3923 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3925 ctrl_info->admin_queue_memory_base =
3926 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3927 &ctrl_info->admin_queue_memory_base_dma_handle,
3930 if (!ctrl_info->admin_queue_memory_base)
3933 ctrl_info->admin_queue_memory_length = alloc_length;
3935 admin_queues = &ctrl_info->admin_queues;
3936 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3937 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3938 admin_queues->iq_element_array =
3939 &admin_queues_aligned->iq_element_array;
3940 admin_queues->oq_element_array =
3941 &admin_queues_aligned->oq_element_array;
3942 admin_queues->iq_ci =
3943 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
3944 admin_queues->oq_pi =
3945 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
3947 admin_queues->iq_element_array_bus_addr =
3948 ctrl_info->admin_queue_memory_base_dma_handle +
3949 (admin_queues->iq_element_array -
3950 ctrl_info->admin_queue_memory_base);
3951 admin_queues->oq_element_array_bus_addr =
3952 ctrl_info->admin_queue_memory_base_dma_handle +
3953 (admin_queues->oq_element_array -
3954 ctrl_info->admin_queue_memory_base);
3955 admin_queues->iq_ci_bus_addr =
3956 ctrl_info->admin_queue_memory_base_dma_handle +
3957 ((void __iomem *)admin_queues->iq_ci -
3958 (void __iomem *)ctrl_info->admin_queue_memory_base);
3959 admin_queues->oq_pi_bus_addr =
3960 ctrl_info->admin_queue_memory_base_dma_handle +
3961 ((void __iomem *)admin_queues->oq_pi -
3962 (void __iomem *)ctrl_info->admin_queue_memory_base);
3967 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
3968 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3970 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3972 struct pqi_device_registers __iomem *pqi_registers;
3973 struct pqi_admin_queues *admin_queues;
3974 unsigned long timeout;
3978 pqi_registers = ctrl_info->pqi_registers;
3979 admin_queues = &ctrl_info->admin_queues;
3981 writeq((u64)admin_queues->iq_element_array_bus_addr,
3982 &pqi_registers->admin_iq_element_array_addr);
3983 writeq((u64)admin_queues->oq_element_array_bus_addr,
3984 &pqi_registers->admin_oq_element_array_addr);
3985 writeq((u64)admin_queues->iq_ci_bus_addr,
3986 &pqi_registers->admin_iq_ci_addr);
3987 writeq((u64)admin_queues->oq_pi_bus_addr,
3988 &pqi_registers->admin_oq_pi_addr);
3990 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3991 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
3992 (admin_queues->int_msg_num << 16);
3993 writel(reg, &pqi_registers->admin_iq_num_elements);
3995 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3996 &pqi_registers->function_and_status_code);
3998 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4000 status = readb(&pqi_registers->function_and_status_code);
4001 if (status == PQI_STATUS_IDLE)
4003 if (time_after(jiffies, timeout))
4005 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
4009 * The offset registers are not initialized to the correct
4010 * offsets until *after* the create admin queue pair command
4011 * completes successfully.
4013 admin_queues->iq_pi = ctrl_info->iomem_base +
4014 PQI_DEVICE_REGISTERS_OFFSET +
4015 readq(&pqi_registers->admin_iq_pi_offset);
4016 admin_queues->oq_ci = ctrl_info->iomem_base +
4017 PQI_DEVICE_REGISTERS_OFFSET +
4018 readq(&pqi_registers->admin_oq_ci_offset);
4023 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4024 struct pqi_general_admin_request *request)
4026 struct pqi_admin_queues *admin_queues;
4030 admin_queues = &ctrl_info->admin_queues;
4031 iq_pi = admin_queues->iq_pi_copy;
4033 next_element = admin_queues->iq_element_array +
4034 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4036 memcpy(next_element, request, sizeof(*request));
4038 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4039 admin_queues->iq_pi_copy = iq_pi;
4042 * This write notifies the controller that an IU is available to be processed.
4045 writel(iq_pi, admin_queues->iq_pi);
4048 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
4050 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4051 struct pqi_general_admin_response *response)
4053 struct pqi_admin_queues *admin_queues;
4056 unsigned long timeout;
4058 admin_queues = &ctrl_info->admin_queues;
4059 oq_ci = admin_queues->oq_ci_copy;
4061 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
4064 oq_pi = readl(admin_queues->oq_pi);
4067 if (time_after(jiffies, timeout)) {
4068 dev_err(&ctrl_info->pci_dev->dev,
4069 "timed out waiting for admin response\n");
4072 if (!sis_is_firmware_running(ctrl_info))
4074 usleep_range(1000, 2000);
4077 memcpy(response, admin_queues->oq_element_array +
4078 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4080 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4081 admin_queues->oq_ci_copy = oq_ci;
4082 writel(oq_ci, admin_queues->oq_ci);
4087 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4088 struct pqi_queue_group *queue_group, enum pqi_io_path path,
4089 struct pqi_io_request *io_request)
4091 struct pqi_io_request *next;
4096 unsigned long flags;
4097 unsigned int num_elements_needed;
4098 unsigned int num_elements_to_end_of_queue;
4100 struct pqi_iu_header *request;
4102 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4105 io_request->queue_group = queue_group;
4106 list_add_tail(&io_request->request_list_entry,
4107 &queue_group->request_list[path]);
4110 iq_pi = queue_group->iq_pi_copy[path];
4112 list_for_each_entry_safe(io_request, next,
4113 &queue_group->request_list[path], request_list_entry) {
4115 request = io_request->iu;
4117 iu_length = get_unaligned_le16(&request->iu_length) +
4118 PQI_REQUEST_HEADER_LENGTH;
4119 num_elements_needed =
4120 DIV_ROUND_UP(iu_length,
4121 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4123 iq_ci = readl(queue_group->iq_ci[path]);
4125 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4126 ctrl_info->num_elements_per_iq))
4129 put_unaligned_le16(queue_group->oq_id,
4130 &request->response_queue_id);
4132 next_element = queue_group->iq_element_array[path] +
4133 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4135 num_elements_to_end_of_queue =
4136 ctrl_info->num_elements_per_iq - iq_pi;
4138 if (num_elements_needed <= num_elements_to_end_of_queue) {
4139 memcpy(next_element, request, iu_length);
4141 copy_count = num_elements_to_end_of_queue *
4142 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4143 memcpy(next_element, request, copy_count);
4144 memcpy(queue_group->iq_element_array[path],
4145 (u8 *)request + copy_count,
4146 iu_length - copy_count);
4149 iq_pi = (iq_pi + num_elements_needed) %
4150 ctrl_info->num_elements_per_iq;
4152 list_del(&io_request->request_list_entry);
4155 if (iq_pi != queue_group->iq_pi_copy[path]) {
4156 queue_group->iq_pi_copy[path] = iq_pi;
4158 * This write notifies the controller that one or more IUs are
4159 * available to be processed.
4161 writel(iq_pi, queue_group->iq_pi[path]);
4164 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
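/*
 * Spanning IUs may also wrap the ring: the copy is split so the first
 * piece fills the elements up to the end of the array and the remainder
 * lands at element 0. iq_pi then advances modulo the queue depth, and a
 * single doorbell write at the end covers every IU queued in this pass.
 */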
4167 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
4169 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4170 struct completion *wait)
4175 if (wait_for_completion_io_timeout(wait,
4176 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
4181 pqi_check_ctrl_health(ctrl_info);
4182 if (pqi_ctrl_offline(ctrl_info)) {
4191 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4194 struct completion *waiting = context;
4199 static int pqi_process_raid_io_error_synchronous(
4200 struct pqi_raid_error_info *error_info)
4204 switch (error_info->data_out_result) {
4205 case PQI_DATA_IN_OUT_GOOD:
4206 if (error_info->status == SAM_STAT_GOOD)
4209 case PQI_DATA_IN_OUT_UNDERFLOW:
4210 if (error_info->status == SAM_STAT_GOOD ||
4211 error_info->status == SAM_STAT_CHECK_CONDITION)
4214 case PQI_DATA_IN_OUT_ABORTED:
4215 rc = PQI_CMD_STATUS_ABORTED;
4222 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4223 struct pqi_iu_header *request, unsigned int flags,
4224 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
4227 struct pqi_io_request *io_request;
4228 unsigned long start_jiffies;
4229 unsigned long msecs_blocked;
4231 DECLARE_COMPLETION_ONSTACK(wait);
4234 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
4235 * are mutually exclusive.
4238 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4239 if (down_interruptible(&ctrl_info->sync_request_sem))
4240 return -ERESTARTSYS;
4242 if (timeout_msecs == NO_TIMEOUT) {
4243 down(&ctrl_info->sync_request_sem);
4245 start_jiffies = jiffies;
4246 if (down_timeout(&ctrl_info->sync_request_sem,
4247 msecs_to_jiffies(timeout_msecs)))
4250 msecs_blocked = jiffies_to_msecs(jiffies - start_jiffies);
4251 if (msecs_blocked >= timeout_msecs) {
4255 timeout_msecs -= msecs_blocked;
4259 pqi_ctrl_busy(ctrl_info);
4260 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
4261 if (timeout_msecs == 0) {
4262 pqi_ctrl_unbusy(ctrl_info);
4267 if (pqi_ctrl_offline(ctrl_info)) {
4268 pqi_ctrl_unbusy(ctrl_info);
4273 atomic_inc(&ctrl_info->sync_cmds_outstanding);
4275 io_request = pqi_alloc_io_request(ctrl_info);
4277 put_unaligned_le16(io_request->index,
4278 &(((struct pqi_raid_path_request *)request)->request_id));
4280 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4281 ((struct pqi_raid_path_request *)request)->error_index =
4282 ((struct pqi_raid_path_request *)request)->request_id;
4284 iu_length = get_unaligned_le16(&request->iu_length) +
4285 PQI_REQUEST_HEADER_LENGTH;
4286 memcpy(io_request->iu, request, iu_length);
4288 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4289 io_request->context = &wait;
4291 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4294 pqi_ctrl_unbusy(ctrl_info);
4296 if (timeout_msecs == NO_TIMEOUT) {
4297 pqi_wait_for_completion_io(ctrl_info, &wait);
4299 if (!wait_for_completion_io_timeout(&wait,
4300 msecs_to_jiffies(timeout_msecs))) {
4301 dev_warn(&ctrl_info->pci_dev->dev,
4302 "command timed out\n");
4307 if (error_info) {
4308 if (io_request->error_info)
4309 memcpy(error_info, io_request->error_info, sizeof(*error_info));
4310 else
4311 memset(error_info, 0, sizeof(*error_info));
4312 } else if (rc == 0 && io_request->error_info) {
4313 rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
4316 pqi_free_io_request(io_request);
4318 atomic_dec(&ctrl_info->sync_cmds_outstanding);
4320 up(&ctrl_info->sync_request_sem);
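/*
 * Admin queue helpers: pqi_validate_admin_response() sanity-checks the IU
 * type, length, function code and status of a general admin response, and
 * pqi_submit_admin_request_synchronous() posts an admin request and polls
 * for (and validates) its response.
 */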
4325 static int pqi_validate_admin_response(
4326 struct pqi_general_admin_response *response, u8 expected_function_code)
4328 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4331 if (get_unaligned_le16(&response->header.iu_length) !=
4332 PQI_GENERAL_ADMIN_IU_LENGTH)
4335 if (response->function_code != expected_function_code)
4338 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4344 static int pqi_submit_admin_request_synchronous(
4345 struct pqi_ctrl_info *ctrl_info,
4346 struct pqi_general_admin_request *request,
4347 struct pqi_general_admin_response *response)
4351 pqi_submit_admin_request(ctrl_info, request);
4353 rc = pqi_poll_for_admin_response(ctrl_info, response);
4356 rc = pqi_validate_admin_response(response,
4357 request->function_code);
4362 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4365 struct pqi_general_admin_request request;
4366 struct pqi_general_admin_response response;
4367 struct pqi_device_capability *capability;
4368 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4370 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4374 memset(&request, 0, sizeof(request));
4376 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4377 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4378 &request.header.iu_length);
4379 request.function_code =
4380 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4381 put_unaligned_le32(sizeof(*capability),
4382 &request.data.report_device_capability.buffer_length);
4384 rc = pqi_map_single(ctrl_info->pci_dev,
4385 &request.data.report_device_capability.sg_descriptor,
4386 capability, sizeof(*capability),
4391 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
4393 pqi_pci_unmap(ctrl_info->pci_dev,
4394 &request.data.report_device_capability.sg_descriptor, 1,
4400 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4405 ctrl_info->max_inbound_queues =
4406 get_unaligned_le16(&capability->max_inbound_queues);
4407 ctrl_info->max_elements_per_iq =
4408 get_unaligned_le16(&capability->max_elements_per_iq);
4409 ctrl_info->max_iq_element_length =
4410 get_unaligned_le16(&capability->max_iq_element_length)
4411 * 16;
4412 ctrl_info->max_outbound_queues =
4413 get_unaligned_le16(&capability->max_outbound_queues);
4414 ctrl_info->max_elements_per_oq =
4415 get_unaligned_le16(&capability->max_elements_per_oq);
4416 ctrl_info->max_oq_element_length =
4417 get_unaligned_le16(&capability->max_oq_element_length)
4418 * 16;
4420 sop_iu_layer_descriptor =
4421 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4423 ctrl_info->max_inbound_iu_length_per_firmware =
4424 get_unaligned_le16(
4425 &sop_iu_layer_descriptor->max_inbound_iu_length);
4426 ctrl_info->inbound_spanning_supported =
4427 sop_iu_layer_descriptor->inbound_spanning_supported;
4428 ctrl_info->outbound_spanning_supported =
4429 sop_iu_layer_descriptor->outbound_spanning_supported;
4437 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4439 if (ctrl_info->max_iq_element_length <
4440 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4441 dev_err(&ctrl_info->pci_dev->dev,
4442 "max. inbound queue element length of %d is less than the required length of %d\n",
4443 ctrl_info->max_iq_element_length,
4444 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4448 if (ctrl_info->max_oq_element_length <
4449 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4450 dev_err(&ctrl_info->pci_dev->dev,
4451 "max. outbound queue element length of %d is less than the required length of %d\n",
4452 ctrl_info->max_oq_element_length,
4453 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4457 if (ctrl_info->max_inbound_iu_length_per_firmware <
4458 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4459 dev_err(&ctrl_info->pci_dev->dev,
4460 "max. inbound IU length of %u is less than the min. required length of %d\n",
4461 ctrl_info->max_inbound_iu_length_per_firmware,
4462 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4466 if (!ctrl_info->inbound_spanning_supported) {
4467 dev_err(&ctrl_info->pci_dev->dev,
4468 "the controller does not support inbound spanning\n");
4472 if (ctrl_info->outbound_spanning_supported) {
4473 dev_err(&ctrl_info->pci_dev->dev,
4474 "the controller supports outbound spanning but this driver does not\n");
4481 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4484 struct pqi_event_queue *event_queue;
4485 struct pqi_general_admin_request request;
4486 struct pqi_general_admin_response response;
4488 event_queue = &ctrl_info->event_queue;
4490 /*
4491 * Create OQ (Outbound Queue - device to host queue) dedicated to
4492 * event notifications.
4493 */
4494 memset(&request, 0, sizeof(request));
4495 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4496 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4497 &request.header.iu_length);
4498 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4499 put_unaligned_le16(event_queue->oq_id,
4500 &request.data.create_operational_oq.queue_id);
4501 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4502 &request.data.create_operational_oq.element_array_addr);
4503 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4504 &request.data.create_operational_oq.pi_addr);
4505 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4506 &request.data.create_operational_oq.num_elements);
4507 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4508 &request.data.create_operational_oq.element_length);
4509 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4510 put_unaligned_le16(event_queue->int_msg_num,
4511 &request.data.create_operational_oq.int_msg_num);
4513 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4518 event_queue->oq_ci = ctrl_info->iomem_base +
4519 PQI_DEVICE_REGISTERS_OFFSET +
4520 get_unaligned_le64(
4521 &response.data.create_operational_oq.oq_ci_offset);
4526 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4527 unsigned int group_number)
4530 struct pqi_queue_group *queue_group;
4531 struct pqi_general_admin_request request;
4532 struct pqi_general_admin_response response;
4534 queue_group = &ctrl_info->queue_groups[group_number];
4536 /*
4537 * Create IQ (Inbound Queue - host to device queue) for the
4538 * RAID path.
4539 */
4540 memset(&request, 0, sizeof(request));
4541 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4542 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4543 &request.header.iu_length);
4544 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4545 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4546 &request.data.create_operational_iq.queue_id);
4547 put_unaligned_le64(
4548 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4549 &request.data.create_operational_iq.element_array_addr);
4550 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4551 &request.data.create_operational_iq.ci_addr);
4552 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4553 &request.data.create_operational_iq.num_elements);
4554 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4555 &request.data.create_operational_iq.element_length);
4556 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4558 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4561 dev_err(&ctrl_info->pci_dev->dev,
4562 "error creating inbound RAID queue\n");
4566 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4567 PQI_DEVICE_REGISTERS_OFFSET +
4568 get_unaligned_le64(
4569 &response.data.create_operational_iq.iq_pi_offset);
4571 /*
4572 * Create IQ (Inbound Queue - host to device queue) for the
4573 * Advanced I/O (AIO) path.
4574 */
4575 memset(&request, 0, sizeof(request));
4576 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4577 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4578 &request.header.iu_length);
4579 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4580 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4581 &request.data.create_operational_iq.queue_id);
4582 put_unaligned_le64((u64)queue_group->
4583 iq_element_array_bus_addr[AIO_PATH],
4584 &request.data.create_operational_iq.element_array_addr);
4585 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4586 &request.data.create_operational_iq.ci_addr);
4587 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4588 &request.data.create_operational_iq.num_elements);
4589 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4590 &request.data.create_operational_iq.element_length);
4591 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4593 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4596 dev_err(&ctrl_info->pci_dev->dev,
4597 "error creating inbound AIO queue\n");
4601 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4602 PQI_DEVICE_REGISTERS_OFFSET +
4603 get_unaligned_le64(
4604 &response.data.create_operational_iq.iq_pi_offset);
4606 /*
4607 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4608 * assumed to be for RAID path I/O unless we change the queue's
4609 * property.
4610 */
4611 memset(&request, 0, sizeof(request));
4612 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4613 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4614 &request.header.iu_length);
4615 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4616 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4617 &request.data.change_operational_iq_properties.queue_id);
4618 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4619 &request.data.change_operational_iq_properties.vendor_specific);
4621 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4624 dev_err(&ctrl_info->pci_dev->dev,
4625 "error changing queue property\n");
4629 /*
4630 * Create OQ (Outbound Queue - device to host queue).
4631 */
4632 memset(&request, 0, sizeof(request));
4633 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4634 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4635 &request.header.iu_length);
4636 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4637 put_unaligned_le16(queue_group->oq_id,
4638 &request.data.create_operational_oq.queue_id);
4639 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4640 &request.data.create_operational_oq.element_array_addr);
4641 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4642 &request.data.create_operational_oq.pi_addr);
4643 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4644 &request.data.create_operational_oq.num_elements);
4645 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4646 &request.data.create_operational_oq.element_length);
4647 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4648 put_unaligned_le16(queue_group->int_msg_num,
4649 &request.data.create_operational_oq.int_msg_num);
4651 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4654 dev_err(&ctrl_info->pci_dev->dev,
4655 "error creating outbound queue\n");
4659 queue_group->oq_ci = ctrl_info->iomem_base +
4660 PQI_DEVICE_REGISTERS_OFFSET +
4661 get_unaligned_le64(
4662 &response.data.create_operational_oq.oq_ci_offset);
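/*
 * Create the event queue and then one queue group (a RAID-path IQ, an
 * AIO-path IQ and an OQ) for each of the num_queue_groups groups sized in
 * pqi_calculate_queue_resources().
 */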
4667 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4672 rc = pqi_create_event_queue(ctrl_info);
4674 dev_err(&ctrl_info->pci_dev->dev,
4675 "error creating event queue\n");
4679 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4680 rc = pqi_create_queue_group(ctrl_info, i);
4682 dev_err(&ctrl_info->pci_dev->dev,
4683 "error creating queue group number %u/%u\n",
4684 i, ctrl_info->num_queue_groups);
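/*
 * Event configuration is a two-step exchange: read the controller's event
 * configuration, point every supported event type at our event queue's
 * OQ ID (or 0 to disable it), then write the configuration back.
 */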
4692 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4693 (offsetof(struct pqi_event_config, descriptors) + \
4694 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4696 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4701 struct pqi_event_config *event_config;
4702 struct pqi_event_descriptor *event_descriptor;
4703 struct pqi_general_management_request request;
4705 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4710 memset(&request, 0, sizeof(request));
4712 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4713 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4714 data.report_event_configuration.sg_descriptors[1]) -
4715 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4716 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4717 &request.data.report_event_configuration.buffer_length);
4719 rc = pqi_map_single(ctrl_info->pci_dev,
4720 request.data.report_event_configuration.sg_descriptors,
4721 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4726 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4727 0, NULL, NO_TIMEOUT);
4729 pqi_pci_unmap(ctrl_info->pci_dev,
4730 request.data.report_event_configuration.sg_descriptors, 1,
4736 for (i = 0; i < event_config->num_event_descriptors; i++) {
4737 event_descriptor = &event_config->descriptors[i];
4738 if (enable_events &&
4739 pqi_is_supported_event(event_descriptor->event_type))
4740 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4741 &event_descriptor->oq_id);
4743 put_unaligned_le16(0, &event_descriptor->oq_id);
4746 memset(&request, 0, sizeof(request));
4748 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4749 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4750 data.report_event_configuration.sg_descriptors[1]) -
4751 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4752 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4753 &request.data.report_event_configuration.buffer_length);
4755 rc = pqi_map_single(ctrl_info->pci_dev,
4756 request.data.report_event_configuration.sg_descriptors,
4757 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4762 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4765 pqi_pci_unmap(ctrl_info->pci_dev,
4766 request.data.report_event_configuration.sg_descriptors, 1,
4770 kfree(event_config);
4775 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4777 return pqi_configure_events(ctrl_info, true);
4780 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4782 return pqi_configure_events(ctrl_info, false);
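/*
 * I/O request pool: one pqi_io_request per I/O slot, each with a kmalloc'ed
 * IU buffer and a DMA-coherent scatter-gather chain buffer.  The alloc and
 * free routines below must stay in sync with each other.
 */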
4785 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4789 size_t sg_chain_buffer_length;
4790 struct pqi_io_request *io_request;
4792 if (!ctrl_info->io_request_pool)
4795 dev = &ctrl_info->pci_dev->dev;
4796 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4797 io_request = ctrl_info->io_request_pool;
4799 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4800 kfree(io_request->iu);
4801 if (!io_request->sg_chain_buffer)
4803 dma_free_coherent(dev, sg_chain_buffer_length,
4804 io_request->sg_chain_buffer,
4805 io_request->sg_chain_buffer_dma_handle);
4809 kfree(ctrl_info->io_request_pool);
4810 ctrl_info->io_request_pool = NULL;
4813 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4815 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
4816 ctrl_info->error_buffer_length,
4817 &ctrl_info->error_buffer_dma_handle,
4819 if (!ctrl_info->error_buffer)
4825 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4828 void *sg_chain_buffer;
4829 size_t sg_chain_buffer_length;
4830 dma_addr_t sg_chain_buffer_dma_handle;
4832 struct pqi_io_request *io_request;
4834 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
4835 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4837 if (!ctrl_info->io_request_pool) {
4838 dev_err(&ctrl_info->pci_dev->dev,
4839 "failed to allocate I/O request pool\n");
4843 dev = &ctrl_info->pci_dev->dev;
4844 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4845 io_request = ctrl_info->io_request_pool;
4847 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4848 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4850 if (!io_request->iu) {
4851 dev_err(&ctrl_info->pci_dev->dev,
4852 "failed to allocate IU buffers\n");
4856 sg_chain_buffer = dma_alloc_coherent(dev,
4857 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4860 if (!sg_chain_buffer) {
4861 dev_err(&ctrl_info->pci_dev->dev,
4862 "failed to allocate PQI scatter-gather chain buffers\n");
4866 io_request->index = i;
4867 io_request->sg_chain_buffer = sg_chain_buffer;
4868 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
4875 pqi_free_all_io_requests(ctrl_info);
4880 /*
4881 * Calculate required resources that are sized based on max. outstanding
4882 * requests and max. transfer size.
4883 */
4885 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4887 u32 max_transfer_size;
4890 ctrl_info->scsi_ml_can_queue =
4891 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4892 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4894 ctrl_info->error_buffer_length =
4895 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4898 max_transfer_size = min(ctrl_info->max_transfer_size,
4899 PQI_MAX_TRANSFER_SIZE_KDUMP);
4901 max_transfer_size = min(ctrl_info->max_transfer_size,
4902 PQI_MAX_TRANSFER_SIZE);
4904 max_sg_entries = max_transfer_size / PAGE_SIZE;
4906 /* +1 to cover when the buffer is not page-aligned. */
4907 max_sg_entries++;
4909 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4911 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4913 ctrl_info->sg_chain_buffer_length =
4914 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4915 PQI_EXTRA_SGL_MEMORY;
4916 ctrl_info->sg_tablesize = max_sg_entries;
4917 ctrl_info->max_sectors = max_transfer_size / 512;
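/*
 * Queue sizing: the number of queue groups is bounded by the number of
 * online CPUs, the MSI-X vectors available, and the firmware's queue limits
 * reported by REPORT DEVICE CAPABILITY.  Illustrative example (assumed
 * values, not taken from this source): with 16 online CPUs, 16 MSI-X
 * vectors, max_inbound_queues = 128 and max_outbound_queues = 65, this
 * works out to min(16, 16, min(128 / 2, 65 - 1)) = 16 queue groups.
 */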
4920 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4922 int num_queue_groups;
4923 u16 num_elements_per_iq;
4924 u16 num_elements_per_oq;
4926 if (reset_devices) {
4927 num_queue_groups = 1;
4930 int max_queue_groups;
4932 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4933 ctrl_info->max_outbound_queues - 1);
4934 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4936 num_cpus = num_online_cpus();
4937 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4938 num_queue_groups = min(num_queue_groups, max_queue_groups);
4941 ctrl_info->num_queue_groups = num_queue_groups;
4942 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
4944 /*
4945 * Make sure that the max. inbound IU length is an even multiple
4946 * of our inbound element length.
4947 */
4948 ctrl_info->max_inbound_iu_length =
4949 (ctrl_info->max_inbound_iu_length_per_firmware /
4950 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4951 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4953 num_elements_per_iq =
4954 (ctrl_info->max_inbound_iu_length /
4955 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4957 /* Add one because one element in each queue is unusable. */
4958 num_elements_per_iq++;
4960 num_elements_per_iq = min(num_elements_per_iq,
4961 ctrl_info->max_elements_per_iq);
4963 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4964 num_elements_per_oq = min(num_elements_per_oq,
4965 ctrl_info->max_elements_per_oq);
4967 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4968 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4970 ctrl_info->max_sg_per_iu =
4971 ((ctrl_info->max_inbound_iu_length -
4972 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4973 sizeof(struct pqi_sg_descriptor)) +
4974 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4976 ctrl_info->max_sg_per_r56_iu =
4977 ((ctrl_info->max_inbound_iu_length -
4978 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4979 sizeof(struct pqi_sg_descriptor)) +
4980 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
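/*
 * Scatter-gather list construction: up to max_sg_per_iu descriptors are
 * embedded in the IU itself; if the command needs more, the last embedded
 * slot becomes a CISS_SG_CHAIN descriptor pointing at the request's
 * preallocated chain buffer, and the final descriptor is flagged
 * CISS_SG_LAST.
 */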
4983 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
4984 struct scatterlist *sg)
4986 u64 address = (u64)sg_dma_address(sg);
4987 unsigned int length = sg_dma_len(sg);
4989 put_unaligned_le64(address, &sg_descriptor->address);
4990 put_unaligned_le32(length, &sg_descriptor->length);
4991 put_unaligned_le32(0, &sg_descriptor->flags);
4994 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
4995 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
4996 int max_sg_per_iu, bool *chained)
4999 unsigned int num_sg_in_iu;
5004 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */
5007 pqi_set_sg_descriptor(sg_descriptor, sg);
5014 if (i == max_sg_per_iu) {
5015 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
5016 &sg_descriptor->address);
5017 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
5018 &sg_descriptor->length);
5019 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5022 sg_descriptor = io_request->sg_chain_buffer;
5027 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
5029 return num_sg_in_iu;
5032 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5033 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
5034 struct pqi_io_request *io_request)
5039 unsigned int num_sg_in_iu;
5040 struct scatterlist *sg;
5041 struct pqi_sg_descriptor *sg_descriptor;
5043 sg_count = scsi_dma_map(scmd);
5047 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5048 PQI_REQUEST_HEADER_LENGTH;
5053 sg = scsi_sglist(scmd);
5054 sg_descriptor = request->sg_descriptors;
5056 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5057 ctrl_info->max_sg_per_iu, &chained);
5059 request->partial = chained;
5060 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5063 put_unaligned_le16(iu_length, &request->header.iu_length);
5068 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5069 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5070 struct pqi_io_request *io_request)
5075 unsigned int num_sg_in_iu;
5076 struct scatterlist *sg;
5077 struct pqi_sg_descriptor *sg_descriptor;
5079 sg_count = scsi_dma_map(scmd);
5083 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5084 PQI_REQUEST_HEADER_LENGTH;
5090 sg = scsi_sglist(scmd);
5091 sg_descriptor = request->sg_descriptors;
5093 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5094 ctrl_info->max_sg_per_iu, &chained);
5096 request->partial = chained;
5097 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5100 put_unaligned_le16(iu_length, &request->header.iu_length);
5101 request->num_sg_descriptors = num_sg_in_iu;
5106 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5107 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5108 struct pqi_io_request *io_request)
5113 unsigned int num_sg_in_iu;
5114 struct scatterlist *sg;
5115 struct pqi_sg_descriptor *sg_descriptor;
5117 sg_count = scsi_dma_map(scmd);
5121 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5122 PQI_REQUEST_HEADER_LENGTH;
5125 if (sg_count != 0) {
5126 sg = scsi_sglist(scmd);
5127 sg_descriptor = request->sg_descriptors;
5129 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5130 ctrl_info->max_sg_per_r56_iu, &chained);
5132 request->partial = chained;
5133 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5136 put_unaligned_le16(iu_length, &request->header.iu_length);
5137 request->num_sg_descriptors = num_sg_in_iu;
5142 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5143 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5144 struct pqi_io_request *io_request)
5149 unsigned int num_sg_in_iu;
5150 struct scatterlist *sg;
5151 struct pqi_sg_descriptor *sg_descriptor;
5153 sg_count = scsi_dma_map(scmd);
5157 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5158 PQI_REQUEST_HEADER_LENGTH;
5164 sg = scsi_sglist(scmd);
5165 sg_descriptor = request->sg_descriptors;
5167 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5168 ctrl_info->max_sg_per_iu, &chained);
5170 request->partial = chained;
5171 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5174 put_unaligned_le16(iu_length, &request->header.iu_length);
5175 request->num_sg_descriptors = num_sg_in_iu;
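/*
 * RAID path submission: build a pqi_raid_path_request around the SCSI
 * command (CDB, data direction, SG list) and post it on the queue group's
 * RAID path; pqi_raid_io_complete() unmaps the data buffer and hands the
 * command back to the SCSI midlayer.
 */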
5180 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5183 struct scsi_cmnd *scmd;
5185 scmd = io_request->scmd;
5186 pqi_free_io_request(io_request);
5187 scsi_dma_unmap(scmd);
5188 pqi_scsi_done(scmd);
5191 static int pqi_raid_submit_scsi_cmd_with_io_request(
5192 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
5193 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5194 struct pqi_queue_group *queue_group)
5198 struct pqi_raid_path_request *request;
5200 io_request->io_complete_callback = pqi_raid_io_complete;
5201 io_request->scmd = scmd;
5203 request = io_request->iu;
5204 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5206 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5207 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5208 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5209 put_unaligned_le16(io_request->index, &request->request_id);
5210 request->error_index = request->request_id;
5211 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
5213 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5214 memcpy(request->cdb, scmd->cmnd, cdb_length);
5216 switch (cdb_length) {
5221 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5224 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
5227 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
5230 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
5234 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
5238 switch (scmd->sc_data_direction) {
5240 request->data_direction = SOP_READ_FLAG;
5242 case DMA_FROM_DEVICE:
5243 request->data_direction = SOP_WRITE_FLAG;
5246 request->data_direction = SOP_NO_DIRECTION_FLAG;
5248 case DMA_BIDIRECTIONAL:
5249 request->data_direction = SOP_BIDIRECTIONAL;
5252 dev_err(&ctrl_info->pci_dev->dev,
5253 "unknown data direction: %d\n",
5254 scmd->sc_data_direction);
5258 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5260 pqi_free_io_request(io_request);
5261 return SCSI_MLQUEUE_HOST_BUSY;
5264 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5269 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5270 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5271 struct pqi_queue_group *queue_group)
5273 struct pqi_io_request *io_request;
5275 io_request = pqi_alloc_io_request(ctrl_info);
5277 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5278 device, scmd, queue_group);
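/*
 * RAID bypass retry machinery: when an AIO (bypass) request fails in a way
 * that is worth retrying, it is queued on raid_bypass_retry_list and a
 * worker resubmits it down the normal RAID path.
 */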
5281 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
5283 if (!pqi_ctrl_blocked(ctrl_info))
5284 schedule_work(&ctrl_info->raid_bypass_retry_work);
5287 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5289 struct scsi_cmnd *scmd;
5290 struct pqi_scsi_dev *device;
5291 struct pqi_ctrl_info *ctrl_info;
5293 if (!io_request->raid_bypass)
5296 scmd = io_request->scmd;
5297 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5299 if (host_byte(scmd->result) == DID_NO_CONNECT)
5302 device = scmd->device->hostdata;
5303 if (pqi_device_offline(device))
5306 ctrl_info = shost_to_hba(scmd->device->host);
5307 if (pqi_ctrl_offline(ctrl_info))
5313 static inline void pqi_add_to_raid_bypass_retry_list(
5314 struct pqi_ctrl_info *ctrl_info,
5315 struct pqi_io_request *io_request, bool at_head)
5317 unsigned long flags;
5319 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5321 list_add(&io_request->request_list_entry,
5322 &ctrl_info->raid_bypass_retry_list);
5324 list_add_tail(&io_request->request_list_entry,
5325 &ctrl_info->raid_bypass_retry_list);
5326 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5329 static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
5332 struct scsi_cmnd *scmd;
5334 scmd = io_request->scmd;
5335 pqi_free_io_request(io_request);
5336 pqi_scsi_done(scmd);
5339 static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
5341 struct scsi_cmnd *scmd;
5342 struct pqi_ctrl_info *ctrl_info;
5344 io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
5345 scmd = io_request->scmd;
5347 ctrl_info = shost_to_hba(scmd->device->host);
5349 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
5350 pqi_schedule_bypass_retry(ctrl_info);
5353 static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
5355 struct scsi_cmnd *scmd;
5356 struct pqi_scsi_dev *device;
5357 struct pqi_ctrl_info *ctrl_info;
5358 struct pqi_queue_group *queue_group;
5360 scmd = io_request->scmd;
5361 device = scmd->device->hostdata;
5362 if (pqi_device_in_reset(device)) {
5363 pqi_free_io_request(io_request);
5364 set_host_byte(scmd, DID_RESET);
5365 pqi_scsi_done(scmd);
5369 ctrl_info = shost_to_hba(scmd->device->host);
5370 queue_group = io_request->queue_group;
5372 pqi_reinit_io_request(io_request);
5374 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5375 device, scmd, queue_group);
5378 static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
5379 struct pqi_ctrl_info *ctrl_info)
5381 unsigned long flags;
5382 struct pqi_io_request *io_request;
5384 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5385 io_request = list_first_entry_or_null(
5386 &ctrl_info->raid_bypass_retry_list,
5387 struct pqi_io_request, request_list_entry);
5389 list_del(&io_request->request_list_entry);
5390 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5395 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
5398 struct pqi_io_request *io_request;
5400 pqi_ctrl_busy(ctrl_info);
5403 if (pqi_ctrl_blocked(ctrl_info))
5405 io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
5408 rc = pqi_retry_raid_bypass(io_request);
5410 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
5412 pqi_schedule_bypass_retry(ctrl_info);
5417 pqi_ctrl_unbusy(ctrl_info);
5420 static void pqi_raid_bypass_retry_worker(struct work_struct *work)
5422 struct pqi_ctrl_info *ctrl_info;
5424 ctrl_info = container_of(work, struct pqi_ctrl_info,
5425 raid_bypass_retry_work);
5426 pqi_retry_raid_bypass_requests(ctrl_info);
5429 static void pqi_clear_all_queued_raid_bypass_retries(
5430 struct pqi_ctrl_info *ctrl_info)
5432 unsigned long flags;
5434 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5435 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
5436 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
5439 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5442 struct scsi_cmnd *scmd;
5444 scmd = io_request->scmd;
5445 scsi_dma_unmap(scmd);
5446 if (io_request->status == -EAGAIN)
5447 set_host_byte(scmd, DID_IMM_RETRY);
5448 else if (pqi_raid_bypass_retry_needed(io_request)) {
5449 pqi_queue_raid_bypass_retry(io_request);
5452 pqi_free_io_request(io_request);
5453 pqi_scsi_done(scmd);
5456 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5457 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5458 struct pqi_queue_group *queue_group)
5460 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5461 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
5464 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5465 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5466 unsigned int cdb_length, struct pqi_queue_group *queue_group,
5467 struct pqi_encryption_info *encryption_info, bool raid_bypass)
5470 struct pqi_io_request *io_request;
5471 struct pqi_aio_path_request *request;
5473 io_request = pqi_alloc_io_request(ctrl_info);
5474 io_request->io_complete_callback = pqi_aio_io_complete;
5475 io_request->scmd = scmd;
5476 io_request->raid_bypass = raid_bypass;
5478 request = io_request->iu;
5479 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5481 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5482 put_unaligned_le32(aio_handle, &request->nexus_id);
5483 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5484 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5485 put_unaligned_le16(io_request->index, &request->request_id);
5486 request->error_index = request->request_id;
5487 if (cdb_length > sizeof(request->cdb))
5488 cdb_length = sizeof(request->cdb);
5489 request->cdb_length = cdb_length;
5490 memcpy(request->cdb, cdb, cdb_length);
5492 switch (scmd->sc_data_direction) {
5494 request->data_direction = SOP_READ_FLAG;
5496 case DMA_FROM_DEVICE:
5497 request->data_direction = SOP_WRITE_FLAG;
5500 request->data_direction = SOP_NO_DIRECTION_FLAG;
5502 case DMA_BIDIRECTIONAL:
5503 request->data_direction = SOP_BIDIRECTIONAL;
5506 dev_err(&ctrl_info->pci_dev->dev,
5507 "unknown data direction: %d\n",
5508 scmd->sc_data_direction);
5512 if (encryption_info) {
5513 request->encryption_enable = true;
5514 put_unaligned_le16(encryption_info->data_encryption_key_index,
5515 &request->data_encryption_key_index);
5516 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5517 &request->encrypt_tweak_lower);
5518 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5519 &request->encrypt_tweak_upper);
5522 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5524 pqi_free_io_request(io_request);
5525 return SCSI_MLQUEUE_HOST_BUSY;
5528 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
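/*
 * RAID 1/5/6 write bypass: these requests carry the IT nexus handles of the
 * member drives (up to three mirrors for RAID 1; data plus P/Q parity for
 * RAID 5/6) and the optional encryption tweak, allowing the controller
 * firmware to perform the mirrored or parity write directly.
 */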
5533 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5534 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5535 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5536 struct pqi_scsi_dev_raid_map_data *rmd)
5539 struct pqi_io_request *io_request;
5540 struct pqi_aio_r1_path_request *r1_request;
5542 io_request = pqi_alloc_io_request(ctrl_info);
5543 io_request->io_complete_callback = pqi_aio_io_complete;
5544 io_request->scmd = scmd;
5545 io_request->raid_bypass = true;
5547 r1_request = io_request->iu;
5548 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5550 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
5551 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5552 r1_request->num_drives = rmd->num_it_nexus_entries;
5553 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5554 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5555 if (rmd->num_it_nexus_entries == 3)
5556 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5558 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5559 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5560 put_unaligned_le16(io_request->index, &r1_request->request_id);
5561 r1_request->error_index = r1_request->request_id;
5562 if (rmd->cdb_length > sizeof(r1_request->cdb))
5563 rmd->cdb_length = sizeof(r1_request->cdb);
5564 r1_request->cdb_length = rmd->cdb_length;
5565 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5567 /* The direction is always write. */
5568 r1_request->data_direction = SOP_READ_FLAG;
5570 if (encryption_info) {
5571 r1_request->encryption_enable = true;
5572 put_unaligned_le16(encryption_info->data_encryption_key_index,
5573 &r1_request->data_encryption_key_index);
5574 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5575 &r1_request->encrypt_tweak_lower);
5576 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5577 &r1_request->encrypt_tweak_upper);
5580 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5582 pqi_free_io_request(io_request);
5583 return SCSI_MLQUEUE_HOST_BUSY;
5586 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5591 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5592 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5593 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5594 struct pqi_scsi_dev_raid_map_data *rmd)
5597 struct pqi_io_request *io_request;
5598 struct pqi_aio_r56_path_request *r56_request;
5600 io_request = pqi_alloc_io_request(ctrl_info);
5601 io_request->io_complete_callback = pqi_aio_io_complete;
5602 io_request->scmd = scmd;
5603 io_request->raid_bypass = true;
5605 r56_request = io_request->iu;
5606 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5608 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5609 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5611 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5613 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5614 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5615 put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5616 if (rmd->raid_level == SA_RAID_6) {
5617 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5618 r56_request->xor_multiplier = rmd->xor_mult;
5620 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5621 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5622 put_unaligned_le64(rmd->row, &r56_request->row);
5624 put_unaligned_le16(io_request->index, &r56_request->request_id);
5625 r56_request->error_index = r56_request->request_id;
5627 if (rmd->cdb_length > sizeof(r56_request->cdb))
5628 rmd->cdb_length = sizeof(r56_request->cdb);
5629 r56_request->cdb_length = rmd->cdb_length;
5630 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5632 /* The direction is always write. */
5633 r56_request->data_direction = SOP_READ_FLAG;
5635 if (encryption_info) {
5636 r56_request->encryption_enable = true;
5637 put_unaligned_le16(encryption_info->data_encryption_key_index,
5638 &r56_request->data_encryption_key_index);
5639 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5640 &r56_request->encrypt_tweak_lower);
5641 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5642 &r56_request->encrypt_tweak_upper);
5645 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5647 pqi_free_io_request(io_request);
5648 return SCSI_MLQUEUE_HOST_BUSY;
5651 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5656 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5657 struct scsi_cmnd *scmd)
5661 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
5662 if (hw_queue > ctrl_info->max_hw_queue_index)
5663 hw_queue = 0;
5665 return hw_queue;
5668 /*
5669 * This function gets called just before we hand the completed SCSI request
5670 * back to the SML (SCSI midlayer).
5671 */
5673 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5675 struct pqi_scsi_dev *device;
5677 if (!scmd->device) {
5678 set_host_byte(scmd, DID_NO_CONNECT);
5682 device = scmd->device->hostdata;
5684 set_host_byte(scmd, DID_NO_CONNECT);
5688 atomic_dec(&device->scsi_cmds_outstanding);
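/*
 * queuecommand entry point: look up the target pqi_scsi_dev, pick a queue
 * group from the blk-mq hardware queue, and route the command down the RAID
 * path, the AIO path, or the RAID bypass path depending on device type and
 * bypass eligibility.
 */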
5691 static int pqi_scsi_queue_command(struct Scsi_Host *shost,
5692 struct scsi_cmnd *scmd)
5695 struct pqi_ctrl_info *ctrl_info;
5696 struct pqi_scsi_dev *device;
5698 struct pqi_queue_group *queue_group;
5701 device = scmd->device->hostdata;
5704 set_host_byte(scmd, DID_NO_CONNECT);
5705 pqi_scsi_done(scmd);
5709 atomic_inc(&device->scsi_cmds_outstanding);
5711 ctrl_info = shost_to_hba(shost);
5713 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
5714 set_host_byte(scmd, DID_NO_CONNECT);
5715 pqi_scsi_done(scmd);
5719 pqi_ctrl_busy(ctrl_info);
5720 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
5721 pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
5722 rc = SCSI_MLQUEUE_HOST_BUSY;
5726 /*
5727 * This is necessary because the SML doesn't zero out this field during
5728 * error recovery.
5729 */
5730 scmd->result = 0;
5732 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5733 queue_group = &ctrl_info->queue_groups[hw_queue];
5735 if (pqi_is_logical_device(device)) {
5736 raid_bypassed = false;
5737 if (device->raid_bypass_enabled &&
5738 !blk_rq_is_passthrough(scmd->request)) {
5739 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
5741 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
5742 raid_bypassed = true;
5743 atomic_inc(&device->raid_bypass_cnt);
5747 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5749 if (device->aio_enabled)
5750 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5752 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5756 pqi_ctrl_unbusy(ctrl_info);
5758 atomic_dec(&device->scsi_cmds_outstanding);
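/*
 * Quiesce helpers: drain the software request lists, wait for the inbound
 * queues to be consumed (iq_ci catching up to iq_pi), and fail or wait out
 * I/O queued for a device (or all devices) during resets and OFA.
 */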
5763 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
5764 struct pqi_queue_group *queue_group)
5767 unsigned long flags;
5770 for (path = 0; path < 2; path++) {
5773 &queue_group->submit_lock[path], flags);
5775 list_empty(&queue_group->request_list[path]);
5776 spin_unlock_irqrestore(
5777 &queue_group->submit_lock[path], flags);
5780 pqi_check_ctrl_health(ctrl_info);
5781 if (pqi_ctrl_offline(ctrl_info))
5783 usleep_range(1000, 2000);
5790 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5795 struct pqi_queue_group *queue_group;
5799 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5800 queue_group = &ctrl_info->queue_groups[i];
5802 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5806 for (path = 0; path < 2; path++) {
5807 iq_pi = queue_group->iq_pi_copy[path];
5810 iq_ci = readl(queue_group->iq_ci[path]);
5813 pqi_check_ctrl_health(ctrl_info);
5814 if (pqi_ctrl_offline(ctrl_info))
5816 usleep_range(1000, 2000);
5824 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5825 struct pqi_scsi_dev *device)
5829 struct pqi_queue_group *queue_group;
5830 unsigned long flags;
5831 struct pqi_io_request *io_request;
5832 struct pqi_io_request *next;
5833 struct scsi_cmnd *scmd;
5834 struct pqi_scsi_dev *scsi_device;
5836 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5837 queue_group = &ctrl_info->queue_groups[i];
5839 for (path = 0; path < 2; path++) {
5841 &queue_group->submit_lock[path], flags);
5843 list_for_each_entry_safe(io_request, next,
5844 &queue_group->request_list[path],
5845 request_list_entry) {
5847 scmd = io_request->scmd;
5851 scsi_device = scmd->device->hostdata;
5852 if (scsi_device != device)
5855 list_del(&io_request->request_list_entry);
5856 set_host_byte(scmd, DID_RESET);
5857 pqi_free_io_request(io_request);
5858 scsi_dma_unmap(scmd);
5859 pqi_scsi_done(scmd);
5862 spin_unlock_irqrestore(
5863 &queue_group->submit_lock[path], flags);
5868 static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
5872 struct pqi_queue_group *queue_group;
5873 unsigned long flags;
5874 struct pqi_io_request *io_request;
5875 struct pqi_io_request *next;
5876 struct scsi_cmnd *scmd;
5878 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5879 queue_group = &ctrl_info->queue_groups[i];
5881 for (path = 0; path < 2; path++) {
5882 spin_lock_irqsave(&queue_group->submit_lock[path],
5885 list_for_each_entry_safe(io_request, next,
5886 &queue_group->request_list[path],
5887 request_list_entry) {
5889 scmd = io_request->scmd;
5893 list_del(&io_request->request_list_entry);
5894 set_host_byte(scmd, DID_RESET);
5895 pqi_free_io_request(io_request);
5896 scsi_dma_unmap(scmd);
5897 pqi_scsi_done(scmd);
5900 spin_unlock_irqrestore(
5901 &queue_group->submit_lock[path], flags);
5906 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5907 struct pqi_scsi_dev *device, unsigned long timeout_secs)
5909 unsigned long timeout;
5911 timeout = (timeout_secs * PQI_HZ) + jiffies;
5913 while (atomic_read(&device->scsi_cmds_outstanding)) {
5914 pqi_check_ctrl_health(ctrl_info);
5915 if (pqi_ctrl_offline(ctrl_info))
5917 if (timeout_secs != NO_TIMEOUT) {
5918 if (time_after(jiffies, timeout)) {
5919 dev_err(&ctrl_info->pci_dev->dev,
5920 "timed out waiting for pending IO\n");
5924 usleep_range(1000, 2000);
5930 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5931 unsigned long timeout_secs)
5934 unsigned long flags;
5935 unsigned long timeout;
5936 struct pqi_scsi_dev *device;
5938 timeout = (timeout_secs * PQI_HZ) + jiffies;
5942 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5943 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5944 scsi_device_list_entry) {
5945 if (atomic_read(&device->scsi_cmds_outstanding)) {
5950 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5956 pqi_check_ctrl_health(ctrl_info);
5957 if (pqi_ctrl_offline(ctrl_info))
5960 if (timeout_secs != NO_TIMEOUT) {
5961 if (time_after(jiffies, timeout)) {
5962 dev_err(&ctrl_info->pci_dev->dev,
5963 "timed out waiting for pending IO\n");
5967 usleep_range(1000, 2000);
5973 static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
5975 while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
5976 pqi_check_ctrl_health(ctrl_info);
5977 if (pqi_ctrl_offline(ctrl_info))
5979 usleep_range(1000, 2000);
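/*
 * LUN reset: a SOP task management IU is posted on the RAID path of the
 * default queue group and its completion is polled in
 * PQI_LUN_RESET_POLL_COMPLETION_SECS slices so controller health can be
 * re-checked; the reset itself is retried up to PQI_LUN_RESET_RETRIES times
 * before pending I/O is waited out.
 */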
5985 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
5988 struct completion *waiting = context;
5993 #define PQI_LUN_RESET_TIMEOUT_SECS 30
5994 #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
5996 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5997 struct pqi_scsi_dev *device, struct completion *wait)
6002 if (wait_for_completion_io_timeout(wait,
6003 PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
6008 pqi_check_ctrl_health(ctrl_info);
6009 if (pqi_ctrl_offline(ctrl_info)) {
6018 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
6019 struct pqi_scsi_dev *device)
6022 struct pqi_io_request *io_request;
6023 DECLARE_COMPLETION_ONSTACK(wait);
6024 struct pqi_task_management_request *request;
6026 io_request = pqi_alloc_io_request(ctrl_info);
6027 io_request->io_complete_callback = pqi_lun_reset_complete;
6028 io_request->context = &wait;
6030 request = io_request->iu;
6031 memset(request, 0, sizeof(*request));
6033 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6034 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6035 &request->header.iu_length);
6036 put_unaligned_le16(io_request->index, &request->request_id);
6037 memcpy(request->lun_number, device->scsi3addr,
6038 sizeof(request->lun_number));
6039 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
6040 if (ctrl_info->tmf_iu_timeout_supported)
6041 put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS,
6042 &request->timeout);
6044 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6045 io_request);
6047 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
6049 rc = io_request->status;
6051 pqi_free_io_request(io_request);
6056 /* Performs a reset at the LUN level. */
6058 #define PQI_LUN_RESET_RETRIES 3
6059 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
6060 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120
6062 static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
6063 struct pqi_scsi_dev *device)
6066 unsigned int retries;
6067 unsigned long timeout_secs;
6069 for (retries = 0;;) {
6070 rc = pqi_lun_reset(ctrl_info, device);
6071 if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
6073 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6076 timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;
6078 rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
6080 return rc == 0 ? SUCCESS : FAILED;
6083 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
6084 struct pqi_scsi_dev *device)
6088 mutex_lock(&ctrl_info->lun_reset_mutex);
6090 pqi_ctrl_block_requests(ctrl_info);
6091 pqi_ctrl_wait_until_quiesced(ctrl_info);
6092 pqi_fail_io_queued_for_device(ctrl_info, device);
6093 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
6094 pqi_device_reset_start(device);
6095 pqi_ctrl_unblock_requests(ctrl_info);
6100 rc = _pqi_device_reset(ctrl_info, device);
6102 pqi_device_reset_done(device);
6104 mutex_unlock(&ctrl_info->lun_reset_mutex);
6109 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6112 struct Scsi_Host *shost;
6113 struct pqi_ctrl_info *ctrl_info;
6114 struct pqi_scsi_dev *device;
6116 shost = scmd->device->host;
6117 ctrl_info = shost_to_hba(shost);
6118 device = scmd->device->hostdata;
6120 dev_err(&ctrl_info->pci_dev->dev,
6121 "resetting scsi %d:%d:%d:%d\n",
6122 shost->host_no, device->bus, device->target, device->lun);
6124 pqi_check_ctrl_health(ctrl_info);
6125 if (pqi_ctrl_offline(ctrl_info) ||
6126 pqi_device_reset_blocked(ctrl_info)) {
6131 pqi_wait_until_ofa_finished(ctrl_info);
6133 atomic_inc(&ctrl_info->sync_cmds_outstanding);
6134 rc = pqi_device_reset(ctrl_info, device);
6135 atomic_dec(&ctrl_info->sync_cmds_outstanding);
6138 dev_err(&ctrl_info->pci_dev->dev,
6139 "reset of scsi %d:%d:%d:%d: %s\n",
6140 shost->host_no, device->bus, device->target, device->lun,
6141 rc == SUCCESS ? "SUCCESS" : "FAILED");
6146 static int pqi_slave_alloc(struct scsi_device *sdev)
6148 struct pqi_scsi_dev *device;
6149 unsigned long flags;
6150 struct pqi_ctrl_info *ctrl_info;
6151 struct scsi_target *starget;
6152 struct sas_rphy *rphy;
6154 ctrl_info = shost_to_hba(sdev->host);
6156 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6158 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6159 starget = scsi_target(sdev);
6160 rphy = target_to_rphy(starget);
6161 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6163 device->target = sdev_id(sdev);
6164 device->lun = sdev->lun;
6165 device->target_lun_valid = true;
6168 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6169 sdev_id(sdev), sdev->lun);
6173 sdev->hostdata = device;
6174 device->sdev = sdev;
6175 if (device->queue_depth) {
6176 device->advertised_queue_depth = device->queue_depth;
6177 scsi_change_queue_depth(sdev,
6178 device->advertised_queue_depth);
6180 if (pqi_is_logical_device(device))
6181 pqi_disable_write_same(sdev);
6183 sdev->allow_restart = 1;
6186 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6191 static int pqi_map_queues(struct Scsi_Host *shost)
6193 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6195 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
6196 ctrl_info->pci_dev, 0);
6199 static int pqi_slave_configure(struct scsi_device *sdev)
6201 struct pqi_scsi_dev *device;
6203 device = sdev->hostdata;
6204 device->devtype = sdev->type;
6209 static void pqi_slave_destroy(struct scsi_device *sdev)
6211 unsigned long flags;
6212 struct pqi_scsi_dev *device;
6213 struct pqi_ctrl_info *ctrl_info;
6215 ctrl_info = shost_to_hba(sdev->host);
6217 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6219 device = sdev->hostdata;
6221 sdev->hostdata = NULL;
6222 if (!list_empty(&device->scsi_device_list_entry))
6223 list_del(&device->scsi_device_list_entry);
6226 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6229 pqi_dev_info(ctrl_info, "removed", device);
6230 pqi_free_device(device);
6234 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6236 struct pci_dev *pci_dev;
6237 u32 subsystem_vendor;
6238 u32 subsystem_device;
6239 cciss_pci_info_struct pciinfo;
6244 pci_dev = ctrl_info->pci_dev;
6246 pciinfo.domain = pci_domain_nr(pci_dev->bus);
6247 pciinfo.bus = pci_dev->bus->number;
6248 pciinfo.dev_fn = pci_dev->devfn;
6249 subsystem_vendor = pci_dev->subsystem_vendor;
6250 subsystem_device = pci_dev->subsystem_device;
6251 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6253 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
6259 static int pqi_getdrivver_ioctl(void __user *arg)
6266 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6267 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
6269 if (copy_to_user(arg, &version, sizeof(version)))
6275 struct ciss_error_info {
6278 size_t sense_data_length;
6281 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6282 struct ciss_error_info *ciss_error_info)
6284 int ciss_cmd_status;
6285 size_t sense_data_length;
6287 switch (pqi_error_info->data_out_result) {
6288 case PQI_DATA_IN_OUT_GOOD:
6289 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6291 case PQI_DATA_IN_OUT_UNDERFLOW:
6292 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6294 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6295 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6297 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6298 case PQI_DATA_IN_OUT_BUFFER_ERROR:
6299 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6300 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6301 case PQI_DATA_IN_OUT_ERROR:
6302 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6304 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6305 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6306 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6307 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6308 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6309 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6310 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6311 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6312 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6313 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6314 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6316 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6317 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6319 case PQI_DATA_IN_OUT_ABORTED:
6320 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6322 case PQI_DATA_IN_OUT_TIMEOUT:
6323 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6326 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6330 sense_data_length =
6331 get_unaligned_le16(&pqi_error_info->sense_data_length);
6332 if (sense_data_length == 0)
6333 sense_data_length =
6334 get_unaligned_le16(&pqi_error_info->response_data_length);
6335 if (sense_data_length)
6336 if (sense_data_length > sizeof(pqi_error_info->data))
6337 sense_data_length = sizeof(pqi_error_info->data);
6339 ciss_error_info->scsi_status = pqi_error_info->status;
6340 ciss_error_info->command_status = ciss_cmd_status;
6341 ciss_error_info->sense_data_length = sense_data_length;
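/*
 * CCISS_PASSTHRU: copy the user's IOCTL_Command_struct in, wrap the CDB in
 * a RAID path request (mapping the optional data buffer DMA_BIDIRECTIONAL),
 * submit it synchronously, then translate any PQI error information back
 * into the CISS error fields the caller expects.
 */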
6344 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6347 char *kernel_buffer = NULL;
6349 size_t sense_data_length;
6350 IOCTL_Command_struct iocommand;
6351 struct pqi_raid_path_request request;
6352 struct pqi_raid_error_info pqi_error_info;
6353 struct ciss_error_info ciss_error_info;
6355 if (pqi_ctrl_offline(ctrl_info))
6359 if (!capable(CAP_SYS_RAWIO))
6361 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6363 if (iocommand.buf_size < 1 &&
6364 iocommand.Request.Type.Direction != XFER_NONE)
6366 if (iocommand.Request.CDBLen > sizeof(request.cdb))
6368 if (iocommand.Request.Type.Type != TYPE_CMD)
6371 switch (iocommand.Request.Type.Direction) {
6375 case XFER_READ | XFER_WRITE:
6381 if (iocommand.buf_size > 0) {
6382 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6385 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6386 if (copy_from_user(kernel_buffer, iocommand.buf,
6387 iocommand.buf_size)) {
6392 memset(kernel_buffer, 0, iocommand.buf_size);
6396 memset(&request, 0, sizeof(request));
6398 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6399 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6400 PQI_REQUEST_HEADER_LENGTH;
6401 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6402 sizeof(request.lun_number));
6403 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6404 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6406 switch (iocommand.Request.Type.Direction) {
6408 request.data_direction = SOP_NO_DIRECTION_FLAG;
6411 request.data_direction = SOP_WRITE_FLAG;
6414 request.data_direction = SOP_READ_FLAG;
6416 case XFER_READ | XFER_WRITE:
6417 request.data_direction = SOP_BIDIRECTIONAL;
6421 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6423 if (iocommand.buf_size > 0) {
6424 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6426 rc = pqi_map_single(ctrl_info->pci_dev,
6427 &request.sg_descriptors[0], kernel_buffer,
6428 iocommand.buf_size, DMA_BIDIRECTIONAL);
6432 iu_length += sizeof(request.sg_descriptors[0]);
6435 put_unaligned_le16(iu_length, &request.header.iu_length);
6437 if (ctrl_info->raid_iu_timeout_supported)
6438 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6440 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6441 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
6443 if (iocommand.buf_size > 0)
6444 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6447 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6450 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6451 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6452 iocommand.error_info.CommandStatus =
6453 ciss_error_info.command_status;
6454 sense_data_length = ciss_error_info.sense_data_length;
6455 if (sense_data_length) {
6456 if (sense_data_length >
6457 sizeof(iocommand.error_info.SenseInfo))
6459 sizeof(iocommand.error_info.SenseInfo);
6460 memcpy(iocommand.error_info.SenseInfo,
6461 pqi_error_info.data, sense_data_length);
6462 iocommand.error_info.SenseLen = sense_data_length;
6466 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6471 if (rc == 0 && iocommand.buf_size > 0 &&
6472 (iocommand.Request.Type.Direction & XFER_READ)) {
6473 if (copy_to_user(iocommand.buf, kernel_buffer,
6474 iocommand.buf_size)) {
6480 kfree(kernel_buffer);
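/*
 * Top-level ioctl entry point: dispatches the legacy CCISS ioctls to the
 * rescan, PCI info, driver version, and passthrough handlers above.
 */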
6485 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6489 struct pqi_ctrl_info *ctrl_info;
6491 ctrl_info = shost_to_hba(sdev->host);
6493 if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
6497 case CCISS_DEREGDISK:
6498 case CCISS_REGNEWDISK:
6500 rc = pqi_scan_scsi_devices(ctrl_info);
6502 case CCISS_GETPCIINFO:
6503 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6505 case CCISS_GETDRIVVER:
6506 rc = pqi_getdrivver_ioctl(arg);
6508 case CCISS_PASSTHRU:
6509 rc = pqi_passthru_ioctl(ctrl_info, arg);
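/* Host-level sysfs attributes, exposed under /sys/class/scsi_host/hostN/. */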
6519 static ssize_t pqi_firmware_version_show(struct device *dev,
6520 struct device_attribute *attr, char *buffer)
6522 struct Scsi_Host *shost;
6523 struct pqi_ctrl_info *ctrl_info;
6525 shost = class_to_shost(dev);
6526 ctrl_info = shost_to_hba(shost);
6528 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6531 static ssize_t pqi_driver_version_show(struct device *dev,
6532 struct device_attribute *attr, char *buffer)
6534 return snprintf(buffer, PAGE_SIZE, "%s\n",
6535 DRIVER_VERSION BUILD_TIMESTAMP);
6538 static ssize_t pqi_serial_number_show(struct device *dev,
6539 struct device_attribute *attr, char *buffer)
6541 struct Scsi_Host *shost;
6542 struct pqi_ctrl_info *ctrl_info;
6544 shost = class_to_shost(dev);
6545 ctrl_info = shost_to_hba(shost);
6547 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6550 static ssize_t pqi_model_show(struct device *dev,
6551 struct device_attribute *attr, char *buffer)
6553 struct Scsi_Host *shost;
6554 struct pqi_ctrl_info *ctrl_info;
6556 shost = class_to_shost(dev);
6557 ctrl_info = shost_to_hba(shost);
6559 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6562 static ssize_t pqi_vendor_show(struct device *dev,
6563 struct device_attribute *attr, char *buffer)
6565 struct Scsi_Host *shost;
6566 struct pqi_ctrl_info *ctrl_info;
6568 shost = class_to_shost(dev);
6569 ctrl_info = shost_to_hba(shost);
6571 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6574 static ssize_t pqi_host_rescan_store(struct device *dev,
6575 struct device_attribute *attr, const char *buffer, size_t count)
6577 struct Scsi_Host *shost = class_to_shost(dev);
6579 pqi_scan_start(shost);
6584 static ssize_t pqi_lockup_action_show(struct device *dev,
6585 struct device_attribute *attr, char *buffer)
6590 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6591 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6592 count += scnprintf(buffer + count, PAGE_SIZE - count,
6593 "[%s] ", pqi_lockup_actions[i].name);
6595 count += scnprintf(buffer + count, PAGE_SIZE - count,
6596 "%s ", pqi_lockup_actions[i].name);
6599 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6604 static ssize_t pqi_lockup_action_store(struct device *dev,
6605 struct device_attribute *attr, const char *buffer, size_t count)
6609 char action_name_buffer[32];
6611 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6612 action_name = strstrip(action_name_buffer);
6614 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6615 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6616 pqi_lockup_action = pqi_lockup_actions[i].action;
6624 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
6625 struct device_attribute *attr, char *buffer)
6627 struct Scsi_Host *shost = class_to_shost(dev);
6628 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6630 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
6633 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
6634 struct device_attribute *attr, const char *buffer, size_t count)
6636 struct Scsi_Host *shost = class_to_shost(dev);
6637 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6638 u8 set_r5_writes = 0;
6640 if (kstrtou8(buffer, 0, &set_r5_writes))
6643 if (set_r5_writes > 0)
6646 ctrl_info->enable_r5_writes = set_r5_writes;
6651 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
6652 struct device_attribute *attr, char *buffer)
6654 struct Scsi_Host *shost = class_to_shost(dev);
6655 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6657 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
6660 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
6661 struct device_attribute *attr, const char *buffer, size_t count)
6663 struct Scsi_Host *shost = class_to_shost(dev);
6664 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6665 u8 set_r6_writes = 0;
6667 if (kstrtou8(buffer, 0, &set_r6_writes))
6670 if (set_r6_writes > 0)
6673 ctrl_info->enable_r6_writes = set_r6_writes;
6678 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6679 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6680 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6681 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6682 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
6683 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6684 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
6685 pqi_lockup_action_store);
6686 static DEVICE_ATTR(enable_r5_writes, 0644,
6687 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
6688 static DEVICE_ATTR(enable_r6_writes, 0644,
6689 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
6691 static struct device_attribute *pqi_shost_attrs[] = {
6692 &dev_attr_driver_version,
6693 &dev_attr_firmware_version,
6695 &dev_attr_serial_number,
6698 &dev_attr_lockup_action,
6699 &dev_attr_enable_r5_writes,
6700 &dev_attr_enable_r6_writes,
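/* Per-device sysfs attributes, exposed under each SCSI device's sysfs directory. */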
6704 static ssize_t pqi_unique_id_show(struct device *dev,
6705 struct device_attribute *attr, char *buffer)
6707 struct pqi_ctrl_info *ctrl_info;
6708 struct scsi_device *sdev;
6709 struct pqi_scsi_dev *device;
6710 unsigned long flags;
6713 sdev = to_scsi_device(dev);
6714 ctrl_info = shost_to_hba(sdev->host);
6716 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6718 device = sdev->hostdata;
6720 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6724 if (device->is_physical_device) {
6725 memset(unique_id, 0, 8);
6726 memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
6728 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
6731 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6733 return snprintf(buffer, PAGE_SIZE,
6734 "%02X%02X%02X%02X%02X%02X%02X%02X"
6735 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
6736 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
6737 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
6738 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
6739 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
6742 static ssize_t pqi_lunid_show(struct device *dev,
6743 struct device_attribute *attr, char *buffer)
6745 struct pqi_ctrl_info *ctrl_info;
6746 struct scsi_device *sdev;
6747 struct pqi_scsi_dev *device;
6748 unsigned long flags;
6751 sdev = to_scsi_device(dev);
6752 ctrl_info = shost_to_hba(sdev->host);
6754 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6756 device = sdev->hostdata;
6758 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6762 memcpy(lunid, device->scsi3addr, sizeof(lunid));
6764 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6766 return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
6771 static ssize_t pqi_path_info_show(struct device *dev,
6772 struct device_attribute *attr, char *buf)
6774 struct pqi_ctrl_info *ctrl_info;
6775 struct scsi_device *sdev;
6776 struct pqi_scsi_dev *device;
6777 unsigned long flags;
6784 u8 phys_connector[2];
6786 sdev = to_scsi_device(dev);
6787 ctrl_info = shost_to_hba(sdev->host);
6789 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6791 device = sdev->hostdata;
6793 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6798 for (i = 0; i < MAX_PATHS; i++) {
6799 path_map_index = 1 << i;
6800 if (i == device->active_path_index)
6802 else if (device->path_map & path_map_index)
6803 active = "Inactive";
6807 output_len += scnprintf(buf + output_len,
6808 PAGE_SIZE - output_len,
6809 "[%d:%d:%d:%d] %20.20s ",
6810 ctrl_info->scsi_host->host_no,
6811 device->bus, device->target,
6813 scsi_device_type(device->devtype));
6815 if (device->devtype == TYPE_RAID ||
6816 pqi_is_logical_device(device))
6819 memcpy(&phys_connector, &device->phys_connector[i],
6820 sizeof(phys_connector));
6821 if (phys_connector[0] < '0')
6822 phys_connector[0] = '0';
6823 if (phys_connector[1] < '0')
6824 phys_connector[1] = '0';
6826 output_len += scnprintf(buf + output_len,
6827 PAGE_SIZE - output_len,
6828 "PORT: %.2s ", phys_connector);
6830 box = device->box[i];
6831 if (box != 0 && box != 0xFF)
6832 output_len += scnprintf(buf + output_len,
6833 PAGE_SIZE - output_len,
6836 if ((device->devtype == TYPE_DISK ||
6837 device->devtype == TYPE_ZBC) &&
6838 pqi_expose_device(device))
6839 output_len += scnprintf(buf + output_len,
6840 PAGE_SIZE - output_len,
6844 output_len += scnprintf(buf + output_len,
6845 PAGE_SIZE - output_len,
6849 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6854 static ssize_t pqi_sas_address_show(struct device *dev,
6855 struct device_attribute *attr, char *buffer)
6857 struct pqi_ctrl_info *ctrl_info;
6858 struct scsi_device *sdev;
6859 struct pqi_scsi_dev *device;
6860 unsigned long flags;
6863 sdev = to_scsi_device(dev);
6864 ctrl_info = shost_to_hba(sdev->host);
6866 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6868 device = sdev->hostdata;
6869 if (!device || !pqi_is_device_with_sas_address(device)) {
6870 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6874 sas_address = device->sas_address;
6876 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6878 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
6881 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
6882 struct device_attribute *attr, char *buffer)
6884 struct pqi_ctrl_info *ctrl_info;
6885 struct scsi_device *sdev;
6886 struct pqi_scsi_dev *device;
6887 unsigned long flags;
6889 sdev = to_scsi_device(dev);
6890 ctrl_info = shost_to_hba(sdev->host);
6892 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6894 device = sdev->hostdata;
6896 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6900 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
6904 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6909 static ssize_t pqi_raid_level_show(struct device *dev,
6910 struct device_attribute *attr, char *buffer)
6912 struct pqi_ctrl_info *ctrl_info;
6913 struct scsi_device *sdev;
6914 struct pqi_scsi_dev *device;
6915 unsigned long flags;
6918 sdev = to_scsi_device(dev);
6919 ctrl_info = shost_to_hba(sdev->host);
6921 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6923 device = sdev->hostdata;
6925 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6929 if (pqi_is_logical_device(device))
6930 raid_level = pqi_raid_level_to_string(device->raid_level);
6934 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6936 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
6939 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
6940 struct device_attribute *attr, char *buffer)
6942 struct pqi_ctrl_info *ctrl_info;
6943 struct scsi_device *sdev;
6944 struct pqi_scsi_dev *device;
6945 unsigned long flags;
6946 int raid_bypass_cnt;
6948 sdev = to_scsi_device(dev);
6949 ctrl_info = shost_to_hba(sdev->host);
6951 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6953 device = sdev->hostdata;
6955 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6959 raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
6961 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6963 return snprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
6966 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
6967 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
6968 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
6969 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
6970 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
6971 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
6972 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
6974 static struct device_attribute *pqi_sdev_attrs[] = {
6976 &dev_attr_unique_id,
6977 &dev_attr_path_info,
6978 &dev_attr_sas_address,
6979 &dev_attr_ssd_smart_path_enabled,
6980 &dev_attr_raid_level,
6981 &dev_attr_raid_bypass_cnt,
6985 static struct scsi_host_template pqi_driver_template = {
6986 .module = THIS_MODULE,
6987 .name = DRIVER_NAME_SHORT,
6988 .proc_name = DRIVER_NAME_SHORT,
6989 .queuecommand = pqi_scsi_queue_command,
6990 .scan_start = pqi_scan_start,
6991 .scan_finished = pqi_scan_finished,
6993 .eh_device_reset_handler = pqi_eh_device_reset_handler,
6995 .slave_alloc = pqi_slave_alloc,
6996 .slave_configure = pqi_slave_configure,
6997 .slave_destroy = pqi_slave_destroy,
6998 .map_queues = pqi_map_queues,
6999 .sdev_attrs = pqi_sdev_attrs,
7000 .shost_attrs = pqi_shost_attrs,
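/*
 * Allocate a Scsi_Host, size its limits from the controller's capabilities,
 * and register it with both the SCSI midlayer and the SAS transport layer.
 */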
7003 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7006 struct Scsi_Host *shost;
7008 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7010 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
7015 shost->n_io_port = 0;
7016 shost->this_id = -1;
7017 shost->max_channel = PQI_MAX_BUS;
7018 shost->max_cmd_len = MAX_COMMAND_SIZE;
7019 shost->max_lun = ~0;
7021 shost->max_sectors = ctrl_info->max_sectors;
7022 shost->can_queue = ctrl_info->scsi_ml_can_queue;
7023 shost->cmd_per_lun = shost->can_queue;
7024 shost->sg_tablesize = ctrl_info->sg_tablesize;
7025 shost->transportt = pqi_sas_transport_template;
7026 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
7027 shost->unique_id = shost->irq;
7028 shost->nr_hw_queues = ctrl_info->num_queue_groups;
7029 shost->host_tagset = 1;
7030 shost->hostdata[0] = (unsigned long)ctrl_info;
7032 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7034 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
7038 rc = pqi_add_sas_host(shost, ctrl_info);
7040 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
7044 ctrl_info->scsi_host = shost;
7049 scsi_remove_host(shost);
7051 scsi_host_put(shost);
7056 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7058 struct Scsi_Host *shost;
7060 pqi_delete_sas_host(ctrl_info);
7062 shost = ctrl_info->scsi_host;
7066 scsi_remove_host(shost);
7067 scsi_host_put(shost);
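/*
 * Poll the PQI device reset register until the controller reports that the
 * reset action has completed, the controller goes offline, or the
 * controller-advertised timeout expires.
 */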
7070 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7073 struct pqi_device_registers __iomem *pqi_registers;
7074 unsigned long timeout;
7075 unsigned int timeout_msecs;
7076 union pqi_reset_register reset_reg;
7078 pqi_registers = ctrl_info->pqi_registers;
7079 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7080 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7083 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7084 reset_reg.all_bits = readl(&pqi_registers->device_reset);
7085 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7087 pqi_check_ctrl_health(ctrl_info);
7088 if (pqi_ctrl_offline(ctrl_info)) {
7092 if (time_after(jiffies, timeout)) {
7101 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7104 union pqi_reset_register reset_reg;
7106 if (ctrl_info->pqi_reset_quiesce_supported) {
7107 rc = sis_pqi_reset_quiesce(ctrl_info);
7109 dev_err(&ctrl_info->pci_dev->dev,
7110 "PQI reset failed during quiesce with error %d\n", rc);
7115 reset_reg.all_bits = 0;
7116 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7117 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
7119 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
7121 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
7123 dev_err(&ctrl_info->pci_dev->dev,
7124 "PQI reset failed with error %d\n", rc);
7129 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7132 struct bmic_sense_subsystem_info *sense_info;
7134 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7138 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7142 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7143 sizeof(sense_info->ctrl_serial_number));
7144 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7152 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
7155 struct bmic_identify_controller *identify;
7157 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7161 rc = pqi_identify_controller(ctrl_info, identify);
7165 if (get_unaligned_le32(&identify->extra_controller_flags) &
7166 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7167 memcpy(ctrl_info->firmware_version,
7168 identify->firmware_version_long,
7169 sizeof(identify->firmware_version_long));
7171 memcpy(ctrl_info->firmware_version,
7172 identify->firmware_version_short,
7173 sizeof(identify->firmware_version_short));
7174 ctrl_info->firmware_version
7175 [sizeof(identify->firmware_version_short)] = '\0';
7176 snprintf(ctrl_info->firmware_version +
7177 strlen(ctrl_info->firmware_version),
7178 sizeof(ctrl_info->firmware_version) -
7179 sizeof(identify->firmware_version_short),
7181 get_unaligned_le16(&identify->firmware_build_number));
7184 memcpy(ctrl_info->model, identify->product_id,
7185 sizeof(identify->product_id));
7186 ctrl_info->model[sizeof(identify->product_id)] = '\0';
7188 memcpy(ctrl_info->vendor, identify->vendor_id,
7189 sizeof(identify->vendor_id));
7190 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7198 struct pqi_config_table_section_info {
7199 struct pqi_ctrl_info *ctrl_info;
7202 void __iomem *section_iomem_addr;
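/*
 * Firmware feature helpers: each feature is a single bit in the
 * "features supported" and "features enabled" byte arrays that follow the
 * firmware features section header in the PQI config table.
 */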
7205 static inline bool pqi_is_firmware_feature_supported(
7206 struct pqi_config_table_firmware_features *firmware_features,
7207 unsigned int bit_position)
7209 unsigned int byte_index;
7211 byte_index = bit_position / BITS_PER_BYTE;
7213 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7216 return firmware_features->features_supported[byte_index] &
7217 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7220 static inline bool pqi_is_firmware_feature_enabled(
7221 struct pqi_config_table_firmware_features *firmware_features,
7222 void __iomem *firmware_features_iomem_addr,
7223 unsigned int bit_position)
7225 unsigned int byte_index;
7226 u8 __iomem *features_enabled_iomem_addr;
7228 byte_index = (bit_position / BITS_PER_BYTE) +
7229 (le16_to_cpu(firmware_features->num_elements) * 2);
7231 features_enabled_iomem_addr = firmware_features_iomem_addr +
7232 offsetof(struct pqi_config_table_firmware_features,
7233 features_supported) + byte_index;
7235 return *((__force u8 *)features_enabled_iomem_addr) &
7236 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7239 static inline void pqi_request_firmware_feature(
7240 struct pqi_config_table_firmware_features *firmware_features,
7241 unsigned int bit_position)
7243 unsigned int byte_index;
7245 byte_index = (bit_position / BITS_PER_BYTE) +
7246 le16_to_cpu(firmware_features->num_elements);
7248 firmware_features->features_supported[byte_index] |=
7249 (1 << (bit_position % BITS_PER_BYTE));
7252 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7253 u16 first_section, u16 last_section)
7255 struct pqi_vendor_general_request request;
7257 memset(&request, 0, sizeof(request));
7259 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7260 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7261 &request.header.iu_length);
7262 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7263 &request.function_code);
7264 put_unaligned_le16(first_section,
7265 &request.data.config_table_update.first_section);
7266 put_unaligned_le16(last_section,
7267 &request.data.config_table_update.last_section);
7269 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
7270 0, NULL, NO_TIMEOUT);
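/*
 * Write the host's requested feature bits into the controller's copy of the
 * firmware features section, advertise the highest feature bit the driver
 * knows about (when the controller supports that), and ask the controller
 * to re-read the section.
 */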
7273 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7274 struct pqi_config_table_firmware_features *firmware_features,
7275 void __iomem *firmware_features_iomem_addr)
7277 void *features_requested;
7278 void __iomem *features_requested_iomem_addr;
7279 void __iomem *host_max_known_feature_iomem_addr;
7281 features_requested = firmware_features->features_supported +
7282 le16_to_cpu(firmware_features->num_elements);
7284 features_requested_iomem_addr = firmware_features_iomem_addr +
7285 (features_requested - (void *)firmware_features);
7287 memcpy_toio(features_requested_iomem_addr, features_requested,
7288 le16_to_cpu(firmware_features->num_elements));
7290 if (pqi_is_firmware_feature_supported(firmware_features,
7291 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7292 host_max_known_feature_iomem_addr =
7293 features_requested_iomem_addr +
7294 (le16_to_cpu(firmware_features->num_elements) * 2) +
7296 writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
7297 host_max_known_feature_iomem_addr);
7300 return pqi_config_table_update(ctrl_info,
7301 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7302 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7305 struct pqi_firmware_feature {
7307 unsigned int feature_bit;
7310 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7311 struct pqi_firmware_feature *firmware_feature);
7314 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7315 struct pqi_firmware_feature *firmware_feature)
7317 if (!firmware_feature->supported) {
7318 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7319 firmware_feature->feature_name);
7323 if (firmware_feature->enabled) {
7324 dev_info(&ctrl_info->pci_dev->dev,
7325 "%s enabled\n", firmware_feature->feature_name);
7329 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7330 firmware_feature->feature_name);
7333 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7334 struct pqi_firmware_feature *firmware_feature)
7336 switch (firmware_feature->feature_bit) {
7337 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7338 ctrl_info->enable_r1_writes = firmware_feature->enabled;
7340 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7341 ctrl_info->enable_r5_writes = firmware_feature->enabled;
7343 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7344 ctrl_info->enable_r6_writes = firmware_feature->enabled;
7346 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7347 ctrl_info->soft_reset_handshake_supported =
7348 firmware_feature->enabled;
7350 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
7351 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
7353 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
7354 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
7358 pqi_firmware_feature_status(ctrl_info, firmware_feature);
7361 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7362 struct pqi_firmware_feature *firmware_feature)
7364 if (firmware_feature->feature_status)
7365 firmware_feature->feature_status(ctrl_info, firmware_feature);
7368 static DEFINE_MUTEX(pqi_firmware_features_mutex);
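/* All firmware features the driver knows how to request and act on. */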
7370 static struct pqi_firmware_feature pqi_firmware_features[] = {
7372 .feature_name = "Online Firmware Activation",
7373 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7374 .feature_status = pqi_firmware_feature_status,
7377 .feature_name = "Serial Management Protocol",
7378 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7379 .feature_status = pqi_firmware_feature_status,
7382 .feature_name = "Maximum Known Feature",
7383 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7384 .feature_status = pqi_firmware_feature_status,
7387 .feature_name = "RAID 0 Read Bypass",
7388 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
7389 .feature_status = pqi_firmware_feature_status,
7392 .feature_name = "RAID 1 Read Bypass",
7393 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7394 .feature_status = pqi_firmware_feature_status,
7397 .feature_name = "RAID 5 Read Bypass",
7398 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
7399 .feature_status = pqi_firmware_feature_status,
7402 .feature_name = "RAID 6 Read Bypass",
7403 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7404 .feature_status = pqi_firmware_feature_status,
7407 .feature_name = "RAID 0 Write Bypass",
7408 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7409 .feature_status = pqi_firmware_feature_status,
7412 .feature_name = "RAID 1 Write Bypass",
7413 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
7414 .feature_status = pqi_ctrl_update_feature_flags,
7417 .feature_name = "RAID 5 Write Bypass",
7418 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
7419 .feature_status = pqi_ctrl_update_feature_flags,
7422 .feature_name = "RAID 6 Write Bypass",
7423 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
7424 .feature_status = pqi_ctrl_update_feature_flags,
7427 .feature_name = "New Soft Reset Handshake",
7428 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
7429 .feature_status = pqi_ctrl_update_feature_flags,
7432 .feature_name = "RAID IU Timeout",
7433 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
7434 .feature_status = pqi_ctrl_update_feature_flags,
7437 .feature_name = "TMF IU Timeout",
7438 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
7439 .feature_status = pqi_ctrl_update_feature_flags,
7442 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
7443 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
7444 .feature_status = pqi_firmware_feature_status,
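/*
 * Negotiate firmware features with the controller: note which features the
 * firmware supports, request every supported feature, then record which of
 * them the firmware actually enabled.
 */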
7448 static void pqi_process_firmware_features(
7449 struct pqi_config_table_section_info *section_info)
7452 struct pqi_ctrl_info *ctrl_info;
7453 struct pqi_config_table_firmware_features *firmware_features;
7454 void __iomem *firmware_features_iomem_addr;
7456 unsigned int num_features_supported;
7458 ctrl_info = section_info->ctrl_info;
7459 firmware_features = section_info->section;
7460 firmware_features_iomem_addr = section_info->section_iomem_addr;
7462 for (i = 0, num_features_supported = 0;
7463 i < ARRAY_SIZE(pqi_firmware_features); i++) {
7464 if (pqi_is_firmware_feature_supported(firmware_features,
7465 pqi_firmware_features[i].feature_bit)) {
7466 pqi_firmware_features[i].supported = true;
7467 num_features_supported++;
7469 pqi_firmware_feature_update(ctrl_info,
7470 &pqi_firmware_features[i]);
7474 if (num_features_supported == 0)
7477 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7478 if (!pqi_firmware_features[i].supported)
7480 pqi_request_firmware_feature(firmware_features,
7481 pqi_firmware_features[i].feature_bit);
7484 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
7485 firmware_features_iomem_addr);
7487 dev_err(&ctrl_info->pci_dev->dev,
7488 "failed to enable firmware features in PQI configuration table\n");
7489 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7490 if (!pqi_firmware_features[i].supported)
7492 pqi_firmware_feature_update(ctrl_info,
7493 &pqi_firmware_features[i]);
7498 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7499 if (!pqi_firmware_features[i].supported)
7501 if (pqi_is_firmware_feature_enabled(firmware_features,
7502 firmware_features_iomem_addr,
7503 pqi_firmware_features[i].feature_bit)) {
7504 pqi_firmware_features[i].enabled = true;
7506 pqi_firmware_feature_update(ctrl_info,
7507 &pqi_firmware_features[i]);
7511 static void pqi_init_firmware_features(void)
7515 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7516 pqi_firmware_features[i].supported = false;
7517 pqi_firmware_features[i].enabled = false;
7521 static void pqi_process_firmware_features_section(
7522 struct pqi_config_table_section_info *section_info)
7524 mutex_lock(&pqi_firmware_features_mutex);
7525 pqi_init_firmware_features();
7526 pqi_process_firmware_features(section_info);
7527 mutex_unlock(&pqi_firmware_features_mutex);
7531 * Reset all controller settings that can be initialized during the processing
7532 * of the PQI Configuration Table.
7535 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
7539 bool firmware_feature_section_present;
7540 void __iomem *table_iomem_addr;
7541 struct pqi_config_table *config_table;
7542 struct pqi_config_table_section_header *section;
7543 struct pqi_config_table_section_info section_info;
7544 struct pqi_config_table_section_info feature_section_info;
7546 table_length = ctrl_info->config_table_length;
7547 if (table_length == 0)
7550 config_table = kmalloc(table_length, GFP_KERNEL);
7551 if (!config_table) {
7552 dev_err(&ctrl_info->pci_dev->dev,
7553 "failed to allocate memory for PQI configuration table\n");
7558 	 * Copy the config table contents from I/O memory space into the driver's local copy.
7561 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
7562 memcpy_fromio(config_table, table_iomem_addr, table_length);
7564 firmware_feature_section_present = false;
7565 section_info.ctrl_info = ctrl_info;
7566 section_offset = get_unaligned_le32(&config_table->first_section_offset);
7568 while (section_offset) {
7569 section = (void *)config_table + section_offset;
7571 section_info.section = section;
7572 section_info.section_offset = section_offset;
7573 section_info.section_iomem_addr = table_iomem_addr + section_offset;
7575 		switch (get_unaligned_le16(&section->section_id)) {
7576 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
7577 firmware_feature_section_present = true;
7578 feature_section_info = section_info;
7580 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
7581 if (pqi_disable_heartbeat)
7582 dev_warn(&ctrl_info->pci_dev->dev,
7583 "heartbeat disabled by module parameter\n");
7585 ctrl_info->heartbeat_counter =
7588 offsetof(struct pqi_config_table_heartbeat,
7591 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
7592 ctrl_info->soft_reset_status =
7595 offsetof(struct pqi_config_table_soft_reset,
7600 		section_offset = get_unaligned_le16(&section->next_section_offset);
7604 * We process the firmware feature section after all other sections
7605 * have been processed so that the feature bit callbacks can take
7606 * into account the settings configured by other sections.
7608 if (firmware_feature_section_present)
7609 pqi_process_firmware_features_section(&feature_section_info);
7611 kfree(config_table);
7616 /* Switches the controller from PQI mode back into SIS mode. */
7618 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
7622 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
7623 rc = pqi_reset(ctrl_info);
7626 rc = sis_reenable_sis_mode(ctrl_info);
7628 dev_err(&ctrl_info->pci_dev->dev,
7629 "re-enabling SIS mode failed with error %d\n", rc);
7632 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7638  * If the controller isn't already in SIS mode, this function forces it into SIS mode.
7642 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
7644 if (!sis_is_firmware_running(ctrl_info))
7647 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
7650 if (sis_is_kernel_up(ctrl_info)) {
7651 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7655 return pqi_revert_to_sis_mode(ctrl_info);
7658 #define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000
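/*
 * One-time controller initialization: transition the controller from SIS to
 * PQI mode, create the admin and operational queues, enable MSI-X
 * interrupts, process the PQI config table, register with the SCSI
 * midlayer, and start the initial device scan.
 */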
7660 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
7665 if (reset_devices) {
7666 sis_soft_reset(ctrl_info);
7667 msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
7669 rc = pqi_force_sis_mode(ctrl_info);
7675 	 * Wait until the controller is ready to start accepting SIS commands.
7678 rc = sis_wait_for_ctrl_ready(ctrl_info);
7683 * Get the controller properties. This allows us to determine
7684 * whether or not it supports PQI mode.
7686 rc = sis_get_ctrl_properties(ctrl_info);
7688 dev_err(&ctrl_info->pci_dev->dev,
7689 "error obtaining controller properties\n");
7693 rc = sis_get_pqi_capabilities(ctrl_info);
7695 dev_err(&ctrl_info->pci_dev->dev,
7696 "error obtaining controller capabilities\n");
7700 product_id = sis_get_product_id(ctrl_info);
7701 ctrl_info->product_id = (u8)product_id;
7702 ctrl_info->product_revision = (u8)(product_id >> 8);
7704 if (reset_devices) {
7705 if (ctrl_info->max_outstanding_requests >
7706 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
7707 ctrl_info->max_outstanding_requests =
7708 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
7710 if (ctrl_info->max_outstanding_requests >
7711 PQI_MAX_OUTSTANDING_REQUESTS)
7712 ctrl_info->max_outstanding_requests =
7713 PQI_MAX_OUTSTANDING_REQUESTS;
7716 pqi_calculate_io_resources(ctrl_info);
7718 rc = pqi_alloc_error_buffer(ctrl_info);
7720 dev_err(&ctrl_info->pci_dev->dev,
7721 "failed to allocate PQI error buffer\n");
7726 * If the function we are about to call succeeds, the
7727 	 * controller will transition from legacy SIS mode into PQI mode.
7730 rc = sis_init_base_struct_addr(ctrl_info);
7732 dev_err(&ctrl_info->pci_dev->dev,
7733 "error initializing PQI mode\n");
7737 /* Wait for the controller to complete the SIS -> PQI transition. */
7738 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7740 dev_err(&ctrl_info->pci_dev->dev,
7741 "transition to PQI mode failed\n");
7745 /* From here on, we are running in PQI mode. */
7746 ctrl_info->pqi_mode_enabled = true;
7747 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7749 rc = pqi_alloc_admin_queues(ctrl_info);
7751 dev_err(&ctrl_info->pci_dev->dev,
7752 "failed to allocate admin queues\n");
7756 rc = pqi_create_admin_queues(ctrl_info);
7758 dev_err(&ctrl_info->pci_dev->dev,
7759 "error creating admin queues\n");
7763 rc = pqi_report_device_capability(ctrl_info);
7765 dev_err(&ctrl_info->pci_dev->dev,
7766 "obtaining device capability failed\n");
7770 rc = pqi_validate_device_capability(ctrl_info);
7774 pqi_calculate_queue_resources(ctrl_info);
7776 rc = pqi_enable_msix_interrupts(ctrl_info);
7780 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
7781 ctrl_info->max_msix_vectors =
7782 ctrl_info->num_msix_vectors_enabled;
7783 pqi_calculate_queue_resources(ctrl_info);
7786 rc = pqi_alloc_io_resources(ctrl_info);
7790 rc = pqi_alloc_operational_queues(ctrl_info);
7792 dev_err(&ctrl_info->pci_dev->dev,
7793 "failed to allocate operational queues\n");
7797 pqi_init_operational_queues(ctrl_info);
7799 rc = pqi_request_irqs(ctrl_info);
7803 rc = pqi_create_queues(ctrl_info);
7807 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7809 ctrl_info->controller_online = true;
7811 rc = pqi_process_config_table(ctrl_info);
7815 pqi_start_heartbeat_timer(ctrl_info);
7817 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
7818 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
7819 if (rc) { /* Supported features not returned correctly. */
7820 dev_err(&ctrl_info->pci_dev->dev,
7821 "error obtaining advanced RAID bypass configuration\n");
7824 ctrl_info->ciss_report_log_flags |=
7825 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
7828 rc = pqi_enable_events(ctrl_info);
7830 dev_err(&ctrl_info->pci_dev->dev,
7831 "error enabling events\n");
7835 /* Register with the SCSI subsystem. */
7836 rc = pqi_register_scsi(ctrl_info);
7840 rc = pqi_get_ctrl_product_details(ctrl_info);
7842 dev_err(&ctrl_info->pci_dev->dev,
7843 "error obtaining product details\n");
7847 rc = pqi_get_ctrl_serial_number(ctrl_info);
7849 dev_err(&ctrl_info->pci_dev->dev,
7850 "error obtaining ctrl serial number\n");
7854 rc = pqi_set_diag_rescan(ctrl_info);
7856 dev_err(&ctrl_info->pci_dev->dev,
7857 "error enabling multi-lun rescan\n");
7861 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
7863 dev_err(&ctrl_info->pci_dev->dev,
7864 "error updating host wellness\n");
7868 pqi_schedule_update_time_worker(ctrl_info);
7870 pqi_scan_scsi_devices(ctrl_info);
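/*
 * Reset the host-side copies of all queue indices and their corresponding
 * hardware registers; used when the controller is re-initialized after a
 * suspend/resume cycle or an online firmware activation.
 */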
7875 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
7878 struct pqi_admin_queues *admin_queues;
7879 struct pqi_event_queue *event_queue;
7881 admin_queues = &ctrl_info->admin_queues;
7882 admin_queues->iq_pi_copy = 0;
7883 admin_queues->oq_ci_copy = 0;
7884 writel(0, admin_queues->oq_pi);
7886 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
7887 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
7888 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
7889 ctrl_info->queue_groups[i].oq_ci_copy = 0;
7891 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
7892 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
7893 writel(0, ctrl_info->queue_groups[i].oq_pi);
7896 event_queue = &ctrl_info->event_queue;
7897 writel(0, event_queue->oq_pi);
7898 event_queue->oq_ci_copy = 0;
7901 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
7905 rc = pqi_force_sis_mode(ctrl_info);
7910 	 * Wait until the controller is ready to start accepting SIS commands.
7913 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
7918 * Get the controller properties. This allows us to determine
7919 * whether or not it supports PQI mode.
7921 rc = sis_get_ctrl_properties(ctrl_info);
7923 dev_err(&ctrl_info->pci_dev->dev,
7924 "error obtaining controller properties\n");
7928 rc = sis_get_pqi_capabilities(ctrl_info);
7930 dev_err(&ctrl_info->pci_dev->dev,
7931 "error obtaining controller capabilities\n");
7936 * If the function we are about to call succeeds, the
7937 	 * controller will transition from legacy SIS mode into PQI mode.
7940 rc = sis_init_base_struct_addr(ctrl_info);
7942 dev_err(&ctrl_info->pci_dev->dev,
7943 "error initializing PQI mode\n");
7947 /* Wait for the controller to complete the SIS -> PQI transition. */
7948 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7950 dev_err(&ctrl_info->pci_dev->dev,
7951 "transition to PQI mode failed\n");
7955 /* From here on, we are running in PQI mode. */
7956 ctrl_info->pqi_mode_enabled = true;
7957 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
7959 pqi_reinit_queues(ctrl_info);
7961 rc = pqi_create_admin_queues(ctrl_info);
7963 dev_err(&ctrl_info->pci_dev->dev,
7964 "error creating admin queues\n");
7968 rc = pqi_create_queues(ctrl_info);
7972 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
7974 ctrl_info->controller_online = true;
7975 pqi_ctrl_unblock_requests(ctrl_info);
7977 rc = pqi_process_config_table(ctrl_info);
7981 pqi_start_heartbeat_timer(ctrl_info);
7983 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
7984 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
7986 dev_err(&ctrl_info->pci_dev->dev,
7987 "error obtaining advanced RAID bypass configuration\n");
7990 ctrl_info->ciss_report_log_flags |=
7991 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
7994 rc = pqi_enable_events(ctrl_info);
7996 dev_err(&ctrl_info->pci_dev->dev,
7997 "error enabling events\n");
8001 rc = pqi_get_ctrl_product_details(ctrl_info);
8003 dev_err(&ctrl_info->pci_dev->dev,
8004 "error obtaining product details\n");
8008 rc = pqi_set_diag_rescan(ctrl_info);
8010 dev_err(&ctrl_info->pci_dev->dev,
8011 "error enabling multi-lun rescan\n");
8015 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8017 dev_err(&ctrl_info->pci_dev->dev,
8018 "error updating host wellness\n");
8022 pqi_schedule_update_time_worker(ctrl_info);
8024 pqi_scan_scsi_devices(ctrl_info);
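/*
 * Adjust the PCIe completion timeout via the Device Control 2 capability
 * register.
 */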
8029 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
8033 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
8034 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
8036 return pcibios_err_to_errno(rc);
8039 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8044 rc = pci_enable_device(ctrl_info->pci_dev);
8046 dev_err(&ctrl_info->pci_dev->dev,
8047 "failed to enable PCI device\n");
8051 if (sizeof(dma_addr_t) > 4)
8052 mask = DMA_BIT_MASK(64);
8054 mask = DMA_BIT_MASK(32);
8056 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
8058 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8059 goto disable_device;
8062 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8064 dev_err(&ctrl_info->pci_dev->dev,
8065 "failed to obtain PCI resources\n");
8066 goto disable_device;
8069 ctrl_info->iomem_base = ioremap(pci_resource_start(
8070 ctrl_info->pci_dev, 0),
8071 sizeof(struct pqi_ctrl_registers));
8072 if (!ctrl_info->iomem_base) {
8073 dev_err(&ctrl_info->pci_dev->dev,
8074 "failed to map memory for controller registers\n");
8076 goto release_regions;
8079 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
8081 /* Increase the PCIe completion timeout. */
8082 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8083 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8085 dev_err(&ctrl_info->pci_dev->dev,
8086 "failed to set PCIe completion timeout\n");
8087 goto release_regions;
8090 /* Enable bus mastering. */
8091 pci_set_master(ctrl_info->pci_dev);
8093 ctrl_info->registers = ctrl_info->iomem_base;
8094 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8096 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8101 pci_release_regions(ctrl_info->pci_dev);
8103 pci_disable_device(ctrl_info->pci_dev);
8108 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
8110 iounmap(ctrl_info->iomem_base);
8111 pci_release_regions(ctrl_info->pci_dev);
8112 if (pci_is_enabled(ctrl_info->pci_dev))
8113 pci_disable_device(ctrl_info->pci_dev);
8114 pci_set_drvdata(ctrl_info->pci_dev, NULL);
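/*
 * Allocate and initialize the per-controller state: locks, work items,
 * timers, default limits, and the synchronous request semaphore.
 */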
8117 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
8119 struct pqi_ctrl_info *ctrl_info;
8121 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
8122 GFP_KERNEL, numa_node);
8126 mutex_init(&ctrl_info->scan_mutex);
8127 mutex_init(&ctrl_info->lun_reset_mutex);
8128 mutex_init(&ctrl_info->ofa_mutex);
8130 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8131 spin_lock_init(&ctrl_info->scsi_device_list_lock);
8133 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
8134 atomic_set(&ctrl_info->num_interrupts, 0);
8135 atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
8137 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
8138 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
8140 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
8141 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
8143 sema_init(&ctrl_info->sync_request_sem,
8144 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
8145 init_waitqueue_head(&ctrl_info->block_requests_wait);
8147 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
8148 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
8149 INIT_WORK(&ctrl_info->raid_bypass_retry_work,
8150 pqi_raid_bypass_retry_worker);
8152 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
8153 ctrl_info->irq_mode = IRQ_MODE_NONE;
8154 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
8156 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
8157 ctrl_info->max_transfer_encrypted_sas_sata =
8158 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
8159 ctrl_info->max_transfer_encrypted_nvme =
8160 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
8161 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
8162 ctrl_info->max_write_raid_1_10_2drive = ~0;
8163 ctrl_info->max_write_raid_1_10_3drive = ~0;
8168 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
8173 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
8175 pqi_free_irqs(ctrl_info);
8176 pqi_disable_msix_interrupts(ctrl_info);
8179 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
8181 pqi_stop_heartbeat_timer(ctrl_info);
8182 pqi_free_interrupts(ctrl_info);
8183 if (ctrl_info->queue_memory_base)
8184 dma_free_coherent(&ctrl_info->pci_dev->dev,
8185 ctrl_info->queue_memory_length,
8186 ctrl_info->queue_memory_base,
8187 ctrl_info->queue_memory_base_dma_handle);
8188 if (ctrl_info->admin_queue_memory_base)
8189 dma_free_coherent(&ctrl_info->pci_dev->dev,
8190 ctrl_info->admin_queue_memory_length,
8191 ctrl_info->admin_queue_memory_base,
8192 ctrl_info->admin_queue_memory_base_dma_handle);
8193 pqi_free_all_io_requests(ctrl_info);
8194 if (ctrl_info->error_buffer)
8195 dma_free_coherent(&ctrl_info->pci_dev->dev,
8196 ctrl_info->error_buffer_length,
8197 ctrl_info->error_buffer,
8198 ctrl_info->error_buffer_dma_handle);
8199 if (ctrl_info->iomem_base)
8200 pqi_cleanup_pci_init(ctrl_info);
8201 pqi_free_ctrl_info(ctrl_info);
8204 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
8206 pqi_cancel_rescan_worker(ctrl_info);
8207 pqi_cancel_update_time_worker(ctrl_info);
8208 pqi_unregister_scsi(ctrl_info);
8209 if (ctrl_info->pqi_mode_enabled)
8210 pqi_revert_to_sis_mode(ctrl_info);
8211 pqi_free_ctrl_resources(ctrl_info);
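/*
 * Online Firmware Activation (OFA) support: quiesce all host I/O and drop
 * back to SIS mode before the firmware is activated, then restore PQI mode
 * and resume normal operation afterwards.
 */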
8214 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
8216 pqi_cancel_update_time_worker(ctrl_info);
8217 pqi_cancel_rescan_worker(ctrl_info);
8218 pqi_wait_until_lun_reset_finished(ctrl_info);
8219 pqi_wait_until_scan_finished(ctrl_info);
8220 pqi_ctrl_ofa_start(ctrl_info);
8221 pqi_ctrl_block_requests(ctrl_info);
8222 pqi_ctrl_wait_until_quiesced(ctrl_info);
8223 pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
8224 pqi_fail_io_queued_for_all_devices(ctrl_info);
8225 pqi_wait_until_inbound_queues_empty(ctrl_info);
8226 pqi_stop_heartbeat_timer(ctrl_info);
8227 ctrl_info->pqi_mode_enabled = false;
8228 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8231 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
8233 pqi_ofa_free_host_buffer(ctrl_info);
8234 ctrl_info->pqi_mode_enabled = true;
8235 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8236 ctrl_info->controller_online = true;
8237 pqi_ctrl_unblock_requests(ctrl_info);
8238 pqi_start_heartbeat_timer(ctrl_info);
8239 pqi_schedule_update_time_worker(ctrl_info);
8240 pqi_clear_soft_reset_status(ctrl_info,
8241 PQI_SOFT_RESET_ABORT);
8242 pqi_scan_scsi_devices(ctrl_info);
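/*
 * Allocate the OFA host buffer as a series of DMA-coherent chunks and record
 * each chunk in the scatter-gather descriptor list handed to the firmware.
 */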
8245 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
8246 u32 total_size, u32 chunk_size)
8251 struct pqi_sg_descriptor *mem_descriptor = NULL;
8253 struct pqi_ofa_memory *ofap;
8255 dev = &ctrl_info->pci_dev->dev;
8257 sg_count = (total_size + chunk_size - 1);
8258 sg_count /= chunk_size;
8260 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8262 	if (sg_count * chunk_size < total_size)
8265 ctrl_info->pqi_ofa_chunk_virt_addr =
8266 kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
8267 if (!ctrl_info->pqi_ofa_chunk_virt_addr)
8270 for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
8271 dma_addr_t dma_handle;
8273 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
8274 dma_alloc_coherent(dev, chunk_size, &dma_handle,
8277 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
8280 mem_descriptor = &ofap->sg_descriptor[i];
8281 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
8282 put_unaligned_le32(chunk_size, &mem_descriptor->length);
8285 if (!size || size < total_size)
8286 goto out_free_chunks;
8288 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
8289 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
8290 put_unaligned_le32(size, &ofap->bytes_allocated);
8296 mem_descriptor = &ofap->sg_descriptor[i];
8297 dma_free_coherent(dev, chunk_size,
8298 ctrl_info->pqi_ofa_chunk_virt_addr[i],
8299 get_unaligned_le64(&mem_descriptor->address));
8301 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8304 	put_unaligned_le32(0, &ofap->bytes_allocated);
8308 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
8314 total_size = le32_to_cpu(
8315 ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
8316 min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;
8318 for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
8319 if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
8325 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
8326 u32 bytes_requested)
8328 struct pqi_ofa_memory *pqi_ofa_memory;
8331 dev = &ctrl_info->pci_dev->dev;
8332 pqi_ofa_memory = dma_alloc_coherent(dev,
8333 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
8334 &ctrl_info->pqi_ofa_mem_dma_handle,
8337 if (!pqi_ofa_memory)
8340 put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
8341 memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
8342 sizeof(pqi_ofa_memory->signature));
8343 pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);
8345 ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;
8347 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
8348 dev_err(dev, "Failed to allocate host buffer of size = %u",
8355 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
8358 struct pqi_sg_descriptor *mem_descriptor;
8359 struct pqi_ofa_memory *ofap;
8361 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8366 if (!ofap->bytes_allocated)
8369 mem_descriptor = ofap->sg_descriptor;
8371 for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
8373 dma_free_coherent(&ctrl_info->pci_dev->dev,
8374 get_unaligned_le32(&mem_descriptor[i].length),
8375 ctrl_info->pqi_ofa_chunk_virt_addr[i],
8376 get_unaligned_le64(&mem_descriptor[i].address));
8378 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8381 dma_free_coherent(&ctrl_info->pci_dev->dev,
8382 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
8383 ctrl_info->pqi_ofa_mem_dma_handle);
8384 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
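/*
 * Tell the controller where the OFA host buffer lives by issuing a vendor
 * general "host memory update" request carrying the buffer's DMA address
 * and total descriptor length.
 */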
8387 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
8389 struct pqi_vendor_general_request request;
8391 struct pqi_ofa_memory *ofap;
8393 memset(&request, 0, sizeof(request));
8395 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8397 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
8398 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
8399 &request.header.iu_length);
8400 put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
8401 &request.function_code);
8404 size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
8405 get_unaligned_le16(&ofap->num_memory_descriptors) *
8406 sizeof(struct pqi_sg_descriptor);
8408 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
8409 &request.data.ofa_memory_allocation.buffer_address);
8410 put_unaligned_le32(size,
8411 &request.data.ofa_memory_allocation.buffer_length);
8415 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
8416 0, NULL, NO_TIMEOUT);
8419 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
8421 msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
8422 return pqi_ctrl_init_resume(ctrl_info);
8425 static void pqi_perform_lockup_action(void)
8427 switch (pqi_lockup_action) {
8429 panic("FATAL: Smart Family Controller lockup detected");
8432 emergency_restart();
8440 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
8441 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
8442 .status = SAM_STAT_CHECK_CONDITION,
8445 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
8448 struct pqi_io_request *io_request;
8449 struct scsi_cmnd *scmd;
8451 for (i = 0; i < ctrl_info->max_io_slots; i++) {
8452 io_request = &ctrl_info->io_request_pool[i];
8453 if (atomic_read(&io_request->refcount) == 0)
8456 scmd = io_request->scmd;
8458 set_host_byte(scmd, DID_NO_CONNECT);
8460 io_request->status = -ENXIO;
8461 io_request->error_info =
8462 &pqi_ctrl_offline_raid_error_info;
8465 io_request->io_complete_callback(io_request,
8466 io_request->context);
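/*
 * Controller lockup handling: perform the configured lockup action, tear
 * down interrupts and timers, and fail all outstanding requests so that the
 * SCSI midlayer can recover.
 */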
8470 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
8472 pqi_perform_lockup_action();
8473 pqi_stop_heartbeat_timer(ctrl_info);
8474 pqi_free_interrupts(ctrl_info);
8475 pqi_cancel_rescan_worker(ctrl_info);
8476 pqi_cancel_update_time_worker(ctrl_info);
8477 pqi_ctrl_wait_until_quiesced(ctrl_info);
8478 pqi_fail_all_outstanding_requests(ctrl_info);
8479 pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
8480 pqi_ctrl_unblock_requests(ctrl_info);
8483 static void pqi_ctrl_offline_worker(struct work_struct *work)
8485 struct pqi_ctrl_info *ctrl_info;
8487 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
8488 pqi_take_ctrl_offline_deferred(ctrl_info);
8491 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
8493 if (!ctrl_info->controller_online)
8496 ctrl_info->controller_online = false;
8497 ctrl_info->pqi_mode_enabled = false;
8498 pqi_ctrl_block_requests(ctrl_info);
8499 if (!pqi_disable_ctrl_shutdown)
8500 sis_shutdown_ctrl(ctrl_info);
8501 pci_disable_device(ctrl_info->pci_dev);
8502 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
8503 schedule_work(&ctrl_info->ctrl_offline_work);
8506 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
8507 const struct pci_device_id *id)
8509 char *ctrl_description;
8511 if (id->driver_data)
8512 ctrl_description = (char *)id->driver_data;
8514 ctrl_description = "Microsemi Smart Family Controller";
8516 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
8519 static int pqi_pci_probe(struct pci_dev *pci_dev,
8520 const struct pci_device_id *id)
8524 struct pqi_ctrl_info *ctrl_info;
8526 pqi_print_ctrl_info(pci_dev, id);
8528 if (pqi_disable_device_id_wildcards &&
8529 id->subvendor == PCI_ANY_ID &&
8530 id->subdevice == PCI_ANY_ID) {
8531 dev_warn(&pci_dev->dev,
8532 "controller not probed because device ID wildcards are disabled\n");
8536 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
8537 dev_warn(&pci_dev->dev,
8538 "controller device ID matched using wildcards\n");
8540 node = dev_to_node(&pci_dev->dev);
8541 if (node == NUMA_NO_NODE) {
8542 cp_node = cpu_to_node(0);
8543 if (cp_node == NUMA_NO_NODE)
8545 set_dev_node(&pci_dev->dev, cp_node);
8548 ctrl_info = pqi_alloc_ctrl_info(node);
8550 dev_err(&pci_dev->dev,
8551 "failed to allocate controller info block\n");
8555 ctrl_info->pci_dev = pci_dev;
8557 rc = pqi_pci_init(ctrl_info);
8561 rc = pqi_ctrl_init(ctrl_info);
8568 pqi_remove_ctrl(ctrl_info);
8573 static void pqi_pci_remove(struct pci_dev *pci_dev)
8575 struct pqi_ctrl_info *ctrl_info;
8577 ctrl_info = pci_get_drvdata(pci_dev);
8581 pqi_remove_ctrl(ctrl_info);
8584 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
8587 struct pqi_io_request *io_request;
8588 struct scsi_cmnd *scmd;
8590 for (i = 0; i < ctrl_info->max_io_slots; i++) {
8591 io_request = &ctrl_info->io_request_pool[i];
8592 if (atomic_read(&io_request->refcount) == 0)
8594 scmd = io_request->scmd;
8595 WARN_ON(scmd != NULL); /* IO command from SML */
8596 		WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
8600 static void pqi_shutdown(struct pci_dev *pci_dev)
8603 struct pqi_ctrl_info *ctrl_info;
8605 ctrl_info = pci_get_drvdata(pci_dev);
8607 dev_err(&pci_dev->dev,
8608 "cache could not be flushed\n");
8612 pqi_disable_events(ctrl_info);
8613 pqi_wait_until_ofa_finished(ctrl_info);
8614 pqi_cancel_update_time_worker(ctrl_info);
8615 pqi_cancel_rescan_worker(ctrl_info);
8616 pqi_cancel_event_worker(ctrl_info);
8618 pqi_ctrl_shutdown_start(ctrl_info);
8619 pqi_ctrl_wait_until_quiesced(ctrl_info);
8621 rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
8623 dev_err(&pci_dev->dev,
8624 "wait for pending I/O failed\n");
8628 pqi_ctrl_block_device_reset(ctrl_info);
8629 pqi_wait_until_lun_reset_finished(ctrl_info);
8632 	 * Write all data in the controller's battery-backed cache to persistent storage.
8635 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
8637 dev_err(&pci_dev->dev,
8638 "unable to flush controller cache\n");
8640 pqi_ctrl_block_requests(ctrl_info);
8642 rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
8644 dev_err(&pci_dev->dev,
8645 "wait for pending sync cmds failed\n");
8649 pqi_crash_if_pending_command(ctrl_info);
8650 pqi_reset(ctrl_info);
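/* Parse the lockup_action module parameter into pqi_lockup_action. */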
8653 static void pqi_process_lockup_action_param(void)
8657 if (!pqi_lockup_action_param)
8660 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
8661 if (strcmp(pqi_lockup_action_param,
8662 pqi_lockup_actions[i].name) == 0) {
8663 pqi_lockup_action = pqi_lockup_actions[i].action;
8668 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
8669 DRIVER_NAME_SHORT, pqi_lockup_action_param);
8672 static void pqi_process_module_params(void)
8674 pqi_process_lockup_action_param();
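/*
 * Power management: quiesce the controller and flush its cache on suspend;
 * restore PQI mode and re-initialize the queues on resume.
 */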
static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	pqi_disable_events(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_wait_until_scan_finished(ctrl_info);
	pqi_wait_until_lun_reset_finished(ctrl_info);
	pqi_wait_until_ofa_finished(ctrl_info);
	pqi_flush_cache(ctrl_info, SUSPEND);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_wait_until_inbound_queues_empty(ctrl_info);
	pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
	pqi_stop_heartbeat_timer(ctrl_info);

	if (state.event == PM_EVENT_FREEZE)
		return 0;

	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;

	return 0;
}
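
/*
 * Legacy PM resume hook: either rearm a single legacy INTx interrupt and
 * unblock requests, or restore PCI state and re-run controller
 * initialization via pqi_ctrl_init_resume().
 */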
static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	if (pci_dev->current_state != PCI_D0) {
		ctrl_info->max_hw_queue_index = 0;
		pqi_free_interrupts(ctrl_info);
		pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
		rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
			IRQF_SHARED, DRIVER_NAME_SHORT,
			&ctrl_info->queue_groups[0]);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"irq %u init failed with error %d\n",
				pci_dev->irq, rc);
			return rc;
		}
		pqi_start_heartbeat_timer(ctrl_info);
		pqi_ctrl_unblock_requests(ctrl_info);
		return 0;
	}

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	return pqi_ctrl_init_resume(ctrl_info);
}
/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0110)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0608)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0800)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0801)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0802)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0803)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0804)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0805)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0806)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0807)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0808)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0809)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x080a)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0900)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0901)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0902)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0903)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0904)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0905)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0906)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0907)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x0908)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x090a)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1200)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1201)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1202)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1280)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1281)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1282)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1300)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1301)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1302)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1303)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADAPTEC2, 0x1380)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_ADVANTECH, 0x8312)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_DELL, 0x1fe0)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0600)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0601)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0602)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0603)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0609)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0650)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0651)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0652)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0653)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0654)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0655)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0700)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x0701)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x1001)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x1100)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_HP, 0x1101)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_VENDOR_ID_GIGABYTE, 0x1000)
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
		PCI_ANY_ID, PCI_ANY_ID)
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
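
/* PCI driver glue: probe/remove plus shutdown and legacy PM hooks. */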
static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
	.suspend = pqi_suspend,
	.resume = pqi_resume,
#endif
};
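
/*
 * Module entry point: attach the SAS transport template, apply module
 * parameters, then register the PCI driver; the transport is released again
 * if PCI registration fails.
 */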
static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");
	pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;
	pqi_process_module_params();
	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);
	return rc;
}
static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);
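
/*
 * Compile-time layout checks: BUILD_BUG_ON() verifies that the driver's
 * structure definitions match the offsets and sizes expected on the wire.
 * This function is never called; it exists only so the checks are compiled.
 */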
static void __attribute__((unused)) verify_structures(void)
{
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_product_identifier) != 0xb4);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		pqi_registers) != 0x4000);

	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);

	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);

	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_transferred) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);

	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.delete_operational_queue) != 64 - 11);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);

	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);

	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));

	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_short) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		product_id) != 208);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extra_controller_flags) != 286);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		spare_part_number) != 293);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_long) != 325);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		buffer_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_read_support) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_read_support) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_write_support) != 6);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_write_support) != 7);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_sas_sata) != 8);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_nvme) != 10);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_5_6) != 12);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_2drive) != 14);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_3drive) != 16);

	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);