| 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * driver for Microsemi PQI-based storage controllers |
| 4 | * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries |
| 5 | * Copyright (c) 2016-2018 Microsemi Corporation |
| 6 | * Copyright (c) 2016 PMC-Sierra, Inc. |
| 7 | * |
| 8 | * Questions/Comments/Bugfixes to storagedev@microchip.com |
| 9 | * |
| 10 | */ |
| 11 | |
| 12 | #include <linux/module.h> |
| 13 | #include <linux/kernel.h> |
| 14 | #include <linux/pci.h> |
| 15 | #include <linux/delay.h> |
| 16 | #include <linux/interrupt.h> |
| 17 | #include <linux/sched.h> |
| 18 | #include <linux/rtc.h> |
| 19 | #include <linux/bcd.h> |
| 20 | #include <linux/reboot.h> |
| 21 | #include <linux/cciss_ioctl.h> |
| 22 | #include <linux/blk-mq-pci.h> |
| 23 | #include <scsi/scsi_host.h> |
| 24 | #include <scsi/scsi_cmnd.h> |
| 25 | #include <scsi/scsi_device.h> |
| 26 | #include <scsi/scsi_eh.h> |
| 27 | #include <scsi/scsi_transport_sas.h> |
| 28 | #include <asm/unaligned.h> |
| 29 | #include "smartpqi.h" |
| 30 | #include "smartpqi_sis.h" |
| 31 | |
| 32 | #if !defined(BUILD_TIMESTAMP) |
| 33 | #define BUILD_TIMESTAMP |
| 34 | #endif |
| 35 | |
| 36 | #define DRIVER_VERSION "1.2.16-012" |
| 37 | #define DRIVER_MAJOR 1 |
| 38 | #define DRIVER_MINOR 2 |
| 39 | #define DRIVER_RELEASE 16 |
| 40 | #define DRIVER_REVISION 12 |
| 41 | |
| 42 | #define DRIVER_NAME "Microsemi PQI Driver (v" \ |
| 43 | DRIVER_VERSION BUILD_TIMESTAMP ")" |
| 44 | #define DRIVER_NAME_SHORT "smartpqi" |
| 45 | |
| 46 | #define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor)) |
| 47 | |
| 48 | #define PQI_POST_RESET_DELAY_SECS 5 |
| 49 | #define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10 |
| 50 | |
| 51 | MODULE_AUTHOR("Microsemi"); |
| 52 | MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version " |
| 53 | DRIVER_VERSION); |
| 54 | MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers"); |
| 55 | MODULE_VERSION(DRIVER_VERSION); |
| 56 | MODULE_LICENSE("GPL"); |
| 57 | |
| 58 | static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info); |
| 59 | static void pqi_ctrl_offline_worker(struct work_struct *work); |
| 60 | static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info); |
| 61 | static void pqi_scan_start(struct Scsi_Host *shost); |
| 62 | static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, |
| 63 | struct pqi_queue_group *queue_group, enum pqi_io_path path, |
| 64 | struct pqi_io_request *io_request); |
| 65 | static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, |
| 66 | struct pqi_iu_header *request, unsigned int flags, |
| 67 | struct pqi_raid_error_info *error_info); |
| 68 | static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, |
| 69 | struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, |
| 70 | unsigned int cdb_length, struct pqi_queue_group *queue_group, |
| 71 | struct pqi_encryption_info *encryption_info, bool raid_bypass); |
| 72 | static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, |
| 73 | struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, |
| 74 | struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, |
| 75 | struct pqi_scsi_dev_raid_map_data *rmd); |
| 76 | static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, |
| 77 | struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, |
| 78 | struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, |
| 79 | struct pqi_scsi_dev_raid_map_data *rmd); |
| 80 | static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info); |
| 81 | static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info); |
| 82 | static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs); |
| 83 | static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info); |
| 84 | static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info); |
| 85 | static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info); |
| 86 | static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, |
| 87 | struct pqi_scsi_dev *device, unsigned long timeout_msecs); |
| 88 | |
| 89 | /* for flags argument to pqi_submit_raid_request_synchronous() */ |
| 90 | #define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1 |
| 91 | |
| 92 | static struct scsi_transport_template *pqi_sas_transport_template; |
| 93 | |
| 94 | static atomic_t pqi_controller_count = ATOMIC_INIT(0); |
| 95 | |
| 96 | enum pqi_lockup_action { |
| 97 | NONE, |
| 98 | REBOOT, |
| 99 | PANIC |
| 100 | }; |
| 101 | |
| 102 | static enum pqi_lockup_action pqi_lockup_action = NONE; |
| 103 | |
| 104 | static struct { |
| 105 | enum pqi_lockup_action action; |
| 106 | char *name; |
| 107 | } pqi_lockup_actions[] = { |
| 108 | { |
| 109 | .action = NONE, |
| 110 | .name = "none", |
| 111 | }, |
| 112 | { |
| 113 | .action = REBOOT, |
| 114 | .name = "reboot", |
| 115 | }, |
| 116 | { |
| 117 | .action = PANIC, |
| 118 | .name = "panic", |
| 119 | }, |
| 120 | }; |
| 121 | |
| 122 | static unsigned int pqi_supported_event_types[] = { |
| 123 | PQI_EVENT_TYPE_HOTPLUG, |
| 124 | PQI_EVENT_TYPE_HARDWARE, |
| 125 | PQI_EVENT_TYPE_PHYSICAL_DEVICE, |
| 126 | PQI_EVENT_TYPE_LOGICAL_DEVICE, |
| 127 | PQI_EVENT_TYPE_OFA, |
| 128 | PQI_EVENT_TYPE_AIO_STATE_CHANGE, |
| 129 | PQI_EVENT_TYPE_AIO_CONFIG_CHANGE, |
| 130 | }; |
| 131 | |
| 132 | static int pqi_disable_device_id_wildcards; |
| 133 | module_param_named(disable_device_id_wildcards, |
| 134 | pqi_disable_device_id_wildcards, int, 0644); |
| 135 | MODULE_PARM_DESC(disable_device_id_wildcards, |
| 136 | "Disable device ID wildcards."); |
| 137 | |
| 138 | static int pqi_disable_heartbeat; |
| 139 | module_param_named(disable_heartbeat, |
| 140 | pqi_disable_heartbeat, int, 0644); |
| 141 | MODULE_PARM_DESC(disable_heartbeat, |
| 142 | "Disable heartbeat."); |
| 143 | |
| 144 | static int pqi_disable_ctrl_shutdown; |
| 145 | module_param_named(disable_ctrl_shutdown, |
| 146 | pqi_disable_ctrl_shutdown, int, 0644); |
| 147 | MODULE_PARM_DESC(disable_ctrl_shutdown, |
| 148 | "Disable controller shutdown when controller locked up."); |
| 149 | |
| 150 | static char *pqi_lockup_action_param; |
| 151 | module_param_named(lockup_action, |
| 152 | pqi_lockup_action_param, charp, 0644); |
| 153 | MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n" |
| 154 | "\t\tSupported: none, reboot, panic\n" |
| 155 | "\t\tDefault: none"); |
| 156 | |
| 157 | static int pqi_expose_ld_first; |
| 158 | module_param_named(expose_ld_first, |
| 159 | pqi_expose_ld_first, int, 0644); |
| 160 | MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives."); |
| 161 | |
| 162 | static int pqi_hide_vsep; |
| 163 | module_param_named(hide_vsep, |
| 164 | pqi_hide_vsep, int, 0644); |
| 165 | MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives."); |
| 166 | |
| 167 | static char *raid_levels[] = { |
| 168 | "RAID-0", |
| 169 | "RAID-4", |
| 170 | "RAID-1(1+0)", |
| 171 | "RAID-5", |
| 172 | "RAID-5+1", |
| 173 | "RAID-6", |
| 174 | "RAID-1(Triple)", |
| 175 | }; |
| 176 | |
| 177 | static char *pqi_raid_level_to_string(u8 raid_level) |
| 178 | { |
| 179 | if (raid_level < ARRAY_SIZE(raid_levels)) |
| 180 | return raid_levels[raid_level]; |
| 181 | |
| 182 | return "RAID UNKNOWN"; |
| 183 | } |
| 184 | |
| 185 | #define SA_RAID_0 0 |
| 186 | #define SA_RAID_4 1 |
| 187 | #define SA_RAID_1 2 /* also used for RAID 10 */ |
| 188 | #define SA_RAID_5 3 /* also used for RAID 50 */ |
| 189 | #define SA_RAID_51 4 |
| 190 | #define SA_RAID_6 5 /* also used for RAID 60 */ |
| 191 | #define SA_RAID_TRIPLE 6 /* also used for RAID 1+0 Triple */ |
| 192 | #define SA_RAID_MAX SA_RAID_TRIPLE |
| 193 | #define SA_RAID_UNKNOWN 0xff |
| 194 | |
| 195 | static inline void pqi_scsi_done(struct scsi_cmnd *scmd) |
| 196 | { |
| 197 | pqi_prep_for_scsi_done(scmd); |
| 198 | scmd->scsi_done(scmd); |
| 199 | } |
| 200 | |
| 201 | static inline void pqi_disable_write_same(struct scsi_device *sdev) |
| 202 | { |
| 203 | sdev->no_write_same = 1; |
| 204 | } |
| 205 | |
| 206 | static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2) |
| 207 | { |
| 208 | return memcmp(scsi3addr1, scsi3addr2, 8) == 0; |
| 209 | } |
| 210 | |
| 211 | static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device) |
| 212 | { |
| 213 | return !device->is_physical_device; |
| 214 | } |
| 215 | |
| 216 | static inline bool pqi_is_external_raid_addr(u8 *scsi3addr) |
| 217 | { |
| 218 | return scsi3addr[2] != 0; |
| 219 | } |
| 220 | |
| 221 | static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) |
| 222 | { |
| 223 | return !ctrl_info->controller_online; |
| 224 | } |
| 225 | |
| 226 | static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info) |
| 227 | { |
| 228 | if (ctrl_info->controller_online) |
| 229 | if (!sis_is_firmware_running(ctrl_info)) |
| 230 | pqi_take_ctrl_offline(ctrl_info); |
| 231 | } |
| 232 | |
| 233 | static inline bool pqi_is_hba_lunid(u8 *scsi3addr) |
| 234 | { |
| 235 | return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID); |
| 236 | } |
| 237 | |
| 238 | static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info) |
| 239 | { |
| 240 | return sis_read_driver_scratch(ctrl_info); |
| 241 | } |
| 242 | |
| 243 | static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, |
| 244 | enum pqi_ctrl_mode mode) |
| 245 | { |
| 246 | sis_write_driver_scratch(ctrl_info, mode); |
| 247 | } |
| 248 | |
| 249 | static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info) |
| 250 | { |
| 251 | ctrl_info->scan_blocked = true; |
| 252 | mutex_lock(&ctrl_info->scan_mutex); |
| 253 | } |
| 254 | |
| 255 | static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info) |
| 256 | { |
| 257 | ctrl_info->scan_blocked = false; |
| 258 | mutex_unlock(&ctrl_info->scan_mutex); |
| 259 | } |
| 260 | |
| 261 | static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info) |
| 262 | { |
| 263 | return ctrl_info->scan_blocked; |
| 264 | } |
| 265 | |
| 266 | static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info) |
| 267 | { |
| 268 | mutex_lock(&ctrl_info->lun_reset_mutex); |
| 269 | } |
| 270 | |
| 271 | static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info) |
| 272 | { |
| 273 | mutex_unlock(&ctrl_info->lun_reset_mutex); |
| 274 | } |
| 275 | |
| 276 | static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info) |
| 277 | { |
| 278 | struct Scsi_Host *shost; |
| 279 | unsigned int num_loops; |
| 280 | int msecs_sleep; |
| 281 | |
| 282 | shost = ctrl_info->scsi_host; |
| 283 | |
| 284 | scsi_block_requests(shost); |
| 285 | |
| 286 | num_loops = 0; |
| 287 | msecs_sleep = 20; |
| 288 | while (scsi_host_busy(shost)) { |
| 289 | num_loops++; |
| 290 | if (num_loops == 10) |
| 291 | msecs_sleep = 500; |
| 292 | msleep(msecs_sleep); |
| 293 | } |
| 294 | } |
| 295 | |
| 296 | static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info) |
| 297 | { |
| 298 | scsi_unblock_requests(ctrl_info->scsi_host); |
| 299 | } |
| 300 | |
| 301 | static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info) |
| 302 | { |
| 303 | atomic_inc(&ctrl_info->num_busy_threads); |
| 304 | } |
| 305 | |
| 306 | static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info) |
| 307 | { |
| 308 | atomic_dec(&ctrl_info->num_busy_threads); |
| 309 | } |
| 310 | |
| 311 | static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) |
| 312 | { |
| 313 | return ctrl_info->block_requests; |
| 314 | } |
| 315 | |
| 316 | static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info) |
| 317 | { |
| 318 | ctrl_info->block_requests = true; |
| 319 | } |
| 320 | |
| 321 | static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) |
| 322 | { |
| 323 | ctrl_info->block_requests = false; |
| 324 | wake_up_all(&ctrl_info->block_requests_wait); |
| 325 | } |
| 326 | |
| 327 | static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) |
| 328 | { |
| 329 | if (!pqi_ctrl_blocked(ctrl_info)) |
| 330 | return; |
| 331 | |
| 332 | atomic_inc(&ctrl_info->num_blocked_threads); |
| 333 | wait_event(ctrl_info->block_requests_wait, |
| 334 | !pqi_ctrl_blocked(ctrl_info)); |
| 335 | atomic_dec(&ctrl_info->num_blocked_threads); |
| 336 | } |
| 337 | |
| 338 | #define PQI_QUIESCE_WARNING_TIMEOUT_SECS 10 |
| 339 | |
| 340 | static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info) |
| 341 | { |
| 342 | unsigned long start_jiffies; |
| 343 | unsigned long warning_timeout; |
| 344 | bool displayed_warning; |
| 345 | |
| 346 | displayed_warning = false; |
| 347 | start_jiffies = jiffies; |
| 348 | warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies; |
| 349 | |
| 350 | while (atomic_read(&ctrl_info->num_busy_threads) > |
| 351 | atomic_read(&ctrl_info->num_blocked_threads)) { |
| 352 | if (time_after(jiffies, warning_timeout)) { |
| 353 | dev_warn(&ctrl_info->pci_dev->dev, |
| 354 | "waiting %u seconds for driver activity to quiesce\n", |
| 355 | jiffies_to_msecs(jiffies - start_jiffies) / 1000); |
| 356 | displayed_warning = true; |
| 357 | warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies; |
| 358 | } |
| 359 | usleep_range(1000, 2000); |
| 360 | } |
| 361 | |
| 362 | if (displayed_warning) |
| 363 | dev_warn(&ctrl_info->pci_dev->dev, |
| 364 | "driver activity quiesced after waiting for %u seconds\n", |
| 365 | jiffies_to_msecs(jiffies - start_jiffies) / 1000); |
| 366 | } |
| 367 | |
| 368 | static inline bool pqi_device_offline(struct pqi_scsi_dev *device) |
| 369 | { |
| 370 | return device->device_offline; |
| 371 | } |
| 372 | |
| 373 | static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info) |
| 374 | { |
| 375 | mutex_lock(&ctrl_info->ofa_mutex); |
| 376 | } |
| 377 | |
| 378 | static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info) |
| 379 | { |
| 380 | mutex_unlock(&ctrl_info->ofa_mutex); |
| 381 | } |
| 382 | |
| 383 | static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) |
| 384 | { |
| 385 | mutex_lock(&ctrl_info->ofa_mutex); |
| 386 | mutex_unlock(&ctrl_info->ofa_mutex); |
| 387 | } |
| 388 | |
| 389 | static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info) |
| 390 | { |
| 391 | return mutex_is_locked(&ctrl_info->ofa_mutex); |
| 392 | } |
| 393 | |
| 394 | static inline void pqi_device_remove_start(struct pqi_scsi_dev *device) |
| 395 | { |
| 396 | device->in_remove = true; |
| 397 | } |
| 398 | |
| 399 | static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device) |
| 400 | { |
| 401 | return device->in_remove; |
| 402 | } |
| 403 | |
| 404 | static inline int pqi_event_type_to_event_index(unsigned int event_type) |
| 405 | { |
| 406 | int index; |
| 407 | |
| 408 | for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) |
| 409 | if (event_type == pqi_supported_event_types[index]) |
| 410 | return index; |
| 411 | |
| 412 | return -1; |
| 413 | } |
| 414 | |
| 415 | static inline bool pqi_is_supported_event(unsigned int event_type) |
| 416 | { |
| 417 | return pqi_event_type_to_event_index(event_type) != -1; |
| 418 | } |
| 419 | |
| 420 | static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info, |
| 421 | unsigned long delay) |
| 422 | { |
| 423 | if (pqi_ctrl_offline(ctrl_info)) |
| 424 | return; |
| 425 | |
| 426 | schedule_delayed_work(&ctrl_info->rescan_work, delay); |
| 427 | } |
| 428 | |
| 429 | static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) |
| 430 | { |
| 431 | pqi_schedule_rescan_worker_with_delay(ctrl_info, 0); |
| 432 | } |
| 433 | |
| 434 | #define PQI_RESCAN_WORK_DELAY (10 * PQI_HZ) |
| 435 | |
| 436 | static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info) |
| 437 | { |
| 438 | pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY); |
| 439 | } |
| 440 | |
| 441 | static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) |
| 442 | { |
| 443 | cancel_delayed_work_sync(&ctrl_info->rescan_work); |
| 444 | } |
| 445 | |
| 446 | static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info) |
| 447 | { |
| 448 | cancel_work_sync(&ctrl_info->event_work); |
| 449 | } |
| 450 | |
| 451 | static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info) |
| 452 | { |
| 453 | if (!ctrl_info->heartbeat_counter) |
| 454 | return 0; |
| 455 | |
| 456 | return readl(ctrl_info->heartbeat_counter); |
| 457 | } |
| 458 | |
| 459 | static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info) |
| 460 | { |
| 461 | return readb(ctrl_info->soft_reset_status); |
| 462 | } |
| 463 | |
| 464 | static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info) |
| 465 | { |
| 466 | u8 status; |
| 467 | |
| 468 | status = pqi_read_soft_reset_status(ctrl_info); |
| 469 | status &= ~PQI_SOFT_RESET_ABORT; |
| 470 | writeb(status, ctrl_info->soft_reset_status); |
| 471 | } |
| 472 | |
| 473 | static int pqi_map_single(struct pci_dev *pci_dev, |
| 474 | struct pqi_sg_descriptor *sg_descriptor, void *buffer, |
| 475 | size_t buffer_length, enum dma_data_direction data_direction) |
| 476 | { |
| 477 | dma_addr_t bus_address; |
| 478 | |
| 479 | if (!buffer || buffer_length == 0 || data_direction == DMA_NONE) |
| 480 | return 0; |
| 481 | |
| 482 | bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length, |
| 483 | data_direction); |
| 484 | if (dma_mapping_error(&pci_dev->dev, bus_address)) |
| 485 | return -ENOMEM; |
| 486 | |
| 487 | put_unaligned_le64((u64)bus_address, &sg_descriptor->address); |
| 488 | put_unaligned_le32(buffer_length, &sg_descriptor->length); |
| 489 | put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); |
| 490 | |
| 491 | return 0; |
| 492 | } |
| 493 | |
| 494 | static void pqi_pci_unmap(struct pci_dev *pci_dev, |
| 495 | struct pqi_sg_descriptor *descriptors, int num_descriptors, |
| 496 | enum dma_data_direction data_direction) |
| 497 | { |
| 498 | int i; |
| 499 | |
| 500 | if (data_direction == DMA_NONE) |
| 501 | return; |
| 502 | |
| 503 | for (i = 0; i < num_descriptors; i++) |
| 504 | dma_unmap_single(&pci_dev->dev, |
| 505 | (dma_addr_t)get_unaligned_le64(&descriptors[i].address), |
| 506 | get_unaligned_le32(&descriptors[i].length), |
| 507 | data_direction); |
| 508 | } |
| 509 | |
| 510 | static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, |
| 511 | struct pqi_raid_path_request *request, u8 cmd, |
| 512 | u8 *scsi3addr, void *buffer, size_t buffer_length, |
| 513 | u16 vpd_page, enum dma_data_direction *dir) |
| 514 | { |
| 515 | u8 *cdb; |
| 516 | size_t cdb_length = buffer_length; |
| 517 | |
| 518 | memset(request, 0, sizeof(*request)); |
| 519 | |
| 520 | request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; |
| 521 | put_unaligned_le16(offsetof(struct pqi_raid_path_request, |
| 522 | sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH, |
| 523 | &request->header.iu_length); |
| 524 | put_unaligned_le32(buffer_length, &request->buffer_length); |
| 525 | memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number)); |
| 526 | request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; |
| 527 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; |
| 528 | |
| 529 | cdb = request->cdb; |
| 530 | |
| 531 | switch (cmd) { |
| 532 | case INQUIRY: |
| 533 | request->data_direction = SOP_READ_FLAG; |
| 534 | cdb[0] = INQUIRY; |
| 535 | if (vpd_page & VPD_PAGE) { |
| 536 | cdb[1] = 0x1; |
| 537 | cdb[2] = (u8)vpd_page; |
| 538 | } |
| 539 | cdb[4] = (u8)cdb_length; |
| 540 | break; |
| 541 | case CISS_REPORT_LOG: |
| 542 | case CISS_REPORT_PHYS: |
| 543 | request->data_direction = SOP_READ_FLAG; |
| 544 | cdb[0] = cmd; |
| 545 | if (cmd == CISS_REPORT_PHYS) |
| 546 | cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER; |
| 547 | else |
| 548 | cdb[1] = ctrl_info->ciss_report_log_flags; |
| 549 | put_unaligned_be32(cdb_length, &cdb[6]); |
| 550 | break; |
| 551 | case CISS_GET_RAID_MAP: |
| 552 | request->data_direction = SOP_READ_FLAG; |
| 553 | cdb[0] = CISS_READ; |
| 554 | cdb[1] = CISS_GET_RAID_MAP; |
| 555 | put_unaligned_be32(cdb_length, &cdb[6]); |
| 556 | break; |
| 557 | case SA_FLUSH_CACHE: |
| 558 | request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST; |
| 559 | request->data_direction = SOP_WRITE_FLAG; |
| 560 | cdb[0] = BMIC_WRITE; |
| 561 | cdb[6] = BMIC_FLUSH_CACHE; |
| 562 | put_unaligned_be16(cdb_length, &cdb[7]); |
| 563 | break; |
| 564 | case BMIC_SENSE_DIAG_OPTIONS: |
| 565 | cdb_length = 0; |
| 566 | fallthrough; |
| 567 | case BMIC_IDENTIFY_CONTROLLER: |
| 568 | case BMIC_IDENTIFY_PHYSICAL_DEVICE: |
| 569 | case BMIC_SENSE_SUBSYSTEM_INFORMATION: |
| 570 | case BMIC_SENSE_FEATURE: |
| 571 | request->data_direction = SOP_READ_FLAG; |
| 572 | cdb[0] = BMIC_READ; |
| 573 | cdb[6] = cmd; |
| 574 | put_unaligned_be16(cdb_length, &cdb[7]); |
| 575 | break; |
| 576 | case BMIC_SET_DIAG_OPTIONS: |
| 577 | cdb_length = 0; |
| 578 | fallthrough; |
| 579 | case BMIC_WRITE_HOST_WELLNESS: |
| 580 | request->data_direction = SOP_WRITE_FLAG; |
| 581 | cdb[0] = BMIC_WRITE; |
| 582 | cdb[6] = cmd; |
| 583 | put_unaligned_be16(cdb_length, &cdb[7]); |
| 584 | break; |
| 585 | case BMIC_CSMI_PASSTHRU: |
| 586 | request->data_direction = SOP_BIDIRECTIONAL; |
| 587 | cdb[0] = BMIC_WRITE; |
| 588 | cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU; |
| 589 | cdb[6] = cmd; |
| 590 | put_unaligned_be16(cdb_length, &cdb[7]); |
| 591 | break; |
| 592 | default: |
| 593 | dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd); |
| 594 | break; |
| 595 | } |
| 596 | |
| 597 | switch (request->data_direction) { |
| 598 | case SOP_READ_FLAG: |
| 599 | *dir = DMA_FROM_DEVICE; |
| 600 | break; |
| 601 | case SOP_WRITE_FLAG: |
| 602 | *dir = DMA_TO_DEVICE; |
| 603 | break; |
| 604 | case SOP_NO_DIRECTION_FLAG: |
| 605 | *dir = DMA_NONE; |
| 606 | break; |
| 607 | default: |
| 608 | *dir = DMA_BIDIRECTIONAL; |
| 609 | break; |
| 610 | } |
| 611 | |
| 612 | return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0], |
| 613 | buffer, buffer_length, *dir); |
| 614 | } |
| 615 | |
| 616 | static inline void pqi_reinit_io_request(struct pqi_io_request *io_request) |
| 617 | { |
| 618 | io_request->scmd = NULL; |
| 619 | io_request->status = 0; |
| 620 | io_request->error_info = NULL; |
| 621 | io_request->raid_bypass = false; |
| 622 | } |
| 623 | |
| 624 | static struct pqi_io_request *pqi_alloc_io_request( |
| 625 | struct pqi_ctrl_info *ctrl_info) |
| 626 | { |
| 627 | struct pqi_io_request *io_request; |
| 628 | u16 i = ctrl_info->next_io_request_slot; /* benignly racy */ |
| 629 | |
| 630 | while (1) { |
| 631 | io_request = &ctrl_info->io_request_pool[i]; |
| 632 | if (atomic_inc_return(&io_request->refcount) == 1) |
| 633 | break; |
| 634 | atomic_dec(&io_request->refcount); |
| 635 | i = (i + 1) % ctrl_info->max_io_slots; |
| 636 | } |
| 637 | |
| 638 | /* benignly racy */ |
| 639 | ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots; |
| 640 | |
| 641 | pqi_reinit_io_request(io_request); |
| 642 | |
| 643 | return io_request; |
| 644 | } |
| 645 | |
| 646 | static void pqi_free_io_request(struct pqi_io_request *io_request) |
| 647 | { |
| 648 | atomic_dec(&io_request->refcount); |
| 649 | } |
| 650 | |
| 651 | static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd, |
| 652 | u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page, |
| 653 | struct pqi_raid_error_info *error_info) |
| 654 | { |
| 655 | int rc; |
| 656 | struct pqi_raid_path_request request; |
| 657 | enum dma_data_direction dir; |
| 658 | |
| 659 | rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr, |
| 660 | buffer, buffer_length, vpd_page, &dir); |
| 661 | if (rc) |
| 662 | return rc; |
| 663 | |
| 664 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info); |
| 665 | |
| 666 | pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); |
| 667 | |
| 668 | return rc; |
| 669 | } |
| 670 | |
| 671 | /* helper functions for pqi_send_scsi_raid_request */ |
| 672 | |
| 673 | static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info, |
| 674 | u8 cmd, void *buffer, size_t buffer_length) |
| 675 | { |
| 676 | return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, |
| 677 | buffer, buffer_length, 0, NULL); |
| 678 | } |
| 679 | |
| 680 | static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info, |
| 681 | u8 cmd, void *buffer, size_t buffer_length, |
| 682 | struct pqi_raid_error_info *error_info) |
| 683 | { |
| 684 | return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, |
| 685 | buffer, buffer_length, 0, error_info); |
| 686 | } |
| 687 | |
| 688 | static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, |
| 689 | struct bmic_identify_controller *buffer) |
| 690 | { |
| 691 | return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER, |
| 692 | buffer, sizeof(*buffer)); |
| 693 | } |
| 694 | |
| 695 | static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info, |
| 696 | struct bmic_sense_subsystem_info *sense_info) |
| 697 | { |
| 698 | return pqi_send_ctrl_raid_request(ctrl_info, |
| 699 | BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info, |
| 700 | sizeof(*sense_info)); |
| 701 | } |
| 702 | |
| 703 | static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, |
| 704 | u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length) |
| 705 | { |
| 706 | return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr, |
| 707 | buffer, buffer_length, vpd_page, NULL); |
| 708 | } |
| 709 | |
| 710 | static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, |
| 711 | struct pqi_scsi_dev *device, |
| 712 | struct bmic_identify_physical_device *buffer, size_t buffer_length) |
| 713 | { |
| 714 | int rc; |
| 715 | enum dma_data_direction dir; |
| 716 | u16 bmic_device_index; |
| 717 | struct pqi_raid_path_request request; |
| 718 | |
| 719 | rc = pqi_build_raid_path_request(ctrl_info, &request, |
| 720 | BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer, |
| 721 | buffer_length, 0, &dir); |
| 722 | if (rc) |
| 723 | return rc; |
| 724 | |
| 725 | bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr); |
| 726 | request.cdb[2] = (u8)bmic_device_index; |
| 727 | request.cdb[9] = (u8)(bmic_device_index >> 8); |
| 728 | |
| 729 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); |
| 730 | |
| 731 | pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); |
| 732 | |
| 733 | return rc; |
| 734 | } |
| 735 | |
| 736 | static inline u32 pqi_aio_limit_to_bytes(__le16 *limit) |
| 737 | { |
| 738 | u32 bytes; |
| 739 | |
| 740 | bytes = get_unaligned_le16(limit); |
| 741 | if (bytes == 0) |
| 742 | bytes = ~0; |
| 743 | else |
| 744 | bytes *= 1024; |
| 745 | |
| 746 | return bytes; |
| 747 | } |
| 748 | |
| 749 | #pragma pack(1) |
| 750 | |
| 751 | struct bmic_sense_feature_buffer { |
| 752 | struct bmic_sense_feature_buffer_header header; |
| 753 | struct bmic_sense_feature_io_page_aio_subpage aio_subpage; |
| 754 | }; |
| 755 | |
| 756 | #pragma pack() |
| 757 | |
| 758 | #define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \ |
| 759 | offsetofend(struct bmic_sense_feature_buffer, \ |
| 760 | aio_subpage.max_write_raid_1_10_3drive) |
| 761 | |
| 762 | #define MINIMUM_AIO_SUBPAGE_LENGTH \ |
| 763 | (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \ |
| 764 | max_write_raid_1_10_3drive) - \ |
| 765 | sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header)) |
| 766 | |
| 767 | static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info) |
| 768 | { |
| 769 | int rc; |
| 770 | enum dma_data_direction dir; |
| 771 | struct pqi_raid_path_request request; |
| 772 | struct bmic_sense_feature_buffer *buffer; |
| 773 | |
| 774 | buffer = kmalloc(sizeof(*buffer), GFP_KERNEL); |
| 775 | if (!buffer) |
| 776 | return -ENOMEM; |
| 777 | |
| 778 | rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID, |
| 779 | buffer, sizeof(*buffer), 0, &dir); |
| 780 | if (rc) |
| 781 | goto error; |
| 782 | |
| 783 | request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE; |
| 784 | request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE; |
| 785 | |
| 786 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); |
| 787 | |
| 788 | pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); |
| 789 | |
| 790 | if (rc) |
| 791 | goto error; |
| 792 | |
| 793 | if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE || |
| 794 | buffer->header.subpage_code != |
| 795 | BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE || |
| 796 | get_unaligned_le16(&buffer->header.buffer_length) < |
| 797 | MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH || |
| 798 | buffer->aio_subpage.header.page_code != |
| 799 | BMIC_SENSE_FEATURE_IO_PAGE || |
| 800 | buffer->aio_subpage.header.subpage_code != |
| 801 | BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE || |
| 802 | get_unaligned_le16(&buffer->aio_subpage.header.page_length) < |
| 803 | MINIMUM_AIO_SUBPAGE_LENGTH) { |
| 804 | goto error; |
| 805 | } |
| 806 | |
| 807 | ctrl_info->max_transfer_encrypted_sas_sata = |
| 808 | pqi_aio_limit_to_bytes( |
| 809 | &buffer->aio_subpage.max_transfer_encrypted_sas_sata); |
| 810 | |
| 811 | ctrl_info->max_transfer_encrypted_nvme = |
| 812 | pqi_aio_limit_to_bytes( |
| 813 | &buffer->aio_subpage.max_transfer_encrypted_nvme); |
| 814 | |
| 815 | ctrl_info->max_write_raid_5_6 = |
| 816 | pqi_aio_limit_to_bytes( |
| 817 | &buffer->aio_subpage.max_write_raid_5_6); |
| 818 | |
| 819 | ctrl_info->max_write_raid_1_10_2drive = |
| 820 | pqi_aio_limit_to_bytes( |
| 821 | &buffer->aio_subpage.max_write_raid_1_10_2drive); |
| 822 | |
| 823 | ctrl_info->max_write_raid_1_10_3drive = |
| 824 | pqi_aio_limit_to_bytes( |
| 825 | &buffer->aio_subpage.max_write_raid_1_10_3drive); |
| 826 | |
| 827 | error: |
| 828 | kfree(buffer); |
| 829 | |
| 830 | return rc; |
| 831 | } |
| 832 | |
| 833 | static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, |
| 834 | enum bmic_flush_cache_shutdown_event shutdown_event) |
| 835 | { |
| 836 | int rc; |
| 837 | struct bmic_flush_cache *flush_cache; |
| 838 | |
| 839 | flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL); |
| 840 | if (!flush_cache) |
| 841 | return -ENOMEM; |
| 842 | |
| 843 | flush_cache->shutdown_event = shutdown_event; |
| 844 | |
| 845 | rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache, |
| 846 | sizeof(*flush_cache)); |
| 847 | |
| 848 | kfree(flush_cache); |
| 849 | |
| 850 | return rc; |
| 851 | } |
| 852 | |
| 853 | int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info, |
| 854 | struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length, |
| 855 | struct pqi_raid_error_info *error_info) |
| 856 | { |
| 857 | return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU, |
| 858 | buffer, buffer_length, error_info); |
| 859 | } |
| 860 | |
| 861 | #define PQI_FETCH_PTRAID_DATA (1 << 31) |
| 862 | |
| 863 | static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info) |
| 864 | { |
| 865 | int rc; |
| 866 | struct bmic_diag_options *diag; |
| 867 | |
| 868 | diag = kzalloc(sizeof(*diag), GFP_KERNEL); |
| 869 | if (!diag) |
| 870 | return -ENOMEM; |
| 871 | |
| 872 | rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS, |
| 873 | diag, sizeof(*diag)); |
| 874 | if (rc) |
| 875 | goto out; |
| 876 | |
| 877 | diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA); |
| 878 | |
| 879 | rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag, |
| 880 | sizeof(*diag)); |
| 881 | |
| 882 | out: |
| 883 | kfree(diag); |
| 884 | |
| 885 | return rc; |
| 886 | } |
| 887 | |
| 888 | static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, |
| 889 | void *buffer, size_t buffer_length) |
| 890 | { |
| 891 | return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS, |
| 892 | buffer, buffer_length); |
| 893 | } |
| 894 | |
| 895 | #pragma pack(1) |
| 896 | |
| 897 | struct bmic_host_wellness_driver_version { |
| 898 | u8 start_tag[4]; |
| 899 | u8 driver_version_tag[2]; |
| 900 | __le16 driver_version_length; |
| 901 | char driver_version[32]; |
| 902 | u8 dont_write_tag[2]; |
| 903 | u8 end_tag[2]; |
| 904 | }; |
| 905 | |
| 906 | #pragma pack() |
| 907 | |
| 908 | static int pqi_write_driver_version_to_host_wellness( |
| 909 | struct pqi_ctrl_info *ctrl_info) |
| 910 | { |
| 911 | int rc; |
| 912 | struct bmic_host_wellness_driver_version *buffer; |
| 913 | size_t buffer_length; |
| 914 | |
| 915 | buffer_length = sizeof(*buffer); |
| 916 | |
| 917 | buffer = kmalloc(buffer_length, GFP_KERNEL); |
| 918 | if (!buffer) |
| 919 | return -ENOMEM; |
| 920 | |
| 921 | buffer->start_tag[0] = '<'; |
| 922 | buffer->start_tag[1] = 'H'; |
| 923 | buffer->start_tag[2] = 'W'; |
| 924 | buffer->start_tag[3] = '>'; |
| 925 | buffer->driver_version_tag[0] = 'D'; |
| 926 | buffer->driver_version_tag[1] = 'V'; |
| 927 | put_unaligned_le16(sizeof(buffer->driver_version), |
| 928 | &buffer->driver_version_length); |
| 929 | strncpy(buffer->driver_version, "Linux " DRIVER_VERSION, |
| 930 | sizeof(buffer->driver_version) - 1); |
| 931 | buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0'; |
| 932 | buffer->dont_write_tag[0] = 'D'; |
| 933 | buffer->dont_write_tag[1] = 'W'; |
| 934 | buffer->end_tag[0] = 'Z'; |
| 935 | buffer->end_tag[1] = 'Z'; |
| 936 | |
| 937 | rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); |
| 938 | |
| 939 | kfree(buffer); |
| 940 | |
| 941 | return rc; |
| 942 | } |
| 943 | |
| 944 | #pragma pack(1) |
| 945 | |
| 946 | struct bmic_host_wellness_time { |
| 947 | u8 start_tag[4]; |
| 948 | u8 time_tag[2]; |
| 949 | __le16 time_length; |
| 950 | u8 time[8]; |
| 951 | u8 dont_write_tag[2]; |
| 952 | u8 end_tag[2]; |
| 953 | }; |
| 954 | |
| 955 | #pragma pack() |
| 956 | |
| 957 | static int pqi_write_current_time_to_host_wellness( |
| 958 | struct pqi_ctrl_info *ctrl_info) |
| 959 | { |
| 960 | int rc; |
| 961 | struct bmic_host_wellness_time *buffer; |
| 962 | size_t buffer_length; |
| 963 | time64_t local_time; |
| 964 | unsigned int year; |
| 965 | struct tm tm; |
| 966 | |
| 967 | buffer_length = sizeof(*buffer); |
| 968 | |
| 969 | buffer = kmalloc(buffer_length, GFP_KERNEL); |
| 970 | if (!buffer) |
| 971 | return -ENOMEM; |
| 972 | |
| 973 | buffer->start_tag[0] = '<'; |
| 974 | buffer->start_tag[1] = 'H'; |
| 975 | buffer->start_tag[2] = 'W'; |
| 976 | buffer->start_tag[3] = '>'; |
| 977 | buffer->time_tag[0] = 'T'; |
| 978 | buffer->time_tag[1] = 'D'; |
| 979 | put_unaligned_le16(sizeof(buffer->time), |
| 980 | &buffer->time_length); |
| 981 | |
| 982 | local_time = ktime_get_real_seconds(); |
| 983 | time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm); |
| 984 | year = tm.tm_year + 1900; |
| 985 | |
| 986 | buffer->time[0] = bin2bcd(tm.tm_hour); |
| 987 | buffer->time[1] = bin2bcd(tm.tm_min); |
| 988 | buffer->time[2] = bin2bcd(tm.tm_sec); |
| 989 | buffer->time[3] = 0; |
| 990 | buffer->time[4] = bin2bcd(tm.tm_mon + 1); |
| 991 | buffer->time[5] = bin2bcd(tm.tm_mday); |
| 992 | buffer->time[6] = bin2bcd(year / 100); |
| 993 | buffer->time[7] = bin2bcd(year % 100); |
| 994 | |
| 995 | buffer->dont_write_tag[0] = 'D'; |
| 996 | buffer->dont_write_tag[1] = 'W'; |
| 997 | buffer->end_tag[0] = 'Z'; |
| 998 | buffer->end_tag[1] = 'Z'; |
| 999 | |
| 1000 | rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); |
| 1001 | |
| 1002 | kfree(buffer); |
| 1003 | |
| 1004 | return rc; |
| 1005 | } |
| 1006 | |
| 1007 | #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * PQI_HZ) |
| 1008 | |
| 1009 | static void pqi_update_time_worker(struct work_struct *work) |
| 1010 | { |
| 1011 | int rc; |
| 1012 | struct pqi_ctrl_info *ctrl_info; |
| 1013 | |
| 1014 | ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, |
| 1015 | update_time_work); |
| 1016 | |
| 1017 | rc = pqi_write_current_time_to_host_wellness(ctrl_info); |
| 1018 | if (rc) |
| 1019 | dev_warn(&ctrl_info->pci_dev->dev, |
| 1020 | "error updating time on controller\n"); |
| 1021 | |
| 1022 | schedule_delayed_work(&ctrl_info->update_time_work, |
| 1023 | PQI_UPDATE_TIME_WORK_INTERVAL); |
| 1024 | } |
| 1025 | |
| 1026 | static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info) |
| 1027 | { |
| 1028 | schedule_delayed_work(&ctrl_info->update_time_work, 0); |
| 1029 | } |
| 1030 | |
| 1031 | static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info) |
| 1032 | { |
| 1033 | cancel_delayed_work_sync(&ctrl_info->update_time_work); |
| 1034 | } |
| 1035 | |
| 1036 | static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer, |
| 1037 | size_t buffer_length) |
| 1038 | { |
| 1039 | return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length); |
| 1040 | } |
| 1041 | |
| 1042 | static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer) |
| 1043 | { |
| 1044 | int rc; |
| 1045 | size_t lun_list_length; |
| 1046 | size_t lun_data_length; |
| 1047 | size_t new_lun_list_length; |
| 1048 | void *lun_data = NULL; |
| 1049 | struct report_lun_header *report_lun_header; |
| 1050 | |
| 1051 | report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL); |
| 1052 | if (!report_lun_header) { |
| 1053 | rc = -ENOMEM; |
| 1054 | goto out; |
| 1055 | } |
| 1056 | |
| 1057 | rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header)); |
| 1058 | if (rc) |
| 1059 | goto out; |
| 1060 | |
| 1061 | lun_list_length = get_unaligned_be32(&report_lun_header->list_length); |
| 1062 | |
| 1063 | again: |
| 1064 | lun_data_length = sizeof(struct report_lun_header) + lun_list_length; |
| 1065 | |
| 1066 | lun_data = kmalloc(lun_data_length, GFP_KERNEL); |
| 1067 | if (!lun_data) { |
| 1068 | rc = -ENOMEM; |
| 1069 | goto out; |
| 1070 | } |
| 1071 | |
| 1072 | if (lun_list_length == 0) { |
| 1073 | memcpy(lun_data, report_lun_header, sizeof(*report_lun_header)); |
| 1074 | goto out; |
| 1075 | } |
| 1076 | |
| 1077 | rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length); |
| 1078 | if (rc) |
| 1079 | goto out; |
| 1080 | |
| 1081 | new_lun_list_length = |
| 1082 | get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length); |
| 1083 | |
| 1084 | if (new_lun_list_length > lun_list_length) { |
| 1085 | lun_list_length = new_lun_list_length; |
| 1086 | kfree(lun_data); |
| 1087 | goto again; |
| 1088 | } |
| 1089 | |
| 1090 | out: |
| 1091 | kfree(report_lun_header); |
| 1092 | |
| 1093 | if (rc) { |
| 1094 | kfree(lun_data); |
| 1095 | lun_data = NULL; |
| 1096 | } |
| 1097 | |
| 1098 | *buffer = lun_data; |
| 1099 | |
| 1100 | return rc; |
| 1101 | } |
| 1102 | |
| 1103 | static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) |
| 1104 | { |
| 1105 | return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, buffer); |
| 1106 | } |
| 1107 | |
| 1108 | static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) |
| 1109 | { |
| 1110 | return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); |
| 1111 | } |
| 1112 | |
| 1113 | static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, |
| 1114 | struct report_phys_lun_extended **physdev_list, |
| 1115 | struct report_log_lun_extended **logdev_list) |
| 1116 | { |
| 1117 | int rc; |
| 1118 | size_t logdev_list_length; |
| 1119 | size_t logdev_data_length; |
| 1120 | struct report_log_lun_extended *internal_logdev_list; |
| 1121 | struct report_log_lun_extended *logdev_data; |
| 1122 | struct report_lun_header report_lun_header; |
| 1123 | |
| 1124 | rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); |
| 1125 | if (rc) |
| 1126 | dev_err(&ctrl_info->pci_dev->dev, |
| 1127 | "report physical LUNs failed\n"); |
| 1128 | |
| 1129 | rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list); |
| 1130 | if (rc) |
| 1131 | dev_err(&ctrl_info->pci_dev->dev, |
| 1132 | "report logical LUNs failed\n"); |
| 1133 | |
| 1134 | /* |
| 1135 | * Tack the controller itself onto the end of the logical device list. |
| 1136 | */ |
| 1137 | |
| 1138 | logdev_data = *logdev_list; |
| 1139 | |
| 1140 | if (logdev_data) { |
| 1141 | logdev_list_length = |
| 1142 | get_unaligned_be32(&logdev_data->header.list_length); |
| 1143 | } else { |
| 1144 | memset(&report_lun_header, 0, sizeof(report_lun_header)); |
| 1145 | logdev_data = |
| 1146 | (struct report_log_lun_extended *)&report_lun_header; |
| 1147 | logdev_list_length = 0; |
| 1148 | } |
| 1149 | |
| 1150 | logdev_data_length = sizeof(struct report_lun_header) + |
| 1151 | logdev_list_length; |
| 1152 | |
| 1153 | internal_logdev_list = kmalloc(logdev_data_length + |
| 1154 | sizeof(struct report_log_lun_extended), GFP_KERNEL); |
| 1155 | if (!internal_logdev_list) { |
| 1156 | kfree(*logdev_list); |
| 1157 | *logdev_list = NULL; |
| 1158 | return -ENOMEM; |
| 1159 | } |
| 1160 | |
| 1161 | memcpy(internal_logdev_list, logdev_data, logdev_data_length); |
| 1162 | memset((u8 *)internal_logdev_list + logdev_data_length, 0, |
| 1163 | sizeof(struct report_log_lun_extended_entry)); |
| 1164 | put_unaligned_be32(logdev_list_length + |
| 1165 | sizeof(struct report_log_lun_extended_entry), |
| 1166 | &internal_logdev_list->header.list_length); |
| 1167 | |
| 1168 | kfree(*logdev_list); |
| 1169 | *logdev_list = internal_logdev_list; |
| 1170 | |
| 1171 | return 0; |
| 1172 | } |
| 1173 | |
| 1174 | static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device, |
| 1175 | int bus, int target, int lun) |
| 1176 | { |
| 1177 | device->bus = bus; |
| 1178 | device->target = target; |
| 1179 | device->lun = lun; |
| 1180 | } |
| 1181 | |
| 1182 | static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device) |
| 1183 | { |
| 1184 | u8 *scsi3addr; |
| 1185 | u32 lunid; |
| 1186 | int bus; |
| 1187 | int target; |
| 1188 | int lun; |
| 1189 | |
| 1190 | scsi3addr = device->scsi3addr; |
| 1191 | lunid = get_unaligned_le32(scsi3addr); |
| 1192 | |
| 1193 | if (pqi_is_hba_lunid(scsi3addr)) { |
| 1194 | /* The specified device is the controller. */ |
| 1195 | pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff); |
| 1196 | device->target_lun_valid = true; |
| 1197 | return; |
| 1198 | } |
| 1199 | |
| 1200 | if (pqi_is_logical_device(device)) { |
| 1201 | if (device->is_external_raid_device) { |
| 1202 | bus = PQI_EXTERNAL_RAID_VOLUME_BUS; |
| 1203 | target = (lunid >> 16) & 0x3fff; |
| 1204 | lun = lunid & 0xff; |
| 1205 | } else { |
| 1206 | bus = PQI_RAID_VOLUME_BUS; |
| 1207 | target = 0; |
| 1208 | lun = lunid & 0x3fff; |
| 1209 | } |
| 1210 | pqi_set_bus_target_lun(device, bus, target, lun); |
| 1211 | device->target_lun_valid = true; |
| 1212 | return; |
| 1213 | } |
| 1214 | |
| 1215 | /* |
| 1216 | * Defer target and LUN assignment for non-controller physical devices |
| 1217 | * because the SAS transport layer will make these assignments later. |
| 1218 | */ |
| 1219 | pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0); |
| 1220 | } |
| 1221 | |
| 1222 | static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info, |
| 1223 | struct pqi_scsi_dev *device) |
| 1224 | { |
| 1225 | int rc; |
| 1226 | u8 raid_level; |
| 1227 | u8 *buffer; |
| 1228 | |
| 1229 | raid_level = SA_RAID_UNKNOWN; |
| 1230 | |
| 1231 | buffer = kmalloc(64, GFP_KERNEL); |
| 1232 | if (buffer) { |
| 1233 | rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, |
| 1234 | VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64); |
| 1235 | if (rc == 0) { |
| 1236 | raid_level = buffer[8]; |
| 1237 | if (raid_level > SA_RAID_MAX) |
| 1238 | raid_level = SA_RAID_UNKNOWN; |
| 1239 | } |
| 1240 | kfree(buffer); |
| 1241 | } |
| 1242 | |
| 1243 | device->raid_level = raid_level; |
| 1244 | } |
| 1245 | |
| 1246 | static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, |
| 1247 | struct pqi_scsi_dev *device, struct raid_map *raid_map) |
| 1248 | { |
| 1249 | char *err_msg; |
| 1250 | u32 raid_map_size; |
| 1251 | u32 r5or6_blocks_per_row; |
| 1252 | |
| 1253 | raid_map_size = get_unaligned_le32(&raid_map->structure_size); |
| 1254 | |
| 1255 | if (raid_map_size < offsetof(struct raid_map, disk_data)) { |
| 1256 | err_msg = "RAID map too small"; |
| 1257 | goto bad_raid_map; |
| 1258 | } |
| 1259 | |
| 1260 | if (device->raid_level == SA_RAID_1) { |
| 1261 | if (get_unaligned_le16(&raid_map->layout_map_count) != 2) { |
| 1262 | err_msg = "invalid RAID-1 map"; |
| 1263 | goto bad_raid_map; |
| 1264 | } |
| 1265 | } else if (device->raid_level == SA_RAID_TRIPLE) { |
| 1266 | if (get_unaligned_le16(&raid_map->layout_map_count) != 3) { |
| 1267 | err_msg = "invalid RAID-1(Triple) map"; |
| 1268 | goto bad_raid_map; |
| 1269 | } |
| 1270 | } else if ((device->raid_level == SA_RAID_5 || |
| 1271 | device->raid_level == SA_RAID_6) && |
| 1272 | get_unaligned_le16(&raid_map->layout_map_count) > 1) { |
| 1273 | /* RAID 50/60 */ |
| 1274 | r5or6_blocks_per_row = |
| 1275 | get_unaligned_le16(&raid_map->strip_size) * |
| 1276 | get_unaligned_le16(&raid_map->data_disks_per_row); |
| 1277 | if (r5or6_blocks_per_row == 0) { |
| 1278 | err_msg = "invalid RAID-5 or RAID-6 map"; |
| 1279 | goto bad_raid_map; |
| 1280 | } |
| 1281 | } |
| 1282 | |
| 1283 | return 0; |
| 1284 | |
| 1285 | bad_raid_map: |
| 1286 | dev_warn(&ctrl_info->pci_dev->dev, |
| 1287 | "logical device %08x%08x %s\n", |
| 1288 | *((u32 *)&device->scsi3addr), |
| 1289 | *((u32 *)&device->scsi3addr[4]), err_msg); |
| 1290 | |
| 1291 | return -EINVAL; |
| 1292 | } |
| 1293 | |
| 1294 | static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, |
| 1295 | struct pqi_scsi_dev *device) |
| 1296 | { |
| 1297 | int rc; |
| 1298 | u32 raid_map_size; |
| 1299 | struct raid_map *raid_map; |
| 1300 | |
| 1301 | raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL); |
| 1302 | if (!raid_map) |
| 1303 | return -ENOMEM; |
| 1304 | |
| 1305 | rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, |
| 1306 | device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL); |
| 1307 | if (rc) |
| 1308 | goto error; |
| 1309 | |
| 1310 | raid_map_size = get_unaligned_le32(&raid_map->structure_size); |
| 1311 | |
| 1312 | if (raid_map_size > sizeof(*raid_map)) { |
| 1313 | |
| 1314 | kfree(raid_map); |
| 1315 | |
| 1316 | raid_map = kmalloc(raid_map_size, GFP_KERNEL); |
| 1317 | if (!raid_map) |
| 1318 | return -ENOMEM; |
| 1319 | |
| 1320 | rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, |
| 1321 | device->scsi3addr, raid_map, raid_map_size, 0, NULL); |
| 1322 | if (rc) |
| 1323 | goto error; |
| 1324 | |
| 1325 | if (get_unaligned_le32(&raid_map->structure_size) |
| 1326 | != raid_map_size) { |
| 1327 | dev_warn(&ctrl_info->pci_dev->dev, |
| 1328 | "requested %u bytes, received %u bytes\n", |
| 1329 | raid_map_size, |
| 1330 | get_unaligned_le32(&raid_map->structure_size)); |
| 1331 | goto error; |
| 1332 | } |
| 1333 | } |
| 1334 | |
| 1335 | rc = pqi_validate_raid_map(ctrl_info, device, raid_map); |
| 1336 | if (rc) |
| 1337 | goto error; |
| 1338 | |
| 1339 | device->raid_map = raid_map; |
| 1340 | |
| 1341 | return 0; |
| 1342 | |
| 1343 | error: |
| 1344 | kfree(raid_map); |
| 1345 | |
| 1346 | return rc; |
| 1347 | } |
| 1348 | |
| 1349 | static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info, |
| 1350 | struct pqi_scsi_dev *device) |
| 1351 | { |
| 1352 | if (!ctrl_info->lv_drive_type_mix_valid) { |
| 1353 | device->max_transfer_encrypted = ~0; |
| 1354 | return; |
| 1355 | } |
| 1356 | |
| 1357 | switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) { |
| 1358 | case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY: |
| 1359 | case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY: |
| 1360 | case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY: |
| 1361 | case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY: |
| 1362 | case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY: |
| 1363 | case LV_DRIVE_TYPE_MIX_SAS_ONLY: |
| 1364 | case LV_DRIVE_TYPE_MIX_SATA_ONLY: |
| 1365 | device->max_transfer_encrypted = |
| 1366 | ctrl_info->max_transfer_encrypted_sas_sata; |
| 1367 | break; |
| 1368 | case LV_DRIVE_TYPE_MIX_NVME_ONLY: |
| 1369 | device->max_transfer_encrypted = |
| 1370 | ctrl_info->max_transfer_encrypted_nvme; |
| 1371 | break; |
| 1372 | case LV_DRIVE_TYPE_MIX_UNKNOWN: |
| 1373 | case LV_DRIVE_TYPE_MIX_NO_RESTRICTION: |
| 1374 | default: |
| 1375 | device->max_transfer_encrypted = |
| 1376 | min(ctrl_info->max_transfer_encrypted_sas_sata, |
| 1377 | ctrl_info->max_transfer_encrypted_nvme); |
| 1378 | break; |
| 1379 | } |
| 1380 | } |
| 1381 | |
| 1382 | static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, |
| 1383 | struct pqi_scsi_dev *device) |
| 1384 | { |
| 1385 | int rc; |
| 1386 | u8 *buffer; |
| 1387 | u8 bypass_status; |
| 1388 | |
| 1389 | buffer = kmalloc(64, GFP_KERNEL); |
| 1390 | if (!buffer) |
| 1391 | return; |
| 1392 | |
| 1393 | rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, |
| 1394 | VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64); |
| 1395 | if (rc) |
| 1396 | goto out; |
| 1397 | |
| 1398 | #define RAID_BYPASS_STATUS 4 |
| 1399 | #define RAID_BYPASS_CONFIGURED 0x1 |
| 1400 | #define RAID_BYPASS_ENABLED 0x2 |
| 1401 | |
| 1402 | bypass_status = buffer[RAID_BYPASS_STATUS]; |
| 1403 | device->raid_bypass_configured = |
| 1404 | (bypass_status & RAID_BYPASS_CONFIGURED) != 0; |
| 1405 | if (device->raid_bypass_configured && |
| 1406 | (bypass_status & RAID_BYPASS_ENABLED) && |
| 1407 | pqi_get_raid_map(ctrl_info, device) == 0) { |
| 1408 | device->raid_bypass_enabled = true; |
| 1409 | if (get_unaligned_le16(&device->raid_map->flags) & |
| 1410 | RAID_MAP_ENCRYPTION_ENABLED) |
| 1411 | pqi_set_max_transfer_encrypted(ctrl_info, device); |
| 1412 | } |
| 1413 | |
| 1414 | out: |
| 1415 | kfree(buffer); |
| 1416 | } |
| 1417 | |
| 1418 | /* |
| 1419 | * Use vendor-specific VPD to determine online/offline status of a volume. |
| 1420 | */ |
| 1421 | |
| 1422 | static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, |
| 1423 | struct pqi_scsi_dev *device) |
| 1424 | { |
| 1425 | int rc; |
| 1426 | size_t page_length; |
| 1427 | u8 volume_status = CISS_LV_STATUS_UNAVAILABLE; |
| 1428 | bool volume_offline = true; |
| 1429 | u32 volume_flags; |
| 1430 | struct ciss_vpd_logical_volume_status *vpd; |
| 1431 | |
| 1432 | vpd = kmalloc(sizeof(*vpd), GFP_KERNEL); |
| 1433 | if (!vpd) |
| 1434 | goto no_buffer; |
| 1435 | |
| 1436 | rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, |
| 1437 | VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd)); |
| 1438 | if (rc) |
| 1439 | goto out; |
| 1440 | |
| 1441 | if (vpd->page_code != CISS_VPD_LV_STATUS) |
| 1442 | goto out; |
| 1443 | |
| 1444 | page_length = offsetof(struct ciss_vpd_logical_volume_status, |
| 1445 | volume_status) + vpd->page_length; |
| 1446 | if (page_length < sizeof(*vpd)) |
| 1447 | goto out; |
| 1448 | |
| 1449 | volume_status = vpd->volume_status; |
| 1450 | volume_flags = get_unaligned_be32(&vpd->flags); |
| 1451 | volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0; |
| 1452 | |
| 1453 | out: |
| 1454 | kfree(vpd); |
| 1455 | no_buffer: |
| 1456 | device->volume_status = volume_status; |
| 1457 | device->volume_offline = volume_offline; |
| 1458 | } |
| 1459 | |
| 1460 | #define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10 |
| 1461 | |
| 1462 | static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, |
| 1463 | struct pqi_scsi_dev *device, |
| 1464 | struct bmic_identify_physical_device *id_phys) |
| 1465 | { |
| 1466 | int rc; |
| 1467 | |
| 1468 | memset(id_phys, 0, sizeof(*id_phys)); |
| 1469 | |
| 1470 | rc = pqi_identify_physical_device(ctrl_info, device, |
| 1471 | id_phys, sizeof(*id_phys)); |
| 1472 | if (rc) { |
| 1473 | device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; |
| 1474 | return rc; |
| 1475 | } |
| 1476 | |
| 1477 | scsi_sanitize_inquiry_string(&id_phys->model[0], 8); |
| 1478 | scsi_sanitize_inquiry_string(&id_phys->model[8], 16); |
| 1479 | |
| 1480 | memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor)); |
| 1481 | memcpy(device->model, &id_phys->model[8], sizeof(device->model)); |
| 1482 | |
| 1483 | device->box_index = id_phys->box_index; |
| 1484 | device->phys_box_on_bus = id_phys->phys_box_on_bus; |
| 1485 | device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0]; |
| 1486 | device->queue_depth = |
| 1487 | get_unaligned_le16(&id_phys->current_queue_depth_limit); |
| 1488 | device->active_path_index = id_phys->active_path_number; |
| 1489 | device->path_map = id_phys->redundant_path_present_map; |
| 1490 | memcpy(&device->box, |
| 1491 | &id_phys->alternate_paths_phys_box_on_port, |
| 1492 | sizeof(device->box)); |
| 1493 | memcpy(&device->phys_connector, |
| 1494 | &id_phys->alternate_paths_phys_connector, |
| 1495 | sizeof(device->phys_connector)); |
| 1496 | device->bay = id_phys->phys_bay_in_box; |
| 1497 | |
| 1498 | memcpy(&device->page_83_identifier, &id_phys->page_83_identifier, |
| 1499 | sizeof(device->page_83_identifier)); |
| 1500 | |
| 1501 | if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) && |
| 1502 | id_phys->phy_count) |
| 1503 | device->phy_id = |
| 1504 | id_phys->phy_to_phy_map[device->active_path_index]; |
| 1505 | else |
| 1506 | device->phy_id = 0xFF; |
| 1507 | |
| 1508 | return 0; |
| 1509 | } |
| 1510 | |
| 1511 | static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info, |
| 1512 | struct pqi_scsi_dev *device) |
| 1513 | { |
| 1514 | int rc; |
| 1515 | u8 *buffer; |
| 1516 | |
| 1517 | buffer = kmalloc(64, GFP_KERNEL); |
| 1518 | if (!buffer) |
| 1519 | return -ENOMEM; |
| 1520 | |
| 1521 | /* Send an inquiry to the device to see what it is. */ |
| 1522 | rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); |
| 1523 | if (rc) |
| 1524 | goto out; |
| 1525 | |
| 1526 | scsi_sanitize_inquiry_string(&buffer[8], 8); |
| 1527 | scsi_sanitize_inquiry_string(&buffer[16], 16); |
| 1528 | |
| 1529 | device->devtype = buffer[0] & 0x1f; |
| 1530 | memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); |
| 1531 | memcpy(device->model, &buffer[16], sizeof(device->model)); |
| 1532 | |
| 1533 | if (device->devtype == TYPE_DISK) { |
| 1534 | if (device->is_external_raid_device) { |
| 1535 | device->raid_level = SA_RAID_UNKNOWN; |
| 1536 | device->volume_status = CISS_LV_OK; |
| 1537 | device->volume_offline = false; |
| 1538 | } else { |
| 1539 | pqi_get_raid_level(ctrl_info, device); |
| 1540 | pqi_get_raid_bypass_status(ctrl_info, device); |
| 1541 | pqi_get_volume_status(ctrl_info, device); |
| 1542 | } |
| 1543 | } |
| 1544 | |
| 1545 | out: |
| 1546 | kfree(buffer); |
| 1547 | |
| 1548 | return rc; |
| 1549 | } |
| 1550 | |
| 1551 | static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, |
| 1552 | struct pqi_scsi_dev *device, |
| 1553 | struct bmic_identify_physical_device *id_phys) |
| 1554 | { |
| 1555 | int rc; |
| 1556 | |
| 1557 | if (device->is_expander_smp_device) |
| 1558 | return 0; |
| 1559 | |
| 1560 | if (pqi_is_logical_device(device)) |
| 1561 | rc = pqi_get_logical_device_info(ctrl_info, device); |
| 1562 | else |
| 1563 | rc = pqi_get_physical_device_info(ctrl_info, device, id_phys); |
| 1564 | |
| 1565 | return rc; |
| 1566 | } |
| 1567 | |
| 1568 | static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, |
| 1569 | struct pqi_scsi_dev *device) |
| 1570 | { |
| 1571 | char *status; |
| 1572 | static const char unknown_state_str[] = |
| 1573 | "Volume is in an unknown state (%u)"; |
| 1574 | char unknown_state_buffer[sizeof(unknown_state_str) + 10]; |
| 1575 | |
| 1576 | switch (device->volume_status) { |
| 1577 | case CISS_LV_OK: |
| 1578 | status = "Volume online"; |
| 1579 | break; |
| 1580 | case CISS_LV_FAILED: |
| 1581 | status = "Volume failed"; |
| 1582 | break; |
| 1583 | case CISS_LV_NOT_CONFIGURED: |
| 1584 | status = "Volume not configured"; |
| 1585 | break; |
| 1586 | case CISS_LV_DEGRADED: |
| 1587 | status = "Volume degraded"; |
| 1588 | break; |
| 1589 | case CISS_LV_READY_FOR_RECOVERY: |
| 1590 | status = "Volume ready for recovery operation"; |
| 1591 | break; |
| 1592 | case CISS_LV_UNDERGOING_RECOVERY: |
| 1593 | status = "Volume undergoing recovery"; |
| 1594 | break; |
| 1595 | case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED: |
| 1596 | status = "Wrong physical drive was replaced"; |
| 1597 | break; |
| 1598 | case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM: |
| 1599 | status = "A physical drive not properly connected"; |
| 1600 | break; |
| 1601 | case CISS_LV_HARDWARE_OVERHEATING: |
| 1602 | status = "Hardware is overheating"; |
| 1603 | break; |
| 1604 | case CISS_LV_HARDWARE_HAS_OVERHEATED: |
| 1605 | status = "Hardware has overheated"; |
| 1606 | break; |
| 1607 | case CISS_LV_UNDERGOING_EXPANSION: |
| 1608 | status = "Volume undergoing expansion"; |
| 1609 | break; |
| 1610 | case CISS_LV_NOT_AVAILABLE: |
| 1611 | status = "Volume waiting for transforming volume"; |
| 1612 | break; |
| 1613 | case CISS_LV_QUEUED_FOR_EXPANSION: |
| 1614 | status = "Volume queued for expansion"; |
| 1615 | break; |
| 1616 | case CISS_LV_DISABLED_SCSI_ID_CONFLICT: |
| 1617 | status = "Volume disabled due to SCSI ID conflict"; |
| 1618 | break; |
| 1619 | case CISS_LV_EJECTED: |
| 1620 | status = "Volume has been ejected"; |
| 1621 | break; |
| 1622 | case CISS_LV_UNDERGOING_ERASE: |
| 1623 | status = "Volume undergoing background erase"; |
| 1624 | break; |
| 1625 | case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD: |
| 1626 | status = "Volume ready for predictive spare rebuild"; |
| 1627 | break; |
| 1628 | case CISS_LV_UNDERGOING_RPI: |
| 1629 | status = "Volume undergoing rapid parity initialization"; |
| 1630 | break; |
| 1631 | case CISS_LV_PENDING_RPI: |
| 1632 | status = "Volume queued for rapid parity initialization"; |
| 1633 | break; |
| 1634 | case CISS_LV_ENCRYPTED_NO_KEY: |
| 1635 | status = "Encrypted volume inaccessible - key not present"; |
| 1636 | break; |
| 1637 | case CISS_LV_UNDERGOING_ENCRYPTION: |
| 1638 | status = "Volume undergoing encryption process"; |
| 1639 | break; |
| 1640 | case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING: |
| 1641 | status = "Volume undergoing encryption re-keying process"; |
| 1642 | break; |
| 1643 | case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: |
| 1644 | status = "Volume encrypted but encryption is disabled"; |
| 1645 | break; |
| 1646 | case CISS_LV_PENDING_ENCRYPTION: |
| 1647 | status = "Volume pending migration to encrypted state"; |
| 1648 | break; |
| 1649 | case CISS_LV_PENDING_ENCRYPTION_REKEYING: |
| 1650 | status = "Volume pending encryption rekeying"; |
| 1651 | break; |
| 1652 | case CISS_LV_NOT_SUPPORTED: |
| 1653 | status = "Volume not supported on this controller"; |
| 1654 | break; |
| 1655 | case CISS_LV_STATUS_UNAVAILABLE: |
| 1656 | status = "Volume status not available"; |
| 1657 | break; |
| 1658 | default: |
| 1659 | snprintf(unknown_state_buffer, sizeof(unknown_state_buffer), |
| 1660 | unknown_state_str, device->volume_status); |
| 1661 | status = unknown_state_buffer; |
| 1662 | break; |
| 1663 | } |
| 1664 | |
| 1665 | dev_info(&ctrl_info->pci_dev->dev, |
| 1666 | "scsi %d:%d:%d:%d %s\n", |
| 1667 | ctrl_info->scsi_host->host_no, |
| 1668 | device->bus, device->target, device->lun, status); |
| 1669 | } |
| 1670 | |
| 1671 | static void pqi_rescan_worker(struct work_struct *work) |
| 1672 | { |
| 1673 | struct pqi_ctrl_info *ctrl_info; |
| 1674 | |
| 1675 | ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, |
| 1676 | rescan_work); |
| 1677 | |
| 1678 | pqi_scan_scsi_devices(ctrl_info); |
| 1679 | } |
| 1680 | |
| 1681 | static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, |
| 1682 | struct pqi_scsi_dev *device) |
| 1683 | { |
| 1684 | int rc; |
| 1685 | |
| 1686 | if (pqi_is_logical_device(device)) |
| 1687 | rc = scsi_add_device(ctrl_info->scsi_host, device->bus, |
| 1688 | device->target, device->lun); |
| 1689 | else |
| 1690 | rc = pqi_add_sas_device(ctrl_info->sas_host, device); |
| 1691 | |
| 1692 | return rc; |
| 1693 | } |
| 1694 | |
| 1695 | #define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000) |
| 1696 | |
| 1697 | static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) |
| 1698 | { |
| 1699 | int rc; |
| 1700 | |
| 1701 | pqi_device_remove_start(device); |
| 1702 | |
| 1703 | rc = pqi_device_wait_for_pending_io(ctrl_info, device, |
| 1704 | PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS); |
| 1705 | if (rc) |
| 1706 | dev_err(&ctrl_info->pci_dev->dev, |
| 1707 | "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n", |
| 1708 | ctrl_info->scsi_host->host_no, device->bus, |
| 1709 | device->target, device->lun, |
| 1710 | atomic_read(&device->scsi_cmds_outstanding)); |
| 1711 | |
| 1712 | if (pqi_is_logical_device(device)) |
| 1713 | scsi_remove_device(device->sdev); |
| 1714 | else |
| 1715 | pqi_remove_sas_device(device); |
| 1716 | } |
| 1717 | |
| 1718 | /* Assumes the SCSI device list lock is held. */ |
| 1719 | |
| 1720 | static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, |
| 1721 | int bus, int target, int lun) |
| 1722 | { |
| 1723 | struct pqi_scsi_dev *device; |
| 1724 | |
| 1725 | list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) |
| 1726 | if (device->bus == bus && device->target == target && device->lun == lun) |
| 1727 | return device; |
| 1728 | |
| 1729 | return NULL; |
| 1730 | } |
| 1731 | |
| 1732 | static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2) |
| 1733 | { |
| 1734 | if (dev1->is_physical_device != dev2->is_physical_device) |
| 1735 | return false; |
| 1736 | |
| 1737 | if (dev1->is_physical_device) |
| 1738 | return dev1->wwid == dev2->wwid; |
| 1739 | |
| 1740 | return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0; |
| 1741 | } |
| 1742 | |
| 1743 | enum pqi_find_result { |
| 1744 | DEVICE_NOT_FOUND, |
| 1745 | DEVICE_CHANGED, |
| 1746 | DEVICE_SAME, |
| 1747 | }; |
| 1748 | |
| 1749 | static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, |
| 1750 | struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device) |
| 1751 | { |
| 1752 | struct pqi_scsi_dev *device; |
| 1753 | |
| 1754 | list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { |
| 1755 | if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) { |
| 1756 | *matching_device = device; |
| 1757 | if (pqi_device_equal(device_to_find, device)) { |
| 1758 | if (device_to_find->volume_offline) |
| 1759 | return DEVICE_CHANGED; |
| 1760 | return DEVICE_SAME; |
| 1761 | } |
| 1762 | return DEVICE_CHANGED; |
| 1763 | } |
| 1764 | } |
| 1765 | |
| 1766 | return DEVICE_NOT_FOUND; |
| 1767 | } |
| 1768 | |
| 1769 | static inline const char *pqi_device_type(struct pqi_scsi_dev *device) |
| 1770 | { |
| 1771 | if (device->is_expander_smp_device) |
| 1772 | return "Enclosure SMP "; |
| 1773 | |
| 1774 | return scsi_device_type(device->devtype); |
| 1775 | } |
| 1776 | |
| 1777 | #define PQI_DEV_INFO_BUFFER_LENGTH 128 |
| 1778 | |
| 1779 | static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, |
| 1780 | char *action, struct pqi_scsi_dev *device) |
| 1781 | { |
| 1782 | ssize_t count; |
| 1783 | char buffer[PQI_DEV_INFO_BUFFER_LENGTH]; |
| 1784 | |
| 1785 | count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH, |
| 1786 | "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); |
| 1787 | |
| 1788 | if (device->target_lun_valid) |
| 1789 | count += scnprintf(buffer + count, |
| 1790 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
| 1791 | "%d:%d", |
| 1792 | device->target, |
| 1793 | device->lun); |
| 1794 | else |
| 1795 | count += scnprintf(buffer + count, |
| 1796 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
| 1797 | "-:-"); |
| 1798 | |
| 1799 | if (pqi_is_logical_device(device)) |
| 1800 | count += scnprintf(buffer + count, |
| 1801 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
| 1802 | " %08x%08x", |
| 1803 | *((u32 *)&device->scsi3addr), |
| 1804 | *((u32 *)&device->scsi3addr[4])); |
| 1805 | else |
| 1806 | count += scnprintf(buffer + count, |
| 1807 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
| 1808 | " %016llx", device->sas_address); |
| 1809 | |
| 1810 | count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, |
| 1811 | " %s %.8s %.16s ", |
| 1812 | pqi_device_type(device), |
| 1813 | device->vendor, |
| 1814 | device->model); |
| 1815 | |
| 1816 | if (pqi_is_logical_device(device)) { |
| 1817 | if (device->devtype == TYPE_DISK) |
| 1818 | count += scnprintf(buffer + count, |
| 1819 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
| 1820 | "SSDSmartPathCap%c En%c %-12s", |
| 1821 | device->raid_bypass_configured ? '+' : '-', |
| 1822 | device->raid_bypass_enabled ? '+' : '-', |
| 1823 | pqi_raid_level_to_string(device->raid_level)); |
| 1824 | } else { |
| 1825 | count += scnprintf(buffer + count, |
| 1826 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
| 1827 | "AIO%c", device->aio_enabled ? '+' : '-'); |
| 1828 | if (device->devtype == TYPE_DISK || |
| 1829 | device->devtype == TYPE_ZBC) |
| 1830 | count += scnprintf(buffer + count, |
| 1831 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
| 1832 | " qd=%-6d", device->queue_depth); |
| 1833 | } |
| 1834 | |
| 1835 | dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); |
| 1836 | } |
| 1837 | |
| 1838 | /* Assumes the SCSI device list lock is held. */ |
| 1839 | |
| 1840 | static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device, |
| 1841 | struct pqi_scsi_dev *new_device) |
| 1842 | { |
| 1843 | existing_device->device_type = new_device->device_type; |
| 1844 | existing_device->bus = new_device->bus; |
| 1845 | if (new_device->target_lun_valid) { |
| 1846 | existing_device->target = new_device->target; |
| 1847 | existing_device->lun = new_device->lun; |
| 1848 | existing_device->target_lun_valid = true; |
| 1849 | } |
| 1850 | |
| 1851 | if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION || |
| 1852 | existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) && |
| 1853 | new_device->volume_status == CISS_LV_OK) |
| 1854 | existing_device->rescan = true; |
| 1855 | |
| 1856 | /* By definition, the scsi3addr and wwid fields are already the same. */ |
| 1857 | |
| 1858 | existing_device->is_physical_device = new_device->is_physical_device; |
| 1859 | existing_device->is_external_raid_device = |
| 1860 | new_device->is_external_raid_device; |
| 1861 | existing_device->is_expander_smp_device = |
| 1862 | new_device->is_expander_smp_device; |
| 1863 | existing_device->aio_enabled = new_device->aio_enabled; |
| 1864 | memcpy(existing_device->vendor, new_device->vendor, |
| 1865 | sizeof(existing_device->vendor)); |
| 1866 | memcpy(existing_device->model, new_device->model, |
| 1867 | sizeof(existing_device->model)); |
| 1868 | existing_device->sas_address = new_device->sas_address; |
| 1869 | existing_device->raid_level = new_device->raid_level; |
| 1870 | existing_device->queue_depth = new_device->queue_depth; |
| 1871 | existing_device->aio_handle = new_device->aio_handle; |
| 1872 | existing_device->volume_status = new_device->volume_status; |
| 1873 | existing_device->active_path_index = new_device->active_path_index; |
| 1874 | existing_device->phy_id = new_device->phy_id; |
| 1875 | existing_device->path_map = new_device->path_map; |
| 1876 | existing_device->bay = new_device->bay; |
| 1877 | existing_device->box_index = new_device->box_index; |
| 1878 | existing_device->phys_box_on_bus = new_device->phys_box_on_bus; |
| 1879 | existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type; |
| 1880 | memcpy(existing_device->box, new_device->box, |
| 1881 | sizeof(existing_device->box)); |
| 1882 | memcpy(existing_device->phys_connector, new_device->phys_connector, |
| 1883 | sizeof(existing_device->phys_connector)); |
| 1884 | existing_device->next_bypass_group = 0; |
| 1885 | kfree(existing_device->raid_map); |
| 1886 | existing_device->raid_map = new_device->raid_map; |
| 1887 | existing_device->raid_bypass_configured = |
| 1888 | new_device->raid_bypass_configured; |
| 1889 | existing_device->raid_bypass_enabled = |
| 1890 | new_device->raid_bypass_enabled; |
| 1891 | existing_device->device_offline = false; |
| 1892 | |
| 1893 | /* To prevent this from being freed later. */ |
| 1894 | new_device->raid_map = NULL; |
| 1895 | } |
| 1896 | |
| 1897 | static inline void pqi_free_device(struct pqi_scsi_dev *device) |
| 1898 | { |
| 1899 | if (device) { |
| 1900 | kfree(device->raid_map); |
| 1901 | kfree(device); |
| 1902 | } |
| 1903 | } |
| 1904 | |
| 1905 | /* |
| 1906 | * Called when exposing a new device to the OS fails in order to re-adjust |
| 1907 | * our internal SCSI device list to match the SCSI ML's view. |
| 1908 | */ |
| 1909 | |
| 1910 | static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, |
| 1911 | struct pqi_scsi_dev *device) |
| 1912 | { |
| 1913 | unsigned long flags; |
| 1914 | |
| 1915 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
| 1916 | list_del(&device->scsi_device_list_entry); |
| 1917 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 1918 | |
| 1919 | /* Allow the device structure to be freed later. */ |
| 1920 | device->keep_device = false; |
| 1921 | } |
| 1922 | |
| 1923 | static inline bool pqi_is_device_added(struct pqi_scsi_dev *device) |
| 1924 | { |
| 1925 | if (device->is_expander_smp_device) |
| 1926 | return device->sas_port != NULL; |
| 1927 | |
| 1928 | return device->sdev != NULL; |
| 1929 | } |
| 1930 | |
| 1931 | static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, |
| 1932 | struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) |
| 1933 | { |
| 1934 | int rc; |
| 1935 | unsigned int i; |
| 1936 | unsigned long flags; |
| 1937 | enum pqi_find_result find_result; |
| 1938 | struct pqi_scsi_dev *device; |
| 1939 | struct pqi_scsi_dev *next; |
| 1940 | struct pqi_scsi_dev *matching_device; |
| 1941 | LIST_HEAD(add_list); |
| 1942 | LIST_HEAD(delete_list); |
| 1943 | |
| 1944 | /* |
| 1945 | * The idea here is to do as little work as possible while holding the |
| 1946 | * spinlock. That's why we go to great pains to defer anything other |
| 1947 | * than updating the internal device list until after we release the |
| 1948 | * spinlock. |
| 1949 | */ |
| 1950 | |
| 1951 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
| 1952 | |
| 1953 | /* Assume that all devices in the existing list have gone away. */ |
| 1954 | list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) |
| 1955 | device->device_gone = true; |
| 1956 | |
| 1957 | for (i = 0; i < num_new_devices; i++) { |
| 1958 | device = new_device_list[i]; |
| 1959 | |
| 1960 | find_result = pqi_scsi_find_entry(ctrl_info, device, |
| 1961 | &matching_device); |
| 1962 | |
| 1963 | switch (find_result) { |
| 1964 | case DEVICE_SAME: |
| 1965 | /* |
| 1966 | * The newly found device is already in the existing |
| 1967 | * device list. |
| 1968 | */ |
| 1969 | device->new_device = false; |
| 1970 | matching_device->device_gone = false; |
| 1971 | pqi_scsi_update_device(matching_device, device); |
| 1972 | break; |
| 1973 | case DEVICE_NOT_FOUND: |
| 1974 | /* |
| 1975 | * The newly found device is NOT in the existing device |
| 1976 | * list. |
| 1977 | */ |
| 1978 | device->new_device = true; |
| 1979 | break; |
| 1980 | case DEVICE_CHANGED: |
| 1981 | /* |
| 1982 | * The original device has gone away and we need to add |
| 1983 | * the new device. |
| 1984 | */ |
| 1985 | device->new_device = true; |
| 1986 | break; |
| 1987 | } |
| 1988 | } |
| 1989 | |
| 1990 | /* Process all devices that have gone away. */ |
| 1991 | list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, |
| 1992 | scsi_device_list_entry) { |
| 1993 | if (device->device_gone) { |
| 1994 | list_del_init(&device->scsi_device_list_entry); |
| 1995 | list_add_tail(&device->delete_list_entry, &delete_list); |
| 1996 | } |
| 1997 | } |
| 1998 | |
| 1999 | /* Process all new devices. */ |
| 2000 | for (i = 0; i < num_new_devices; i++) { |
| 2001 | device = new_device_list[i]; |
| 2002 | if (!device->new_device) |
| 2003 | continue; |
| 2004 | if (device->volume_offline) |
| 2005 | continue; |
| 2006 | list_add_tail(&device->scsi_device_list_entry, |
| 2007 | &ctrl_info->scsi_device_list); |
| 2008 | list_add_tail(&device->add_list_entry, &add_list); |
| 2009 | /* To prevent this device structure from being freed later. */ |
| 2010 | device->keep_device = true; |
| 2011 | } |
| 2012 | |
| 2013 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 2014 | |
| 2015 | /* |
| 2016 | * If OFA is in progress and there are devices that need to be deleted, |
| 2017 | * allow any pending reset operations to continue and unblock any SCSI |
| 2018 | * requests before removal. |
| 2019 | */ |
| 2020 | if (pqi_ofa_in_progress(ctrl_info)) { |
| 2021 | list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) |
| 2022 | if (pqi_is_device_added(device)) |
| 2023 | pqi_device_remove_start(device); |
| 2024 | pqi_ctrl_unblock_device_reset(ctrl_info); |
| 2025 | pqi_scsi_unblock_requests(ctrl_info); |
| 2026 | } |
| 2027 | |
| 2028 | /* Remove all devices that have gone away. */ |
| 2029 | list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) { |
| 2030 | if (device->volume_offline) { |
| 2031 | pqi_dev_info(ctrl_info, "offline", device); |
| 2032 | pqi_show_volume_status(ctrl_info, device); |
| 2033 | } |
| 2034 | list_del(&device->delete_list_entry); |
| 2035 | if (pqi_is_device_added(device)) { |
| 2036 | pqi_remove_device(ctrl_info, device); |
| 2037 | } else { |
| 2038 | if (!device->volume_offline) |
| 2039 | pqi_dev_info(ctrl_info, "removed", device); |
| 2040 | pqi_free_device(device); |
| 2041 | } |
| 2042 | } |
| 2043 | |
| 2044 | /* |
| 2045 | * Notify the SCSI ML if the queue depth of any existing device has |
| 2046 | * changed. |
| 2047 | */ |
| 2048 | list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { |
| 2049 | if (device->sdev && device->queue_depth != device->advertised_queue_depth) { |
| 2050 | device->advertised_queue_depth = device->queue_depth; |
| 2051 | scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); |
| 2052 | if (device->rescan) { |
| 2053 | scsi_rescan_device(&device->sdev->sdev_gendev); |
| 2054 | device->rescan = false; |
| 2055 | } |
| 2056 | } |
| 2057 | } |
| 2058 | |
| 2059 | /* Expose any new devices. */ |
| 2060 | list_for_each_entry_safe(device, next, &add_list, add_list_entry) { |
| 2061 | if (!pqi_is_device_added(device)) { |
| 2062 | rc = pqi_add_device(ctrl_info, device); |
| 2063 | if (rc == 0) { |
| 2064 | pqi_dev_info(ctrl_info, "added", device); |
| 2065 | } else { |
| 2066 | dev_warn(&ctrl_info->pci_dev->dev, |
| 2067 | "scsi %d:%d:%d:%d addition failed, device not added\n", |
| 2068 | ctrl_info->scsi_host->host_no, |
| 2069 | device->bus, device->target, |
| 2070 | device->lun); |
| 2071 | pqi_fixup_botched_add(ctrl_info, device); |
| 2072 | } |
| 2073 | } |
| 2074 | } |
| 2075 | } |
| 2076 | |
| 2077 | static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device) |
| 2078 | { |
| 2079 | /* |
| 2080 | * Only support the HBA controller itself as a RAID |
| 2081 | * controller. If it's a RAID controller other than |
| 2082 | * the HBA itself (an external RAID controller, for |
| 2083 | * example), we don't support it. |
| 2084 | */ |
| 2085 | if (device->device_type == SA_DEVICE_TYPE_CONTROLLER && |
| 2086 | !pqi_is_hba_lunid(device->scsi3addr)) |
| 2087 | return false; |
| 2088 | |
| 2089 | return true; |
| 2090 | } |
| 2091 | |
| 2092 | static inline bool pqi_skip_device(u8 *scsi3addr) |
| 2093 | { |
| 2094 | /* Ignore all masked devices. */ |
| 2095 | if (MASKED_DEVICE(scsi3addr)) |
| 2096 | return true; |
| 2097 | |
| 2098 | return false; |
| 2099 | } |
| 2100 | |
| 2101 | static inline void pqi_mask_device(u8 *scsi3addr) |
| 2102 | { |
| 2103 | scsi3addr[3] |= 0xc0; |
| 2104 | } |
| 2105 | |
| 2106 | static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device) |
| 2107 | { |
| 2108 | switch (device->device_type) { |
| 2109 | case SA_DEVICE_TYPE_SAS: |
| 2110 | case SA_DEVICE_TYPE_EXPANDER_SMP: |
| 2111 | case SA_DEVICE_TYPE_SES: |
| 2112 | return true; |
| 2113 | } |
| 2114 | |
| 2115 | return false; |
| 2116 | } |
| 2117 | |
| 2118 | static inline bool pqi_expose_device(struct pqi_scsi_dev *device) |
| 2119 | { |
| 2120 | return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); |
| 2121 | } |
| 2122 | |
| 2123 | static inline void pqi_set_physical_device_wwid(struct pqi_ctrl_info *ctrl_info, |
| 2124 | struct pqi_scsi_dev *device, struct report_phys_lun_extended_entry *phys_lun_ext_entry) |
| 2125 | { |
| 2126 | if (ctrl_info->unique_wwid_in_report_phys_lun_supported || |
| 2127 | pqi_is_device_with_sas_address(device)) |
| 2128 | device->wwid = phys_lun_ext_entry->wwid; |
| 2129 | else |
| 2130 | device->wwid = cpu_to_be64(get_unaligned_be64(&device->page_83_identifier)); |
| 2131 | } |
| 2132 | |
| 2133 | static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) |
| 2134 | { |
| 2135 | int i; |
| 2136 | int rc; |
| 2137 | LIST_HEAD(new_device_list_head); |
| 2138 | struct report_phys_lun_extended *physdev_list = NULL; |
| 2139 | struct report_log_lun_extended *logdev_list = NULL; |
| 2140 | struct report_phys_lun_extended_entry *phys_lun_ext_entry; |
| 2141 | struct report_log_lun_extended_entry *log_lun_ext_entry; |
| 2142 | struct bmic_identify_physical_device *id_phys = NULL; |
| 2143 | u32 num_physicals; |
| 2144 | u32 num_logicals; |
| 2145 | struct pqi_scsi_dev **new_device_list = NULL; |
| 2146 | struct pqi_scsi_dev *device; |
| 2147 | struct pqi_scsi_dev *next; |
| 2148 | unsigned int num_new_devices; |
| 2149 | unsigned int num_valid_devices; |
| 2150 | bool is_physical_device; |
| 2151 | u8 *scsi3addr; |
| 2152 | unsigned int physical_index; |
| 2153 | unsigned int logical_index; |
| 2154 | static char *out_of_memory_msg = |
| 2155 | "failed to allocate memory, device discovery stopped"; |
| 2156 | |
| 2157 | rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); |
| 2158 | if (rc) |
| 2159 | goto out; |
| 2160 | |
| 2161 | if (physdev_list) |
| 2162 | num_physicals = |
| 2163 | get_unaligned_be32(&physdev_list->header.list_length) |
| 2164 | / sizeof(physdev_list->lun_entries[0]); |
| 2165 | else |
| 2166 | num_physicals = 0; |
| 2167 | |
| 2168 | if (logdev_list) |
| 2169 | num_logicals = |
| 2170 | get_unaligned_be32(&logdev_list->header.list_length) |
| 2171 | / sizeof(logdev_list->lun_entries[0]); |
| 2172 | else |
| 2173 | num_logicals = 0; |
| 2174 | |
| 2175 | if (num_physicals) { |
| 2176 | /* |
| 2177 | * We need this buffer for calls to pqi_get_physical_disk_info() |
| 2178 | * below. We allocate it here instead of inside |
| 2179 | * pqi_get_physical_disk_info() because it's a fairly large |
| 2180 | * buffer. |
| 2181 | */ |
| 2182 | id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); |
| 2183 | if (!id_phys) { |
| 2184 | dev_warn(&ctrl_info->pci_dev->dev, "%s\n", |
| 2185 | out_of_memory_msg); |
| 2186 | rc = -ENOMEM; |
| 2187 | goto out; |
| 2188 | } |
| 2189 | |
| 2190 | if (pqi_hide_vsep) { |
| 2191 | for (i = num_physicals - 1; i >= 0; i--) { |
| 2192 | phys_lun_ext_entry = |
| 2193 | &physdev_list->lun_entries[i]; |
| 2194 | if (CISS_GET_DRIVE_NUMBER(phys_lun_ext_entry->lunid) == PQI_VSEP_CISS_BTL) { |
| 2195 | pqi_mask_device(phys_lun_ext_entry->lunid); |
| 2196 | break; |
| 2197 | } |
| 2198 | } |
| 2199 | } |
| 2200 | } |
| 2201 | |
| 2202 | if (num_logicals && |
| 2203 | (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX)) |
| 2204 | ctrl_info->lv_drive_type_mix_valid = true; |
| 2205 | |
| 2206 | num_new_devices = num_physicals + num_logicals; |
| 2207 | |
| 2208 | new_device_list = kmalloc_array(num_new_devices, |
| 2209 | sizeof(*new_device_list), |
| 2210 | GFP_KERNEL); |
| 2211 | if (!new_device_list) { |
| 2212 | dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); |
| 2213 | rc = -ENOMEM; |
| 2214 | goto out; |
| 2215 | } |
| 2216 | |
| 2217 | for (i = 0; i < num_new_devices; i++) { |
| 2218 | device = kzalloc(sizeof(*device), GFP_KERNEL); |
| 2219 | if (!device) { |
| 2220 | dev_warn(&ctrl_info->pci_dev->dev, "%s\n", |
| 2221 | out_of_memory_msg); |
| 2222 | rc = -ENOMEM; |
| 2223 | goto out; |
| 2224 | } |
| 2225 | list_add_tail(&device->new_device_list_entry, |
| 2226 | &new_device_list_head); |
| 2227 | } |
| 2228 | |
| 2229 | device = NULL; |
| 2230 | num_valid_devices = 0; |
| 2231 | physical_index = 0; |
| 2232 | logical_index = 0; |
| 2233 | |
| 2234 | for (i = 0; i < num_new_devices; i++) { |
| 2235 | |
| 2236 | if ((!pqi_expose_ld_first && i < num_physicals) || |
| 2237 | (pqi_expose_ld_first && i >= num_logicals)) { |
| 2238 | is_physical_device = true; |
| 2239 | phys_lun_ext_entry = |
| 2240 | &physdev_list->lun_entries[physical_index++]; |
| 2241 | log_lun_ext_entry = NULL; |
| 2242 | scsi3addr = phys_lun_ext_entry->lunid; |
| 2243 | } else { |
| 2244 | is_physical_device = false; |
| 2245 | phys_lun_ext_entry = NULL; |
| 2246 | log_lun_ext_entry = |
| 2247 | &logdev_list->lun_entries[logical_index++]; |
| 2248 | scsi3addr = log_lun_ext_entry->lunid; |
| 2249 | } |
| 2250 | |
| 2251 | if (is_physical_device && pqi_skip_device(scsi3addr)) |
| 2252 | continue; |
| 2253 | |
| 2254 | if (device) |
| 2255 | device = list_next_entry(device, new_device_list_entry); |
| 2256 | else |
| 2257 | device = list_first_entry(&new_device_list_head, |
| 2258 | struct pqi_scsi_dev, new_device_list_entry); |
| 2259 | |
| 2260 | memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); |
| 2261 | device->is_physical_device = is_physical_device; |
| 2262 | if (is_physical_device) { |
| 2263 | device->device_type = phys_lun_ext_entry->device_type; |
| 2264 | if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP) |
| 2265 | device->is_expander_smp_device = true; |
| 2266 | } else { |
| 2267 | device->is_external_raid_device = |
| 2268 | pqi_is_external_raid_addr(scsi3addr); |
| 2269 | } |
| 2270 | |
| 2271 | if (!pqi_is_supported_device(device)) |
| 2272 | continue; |
| 2273 | |
| 2274 | /* Gather information about the device. */ |
| 2275 | rc = pqi_get_device_info(ctrl_info, device, id_phys); |
| 2276 | if (rc == -ENOMEM) { |
| 2277 | dev_warn(&ctrl_info->pci_dev->dev, "%s\n", |
| 2278 | out_of_memory_msg); |
| 2279 | goto out; |
| 2280 | } |
| 2281 | if (rc) { |
| 2282 | if (device->is_physical_device) |
| 2283 | dev_warn(&ctrl_info->pci_dev->dev, |
| 2284 | "obtaining device info failed, skipping physical device %016llx\n", |
| 2285 | get_unaligned_be64(&phys_lun_ext_entry->wwid)); |
| 2286 | else |
| 2287 | dev_warn(&ctrl_info->pci_dev->dev, |
| 2288 | "obtaining device info failed, skipping logical device %08x%08x\n", |
| 2289 | *((u32 *)&device->scsi3addr), |
| 2290 | *((u32 *)&device->scsi3addr[4])); |
| 2291 | rc = 0; |
| 2292 | continue; |
| 2293 | } |
| 2294 | |
| 2295 | pqi_assign_bus_target_lun(device); |
| 2296 | |
| 2297 | if (device->is_physical_device) { |
| 2298 | pqi_set_physical_device_wwid(ctrl_info, device, phys_lun_ext_entry); |
| 2299 | if ((phys_lun_ext_entry->device_flags & |
| 2300 | CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) && |
| 2301 | phys_lun_ext_entry->aio_handle) { |
| 2302 | device->aio_enabled = true; |
| 2303 | device->aio_handle = |
| 2304 | phys_lun_ext_entry->aio_handle; |
| 2305 | } |
| 2306 | } else { |
| 2307 | memcpy(device->volume_id, log_lun_ext_entry->volume_id, |
| 2308 | sizeof(device->volume_id)); |
| 2309 | } |
| 2310 | |
| 2311 | if (pqi_is_device_with_sas_address(device)) |
| 2312 | device->sas_address = get_unaligned_be64(&device->wwid); |
| 2313 | |
| 2314 | new_device_list[num_valid_devices++] = device; |
| 2315 | } |
| 2316 | |
| 2317 | pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); |
| 2318 | |
| 2319 | out: |
| 2320 | list_for_each_entry_safe(device, next, &new_device_list_head, |
| 2321 | new_device_list_entry) { |
| 2322 | if (device->keep_device) |
| 2323 | continue; |
| 2324 | list_del(&device->new_device_list_entry); |
| 2325 | pqi_free_device(device); |
| 2326 | } |
| 2327 | |
| 2328 | kfree(new_device_list); |
| 2329 | kfree(physdev_list); |
| 2330 | kfree(logdev_list); |
| 2331 | kfree(id_phys); |
| 2332 | |
| 2333 | return rc; |
| 2334 | } |
| 2335 | |
| 2336 | static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) |
| 2337 | { |
| 2338 | int rc; |
| 2339 | int mutex_acquired; |
| 2340 | |
| 2341 | if (pqi_ctrl_offline(ctrl_info)) |
| 2342 | return -ENXIO; |
| 2343 | |
| 2344 | mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); |
| 2345 | |
| 2346 | if (!mutex_acquired) { |
| 2347 | if (pqi_ctrl_scan_blocked(ctrl_info)) |
| 2348 | return -EBUSY; |
| 2349 | pqi_schedule_rescan_worker_delayed(ctrl_info); |
| 2350 | return -EINPROGRESS; |
| 2351 | } |
| 2352 | |
| 2353 | rc = pqi_update_scsi_devices(ctrl_info); |
| 2354 | if (rc && !pqi_ctrl_scan_blocked(ctrl_info)) |
| 2355 | pqi_schedule_rescan_worker_delayed(ctrl_info); |
| 2356 | |
| 2357 | mutex_unlock(&ctrl_info->scan_mutex); |
| 2358 | |
| 2359 | return rc; |
| 2360 | } |
| 2361 | |
| 2362 | static void pqi_scan_start(struct Scsi_Host *shost) |
| 2363 | { |
| 2364 | struct pqi_ctrl_info *ctrl_info; |
| 2365 | |
| 2366 | ctrl_info = shost_to_hba(shost); |
| 2367 | |
| 2368 | pqi_scan_scsi_devices(ctrl_info); |
| 2369 | } |
| 2370 | |
| 2371 | /* Returns TRUE if scan is finished. */ |
| 2372 | |
| 2373 | static int pqi_scan_finished(struct Scsi_Host *shost, |
| 2374 | unsigned long elapsed_time) |
| 2375 | { |
| 2376 | struct pqi_ctrl_info *ctrl_info; |
| 2377 | |
| 2378 | ctrl_info = shost_priv(shost); |
| 2379 | |
| 2380 | return !mutex_is_locked(&ctrl_info->scan_mutex); |
| 2381 | } |
| 2382 | |
| 2383 | static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info, |
| 2384 | struct raid_map *raid_map, u64 first_block) |
| 2385 | { |
| 2386 | u32 volume_blk_size; |
| 2387 | |
| 2388 | /* |
| 2389 | * Set the encryption tweak values based on logical block address. |
| 2390 | * If the block size is 512, the tweak value is equal to the LBA. |
| 2391 | * For other block sizes, tweak value is (LBA * block size) / 512. |
| 2392 | */ |
| 2393 | volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); |
| 2394 | if (volume_blk_size != 512) |
| 2395 | first_block = (first_block * volume_blk_size) / 512; |
| 2396 | |
| 2397 | encryption_info->data_encryption_key_index = |
| 2398 | get_unaligned_le16(&raid_map->data_encryption_key_index); |
| 2399 | encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); |
| 2400 | encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); |
| 2401 | } |
| 2402 | |
| 2403 | /* |
| 2404 | * Attempt to perform RAID bypass mapping for a logical volume I/O. |
| 2405 | */ |
| 2406 | |
| 2407 | static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info, |
| 2408 | struct pqi_scsi_dev_raid_map_data *rmd) |
| 2409 | { |
| 2410 | bool is_supported = true; |
| 2411 | |
| 2412 | switch (rmd->raid_level) { |
| 2413 | case SA_RAID_0: |
| 2414 | break; |
| 2415 | case SA_RAID_1: |
| 2416 | if (rmd->is_write && (!ctrl_info->enable_r1_writes || |
| 2417 | rmd->data_length > ctrl_info->max_write_raid_1_10_2drive)) |
| 2418 | is_supported = false; |
| 2419 | break; |
| 2420 | case SA_RAID_TRIPLE: |
| 2421 | if (rmd->is_write && (!ctrl_info->enable_r1_writes || |
| 2422 | rmd->data_length > ctrl_info->max_write_raid_1_10_3drive)) |
| 2423 | is_supported = false; |
| 2424 | break; |
| 2425 | case SA_RAID_5: |
| 2426 | if (rmd->is_write && (!ctrl_info->enable_r5_writes || |
| 2427 | rmd->data_length > ctrl_info->max_write_raid_5_6)) |
| 2428 | is_supported = false; |
| 2429 | break; |
| 2430 | case SA_RAID_6: |
| 2431 | if (rmd->is_write && (!ctrl_info->enable_r6_writes || |
| 2432 | rmd->data_length > ctrl_info->max_write_raid_5_6)) |
| 2433 | is_supported = false; |
| 2434 | break; |
| 2435 | default: |
| 2436 | is_supported = false; |
| 2437 | break; |
| 2438 | } |
| 2439 | |
| 2440 | return is_supported; |
| 2441 | } |
| 2442 | |
| 2443 | #define PQI_RAID_BYPASS_INELIGIBLE 1 |
| 2444 | |
| 2445 | static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd, |
| 2446 | struct pqi_scsi_dev_raid_map_data *rmd) |
| 2447 | { |
| 2448 | /* Check for valid opcode, get LBA and block count. */ |
| 2449 | switch (scmd->cmnd[0]) { |
| 2450 | case WRITE_6: |
| 2451 | rmd->is_write = true; |
| 2452 | fallthrough; |
| 2453 | case READ_6: |
| 2454 | rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | |
| 2455 | (scmd->cmnd[2] << 8) | scmd->cmnd[3]); |
| 2456 | rmd->block_cnt = (u32)scmd->cmnd[4]; |
| 2457 | if (rmd->block_cnt == 0) |
| 2458 | rmd->block_cnt = 256; |
| 2459 | break; |
| 2460 | case WRITE_10: |
| 2461 | rmd->is_write = true; |
| 2462 | fallthrough; |
| 2463 | case READ_10: |
| 2464 | rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); |
| 2465 | rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); |
| 2466 | break; |
| 2467 | case WRITE_12: |
| 2468 | rmd->is_write = true; |
| 2469 | fallthrough; |
| 2470 | case READ_12: |
| 2471 | rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); |
| 2472 | rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]); |
| 2473 | break; |
| 2474 | case WRITE_16: |
| 2475 | rmd->is_write = true; |
| 2476 | fallthrough; |
| 2477 | case READ_16: |
| 2478 | rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]); |
| 2479 | rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]); |
| 2480 | break; |
| 2481 | default: |
| 2482 | /* Process via normal I/O path. */ |
| 2483 | return PQI_RAID_BYPASS_INELIGIBLE; |
| 2484 | } |
| 2485 | |
| 2486 | put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length); |
| 2487 | |
| 2488 | return 0; |
| 2489 | } |
| 2490 | |
| 2491 | static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info, |
| 2492 | struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map) |
| 2493 | { |
| 2494 | #if BITS_PER_LONG == 32 |
| 2495 | u64 tmpdiv; |
| 2496 | #endif |
| 2497 | |
| 2498 | rmd->last_block = rmd->first_block + rmd->block_cnt - 1; |
| 2499 | |
| 2500 | /* Check for invalid block or wraparound. */ |
| 2501 | if (rmd->last_block >= |
| 2502 | get_unaligned_le64(&raid_map->volume_blk_cnt) || |
| 2503 | rmd->last_block < rmd->first_block) |
| 2504 | return PQI_RAID_BYPASS_INELIGIBLE; |
| 2505 | |
| 2506 | rmd->data_disks_per_row = |
| 2507 | get_unaligned_le16(&raid_map->data_disks_per_row); |
| 2508 | rmd->strip_size = get_unaligned_le16(&raid_map->strip_size); |
| 2509 | rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); |
| 2510 | |
| 2511 | /* Calculate stripe information for the request. */ |
| 2512 | rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size; |
| 2513 | #if BITS_PER_LONG == 32 |
| 2514 | tmpdiv = rmd->first_block; |
| 2515 | do_div(tmpdiv, rmd->blocks_per_row); |
| 2516 | rmd->first_row = tmpdiv; |
| 2517 | tmpdiv = rmd->last_block; |
| 2518 | do_div(tmpdiv, rmd->blocks_per_row); |
| 2519 | rmd->last_row = tmpdiv; |
| 2520 | rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row)); |
| 2521 | rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row)); |
| 2522 | tmpdiv = rmd->first_row_offset; |
| 2523 | do_div(tmpdiv, rmd->strip_size); |
| 2524 | rmd->first_column = tmpdiv; |
| 2525 | tmpdiv = rmd->last_row_offset; |
| 2526 | do_div(tmpdiv, rmd->strip_size); |
| 2527 | rmd->last_column = tmpdiv; |
| 2528 | #else |
| 2529 | rmd->first_row = rmd->first_block / rmd->blocks_per_row; |
| 2530 | rmd->last_row = rmd->last_block / rmd->blocks_per_row; |
| 2531 | rmd->first_row_offset = (u32)(rmd->first_block - |
| 2532 | (rmd->first_row * rmd->blocks_per_row)); |
| 2533 | rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * |
| 2534 | rmd->blocks_per_row)); |
| 2535 | rmd->first_column = rmd->first_row_offset / rmd->strip_size; |
| 2536 | rmd->last_column = rmd->last_row_offset / rmd->strip_size; |
| 2537 | #endif |
| 2538 | |
| 2539 | /* If this isn't a single row/column then give to the controller. */ |
| 2540 | if (rmd->first_row != rmd->last_row || |
| 2541 | rmd->first_column != rmd->last_column) |
| 2542 | return PQI_RAID_BYPASS_INELIGIBLE; |
| 2543 | |
| 2544 | /* Proceeding with driver mapping. */ |
| 2545 | rmd->total_disks_per_row = rmd->data_disks_per_row + |
| 2546 | get_unaligned_le16(&raid_map->metadata_disks_per_row); |
| 2547 | rmd->map_row = ((u32)(rmd->first_row >> |
| 2548 | raid_map->parity_rotation_shift)) % |
| 2549 | get_unaligned_le16(&raid_map->row_cnt); |
| 2550 | rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) + |
| 2551 | rmd->first_column; |
| 2552 | |
| 2553 | return 0; |
| 2554 | } |
| 2555 | |
| 2556 | static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd, |
| 2557 | struct raid_map *raid_map) |
| 2558 | { |
| 2559 | #if BITS_PER_LONG == 32 |
| 2560 | u64 tmpdiv; |
| 2561 | #endif |
| 2562 | /* RAID 50/60 */ |
| 2563 | /* Verify first and last block are in same RAID group. */ |
| 2564 | rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count; |
| 2565 | #if BITS_PER_LONG == 32 |
| 2566 | tmpdiv = rmd->first_block; |
| 2567 | rmd->first_group = do_div(tmpdiv, rmd->stripesize); |
| 2568 | tmpdiv = rmd->first_group; |
| 2569 | do_div(tmpdiv, rmd->blocks_per_row); |
| 2570 | rmd->first_group = tmpdiv; |
| 2571 | tmpdiv = rmd->last_block; |
| 2572 | rmd->last_group = do_div(tmpdiv, rmd->stripesize); |
| 2573 | tmpdiv = rmd->last_group; |
| 2574 | do_div(tmpdiv, rmd->blocks_per_row); |
| 2575 | rmd->last_group = tmpdiv; |
| 2576 | #else |
| 2577 | rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row; |
| 2578 | rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row; |
| 2579 | #endif |
| 2580 | if (rmd->first_group != rmd->last_group) |
| 2581 | return PQI_RAID_BYPASS_INELIGIBLE; |
| 2582 | |
| 2583 | /* Verify request is in a single row of RAID 5/6. */ |
| 2584 | #if BITS_PER_LONG == 32 |
| 2585 | tmpdiv = rmd->first_block; |
| 2586 | do_div(tmpdiv, rmd->stripesize); |
| 2587 | rmd->first_row = tmpdiv; |
| 2588 | rmd->r5or6_first_row = tmpdiv; |
| 2589 | tmpdiv = rmd->last_block; |
| 2590 | do_div(tmpdiv, rmd->stripesize); |
| 2591 | rmd->r5or6_last_row = tmpdiv; |
| 2592 | #else |
| 2593 | rmd->first_row = rmd->r5or6_first_row = |
| 2594 | rmd->first_block / rmd->stripesize; |
| 2595 | rmd->r5or6_last_row = rmd->last_block / rmd->stripesize; |
| 2596 | #endif |
| 2597 | if (rmd->r5or6_first_row != rmd->r5or6_last_row) |
| 2598 | return PQI_RAID_BYPASS_INELIGIBLE; |
| 2599 | |
| 2600 | /* Verify request is in a single column. */ |
| 2601 | #if BITS_PER_LONG == 32 |
| 2602 | tmpdiv = rmd->first_block; |
| 2603 | rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize); |
| 2604 | tmpdiv = rmd->first_row_offset; |
| 2605 | rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row); |
| 2606 | rmd->r5or6_first_row_offset = rmd->first_row_offset; |
| 2607 | tmpdiv = rmd->last_block; |
| 2608 | rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize); |
| 2609 | tmpdiv = rmd->r5or6_last_row_offset; |
| 2610 | rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row); |
| 2611 | tmpdiv = rmd->r5or6_first_row_offset; |
| 2612 | do_div(tmpdiv, rmd->strip_size); |
| 2613 | rmd->first_column = rmd->r5or6_first_column = tmpdiv; |
| 2614 | tmpdiv = rmd->r5or6_last_row_offset; |
| 2615 | do_div(tmpdiv, rmd->strip_size); |
| 2616 | rmd->r5or6_last_column = tmpdiv; |
| 2617 | #else |
| 2618 | rmd->first_row_offset = rmd->r5or6_first_row_offset = |
| 2619 | (u32)((rmd->first_block % rmd->stripesize) % |
| 2620 | rmd->blocks_per_row); |
| 2621 | |
| 2622 | rmd->r5or6_last_row_offset = |
| 2623 | (u32)((rmd->last_block % rmd->stripesize) % |
| 2624 | rmd->blocks_per_row); |
| 2625 | |
| 2626 | rmd->first_column = |
| 2627 | rmd->r5or6_first_row_offset / rmd->strip_size; |
| 2628 | rmd->r5or6_first_column = rmd->first_column; |
| 2629 | rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size; |
| 2630 | #endif |
| 2631 | if (rmd->r5or6_first_column != rmd->r5or6_last_column) |
| 2632 | return PQI_RAID_BYPASS_INELIGIBLE; |
| 2633 | |
| 2634 | /* Request is eligible. */ |
| 2635 | rmd->map_row = |
| 2636 | ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) % |
| 2637 | get_unaligned_le16(&raid_map->row_cnt); |
| 2638 | |
| 2639 | rmd->map_index = (rmd->first_group * |
| 2640 | (get_unaligned_le16(&raid_map->row_cnt) * |
| 2641 | rmd->total_disks_per_row)) + |
| 2642 | (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column; |
| 2643 | |
| 2644 | if (rmd->is_write) { |
| 2645 | u32 index; |
| 2646 | |
| 2647 | /* |
| 2648 | * p_parity_it_nexus and q_parity_it_nexus are pointers to the |
| 2649 | * parity entries inside the device's raid_map. |
| 2650 | * |
| 2651 | * A device's RAID map is bounded by: number of RAID disks squared. |
| 2652 | * |
| 2653 | * The devices RAID map size is checked during device |
| 2654 | * initialization. |
| 2655 | */ |
| 2656 | index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row); |
| 2657 | index *= rmd->total_disks_per_row; |
| 2658 | index -= get_unaligned_le16(&raid_map->metadata_disks_per_row); |
| 2659 | |
| 2660 | rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle; |
| 2661 | if (rmd->raid_level == SA_RAID_6) { |
| 2662 | rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle; |
| 2663 | rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1]; |
| 2664 | } |
| 2665 | if (rmd->blocks_per_row == 0) |
| 2666 | return PQI_RAID_BYPASS_INELIGIBLE; |
| 2667 | #if BITS_PER_LONG == 32 |
| 2668 | tmpdiv = rmd->first_block; |
| 2669 | do_div(tmpdiv, rmd->blocks_per_row); |
| 2670 | rmd->row = tmpdiv; |
| 2671 | #else |
| 2672 | rmd->row = rmd->first_block / rmd->blocks_per_row; |
| 2673 | #endif |
| 2674 | } |
| 2675 | |
| 2676 | return 0; |
| 2677 | } |
| 2678 | |
| 2679 | static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd) |
| 2680 | { |
| 2681 | /* Build the new CDB for the physical disk I/O. */ |
| 2682 | if (rmd->disk_block > 0xffffffff) { |
| 2683 | rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16; |
| 2684 | rmd->cdb[1] = 0; |
| 2685 | put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]); |
| 2686 | put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]); |
| 2687 | rmd->cdb[14] = 0; |
| 2688 | rmd->cdb[15] = 0; |
| 2689 | rmd->cdb_length = 16; |
| 2690 | } else { |
| 2691 | rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10; |
| 2692 | rmd->cdb[1] = 0; |
| 2693 | put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]); |
| 2694 | rmd->cdb[6] = 0; |
| 2695 | put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]); |
| 2696 | rmd->cdb[9] = 0; |
| 2697 | rmd->cdb_length = 10; |
| 2698 | } |
| 2699 | } |
| 2700 | |
| 2701 | static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map, |
| 2702 | struct pqi_scsi_dev_raid_map_data *rmd) |
| 2703 | { |
| 2704 | u32 index; |
| 2705 | u32 group; |
| 2706 | |
| 2707 | group = rmd->map_index / rmd->data_disks_per_row; |
| 2708 | |
| 2709 | index = rmd->map_index - (group * rmd->data_disks_per_row); |
| 2710 | rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle; |
| 2711 | index += rmd->data_disks_per_row; |
| 2712 | rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle; |
| 2713 | if (rmd->layout_map_count > 2) { |
| 2714 | index += rmd->data_disks_per_row; |
| 2715 | rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle; |
| 2716 | } |
| 2717 | |
| 2718 | rmd->num_it_nexus_entries = rmd->layout_map_count; |
| 2719 | } |
| 2720 | |
| 2721 | static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, |
| 2722 | struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, |
| 2723 | struct pqi_queue_group *queue_group) |
| 2724 | { |
| 2725 | int rc; |
| 2726 | struct raid_map *raid_map; |
| 2727 | u32 group; |
| 2728 | u32 next_bypass_group; |
| 2729 | struct pqi_encryption_info *encryption_info_ptr; |
| 2730 | struct pqi_encryption_info encryption_info; |
| 2731 | struct pqi_scsi_dev_raid_map_data rmd = { 0 }; |
| 2732 | |
| 2733 | rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); |
| 2734 | if (rc) |
| 2735 | return PQI_RAID_BYPASS_INELIGIBLE; |
| 2736 | |
| 2737 | rmd.raid_level = device->raid_level; |
| 2738 | |
| 2739 | if (!pqi_aio_raid_level_supported(ctrl_info, &rmd)) |
| 2740 | return PQI_RAID_BYPASS_INELIGIBLE; |
| 2741 | |
| 2742 | if (unlikely(rmd.block_cnt == 0)) |
| 2743 | return PQI_RAID_BYPASS_INELIGIBLE; |
| 2744 | |
| 2745 | raid_map = device->raid_map; |
| 2746 | |
| 2747 | rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map); |
| 2748 | if (rc) |
| 2749 | return PQI_RAID_BYPASS_INELIGIBLE; |
| 2750 | |
| 2751 | if (device->raid_level == SA_RAID_1 || |
| 2752 | device->raid_level == SA_RAID_TRIPLE) { |
| 2753 | if (rmd.is_write) { |
| 2754 | pqi_calc_aio_r1_nexus(raid_map, &rmd); |
| 2755 | } else { |
| 2756 | group = device->next_bypass_group; |
| 2757 | next_bypass_group = group + 1; |
| 2758 | if (next_bypass_group >= rmd.layout_map_count) |
| 2759 | next_bypass_group = 0; |
| 2760 | device->next_bypass_group = next_bypass_group; |
| 2761 | rmd.map_index += group * rmd.data_disks_per_row; |
| 2762 | } |
| 2763 | } else if ((device->raid_level == SA_RAID_5 || |
| 2764 | device->raid_level == SA_RAID_6) && |
| 2765 | (rmd.layout_map_count > 1 || rmd.is_write)) { |
| 2766 | rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map); |
| 2767 | if (rc) |
| 2768 | return PQI_RAID_BYPASS_INELIGIBLE; |
| 2769 | } |
| 2770 | |
| 2771 | if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES)) |
| 2772 | return PQI_RAID_BYPASS_INELIGIBLE; |
| 2773 | |
| 2774 | rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle; |
| 2775 | rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + |
| 2776 | rmd.first_row * rmd.strip_size + |
| 2777 | (rmd.first_row_offset - rmd.first_column * rmd.strip_size); |
| 2778 | rmd.disk_block_cnt = rmd.block_cnt; |
| 2779 | |
| 2780 | /* Handle differing logical/physical block sizes. */ |
| 2781 | if (raid_map->phys_blk_shift) { |
| 2782 | rmd.disk_block <<= raid_map->phys_blk_shift; |
| 2783 | rmd.disk_block_cnt <<= raid_map->phys_blk_shift; |
| 2784 | } |
| 2785 | |
| 2786 | if (unlikely(rmd.disk_block_cnt > 0xffff)) |
| 2787 | return PQI_RAID_BYPASS_INELIGIBLE; |
| 2788 | |
| 2789 | pqi_set_aio_cdb(&rmd); |
| 2790 | |
| 2791 | if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) { |
| 2792 | if (rmd.data_length > device->max_transfer_encrypted) |
| 2793 | return PQI_RAID_BYPASS_INELIGIBLE; |
| 2794 | pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block); |
| 2795 | encryption_info_ptr = &encryption_info; |
| 2796 | } else { |
| 2797 | encryption_info_ptr = NULL; |
| 2798 | } |
| 2799 | |
| 2800 | if (rmd.is_write) { |
| 2801 | switch (device->raid_level) { |
| 2802 | case SA_RAID_1: |
| 2803 | case SA_RAID_TRIPLE: |
| 2804 | return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group, |
| 2805 | encryption_info_ptr, device, &rmd); |
| 2806 | case SA_RAID_5: |
| 2807 | case SA_RAID_6: |
| 2808 | return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group, |
| 2809 | encryption_info_ptr, device, &rmd); |
| 2810 | } |
| 2811 | } |
| 2812 | |
| 2813 | return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle, |
| 2814 | rmd.cdb, rmd.cdb_length, queue_group, |
| 2815 | encryption_info_ptr, true); |
| 2816 | } |
| 2817 | |
| 2818 | #define PQI_STATUS_IDLE 0x0 |
| 2819 | |
| 2820 | #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 |
| 2821 | #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 |
| 2822 | |
| 2823 | #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 |
| 2824 | #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 |
| 2825 | #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 |
| 2826 | #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 |
| 2827 | #define PQI_DEVICE_STATE_ERROR 0x4 |
| 2828 | |
| 2829 | #define PQI_MODE_READY_TIMEOUT_SECS 30 |
| 2830 | #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 |
| 2831 | |
| 2832 | static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) |
| 2833 | { |
| 2834 | struct pqi_device_registers __iomem *pqi_registers; |
| 2835 | unsigned long timeout; |
| 2836 | u64 signature; |
| 2837 | u8 status; |
| 2838 | |
| 2839 | pqi_registers = ctrl_info->pqi_registers; |
| 2840 | timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies; |
| 2841 | |
| 2842 | while (1) { |
| 2843 | signature = readq(&pqi_registers->signature); |
| 2844 | if (memcmp(&signature, PQI_DEVICE_SIGNATURE, |
| 2845 | sizeof(signature)) == 0) |
| 2846 | break; |
| 2847 | if (time_after(jiffies, timeout)) { |
| 2848 | dev_err(&ctrl_info->pci_dev->dev, |
| 2849 | "timed out waiting for PQI signature\n"); |
| 2850 | return -ETIMEDOUT; |
| 2851 | } |
| 2852 | msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); |
| 2853 | } |
| 2854 | |
| 2855 | while (1) { |
| 2856 | status = readb(&pqi_registers->function_and_status_code); |
| 2857 | if (status == PQI_STATUS_IDLE) |
| 2858 | break; |
| 2859 | if (time_after(jiffies, timeout)) { |
| 2860 | dev_err(&ctrl_info->pci_dev->dev, |
| 2861 | "timed out waiting for PQI IDLE\n"); |
| 2862 | return -ETIMEDOUT; |
| 2863 | } |
| 2864 | msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); |
| 2865 | } |
| 2866 | |
| 2867 | while (1) { |
| 2868 | if (readl(&pqi_registers->device_status) == |
| 2869 | PQI_DEVICE_STATE_ALL_REGISTERS_READY) |
| 2870 | break; |
| 2871 | if (time_after(jiffies, timeout)) { |
| 2872 | dev_err(&ctrl_info->pci_dev->dev, |
| 2873 | "timed out waiting for PQI all registers ready\n"); |
| 2874 | return -ETIMEDOUT; |
| 2875 | } |
| 2876 | msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); |
| 2877 | } |
| 2878 | |
| 2879 | return 0; |
| 2880 | } |
| 2881 | |
| 2882 | static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) |
| 2883 | { |
| 2884 | struct pqi_scsi_dev *device; |
| 2885 | |
| 2886 | device = io_request->scmd->device->hostdata; |
| 2887 | device->raid_bypass_enabled = false; |
| 2888 | device->aio_enabled = false; |
| 2889 | } |
| 2890 | |
| 2891 | static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) |
| 2892 | { |
| 2893 | struct pqi_ctrl_info *ctrl_info; |
| 2894 | struct pqi_scsi_dev *device; |
| 2895 | |
| 2896 | device = sdev->hostdata; |
| 2897 | if (device->device_offline) |
| 2898 | return; |
| 2899 | |
| 2900 | device->device_offline = true; |
| 2901 | ctrl_info = shost_to_hba(sdev->host); |
| 2902 | pqi_schedule_rescan_worker(ctrl_info); |
| 2903 | dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", |
| 2904 | path, ctrl_info->scsi_host->host_no, device->bus, |
| 2905 | device->target, device->lun); |
| 2906 | } |
| 2907 | |
| 2908 | static void pqi_process_raid_io_error(struct pqi_io_request *io_request) |
| 2909 | { |
| 2910 | u8 scsi_status; |
| 2911 | u8 host_byte; |
| 2912 | struct scsi_cmnd *scmd; |
| 2913 | struct pqi_raid_error_info *error_info; |
| 2914 | size_t sense_data_length; |
| 2915 | int residual_count; |
| 2916 | int xfer_count; |
| 2917 | struct scsi_sense_hdr sshdr; |
| 2918 | |
| 2919 | scmd = io_request->scmd; |
| 2920 | if (!scmd) |
| 2921 | return; |
| 2922 | |
| 2923 | error_info = io_request->error_info; |
| 2924 | scsi_status = error_info->status; |
| 2925 | host_byte = DID_OK; |
| 2926 | |
| 2927 | switch (error_info->data_out_result) { |
| 2928 | case PQI_DATA_IN_OUT_GOOD: |
| 2929 | break; |
| 2930 | case PQI_DATA_IN_OUT_UNDERFLOW: |
| 2931 | xfer_count = |
| 2932 | get_unaligned_le32(&error_info->data_out_transferred); |
| 2933 | residual_count = scsi_bufflen(scmd) - xfer_count; |
| 2934 | scsi_set_resid(scmd, residual_count); |
| 2935 | if (xfer_count < scmd->underflow) |
| 2936 | host_byte = DID_SOFT_ERROR; |
| 2937 | break; |
| 2938 | case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: |
| 2939 | case PQI_DATA_IN_OUT_ABORTED: |
| 2940 | host_byte = DID_ABORT; |
| 2941 | break; |
| 2942 | case PQI_DATA_IN_OUT_TIMEOUT: |
| 2943 | host_byte = DID_TIME_OUT; |
| 2944 | break; |
| 2945 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: |
| 2946 | case PQI_DATA_IN_OUT_PROTOCOL_ERROR: |
| 2947 | case PQI_DATA_IN_OUT_BUFFER_ERROR: |
| 2948 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: |
| 2949 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: |
| 2950 | case PQI_DATA_IN_OUT_ERROR: |
| 2951 | case PQI_DATA_IN_OUT_HARDWARE_ERROR: |
| 2952 | case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: |
| 2953 | case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: |
| 2954 | case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: |
| 2955 | case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: |
| 2956 | case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: |
| 2957 | case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: |
| 2958 | case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: |
| 2959 | case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: |
| 2960 | case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: |
| 2961 | default: |
| 2962 | host_byte = DID_ERROR; |
| 2963 | break; |
| 2964 | } |
| 2965 | |
| 2966 | sense_data_length = get_unaligned_le16(&error_info->sense_data_length); |
| 2967 | if (sense_data_length == 0) |
| 2968 | sense_data_length = |
| 2969 | get_unaligned_le16(&error_info->response_data_length); |
| 2970 | if (sense_data_length) { |
| 2971 | if (sense_data_length > sizeof(error_info->data)) |
| 2972 | sense_data_length = sizeof(error_info->data); |
| 2973 | |
| 2974 | if (scsi_status == SAM_STAT_CHECK_CONDITION && |
| 2975 | scsi_normalize_sense(error_info->data, |
| 2976 | sense_data_length, &sshdr) && |
| 2977 | sshdr.sense_key == HARDWARE_ERROR && |
| 2978 | sshdr.asc == 0x3e) { |
| 2979 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); |
| 2980 | struct pqi_scsi_dev *device = scmd->device->hostdata; |
| 2981 | |
| 2982 | switch (sshdr.ascq) { |
| 2983 | case 0x1: /* LOGICAL UNIT FAILURE */ |
| 2984 | if (printk_ratelimit()) |
| 2985 | scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n", |
| 2986 | ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); |
| 2987 | pqi_take_device_offline(scmd->device, "RAID"); |
| 2988 | host_byte = DID_NO_CONNECT; |
| 2989 | break; |
| 2990 | |
| 2991 | default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */ |
| 2992 | if (printk_ratelimit()) |
| 2993 | scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n", |
| 2994 | sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); |
| 2995 | break; |
| 2996 | } |
| 2997 | } |
| 2998 | |
| 2999 | if (sense_data_length > SCSI_SENSE_BUFFERSIZE) |
| 3000 | sense_data_length = SCSI_SENSE_BUFFERSIZE; |
| 3001 | memcpy(scmd->sense_buffer, error_info->data, |
| 3002 | sense_data_length); |
| 3003 | } |
| 3004 | |
| 3005 | scmd->result = scsi_status; |
| 3006 | set_host_byte(scmd, host_byte); |
| 3007 | } |
| 3008 | |
| 3009 | static void pqi_process_aio_io_error(struct pqi_io_request *io_request) |
| 3010 | { |
| 3011 | u8 scsi_status; |
| 3012 | u8 host_byte; |
| 3013 | struct scsi_cmnd *scmd; |
| 3014 | struct pqi_aio_error_info *error_info; |
| 3015 | size_t sense_data_length; |
| 3016 | int residual_count; |
| 3017 | int xfer_count; |
| 3018 | bool device_offline; |
| 3019 | |
| 3020 | scmd = io_request->scmd; |
| 3021 | error_info = io_request->error_info; |
| 3022 | host_byte = DID_OK; |
| 3023 | sense_data_length = 0; |
| 3024 | device_offline = false; |
| 3025 | |
| 3026 | switch (error_info->service_response) { |
| 3027 | case PQI_AIO_SERV_RESPONSE_COMPLETE: |
| 3028 | scsi_status = error_info->status; |
| 3029 | break; |
| 3030 | case PQI_AIO_SERV_RESPONSE_FAILURE: |
| 3031 | switch (error_info->status) { |
| 3032 | case PQI_AIO_STATUS_IO_ABORTED: |
| 3033 | scsi_status = SAM_STAT_TASK_ABORTED; |
| 3034 | break; |
| 3035 | case PQI_AIO_STATUS_UNDERRUN: |
| 3036 | scsi_status = SAM_STAT_GOOD; |
| 3037 | residual_count = get_unaligned_le32( |
| 3038 | &error_info->residual_count); |
| 3039 | scsi_set_resid(scmd, residual_count); |
| 3040 | xfer_count = scsi_bufflen(scmd) - residual_count; |
| 3041 | if (xfer_count < scmd->underflow) |
| 3042 | host_byte = DID_SOFT_ERROR; |
| 3043 | break; |
| 3044 | case PQI_AIO_STATUS_OVERRUN: |
| 3045 | scsi_status = SAM_STAT_GOOD; |
| 3046 | break; |
| 3047 | case PQI_AIO_STATUS_AIO_PATH_DISABLED: |
| 3048 | pqi_aio_path_disabled(io_request); |
| 3049 | scsi_status = SAM_STAT_GOOD; |
| 3050 | io_request->status = -EAGAIN; |
| 3051 | break; |
| 3052 | case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: |
| 3053 | case PQI_AIO_STATUS_INVALID_DEVICE: |
| 3054 | if (!io_request->raid_bypass) { |
| 3055 | device_offline = true; |
| 3056 | pqi_take_device_offline(scmd->device, "AIO"); |
| 3057 | host_byte = DID_NO_CONNECT; |
| 3058 | } |
| 3059 | scsi_status = SAM_STAT_CHECK_CONDITION; |
| 3060 | break; |
| 3061 | case PQI_AIO_STATUS_IO_ERROR: |
| 3062 | default: |
| 3063 | scsi_status = SAM_STAT_CHECK_CONDITION; |
| 3064 | break; |
| 3065 | } |
| 3066 | break; |
| 3067 | case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: |
| 3068 | case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: |
| 3069 | scsi_status = SAM_STAT_GOOD; |
| 3070 | break; |
| 3071 | case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: |
| 3072 | case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: |
| 3073 | default: |
| 3074 | scsi_status = SAM_STAT_CHECK_CONDITION; |
| 3075 | break; |
| 3076 | } |
| 3077 | |
| 3078 | if (error_info->data_present) { |
| 3079 | sense_data_length = |
| 3080 | get_unaligned_le16(&error_info->data_length); |
| 3081 | if (sense_data_length) { |
| 3082 | if (sense_data_length > sizeof(error_info->data)) |
| 3083 | sense_data_length = sizeof(error_info->data); |
| 3084 | if (sense_data_length > SCSI_SENSE_BUFFERSIZE) |
| 3085 | sense_data_length = SCSI_SENSE_BUFFERSIZE; |
| 3086 | memcpy(scmd->sense_buffer, error_info->data, |
| 3087 | sense_data_length); |
| 3088 | } |
| 3089 | } |
| 3090 | |
| 3091 | if (device_offline && sense_data_length == 0) |
| 3092 | scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, |
| 3093 | 0x3e, 0x1); |
| 3094 | |
| 3095 | scmd->result = scsi_status; |
| 3096 | set_host_byte(scmd, host_byte); |
| 3097 | } |
| 3098 | |
| 3099 | static void pqi_process_io_error(unsigned int iu_type, |
| 3100 | struct pqi_io_request *io_request) |
| 3101 | { |
| 3102 | switch (iu_type) { |
| 3103 | case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: |
| 3104 | pqi_process_raid_io_error(io_request); |
| 3105 | break; |
| 3106 | case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: |
| 3107 | pqi_process_aio_io_error(io_request); |
| 3108 | break; |
| 3109 | } |
| 3110 | } |
| 3111 | |
| 3112 | static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info, |
| 3113 | struct pqi_task_management_response *response) |
| 3114 | { |
| 3115 | int rc; |
| 3116 | |
| 3117 | switch (response->response_code) { |
| 3118 | case SOP_TMF_COMPLETE: |
| 3119 | case SOP_TMF_FUNCTION_SUCCEEDED: |
| 3120 | rc = 0; |
| 3121 | break; |
| 3122 | case SOP_TMF_REJECTED: |
| 3123 | rc = -EAGAIN; |
| 3124 | break; |
| 3125 | default: |
| 3126 | rc = -EIO; |
| 3127 | break; |
| 3128 | } |
| 3129 | |
| 3130 | if (rc) |
| 3131 | dev_err(&ctrl_info->pci_dev->dev, |
| 3132 | "Task Management Function error: %d (response code: %u)\n", rc, response->response_code); |
| 3133 | |
| 3134 | return rc; |
| 3135 | } |
| 3136 | |
| 3137 | static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info) |
| 3138 | { |
| 3139 | pqi_take_ctrl_offline(ctrl_info); |
| 3140 | } |
| 3141 | |
| 3142 | static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) |
| 3143 | { |
| 3144 | int num_responses; |
| 3145 | pqi_index_t oq_pi; |
| 3146 | pqi_index_t oq_ci; |
| 3147 | struct pqi_io_request *io_request; |
| 3148 | struct pqi_io_response *response; |
| 3149 | u16 request_id; |
| 3150 | |
| 3151 | num_responses = 0; |
| 3152 | oq_ci = queue_group->oq_ci_copy; |
| 3153 | |
| 3154 | while (1) { |
| 3155 | oq_pi = readl(queue_group->oq_pi); |
| 3156 | if (oq_pi >= ctrl_info->num_elements_per_oq) { |
| 3157 | pqi_invalid_response(ctrl_info); |
| 3158 | dev_err(&ctrl_info->pci_dev->dev, |
| 3159 | "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", |
| 3160 | oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); |
| 3161 | return -1; |
| 3162 | } |
| 3163 | if (oq_pi == oq_ci) |
| 3164 | break; |
| 3165 | |
| 3166 | num_responses++; |
| 3167 | response = queue_group->oq_element_array + |
| 3168 | (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); |
| 3169 | |
| 3170 | request_id = get_unaligned_le16(&response->request_id); |
| 3171 | if (request_id >= ctrl_info->max_io_slots) { |
| 3172 | pqi_invalid_response(ctrl_info); |
| 3173 | dev_err(&ctrl_info->pci_dev->dev, |
| 3174 | "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n", |
| 3175 | request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); |
| 3176 | return -1; |
| 3177 | } |
| 3178 | |
| 3179 | io_request = &ctrl_info->io_request_pool[request_id]; |
| 3180 | if (atomic_read(&io_request->refcount) == 0) { |
| 3181 | pqi_invalid_response(ctrl_info); |
| 3182 | dev_err(&ctrl_info->pci_dev->dev, |
| 3183 | "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n", |
| 3184 | request_id, oq_pi, oq_ci); |
| 3185 | return -1; |
| 3186 | } |
| 3187 | |
| 3188 | switch (response->header.iu_type) { |
| 3189 | case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: |
| 3190 | case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: |
| 3191 | if (io_request->scmd) |
| 3192 | io_request->scmd->result = 0; |
| 3193 | fallthrough; |
| 3194 | case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: |
| 3195 | break; |
| 3196 | case PQI_RESPONSE_IU_VENDOR_GENERAL: |
| 3197 | io_request->status = |
| 3198 | get_unaligned_le16( |
| 3199 | &((struct pqi_vendor_general_response *)response)->status); |
| 3200 | break; |
| 3201 | case PQI_RESPONSE_IU_TASK_MANAGEMENT: |
| 3202 | io_request->status = pqi_interpret_task_management_response(ctrl_info, |
| 3203 | (void *)response); |
| 3204 | break; |
| 3205 | case PQI_RESPONSE_IU_AIO_PATH_DISABLED: |
| 3206 | pqi_aio_path_disabled(io_request); |
| 3207 | io_request->status = -EAGAIN; |
| 3208 | break; |
| 3209 | case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: |
| 3210 | case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: |
| 3211 | io_request->error_info = ctrl_info->error_buffer + |
| 3212 | (get_unaligned_le16(&response->error_index) * |
| 3213 | PQI_ERROR_BUFFER_ELEMENT_LENGTH); |
| 3214 | pqi_process_io_error(response->header.iu_type, io_request); |
| 3215 | break; |
| 3216 | default: |
| 3217 | pqi_invalid_response(ctrl_info); |
| 3218 | dev_err(&ctrl_info->pci_dev->dev, |
| 3219 | "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n", |
| 3220 | response->header.iu_type, oq_pi, oq_ci); |
| 3221 | return -1; |
| 3222 | } |
| 3223 | |
| 3224 | io_request->io_complete_callback(io_request, io_request->context); |
| 3225 | |
| 3226 | /* |
| 3227 | * Note that the I/O request structure CANNOT BE TOUCHED after |
| 3228 | * returning from the I/O completion callback! |
| 3229 | */ |
| 3230 | oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; |
| 3231 | } |
| 3232 | |
| 3233 | if (num_responses) { |
| 3234 | queue_group->oq_ci_copy = oq_ci; |
| 3235 | writel(oq_ci, queue_group->oq_ci); |
| 3236 | } |
| 3237 | |
| 3238 | return num_responses; |
| 3239 | } |
| 3240 | |
| 3241 | static inline unsigned int pqi_num_elements_free(unsigned int pi, |
| 3242 | unsigned int ci, unsigned int elements_in_queue) |
| 3243 | { |
| 3244 | unsigned int num_elements_used; |
| 3245 | |
| 3246 | if (pi >= ci) |
| 3247 | num_elements_used = pi - ci; |
| 3248 | else |
| 3249 | num_elements_used = elements_in_queue - ci + pi; |
| 3250 | |
| 3251 | return elements_in_queue - num_elements_used - 1; |
| 3252 | } |
| 3253 | |
| 3254 | static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, |
| 3255 | struct pqi_event_acknowledge_request *iu, size_t iu_length) |
| 3256 | { |
| 3257 | pqi_index_t iq_pi; |
| 3258 | pqi_index_t iq_ci; |
| 3259 | unsigned long flags; |
| 3260 | void *next_element; |
| 3261 | struct pqi_queue_group *queue_group; |
| 3262 | |
| 3263 | queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; |
| 3264 | put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); |
| 3265 | |
| 3266 | while (1) { |
| 3267 | spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); |
| 3268 | |
| 3269 | iq_pi = queue_group->iq_pi_copy[RAID_PATH]; |
| 3270 | iq_ci = readl(queue_group->iq_ci[RAID_PATH]); |
| 3271 | |
| 3272 | if (pqi_num_elements_free(iq_pi, iq_ci, |
| 3273 | ctrl_info->num_elements_per_iq)) |
| 3274 | break; |
| 3275 | |
| 3276 | spin_unlock_irqrestore( |
| 3277 | &queue_group->submit_lock[RAID_PATH], flags); |
| 3278 | |
| 3279 | if (pqi_ctrl_offline(ctrl_info)) |
| 3280 | return; |
| 3281 | } |
| 3282 | |
| 3283 | next_element = queue_group->iq_element_array[RAID_PATH] + |
| 3284 | (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
| 3285 | |
| 3286 | memcpy(next_element, iu, iu_length); |
| 3287 | |
| 3288 | iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; |
| 3289 | queue_group->iq_pi_copy[RAID_PATH] = iq_pi; |
| 3290 | |
| 3291 | /* |
| 3292 | * This write notifies the controller that an IU is available to be |
| 3293 | * processed. |
| 3294 | */ |
| 3295 | writel(iq_pi, queue_group->iq_pi[RAID_PATH]); |
| 3296 | |
| 3297 | spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); |
| 3298 | } |
| 3299 | |
| 3300 | static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, |
| 3301 | struct pqi_event *event) |
| 3302 | { |
| 3303 | struct pqi_event_acknowledge_request request; |
| 3304 | |
| 3305 | memset(&request, 0, sizeof(request)); |
| 3306 | |
| 3307 | request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; |
| 3308 | put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, |
| 3309 | &request.header.iu_length); |
| 3310 | request.event_type = event->event_type; |
| 3311 | put_unaligned_le16(event->event_id, &request.event_id); |
| 3312 | put_unaligned_le32(event->additional_event_id, &request.additional_event_id); |
| 3313 | |
| 3314 | pqi_send_event_ack(ctrl_info, &request, sizeof(request)); |
| 3315 | } |
| 3316 | |
| 3317 | #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30 |
| 3318 | #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1 |
| 3319 | |
| 3320 | static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( |
| 3321 | struct pqi_ctrl_info *ctrl_info) |
| 3322 | { |
| 3323 | u8 status; |
| 3324 | unsigned long timeout; |
| 3325 | |
| 3326 | timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies; |
| 3327 | |
| 3328 | while (1) { |
| 3329 | status = pqi_read_soft_reset_status(ctrl_info); |
| 3330 | if (status & PQI_SOFT_RESET_INITIATE) |
| 3331 | return RESET_INITIATE_DRIVER; |
| 3332 | |
| 3333 | if (status & PQI_SOFT_RESET_ABORT) |
| 3334 | return RESET_ABORT; |
| 3335 | |
| 3336 | if (!sis_is_firmware_running(ctrl_info)) |
| 3337 | return RESET_NORESPONSE; |
| 3338 | |
| 3339 | if (time_after(jiffies, timeout)) { |
| 3340 | dev_warn(&ctrl_info->pci_dev->dev, |
| 3341 | "timed out waiting for soft reset status\n"); |
| 3342 | return RESET_TIMEDOUT; |
| 3343 | } |
| 3344 | |
| 3345 | ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS); |
| 3346 | } |
| 3347 | } |
| 3348 | |
| 3349 | static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) |
| 3350 | { |
| 3351 | int rc; |
| 3352 | unsigned int delay_secs; |
| 3353 | enum pqi_soft_reset_status reset_status; |
| 3354 | |
| 3355 | if (ctrl_info->soft_reset_handshake_supported) |
| 3356 | reset_status = pqi_poll_for_soft_reset_status(ctrl_info); |
| 3357 | else |
| 3358 | reset_status = RESET_INITIATE_FIRMWARE; |
| 3359 | |
| 3360 | delay_secs = PQI_POST_RESET_DELAY_SECS; |
| 3361 | |
| 3362 | switch (reset_status) { |
| 3363 | case RESET_TIMEDOUT: |
| 3364 | delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS; |
| 3365 | fallthrough; |
| 3366 | case RESET_INITIATE_DRIVER: |
| 3367 | dev_info(&ctrl_info->pci_dev->dev, |
| 3368 | "Online Firmware Activation: resetting controller\n"); |
| 3369 | sis_soft_reset(ctrl_info); |
| 3370 | fallthrough; |
| 3371 | case RESET_INITIATE_FIRMWARE: |
| 3372 | ctrl_info->pqi_mode_enabled = false; |
| 3373 | pqi_save_ctrl_mode(ctrl_info, SIS_MODE); |
| 3374 | rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs); |
| 3375 | pqi_ofa_free_host_buffer(ctrl_info); |
| 3376 | pqi_ctrl_ofa_done(ctrl_info); |
| 3377 | dev_info(&ctrl_info->pci_dev->dev, |
| 3378 | "Online Firmware Activation: %s\n", |
| 3379 | rc == 0 ? "SUCCESS" : "FAILED"); |
| 3380 | break; |
| 3381 | case RESET_ABORT: |
| 3382 | dev_info(&ctrl_info->pci_dev->dev, |
| 3383 | "Online Firmware Activation ABORTED\n"); |
| 3384 | if (ctrl_info->soft_reset_handshake_supported) |
| 3385 | pqi_clear_soft_reset_status(ctrl_info); |
| 3386 | pqi_ofa_free_host_buffer(ctrl_info); |
| 3387 | pqi_ctrl_ofa_done(ctrl_info); |
| 3388 | pqi_ofa_ctrl_unquiesce(ctrl_info); |
| 3389 | break; |
| 3390 | case RESET_NORESPONSE: |
| 3391 | fallthrough; |
| 3392 | default: |
| 3393 | dev_err(&ctrl_info->pci_dev->dev, |
| 3394 | "unexpected Online Firmware Activation reset status: 0x%x\n", |
| 3395 | reset_status); |
| 3396 | pqi_ofa_free_host_buffer(ctrl_info); |
| 3397 | pqi_ctrl_ofa_done(ctrl_info); |
| 3398 | pqi_ofa_ctrl_unquiesce(ctrl_info); |
| 3399 | pqi_take_ctrl_offline(ctrl_info); |
| 3400 | break; |
| 3401 | } |
| 3402 | } |
| 3403 | |
| 3404 | static void pqi_ofa_memory_alloc_worker(struct work_struct *work) |
| 3405 | { |
| 3406 | struct pqi_ctrl_info *ctrl_info; |
| 3407 | |
| 3408 | ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work); |
| 3409 | |
| 3410 | pqi_ctrl_ofa_start(ctrl_info); |
| 3411 | pqi_ofa_setup_host_buffer(ctrl_info); |
| 3412 | pqi_ofa_host_memory_update(ctrl_info); |
| 3413 | } |
| 3414 | |
| 3415 | static void pqi_ofa_quiesce_worker(struct work_struct *work) |
| 3416 | { |
| 3417 | struct pqi_ctrl_info *ctrl_info; |
| 3418 | struct pqi_event *event; |
| 3419 | |
| 3420 | ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work); |
| 3421 | |
| 3422 | event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)]; |
| 3423 | |
| 3424 | pqi_ofa_ctrl_quiesce(ctrl_info); |
| 3425 | pqi_acknowledge_event(ctrl_info, event); |
| 3426 | pqi_process_soft_reset(ctrl_info); |
| 3427 | } |
| 3428 | |
| 3429 | static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, |
| 3430 | struct pqi_event *event) |
| 3431 | { |
| 3432 | bool ack_event; |
| 3433 | |
| 3434 | ack_event = true; |
| 3435 | |
| 3436 | switch (event->event_id) { |
| 3437 | case PQI_EVENT_OFA_MEMORY_ALLOCATION: |
| 3438 | dev_info(&ctrl_info->pci_dev->dev, |
| 3439 | "received Online Firmware Activation memory allocation request\n"); |
| 3440 | schedule_work(&ctrl_info->ofa_memory_alloc_work); |
| 3441 | break; |
| 3442 | case PQI_EVENT_OFA_QUIESCE: |
| 3443 | dev_info(&ctrl_info->pci_dev->dev, |
| 3444 | "received Online Firmware Activation quiesce request\n"); |
| 3445 | schedule_work(&ctrl_info->ofa_quiesce_work); |
| 3446 | ack_event = false; |
| 3447 | break; |
| 3448 | case PQI_EVENT_OFA_CANCELED: |
| 3449 | dev_info(&ctrl_info->pci_dev->dev, |
| 3450 | "received Online Firmware Activation cancel request: reason: %u\n", |
| 3451 | ctrl_info->ofa_cancel_reason); |
| 3452 | pqi_ofa_free_host_buffer(ctrl_info); |
| 3453 | pqi_ctrl_ofa_done(ctrl_info); |
| 3454 | break; |
| 3455 | default: |
| 3456 | dev_err(&ctrl_info->pci_dev->dev, |
| 3457 | "received unknown Online Firmware Activation request: event ID: %u\n", |
| 3458 | event->event_id); |
| 3459 | break; |
| 3460 | } |
| 3461 | |
| 3462 | return ack_event; |
| 3463 | } |
| 3464 | |
| 3465 | static void pqi_event_worker(struct work_struct *work) |
| 3466 | { |
| 3467 | unsigned int i; |
| 3468 | bool rescan_needed; |
| 3469 | struct pqi_ctrl_info *ctrl_info; |
| 3470 | struct pqi_event *event; |
| 3471 | bool ack_event; |
| 3472 | |
| 3473 | ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); |
| 3474 | |
| 3475 | pqi_ctrl_busy(ctrl_info); |
| 3476 | pqi_wait_if_ctrl_blocked(ctrl_info); |
| 3477 | if (pqi_ctrl_offline(ctrl_info)) |
| 3478 | goto out; |
| 3479 | |
| 3480 | rescan_needed = false; |
| 3481 | event = ctrl_info->events; |
| 3482 | for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { |
| 3483 | if (event->pending) { |
| 3484 | event->pending = false; |
| 3485 | if (event->event_type == PQI_EVENT_TYPE_OFA) { |
| 3486 | ack_event = pqi_ofa_process_event(ctrl_info, event); |
| 3487 | } else { |
| 3488 | ack_event = true; |
| 3489 | rescan_needed = true; |
| 3490 | } |
| 3491 | if (ack_event) |
| 3492 | pqi_acknowledge_event(ctrl_info, event); |
| 3493 | } |
| 3494 | event++; |
| 3495 | } |
| 3496 | |
| 3497 | if (rescan_needed) |
| 3498 | pqi_schedule_rescan_worker_delayed(ctrl_info); |
| 3499 | |
| 3500 | out: |
| 3501 | pqi_ctrl_unbusy(ctrl_info); |
| 3502 | } |
| 3503 | |
| 3504 | #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ) |
| 3505 | |
| 3506 | static void pqi_heartbeat_timer_handler(struct timer_list *t) |
| 3507 | { |
| 3508 | int num_interrupts; |
| 3509 | u32 heartbeat_count; |
| 3510 | struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer); |
| 3511 | |
| 3512 | pqi_check_ctrl_health(ctrl_info); |
| 3513 | if (pqi_ctrl_offline(ctrl_info)) |
| 3514 | return; |
| 3515 | |
| 3516 | num_interrupts = atomic_read(&ctrl_info->num_interrupts); |
| 3517 | heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); |
| 3518 | |
| 3519 | if (num_interrupts == ctrl_info->previous_num_interrupts) { |
| 3520 | if (heartbeat_count == ctrl_info->previous_heartbeat_count) { |
| 3521 | dev_err(&ctrl_info->pci_dev->dev, |
| 3522 | "no heartbeat detected - last heartbeat count: %u\n", |
| 3523 | heartbeat_count); |
| 3524 | pqi_take_ctrl_offline(ctrl_info); |
| 3525 | return; |
| 3526 | } |
| 3527 | } else { |
| 3528 | ctrl_info->previous_num_interrupts = num_interrupts; |
| 3529 | } |
| 3530 | |
| 3531 | ctrl_info->previous_heartbeat_count = heartbeat_count; |
| 3532 | mod_timer(&ctrl_info->heartbeat_timer, |
| 3533 | jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); |
| 3534 | } |
| 3535 | |
| 3536 | static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) |
| 3537 | { |
| 3538 | if (!ctrl_info->heartbeat_counter) |
| 3539 | return; |
| 3540 | |
| 3541 | ctrl_info->previous_num_interrupts = |
| 3542 | atomic_read(&ctrl_info->num_interrupts); |
| 3543 | ctrl_info->previous_heartbeat_count = |
| 3544 | pqi_read_heartbeat_counter(ctrl_info); |
| 3545 | |
| 3546 | ctrl_info->heartbeat_timer.expires = |
| 3547 | jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; |
| 3548 | add_timer(&ctrl_info->heartbeat_timer); |
| 3549 | } |
| 3550 | |
| 3551 | static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) |
| 3552 | { |
| 3553 | del_timer_sync(&ctrl_info->heartbeat_timer); |
| 3554 | } |
| 3555 | |
| 3556 | static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info, |
| 3557 | struct pqi_event *event, struct pqi_event_response *response) |
| 3558 | { |
| 3559 | switch (event->event_id) { |
| 3560 | case PQI_EVENT_OFA_MEMORY_ALLOCATION: |
| 3561 | ctrl_info->ofa_bytes_requested = |
| 3562 | get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested); |
| 3563 | break; |
| 3564 | case PQI_EVENT_OFA_CANCELED: |
| 3565 | ctrl_info->ofa_cancel_reason = |
| 3566 | get_unaligned_le16(&response->data.ofa_cancelled.reason); |
| 3567 | break; |
| 3568 | } |
| 3569 | } |
| 3570 | |
| 3571 | static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) |
| 3572 | { |
| 3573 | int num_events; |
| 3574 | pqi_index_t oq_pi; |
| 3575 | pqi_index_t oq_ci; |
| 3576 | struct pqi_event_queue *event_queue; |
| 3577 | struct pqi_event_response *response; |
| 3578 | struct pqi_event *event; |
| 3579 | int event_index; |
| 3580 | |
| 3581 | event_queue = &ctrl_info->event_queue; |
| 3582 | num_events = 0; |
| 3583 | oq_ci = event_queue->oq_ci_copy; |
| 3584 | |
| 3585 | while (1) { |
| 3586 | oq_pi = readl(event_queue->oq_pi); |
| 3587 | if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) { |
| 3588 | pqi_invalid_response(ctrl_info); |
| 3589 | dev_err(&ctrl_info->pci_dev->dev, |
| 3590 | "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", |
| 3591 | oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci); |
| 3592 | return -1; |
| 3593 | } |
| 3594 | |
| 3595 | if (oq_pi == oq_ci) |
| 3596 | break; |
| 3597 | |
| 3598 | num_events++; |
| 3599 | response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); |
| 3600 | |
| 3601 | event_index = pqi_event_type_to_event_index(response->event_type); |
| 3602 | |
| 3603 | if (event_index >= 0 && response->request_acknowledge) { |
| 3604 | event = &ctrl_info->events[event_index]; |
| 3605 | event->pending = true; |
| 3606 | event->event_type = response->event_type; |
| 3607 | event->event_id = get_unaligned_le16(&response->event_id); |
| 3608 | event->additional_event_id = |
| 3609 | get_unaligned_le32(&response->additional_event_id); |
| 3610 | if (event->event_type == PQI_EVENT_TYPE_OFA) |
| 3611 | pqi_ofa_capture_event_payload(ctrl_info, event, response); |
| 3612 | } |
| 3613 | |
| 3614 | oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; |
| 3615 | } |
| 3616 | |
| 3617 | if (num_events) { |
| 3618 | event_queue->oq_ci_copy = oq_ci; |
| 3619 | writel(oq_ci, event_queue->oq_ci); |
| 3620 | schedule_work(&ctrl_info->event_work); |
| 3621 | } |
| 3622 | |
| 3623 | return num_events; |
| 3624 | } |
| 3625 | |
| 3626 | #define PQI_LEGACY_INTX_MASK 0x1 |
| 3627 | |
| 3628 | static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx) |
| 3629 | { |
| 3630 | u32 intx_mask; |
| 3631 | struct pqi_device_registers __iomem *pqi_registers; |
| 3632 | volatile void __iomem *register_addr; |
| 3633 | |
| 3634 | pqi_registers = ctrl_info->pqi_registers; |
| 3635 | |
| 3636 | if (enable_intx) |
| 3637 | register_addr = &pqi_registers->legacy_intx_mask_clear; |
| 3638 | else |
| 3639 | register_addr = &pqi_registers->legacy_intx_mask_set; |
| 3640 | |
| 3641 | intx_mask = readl(register_addr); |
| 3642 | intx_mask |= PQI_LEGACY_INTX_MASK; |
| 3643 | writel(intx_mask, register_addr); |
| 3644 | } |
| 3645 | |
| 3646 | static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, |
| 3647 | enum pqi_irq_mode new_mode) |
| 3648 | { |
| 3649 | switch (ctrl_info->irq_mode) { |
| 3650 | case IRQ_MODE_MSIX: |
| 3651 | switch (new_mode) { |
| 3652 | case IRQ_MODE_MSIX: |
| 3653 | break; |
| 3654 | case IRQ_MODE_INTX: |
| 3655 | pqi_configure_legacy_intx(ctrl_info, true); |
| 3656 | sis_enable_intx(ctrl_info); |
| 3657 | break; |
| 3658 | case IRQ_MODE_NONE: |
| 3659 | break; |
| 3660 | } |
| 3661 | break; |
| 3662 | case IRQ_MODE_INTX: |
| 3663 | switch (new_mode) { |
| 3664 | case IRQ_MODE_MSIX: |
| 3665 | pqi_configure_legacy_intx(ctrl_info, false); |
| 3666 | sis_enable_msix(ctrl_info); |
| 3667 | break; |
| 3668 | case IRQ_MODE_INTX: |
| 3669 | break; |
| 3670 | case IRQ_MODE_NONE: |
| 3671 | pqi_configure_legacy_intx(ctrl_info, false); |
| 3672 | break; |
| 3673 | } |
| 3674 | break; |
| 3675 | case IRQ_MODE_NONE: |
| 3676 | switch (new_mode) { |
| 3677 | case IRQ_MODE_MSIX: |
| 3678 | sis_enable_msix(ctrl_info); |
| 3679 | break; |
| 3680 | case IRQ_MODE_INTX: |
| 3681 | pqi_configure_legacy_intx(ctrl_info, true); |
| 3682 | sis_enable_intx(ctrl_info); |
| 3683 | break; |
| 3684 | case IRQ_MODE_NONE: |
| 3685 | break; |
| 3686 | } |
| 3687 | break; |
| 3688 | } |
| 3689 | |
| 3690 | ctrl_info->irq_mode = new_mode; |
| 3691 | } |
| 3692 | |
| 3693 | #define PQI_LEGACY_INTX_PENDING 0x1 |
| 3694 | |
| 3695 | static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) |
| 3696 | { |
| 3697 | bool valid_irq; |
| 3698 | u32 intx_status; |
| 3699 | |
| 3700 | switch (ctrl_info->irq_mode) { |
| 3701 | case IRQ_MODE_MSIX: |
| 3702 | valid_irq = true; |
| 3703 | break; |
| 3704 | case IRQ_MODE_INTX: |
| 3705 | intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status); |
| 3706 | if (intx_status & PQI_LEGACY_INTX_PENDING) |
| 3707 | valid_irq = true; |
| 3708 | else |
| 3709 | valid_irq = false; |
| 3710 | break; |
| 3711 | case IRQ_MODE_NONE: |
| 3712 | default: |
| 3713 | valid_irq = false; |
| 3714 | break; |
| 3715 | } |
| 3716 | |
| 3717 | return valid_irq; |
| 3718 | } |
| 3719 | |
| 3720 | static irqreturn_t pqi_irq_handler(int irq, void *data) |
| 3721 | { |
| 3722 | struct pqi_ctrl_info *ctrl_info; |
| 3723 | struct pqi_queue_group *queue_group; |
| 3724 | int num_io_responses_handled; |
| 3725 | int num_events_handled; |
| 3726 | |
| 3727 | queue_group = data; |
| 3728 | ctrl_info = queue_group->ctrl_info; |
| 3729 | |
| 3730 | if (!pqi_is_valid_irq(ctrl_info)) |
| 3731 | return IRQ_NONE; |
| 3732 | |
| 3733 | num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); |
| 3734 | if (num_io_responses_handled < 0) |
| 3735 | goto out; |
| 3736 | |
| 3737 | if (irq == ctrl_info->event_irq) { |
| 3738 | num_events_handled = pqi_process_event_intr(ctrl_info); |
| 3739 | if (num_events_handled < 0) |
| 3740 | goto out; |
| 3741 | } else { |
| 3742 | num_events_handled = 0; |
| 3743 | } |
| 3744 | |
| 3745 | if (num_io_responses_handled + num_events_handled > 0) |
| 3746 | atomic_inc(&ctrl_info->num_interrupts); |
| 3747 | |
| 3748 | pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); |
| 3749 | pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); |
| 3750 | |
| 3751 | out: |
| 3752 | return IRQ_HANDLED; |
| 3753 | } |
| 3754 | |
| 3755 | static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) |
| 3756 | { |
| 3757 | struct pci_dev *pci_dev = ctrl_info->pci_dev; |
| 3758 | int i; |
| 3759 | int rc; |
| 3760 | |
| 3761 | ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); |
| 3762 | |
| 3763 | for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { |
| 3764 | rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, |
| 3765 | DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); |
| 3766 | if (rc) { |
| 3767 | dev_err(&pci_dev->dev, |
| 3768 | "irq %u init failed with error %d\n", |
| 3769 | pci_irq_vector(pci_dev, i), rc); |
| 3770 | return rc; |
| 3771 | } |
| 3772 | ctrl_info->num_msix_vectors_initialized++; |
| 3773 | } |
| 3774 | |
| 3775 | return 0; |
| 3776 | } |
| 3777 | |
| 3778 | static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) |
| 3779 | { |
| 3780 | int i; |
| 3781 | |
| 3782 | for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) |
| 3783 | free_irq(pci_irq_vector(ctrl_info->pci_dev, i), |
| 3784 | &ctrl_info->queue_groups[i]); |
| 3785 | |
| 3786 | ctrl_info->num_msix_vectors_initialized = 0; |
| 3787 | } |
| 3788 | |
| 3789 | static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) |
| 3790 | { |
| 3791 | int num_vectors_enabled; |
| 3792 | |
| 3793 | num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, |
| 3794 | PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, |
| 3795 | PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); |
| 3796 | if (num_vectors_enabled < 0) { |
| 3797 | dev_err(&ctrl_info->pci_dev->dev, |
| 3798 | "MSI-X init failed with error %d\n", |
| 3799 | num_vectors_enabled); |
| 3800 | return num_vectors_enabled; |
| 3801 | } |
| 3802 | |
| 3803 | ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; |
| 3804 | ctrl_info->irq_mode = IRQ_MODE_MSIX; |
| 3805 | return 0; |
| 3806 | } |
| 3807 | |
| 3808 | static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) |
| 3809 | { |
| 3810 | if (ctrl_info->num_msix_vectors_enabled) { |
| 3811 | pci_free_irq_vectors(ctrl_info->pci_dev); |
| 3812 | ctrl_info->num_msix_vectors_enabled = 0; |
| 3813 | } |
| 3814 | } |
| 3815 | |
| 3816 | static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) |
| 3817 | { |
| 3818 | unsigned int i; |
| 3819 | size_t alloc_length; |
| 3820 | size_t element_array_length_per_iq; |
| 3821 | size_t element_array_length_per_oq; |
| 3822 | void *element_array; |
| 3823 | void __iomem *next_queue_index; |
| 3824 | void *aligned_pointer; |
| 3825 | unsigned int num_inbound_queues; |
| 3826 | unsigned int num_outbound_queues; |
| 3827 | unsigned int num_queue_indexes; |
| 3828 | struct pqi_queue_group *queue_group; |
| 3829 | |
| 3830 | element_array_length_per_iq = |
| 3831 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * |
| 3832 | ctrl_info->num_elements_per_iq; |
| 3833 | element_array_length_per_oq = |
| 3834 | PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * |
| 3835 | ctrl_info->num_elements_per_oq; |
| 3836 | num_inbound_queues = ctrl_info->num_queue_groups * 2; |
| 3837 | num_outbound_queues = ctrl_info->num_queue_groups; |
| 3838 | num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; |
| 3839 | |
| 3840 | aligned_pointer = NULL; |
| 3841 | |
| 3842 | for (i = 0; i < num_inbound_queues; i++) { |
| 3843 | aligned_pointer = PTR_ALIGN(aligned_pointer, |
| 3844 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); |
| 3845 | aligned_pointer += element_array_length_per_iq; |
| 3846 | } |
| 3847 | |
| 3848 | for (i = 0; i < num_outbound_queues; i++) { |
| 3849 | aligned_pointer = PTR_ALIGN(aligned_pointer, |
| 3850 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); |
| 3851 | aligned_pointer += element_array_length_per_oq; |
| 3852 | } |
| 3853 | |
| 3854 | aligned_pointer = PTR_ALIGN(aligned_pointer, |
| 3855 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); |
| 3856 | aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * |
| 3857 | PQI_EVENT_OQ_ELEMENT_LENGTH; |
| 3858 | |
| 3859 | for (i = 0; i < num_queue_indexes; i++) { |
| 3860 | aligned_pointer = PTR_ALIGN(aligned_pointer, |
| 3861 | PQI_OPERATIONAL_INDEX_ALIGNMENT); |
| 3862 | aligned_pointer += sizeof(pqi_index_t); |
| 3863 | } |
| 3864 | |
| 3865 | alloc_length = (size_t)aligned_pointer + |
| 3866 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; |
| 3867 | |
| 3868 | alloc_length += PQI_EXTRA_SGL_MEMORY; |
| 3869 | |
| 3870 | ctrl_info->queue_memory_base = |
| 3871 | dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, |
| 3872 | &ctrl_info->queue_memory_base_dma_handle, |
| 3873 | GFP_KERNEL); |
| 3874 | |
| 3875 | if (!ctrl_info->queue_memory_base) |
| 3876 | return -ENOMEM; |
| 3877 | |
| 3878 | ctrl_info->queue_memory_length = alloc_length; |
| 3879 | |
| 3880 | element_array = PTR_ALIGN(ctrl_info->queue_memory_base, |
| 3881 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); |
| 3882 | |
| 3883 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
| 3884 | queue_group = &ctrl_info->queue_groups[i]; |
| 3885 | queue_group->iq_element_array[RAID_PATH] = element_array; |
| 3886 | queue_group->iq_element_array_bus_addr[RAID_PATH] = |
| 3887 | ctrl_info->queue_memory_base_dma_handle + |
| 3888 | (element_array - ctrl_info->queue_memory_base); |
| 3889 | element_array += element_array_length_per_iq; |
| 3890 | element_array = PTR_ALIGN(element_array, |
| 3891 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); |
| 3892 | queue_group->iq_element_array[AIO_PATH] = element_array; |
| 3893 | queue_group->iq_element_array_bus_addr[AIO_PATH] = |
| 3894 | ctrl_info->queue_memory_base_dma_handle + |
| 3895 | (element_array - ctrl_info->queue_memory_base); |
| 3896 | element_array += element_array_length_per_iq; |
| 3897 | element_array = PTR_ALIGN(element_array, |
| 3898 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); |
| 3899 | } |
| 3900 | |
| 3901 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
| 3902 | queue_group = &ctrl_info->queue_groups[i]; |
| 3903 | queue_group->oq_element_array = element_array; |
| 3904 | queue_group->oq_element_array_bus_addr = |
| 3905 | ctrl_info->queue_memory_base_dma_handle + |
| 3906 | (element_array - ctrl_info->queue_memory_base); |
| 3907 | element_array += element_array_length_per_oq; |
| 3908 | element_array = PTR_ALIGN(element_array, |
| 3909 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); |
| 3910 | } |
| 3911 | |
| 3912 | ctrl_info->event_queue.oq_element_array = element_array; |
| 3913 | ctrl_info->event_queue.oq_element_array_bus_addr = |
| 3914 | ctrl_info->queue_memory_base_dma_handle + |
| 3915 | (element_array - ctrl_info->queue_memory_base); |
| 3916 | element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * |
| 3917 | PQI_EVENT_OQ_ELEMENT_LENGTH; |
| 3918 | |
| 3919 | next_queue_index = (void __iomem *)PTR_ALIGN(element_array, |
| 3920 | PQI_OPERATIONAL_INDEX_ALIGNMENT); |
| 3921 | |
| 3922 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
| 3923 | queue_group = &ctrl_info->queue_groups[i]; |
| 3924 | queue_group->iq_ci[RAID_PATH] = next_queue_index; |
| 3925 | queue_group->iq_ci_bus_addr[RAID_PATH] = |
| 3926 | ctrl_info->queue_memory_base_dma_handle + |
| 3927 | (next_queue_index - |
| 3928 | (void __iomem *)ctrl_info->queue_memory_base); |
| 3929 | next_queue_index += sizeof(pqi_index_t); |
| 3930 | next_queue_index = PTR_ALIGN(next_queue_index, |
| 3931 | PQI_OPERATIONAL_INDEX_ALIGNMENT); |
| 3932 | queue_group->iq_ci[AIO_PATH] = next_queue_index; |
| 3933 | queue_group->iq_ci_bus_addr[AIO_PATH] = |
| 3934 | ctrl_info->queue_memory_base_dma_handle + |
| 3935 | (next_queue_index - |
| 3936 | (void __iomem *)ctrl_info->queue_memory_base); |
| 3937 | next_queue_index += sizeof(pqi_index_t); |
| 3938 | next_queue_index = PTR_ALIGN(next_queue_index, |
| 3939 | PQI_OPERATIONAL_INDEX_ALIGNMENT); |
| 3940 | queue_group->oq_pi = next_queue_index; |
| 3941 | queue_group->oq_pi_bus_addr = |
| 3942 | ctrl_info->queue_memory_base_dma_handle + |
| 3943 | (next_queue_index - |
| 3944 | (void __iomem *)ctrl_info->queue_memory_base); |
| 3945 | next_queue_index += sizeof(pqi_index_t); |
| 3946 | next_queue_index = PTR_ALIGN(next_queue_index, |
| 3947 | PQI_OPERATIONAL_INDEX_ALIGNMENT); |
| 3948 | } |
| 3949 | |
| 3950 | ctrl_info->event_queue.oq_pi = next_queue_index; |
| 3951 | ctrl_info->event_queue.oq_pi_bus_addr = |
| 3952 | ctrl_info->queue_memory_base_dma_handle + |
| 3953 | (next_queue_index - |
| 3954 | (void __iomem *)ctrl_info->queue_memory_base); |
| 3955 | |
| 3956 | return 0; |
| 3957 | } |
| 3958 | |
| 3959 | static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) |
| 3960 | { |
| 3961 | unsigned int i; |
| 3962 | u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; |
| 3963 | u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; |
| 3964 | |
| 3965 | /* |
| 3966 | * Initialize the backpointers to the controller structure in |
| 3967 | * each operational queue group structure. |
| 3968 | */ |
| 3969 | for (i = 0; i < ctrl_info->num_queue_groups; i++) |
| 3970 | ctrl_info->queue_groups[i].ctrl_info = ctrl_info; |
| 3971 | |
| 3972 | /* |
| 3973 | * Assign IDs to all operational queues. Note that the IDs |
| 3974 | * assigned to operational IQs are independent of the IDs |
| 3975 | * assigned to operational OQs. |
| 3976 | */ |
| 3977 | ctrl_info->event_queue.oq_id = next_oq_id++; |
| 3978 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
| 3979 | ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; |
| 3980 | ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; |
| 3981 | ctrl_info->queue_groups[i].oq_id = next_oq_id++; |
| 3982 | } |
| 3983 | |
| 3984 | /* |
| 3985 | * Assign MSI-X table entry indexes to all queues. Note that the |
| 3986 | * interrupt for the event queue is shared with the first queue group. |
| 3987 | */ |
| 3988 | ctrl_info->event_queue.int_msg_num = 0; |
| 3989 | for (i = 0; i < ctrl_info->num_queue_groups; i++) |
| 3990 | ctrl_info->queue_groups[i].int_msg_num = i; |
| 3991 | |
| 3992 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
| 3993 | spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); |
| 3994 | spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); |
| 3995 | INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); |
| 3996 | INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); |
| 3997 | } |
| 3998 | } |
| 3999 | |
| 4000 | static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) |
| 4001 | { |
| 4002 | size_t alloc_length; |
| 4003 | struct pqi_admin_queues_aligned *admin_queues_aligned; |
| 4004 | struct pqi_admin_queues *admin_queues; |
| 4005 | |
| 4006 | alloc_length = sizeof(struct pqi_admin_queues_aligned) + |
| 4007 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; |
| 4008 | |
| 4009 | ctrl_info->admin_queue_memory_base = |
| 4010 | dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, |
| 4011 | &ctrl_info->admin_queue_memory_base_dma_handle, |
| 4012 | GFP_KERNEL); |
| 4013 | |
| 4014 | if (!ctrl_info->admin_queue_memory_base) |
| 4015 | return -ENOMEM; |
| 4016 | |
| 4017 | ctrl_info->admin_queue_memory_length = alloc_length; |
| 4018 | |
| 4019 | admin_queues = &ctrl_info->admin_queues; |
| 4020 | admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, |
| 4021 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); |
| 4022 | admin_queues->iq_element_array = |
| 4023 | &admin_queues_aligned->iq_element_array; |
| 4024 | admin_queues->oq_element_array = |
| 4025 | &admin_queues_aligned->oq_element_array; |
| 4026 | admin_queues->iq_ci = |
| 4027 | (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci; |
| 4028 | admin_queues->oq_pi = |
| 4029 | (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; |
| 4030 | |
| 4031 | admin_queues->iq_element_array_bus_addr = |
| 4032 | ctrl_info->admin_queue_memory_base_dma_handle + |
| 4033 | (admin_queues->iq_element_array - |
| 4034 | ctrl_info->admin_queue_memory_base); |
| 4035 | admin_queues->oq_element_array_bus_addr = |
| 4036 | ctrl_info->admin_queue_memory_base_dma_handle + |
| 4037 | (admin_queues->oq_element_array - |
| 4038 | ctrl_info->admin_queue_memory_base); |
| 4039 | admin_queues->iq_ci_bus_addr = |
| 4040 | ctrl_info->admin_queue_memory_base_dma_handle + |
| 4041 | ((void __iomem *)admin_queues->iq_ci - |
| 4042 | (void __iomem *)ctrl_info->admin_queue_memory_base); |
| 4043 | admin_queues->oq_pi_bus_addr = |
| 4044 | ctrl_info->admin_queue_memory_base_dma_handle + |
| 4045 | ((void __iomem *)admin_queues->oq_pi - |
| 4046 | (void __iomem *)ctrl_info->admin_queue_memory_base); |
| 4047 | |
| 4048 | return 0; |
| 4049 | } |
| 4050 | |
| 4051 | #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ |
| 4052 | #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 |
| 4053 | |
| 4054 | static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) |
| 4055 | { |
| 4056 | struct pqi_device_registers __iomem *pqi_registers; |
| 4057 | struct pqi_admin_queues *admin_queues; |
| 4058 | unsigned long timeout; |
| 4059 | u8 status; |
| 4060 | u32 reg; |
| 4061 | |
| 4062 | pqi_registers = ctrl_info->pqi_registers; |
| 4063 | admin_queues = &ctrl_info->admin_queues; |
| 4064 | |
| 4065 | writeq((u64)admin_queues->iq_element_array_bus_addr, |
| 4066 | &pqi_registers->admin_iq_element_array_addr); |
| 4067 | writeq((u64)admin_queues->oq_element_array_bus_addr, |
| 4068 | &pqi_registers->admin_oq_element_array_addr); |
| 4069 | writeq((u64)admin_queues->iq_ci_bus_addr, |
| 4070 | &pqi_registers->admin_iq_ci_addr); |
| 4071 | writeq((u64)admin_queues->oq_pi_bus_addr, |
| 4072 | &pqi_registers->admin_oq_pi_addr); |
| 4073 | |
| 4074 | reg = PQI_ADMIN_IQ_NUM_ELEMENTS | |
| 4075 | (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) | |
| 4076 | (admin_queues->int_msg_num << 16); |
| 4077 | writel(reg, &pqi_registers->admin_iq_num_elements); |
| 4078 | |
| 4079 | writel(PQI_CREATE_ADMIN_QUEUE_PAIR, |
| 4080 | &pqi_registers->function_and_status_code); |
| 4081 | |
| 4082 | timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; |
| 4083 | while (1) { |
| 4084 | status = readb(&pqi_registers->function_and_status_code); |
| 4085 | if (status == PQI_STATUS_IDLE) |
| 4086 | break; |
| 4087 | if (time_after(jiffies, timeout)) |
| 4088 | return -ETIMEDOUT; |
| 4089 | msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); |
| 4090 | } |
| 4091 | |
| 4092 | /* |
| 4093 | * The offset registers are not initialized to the correct |
| 4094 | * offsets until *after* the create admin queue pair command |
| 4095 | * completes successfully. |
| 4096 | */ |
| 4097 | admin_queues->iq_pi = ctrl_info->iomem_base + |
| 4098 | PQI_DEVICE_REGISTERS_OFFSET + |
| 4099 | readq(&pqi_registers->admin_iq_pi_offset); |
| 4100 | admin_queues->oq_ci = ctrl_info->iomem_base + |
| 4101 | PQI_DEVICE_REGISTERS_OFFSET + |
| 4102 | readq(&pqi_registers->admin_oq_ci_offset); |
| 4103 | |
| 4104 | return 0; |
| 4105 | } |
| 4106 | |
| 4107 | static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, |
| 4108 | struct pqi_general_admin_request *request) |
| 4109 | { |
| 4110 | struct pqi_admin_queues *admin_queues; |
| 4111 | void *next_element; |
| 4112 | pqi_index_t iq_pi; |
| 4113 | |
| 4114 | admin_queues = &ctrl_info->admin_queues; |
| 4115 | iq_pi = admin_queues->iq_pi_copy; |
| 4116 | |
| 4117 | next_element = admin_queues->iq_element_array + |
| 4118 | (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); |
| 4119 | |
| 4120 | memcpy(next_element, request, sizeof(*request)); |
| 4121 | |
| 4122 | iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; |
| 4123 | admin_queues->iq_pi_copy = iq_pi; |
| 4124 | |
| 4125 | /* |
| 4126 | * This write notifies the controller that an IU is available to be |
| 4127 | * processed. |
| 4128 | */ |
| 4129 | writel(iq_pi, admin_queues->iq_pi); |
| 4130 | } |
| 4131 | |
| 4132 | #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 |
| 4133 | |
| 4134 | static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, |
| 4135 | struct pqi_general_admin_response *response) |
| 4136 | { |
| 4137 | struct pqi_admin_queues *admin_queues; |
| 4138 | pqi_index_t oq_pi; |
| 4139 | pqi_index_t oq_ci; |
| 4140 | unsigned long timeout; |
| 4141 | |
| 4142 | admin_queues = &ctrl_info->admin_queues; |
| 4143 | oq_ci = admin_queues->oq_ci_copy; |
| 4144 | |
| 4145 | timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies; |
| 4146 | |
| 4147 | while (1) { |
| 4148 | oq_pi = readl(admin_queues->oq_pi); |
| 4149 | if (oq_pi != oq_ci) |
| 4150 | break; |
| 4151 | if (time_after(jiffies, timeout)) { |
| 4152 | dev_err(&ctrl_info->pci_dev->dev, |
| 4153 | "timed out waiting for admin response\n"); |
| 4154 | return -ETIMEDOUT; |
| 4155 | } |
| 4156 | if (!sis_is_firmware_running(ctrl_info)) |
| 4157 | return -ENXIO; |
| 4158 | usleep_range(1000, 2000); |
| 4159 | } |
| 4160 | |
| 4161 | memcpy(response, admin_queues->oq_element_array + |
| 4162 | (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); |
| 4163 | |
| 4164 | oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; |
| 4165 | admin_queues->oq_ci_copy = oq_ci; |
| 4166 | writel(oq_ci, admin_queues->oq_ci); |
| 4167 | |
| 4168 | return 0; |
| 4169 | } |
| 4170 | |
| 4171 | static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, |
| 4172 | struct pqi_queue_group *queue_group, enum pqi_io_path path, |
| 4173 | struct pqi_io_request *io_request) |
| 4174 | { |
| 4175 | struct pqi_io_request *next; |
| 4176 | void *next_element; |
| 4177 | pqi_index_t iq_pi; |
| 4178 | pqi_index_t iq_ci; |
| 4179 | size_t iu_length; |
| 4180 | unsigned long flags; |
| 4181 | unsigned int num_elements_needed; |
| 4182 | unsigned int num_elements_to_end_of_queue; |
| 4183 | size_t copy_count; |
| 4184 | struct pqi_iu_header *request; |
| 4185 | |
| 4186 | spin_lock_irqsave(&queue_group->submit_lock[path], flags); |
| 4187 | |
| 4188 | if (io_request) { |
| 4189 | io_request->queue_group = queue_group; |
| 4190 | list_add_tail(&io_request->request_list_entry, |
| 4191 | &queue_group->request_list[path]); |
| 4192 | } |
| 4193 | |
| 4194 | iq_pi = queue_group->iq_pi_copy[path]; |
| 4195 | |
| 4196 | list_for_each_entry_safe(io_request, next, |
| 4197 | &queue_group->request_list[path], request_list_entry) { |
| 4198 | |
| 4199 | request = io_request->iu; |
| 4200 | |
| 4201 | iu_length = get_unaligned_le16(&request->iu_length) + |
| 4202 | PQI_REQUEST_HEADER_LENGTH; |
| 4203 | num_elements_needed = |
| 4204 | DIV_ROUND_UP(iu_length, |
| 4205 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
| 4206 | |
| 4207 | iq_ci = readl(queue_group->iq_ci[path]); |
| 4208 | |
| 4209 | if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, |
| 4210 | ctrl_info->num_elements_per_iq)) |
| 4211 | break; |
| 4212 | |
| 4213 | put_unaligned_le16(queue_group->oq_id, |
| 4214 | &request->response_queue_id); |
| 4215 | |
| 4216 | next_element = queue_group->iq_element_array[path] + |
| 4217 | (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
| 4218 | |
| 4219 | num_elements_to_end_of_queue = |
| 4220 | ctrl_info->num_elements_per_iq - iq_pi; |
| 4221 | |
| 4222 | if (num_elements_needed <= num_elements_to_end_of_queue) { |
| 4223 | memcpy(next_element, request, iu_length); |
| 4224 | } else { |
| 4225 | copy_count = num_elements_to_end_of_queue * |
| 4226 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; |
| 4227 | memcpy(next_element, request, copy_count); |
| 4228 | memcpy(queue_group->iq_element_array[path], |
| 4229 | (u8 *)request + copy_count, |
| 4230 | iu_length - copy_count); |
| 4231 | } |
| 4232 | |
| 4233 | iq_pi = (iq_pi + num_elements_needed) % |
| 4234 | ctrl_info->num_elements_per_iq; |
| 4235 | |
| 4236 | list_del(&io_request->request_list_entry); |
| 4237 | } |
| 4238 | |
| 4239 | if (iq_pi != queue_group->iq_pi_copy[path]) { |
| 4240 | queue_group->iq_pi_copy[path] = iq_pi; |
| 4241 | /* |
| 4242 | * This write notifies the controller that one or more IUs are |
| 4243 | * available to be processed. |
| 4244 | */ |
| 4245 | writel(iq_pi, queue_group->iq_pi[path]); |
| 4246 | } |
| 4247 | |
| 4248 | spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); |
| 4249 | } |
| 4250 | |
| 4251 | #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 |
| 4252 | |
| 4253 | static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, |
| 4254 | struct completion *wait) |
| 4255 | { |
| 4256 | int rc; |
| 4257 | |
| 4258 | while (1) { |
| 4259 | if (wait_for_completion_io_timeout(wait, |
| 4260 | PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) { |
| 4261 | rc = 0; |
| 4262 | break; |
| 4263 | } |
| 4264 | |
| 4265 | pqi_check_ctrl_health(ctrl_info); |
| 4266 | if (pqi_ctrl_offline(ctrl_info)) { |
| 4267 | rc = -ENXIO; |
| 4268 | break; |
| 4269 | } |
| 4270 | } |
| 4271 | |
| 4272 | return rc; |
| 4273 | } |
| 4274 | |
| 4275 | static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, |
| 4276 | void *context) |
| 4277 | { |
| 4278 | struct completion *waiting = context; |
| 4279 | |
| 4280 | complete(waiting); |
| 4281 | } |
| 4282 | |
| 4283 | static int pqi_process_raid_io_error_synchronous( |
| 4284 | struct pqi_raid_error_info *error_info) |
| 4285 | { |
| 4286 | int rc = -EIO; |
| 4287 | |
| 4288 | switch (error_info->data_out_result) { |
| 4289 | case PQI_DATA_IN_OUT_GOOD: |
| 4290 | if (error_info->status == SAM_STAT_GOOD) |
| 4291 | rc = 0; |
| 4292 | break; |
| 4293 | case PQI_DATA_IN_OUT_UNDERFLOW: |
| 4294 | if (error_info->status == SAM_STAT_GOOD || |
| 4295 | error_info->status == SAM_STAT_CHECK_CONDITION) |
| 4296 | rc = 0; |
| 4297 | break; |
| 4298 | case PQI_DATA_IN_OUT_ABORTED: |
| 4299 | rc = PQI_CMD_STATUS_ABORTED; |
| 4300 | break; |
| 4301 | } |
| 4302 | |
| 4303 | return rc; |
| 4304 | } |
| 4305 | |
| 4306 | static inline bool pqi_is_blockable_request(struct pqi_iu_header *request) |
| 4307 | { |
| 4308 | return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0; |
| 4309 | } |
| 4310 | |
| 4311 | static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, |
| 4312 | struct pqi_iu_header *request, unsigned int flags, |
| 4313 | struct pqi_raid_error_info *error_info) |
| 4314 | { |
| 4315 | int rc = 0; |
| 4316 | struct pqi_io_request *io_request; |
| 4317 | size_t iu_length; |
| 4318 | DECLARE_COMPLETION_ONSTACK(wait); |
| 4319 | |
| 4320 | if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { |
| 4321 | if (down_interruptible(&ctrl_info->sync_request_sem)) |
| 4322 | return -ERESTARTSYS; |
| 4323 | } else { |
| 4324 | down(&ctrl_info->sync_request_sem); |
| 4325 | } |
| 4326 | |
| 4327 | pqi_ctrl_busy(ctrl_info); |
| 4328 | /* |
| 4329 | * Wait for other admin queue updates such as; |
| 4330 | * config table changes, OFA memory updates, ... |
| 4331 | */ |
| 4332 | if (pqi_is_blockable_request(request)) |
| 4333 | pqi_wait_if_ctrl_blocked(ctrl_info); |
| 4334 | |
| 4335 | if (pqi_ctrl_offline(ctrl_info)) { |
| 4336 | rc = -ENXIO; |
| 4337 | goto out; |
| 4338 | } |
| 4339 | |
| 4340 | io_request = pqi_alloc_io_request(ctrl_info); |
| 4341 | |
| 4342 | put_unaligned_le16(io_request->index, |
| 4343 | &(((struct pqi_raid_path_request *)request)->request_id)); |
| 4344 | |
| 4345 | if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) |
| 4346 | ((struct pqi_raid_path_request *)request)->error_index = |
| 4347 | ((struct pqi_raid_path_request *)request)->request_id; |
| 4348 | |
| 4349 | iu_length = get_unaligned_le16(&request->iu_length) + |
| 4350 | PQI_REQUEST_HEADER_LENGTH; |
| 4351 | memcpy(io_request->iu, request, iu_length); |
| 4352 | |
| 4353 | io_request->io_complete_callback = pqi_raid_synchronous_complete; |
| 4354 | io_request->context = &wait; |
| 4355 | |
| 4356 | pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, |
| 4357 | io_request); |
| 4358 | |
| 4359 | pqi_wait_for_completion_io(ctrl_info, &wait); |
| 4360 | |
| 4361 | if (error_info) { |
| 4362 | if (io_request->error_info) |
| 4363 | memcpy(error_info, io_request->error_info, sizeof(*error_info)); |
| 4364 | else |
| 4365 | memset(error_info, 0, sizeof(*error_info)); |
| 4366 | } else if (rc == 0 && io_request->error_info) { |
| 4367 | rc = pqi_process_raid_io_error_synchronous(io_request->error_info); |
| 4368 | } |
| 4369 | |
| 4370 | pqi_free_io_request(io_request); |
| 4371 | |
| 4372 | out: |
| 4373 | pqi_ctrl_unbusy(ctrl_info); |
| 4374 | up(&ctrl_info->sync_request_sem); |
| 4375 | |
| 4376 | return rc; |
| 4377 | } |
| 4378 | |
| 4379 | static int pqi_validate_admin_response( |
| 4380 | struct pqi_general_admin_response *response, u8 expected_function_code) |
| 4381 | { |
| 4382 | if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) |
| 4383 | return -EINVAL; |
| 4384 | |
| 4385 | if (get_unaligned_le16(&response->header.iu_length) != |
| 4386 | PQI_GENERAL_ADMIN_IU_LENGTH) |
| 4387 | return -EINVAL; |
| 4388 | |
| 4389 | if (response->function_code != expected_function_code) |
| 4390 | return -EINVAL; |
| 4391 | |
| 4392 | if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) |
| 4393 | return -EINVAL; |
| 4394 | |
| 4395 | return 0; |
| 4396 | } |
| 4397 | |
| 4398 | static int pqi_submit_admin_request_synchronous( |
| 4399 | struct pqi_ctrl_info *ctrl_info, |
| 4400 | struct pqi_general_admin_request *request, |
| 4401 | struct pqi_general_admin_response *response) |
| 4402 | { |
| 4403 | int rc; |
| 4404 | |
| 4405 | pqi_submit_admin_request(ctrl_info, request); |
| 4406 | |
| 4407 | rc = pqi_poll_for_admin_response(ctrl_info, response); |
| 4408 | |
| 4409 | if (rc == 0) |
| 4410 | rc = pqi_validate_admin_response(response, request->function_code); |
| 4411 | |
| 4412 | return rc; |
| 4413 | } |
| 4414 | |
| 4415 | static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) |
| 4416 | { |
| 4417 | int rc; |
| 4418 | struct pqi_general_admin_request request; |
| 4419 | struct pqi_general_admin_response response; |
| 4420 | struct pqi_device_capability *capability; |
| 4421 | struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor; |
| 4422 | |
| 4423 | capability = kmalloc(sizeof(*capability), GFP_KERNEL); |
| 4424 | if (!capability) |
| 4425 | return -ENOMEM; |
| 4426 | |
| 4427 | memset(&request, 0, sizeof(request)); |
| 4428 | |
| 4429 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; |
| 4430 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, |
| 4431 | &request.header.iu_length); |
| 4432 | request.function_code = |
| 4433 | PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; |
| 4434 | put_unaligned_le32(sizeof(*capability), |
| 4435 | &request.data.report_device_capability.buffer_length); |
| 4436 | |
| 4437 | rc = pqi_map_single(ctrl_info->pci_dev, |
| 4438 | &request.data.report_device_capability.sg_descriptor, |
| 4439 | capability, sizeof(*capability), |
| 4440 | DMA_FROM_DEVICE); |
| 4441 | if (rc) |
| 4442 | goto out; |
| 4443 | |
| 4444 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response); |
| 4445 | |
| 4446 | pqi_pci_unmap(ctrl_info->pci_dev, |
| 4447 | &request.data.report_device_capability.sg_descriptor, 1, |
| 4448 | DMA_FROM_DEVICE); |
| 4449 | |
| 4450 | if (rc) |
| 4451 | goto out; |
| 4452 | |
| 4453 | if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { |
| 4454 | rc = -EIO; |
| 4455 | goto out; |
| 4456 | } |
| 4457 | |
| 4458 | ctrl_info->max_inbound_queues = |
| 4459 | get_unaligned_le16(&capability->max_inbound_queues); |
| 4460 | ctrl_info->max_elements_per_iq = |
| 4461 | get_unaligned_le16(&capability->max_elements_per_iq); |
| 4462 | ctrl_info->max_iq_element_length = |
| 4463 | get_unaligned_le16(&capability->max_iq_element_length) |
| 4464 | * 16; |
| 4465 | ctrl_info->max_outbound_queues = |
| 4466 | get_unaligned_le16(&capability->max_outbound_queues); |
| 4467 | ctrl_info->max_elements_per_oq = |
| 4468 | get_unaligned_le16(&capability->max_elements_per_oq); |
| 4469 | ctrl_info->max_oq_element_length = |
| 4470 | get_unaligned_le16(&capability->max_oq_element_length) |
| 4471 | * 16; |
| 4472 | |
| 4473 | sop_iu_layer_descriptor = |
| 4474 | &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; |
| 4475 | |
| 4476 | ctrl_info->max_inbound_iu_length_per_firmware = |
| 4477 | get_unaligned_le16( |
| 4478 | &sop_iu_layer_descriptor->max_inbound_iu_length); |
| 4479 | ctrl_info->inbound_spanning_supported = |
| 4480 | sop_iu_layer_descriptor->inbound_spanning_supported; |
| 4481 | ctrl_info->outbound_spanning_supported = |
| 4482 | sop_iu_layer_descriptor->outbound_spanning_supported; |
| 4483 | |
| 4484 | out: |
| 4485 | kfree(capability); |
| 4486 | |
| 4487 | return rc; |
| 4488 | } |
| 4489 | |
| 4490 | static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) |
| 4491 | { |
| 4492 | if (ctrl_info->max_iq_element_length < |
| 4493 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { |
| 4494 | dev_err(&ctrl_info->pci_dev->dev, |
| 4495 | "max. inbound queue element length of %d is less than the required length of %d\n", |
| 4496 | ctrl_info->max_iq_element_length, |
| 4497 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
| 4498 | return -EINVAL; |
| 4499 | } |
| 4500 | |
| 4501 | if (ctrl_info->max_oq_element_length < |
| 4502 | PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { |
| 4503 | dev_err(&ctrl_info->pci_dev->dev, |
| 4504 | "max. outbound queue element length of %d is less than the required length of %d\n", |
| 4505 | ctrl_info->max_oq_element_length, |
| 4506 | PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); |
| 4507 | return -EINVAL; |
| 4508 | } |
| 4509 | |
| 4510 | if (ctrl_info->max_inbound_iu_length_per_firmware < |
| 4511 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { |
| 4512 | dev_err(&ctrl_info->pci_dev->dev, |
| 4513 | "max. inbound IU length of %u is less than the min. required length of %d\n", |
| 4514 | ctrl_info->max_inbound_iu_length_per_firmware, |
| 4515 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
| 4516 | return -EINVAL; |
| 4517 | } |
| 4518 | |
| 4519 | if (!ctrl_info->inbound_spanning_supported) { |
| 4520 | dev_err(&ctrl_info->pci_dev->dev, |
| 4521 | "the controller does not support inbound spanning\n"); |
| 4522 | return -EINVAL; |
| 4523 | } |
| 4524 | |
| 4525 | if (ctrl_info->outbound_spanning_supported) { |
| 4526 | dev_err(&ctrl_info->pci_dev->dev, |
| 4527 | "the controller supports outbound spanning but this driver does not\n"); |
| 4528 | return -EINVAL; |
| 4529 | } |
| 4530 | |
| 4531 | return 0; |
| 4532 | } |
| 4533 | |
| 4534 | static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) |
| 4535 | { |
| 4536 | int rc; |
| 4537 | struct pqi_event_queue *event_queue; |
| 4538 | struct pqi_general_admin_request request; |
| 4539 | struct pqi_general_admin_response response; |
| 4540 | |
| 4541 | event_queue = &ctrl_info->event_queue; |
| 4542 | |
| 4543 | /* |
| 4544 | * Create OQ (Outbound Queue - device to host queue) to dedicate |
| 4545 | * to events. |
| 4546 | */ |
| 4547 | memset(&request, 0, sizeof(request)); |
| 4548 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; |
| 4549 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, |
| 4550 | &request.header.iu_length); |
| 4551 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; |
| 4552 | put_unaligned_le16(event_queue->oq_id, |
| 4553 | &request.data.create_operational_oq.queue_id); |
| 4554 | put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, |
| 4555 | &request.data.create_operational_oq.element_array_addr); |
| 4556 | put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, |
| 4557 | &request.data.create_operational_oq.pi_addr); |
| 4558 | put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, |
| 4559 | &request.data.create_operational_oq.num_elements); |
| 4560 | put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, |
| 4561 | &request.data.create_operational_oq.element_length); |
| 4562 | request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; |
| 4563 | put_unaligned_le16(event_queue->int_msg_num, |
| 4564 | &request.data.create_operational_oq.int_msg_num); |
| 4565 | |
| 4566 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, |
| 4567 | &response); |
| 4568 | if (rc) |
| 4569 | return rc; |
| 4570 | |
| 4571 | event_queue->oq_ci = ctrl_info->iomem_base + |
| 4572 | PQI_DEVICE_REGISTERS_OFFSET + |
| 4573 | get_unaligned_le64( |
| 4574 | &response.data.create_operational_oq.oq_ci_offset); |
| 4575 | |
| 4576 | return 0; |
| 4577 | } |
| 4578 | |
| 4579 | static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, |
| 4580 | unsigned int group_number) |
| 4581 | { |
| 4582 | int rc; |
| 4583 | struct pqi_queue_group *queue_group; |
| 4584 | struct pqi_general_admin_request request; |
| 4585 | struct pqi_general_admin_response response; |
| 4586 | |
| 4587 | queue_group = &ctrl_info->queue_groups[group_number]; |
| 4588 | |
| 4589 | /* |
| 4590 | * Create IQ (Inbound Queue - host to device queue) for |
| 4591 | * RAID path. |
| 4592 | */ |
| 4593 | memset(&request, 0, sizeof(request)); |
| 4594 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; |
| 4595 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, |
| 4596 | &request.header.iu_length); |
| 4597 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; |
| 4598 | put_unaligned_le16(queue_group->iq_id[RAID_PATH], |
| 4599 | &request.data.create_operational_iq.queue_id); |
| 4600 | put_unaligned_le64( |
| 4601 | (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], |
| 4602 | &request.data.create_operational_iq.element_array_addr); |
| 4603 | put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], |
| 4604 | &request.data.create_operational_iq.ci_addr); |
| 4605 | put_unaligned_le16(ctrl_info->num_elements_per_iq, |
| 4606 | &request.data.create_operational_iq.num_elements); |
| 4607 | put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, |
| 4608 | &request.data.create_operational_iq.element_length); |
| 4609 | request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; |
| 4610 | |
| 4611 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, |
| 4612 | &response); |
| 4613 | if (rc) { |
| 4614 | dev_err(&ctrl_info->pci_dev->dev, |
| 4615 | "error creating inbound RAID queue\n"); |
| 4616 | return rc; |
| 4617 | } |
| 4618 | |
| 4619 | queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + |
| 4620 | PQI_DEVICE_REGISTERS_OFFSET + |
| 4621 | get_unaligned_le64( |
| 4622 | &response.data.create_operational_iq.iq_pi_offset); |
| 4623 | |
| 4624 | /* |
| 4625 | * Create IQ (Inbound Queue - host to device queue) for |
| 4626 | * Advanced I/O (AIO) path. |
| 4627 | */ |
| 4628 | memset(&request, 0, sizeof(request)); |
| 4629 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; |
| 4630 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, |
| 4631 | &request.header.iu_length); |
| 4632 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; |
| 4633 | put_unaligned_le16(queue_group->iq_id[AIO_PATH], |
| 4634 | &request.data.create_operational_iq.queue_id); |
| 4635 | put_unaligned_le64((u64)queue_group-> |
| 4636 | iq_element_array_bus_addr[AIO_PATH], |
| 4637 | &request.data.create_operational_iq.element_array_addr); |
| 4638 | put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], |
| 4639 | &request.data.create_operational_iq.ci_addr); |
| 4640 | put_unaligned_le16(ctrl_info->num_elements_per_iq, |
| 4641 | &request.data.create_operational_iq.num_elements); |
| 4642 | put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, |
| 4643 | &request.data.create_operational_iq.element_length); |
| 4644 | request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; |
| 4645 | |
| 4646 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, |
| 4647 | &response); |
| 4648 | if (rc) { |
| 4649 | dev_err(&ctrl_info->pci_dev->dev, |
| 4650 | "error creating inbound AIO queue\n"); |
| 4651 | return rc; |
| 4652 | } |
| 4653 | |
| 4654 | queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + |
| 4655 | PQI_DEVICE_REGISTERS_OFFSET + |
| 4656 | get_unaligned_le64( |
| 4657 | &response.data.create_operational_iq.iq_pi_offset); |
| 4658 | |
| 4659 | /* |
| 4660 | * Designate the 2nd IQ as the AIO path. By default, all IQs are |
| 4661 | * assumed to be for RAID path I/O unless we change the queue's |
| 4662 | * property. |
| 4663 | */ |
| 4664 | memset(&request, 0, sizeof(request)); |
| 4665 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; |
| 4666 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, |
| 4667 | &request.header.iu_length); |
| 4668 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; |
| 4669 | put_unaligned_le16(queue_group->iq_id[AIO_PATH], |
| 4670 | &request.data.change_operational_iq_properties.queue_id); |
| 4671 | put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, |
| 4672 | &request.data.change_operational_iq_properties.vendor_specific); |
| 4673 | |
| 4674 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, |
| 4675 | &response); |
| 4676 | if (rc) { |
| 4677 | dev_err(&ctrl_info->pci_dev->dev, |
| 4678 | "error changing queue property\n"); |
| 4679 | return rc; |
| 4680 | } |
| 4681 | |
| 4682 | /* |
| 4683 | * Create OQ (Outbound Queue - device to host queue). |
| 4684 | */ |
| 4685 | memset(&request, 0, sizeof(request)); |
| 4686 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; |
| 4687 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, |
| 4688 | &request.header.iu_length); |
| 4689 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; |
| 4690 | put_unaligned_le16(queue_group->oq_id, |
| 4691 | &request.data.create_operational_oq.queue_id); |
| 4692 | put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, |
| 4693 | &request.data.create_operational_oq.element_array_addr); |
| 4694 | put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, |
| 4695 | &request.data.create_operational_oq.pi_addr); |
| 4696 | put_unaligned_le16(ctrl_info->num_elements_per_oq, |
| 4697 | &request.data.create_operational_oq.num_elements); |
| 4698 | put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, |
| 4699 | &request.data.create_operational_oq.element_length); |
| 4700 | request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; |
| 4701 | put_unaligned_le16(queue_group->int_msg_num, |
| 4702 | &request.data.create_operational_oq.int_msg_num); |
| 4703 | |
| 4704 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, |
| 4705 | &response); |
| 4706 | if (rc) { |
| 4707 | dev_err(&ctrl_info->pci_dev->dev, |
| 4708 | "error creating outbound queue\n"); |
| 4709 | return rc; |
| 4710 | } |
| 4711 | |
| 4712 | queue_group->oq_ci = ctrl_info->iomem_base + |
| 4713 | PQI_DEVICE_REGISTERS_OFFSET + |
| 4714 | get_unaligned_le64( |
| 4715 | &response.data.create_operational_oq.oq_ci_offset); |
| 4716 | |
| 4717 | return 0; |
| 4718 | } |
| 4719 | |
| 4720 | static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) |
| 4721 | { |
| 4722 | int rc; |
| 4723 | unsigned int i; |
| 4724 | |
| 4725 | rc = pqi_create_event_queue(ctrl_info); |
| 4726 | if (rc) { |
| 4727 | dev_err(&ctrl_info->pci_dev->dev, |
| 4728 | "error creating event queue\n"); |
| 4729 | return rc; |
| 4730 | } |
| 4731 | |
| 4732 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
| 4733 | rc = pqi_create_queue_group(ctrl_info, i); |
| 4734 | if (rc) { |
| 4735 | dev_err(&ctrl_info->pci_dev->dev, |
| 4736 | "error creating queue group number %u/%u\n", |
| 4737 | i, ctrl_info->num_queue_groups); |
| 4738 | return rc; |
| 4739 | } |
| 4740 | } |
| 4741 | |
| 4742 | return 0; |
| 4743 | } |
| 4744 | |
| 4745 | #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ |
| 4746 | (offsetof(struct pqi_event_config, descriptors) + \ |
| 4747 | (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor))) |
| 4748 | |
| 4749 | static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, |
| 4750 | bool enable_events) |
| 4751 | { |
| 4752 | int rc; |
| 4753 | unsigned int i; |
| 4754 | struct pqi_event_config *event_config; |
| 4755 | struct pqi_event_descriptor *event_descriptor; |
| 4756 | struct pqi_general_management_request request; |
| 4757 | |
| 4758 | event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, |
| 4759 | GFP_KERNEL); |
| 4760 | if (!event_config) |
| 4761 | return -ENOMEM; |
| 4762 | |
| 4763 | memset(&request, 0, sizeof(request)); |
| 4764 | |
| 4765 | request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; |
| 4766 | put_unaligned_le16(offsetof(struct pqi_general_management_request, |
| 4767 | data.report_event_configuration.sg_descriptors[1]) - |
| 4768 | PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); |
| 4769 | put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, |
| 4770 | &request.data.report_event_configuration.buffer_length); |
| 4771 | |
| 4772 | rc = pqi_map_single(ctrl_info->pci_dev, |
| 4773 | request.data.report_event_configuration.sg_descriptors, |
| 4774 | event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, |
| 4775 | DMA_FROM_DEVICE); |
| 4776 | if (rc) |
| 4777 | goto out; |
| 4778 | |
| 4779 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); |
| 4780 | |
| 4781 | pqi_pci_unmap(ctrl_info->pci_dev, |
| 4782 | request.data.report_event_configuration.sg_descriptors, 1, |
| 4783 | DMA_FROM_DEVICE); |
| 4784 | |
| 4785 | if (rc) |
| 4786 | goto out; |
| 4787 | |
| 4788 | for (i = 0; i < event_config->num_event_descriptors; i++) { |
| 4789 | event_descriptor = &event_config->descriptors[i]; |
| 4790 | if (enable_events && |
| 4791 | pqi_is_supported_event(event_descriptor->event_type)) |
| 4792 | put_unaligned_le16(ctrl_info->event_queue.oq_id, |
| 4793 | &event_descriptor->oq_id); |
| 4794 | else |
| 4795 | put_unaligned_le16(0, &event_descriptor->oq_id); |
| 4796 | } |
| 4797 | |
| 4798 | memset(&request, 0, sizeof(request)); |
| 4799 | |
| 4800 | request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; |
| 4801 | put_unaligned_le16(offsetof(struct pqi_general_management_request, |
| 4802 | data.report_event_configuration.sg_descriptors[1]) - |
| 4803 | PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); |
| 4804 | put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, |
| 4805 | &request.data.report_event_configuration.buffer_length); |
| 4806 | |
| 4807 | rc = pqi_map_single(ctrl_info->pci_dev, |
| 4808 | request.data.report_event_configuration.sg_descriptors, |
| 4809 | event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, |
| 4810 | DMA_TO_DEVICE); |
| 4811 | if (rc) |
| 4812 | goto out; |
| 4813 | |
| 4814 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); |
| 4815 | |
| 4816 | pqi_pci_unmap(ctrl_info->pci_dev, |
| 4817 | request.data.report_event_configuration.sg_descriptors, 1, |
| 4818 | DMA_TO_DEVICE); |
| 4819 | |
| 4820 | out: |
| 4821 | kfree(event_config); |
| 4822 | |
| 4823 | return rc; |
| 4824 | } |
| 4825 | |
| 4826 | static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) |
| 4827 | { |
| 4828 | return pqi_configure_events(ctrl_info, true); |
| 4829 | } |
| 4830 | |
| 4831 | static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info) |
| 4832 | { |
| 4833 | return pqi_configure_events(ctrl_info, false); |
| 4834 | } |
| 4835 | |
| 4836 | static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) |
| 4837 | { |
| 4838 | unsigned int i; |
| 4839 | struct device *dev; |
| 4840 | size_t sg_chain_buffer_length; |
| 4841 | struct pqi_io_request *io_request; |
| 4842 | |
| 4843 | if (!ctrl_info->io_request_pool) |
| 4844 | return; |
| 4845 | |
| 4846 | dev = &ctrl_info->pci_dev->dev; |
| 4847 | sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; |
| 4848 | io_request = ctrl_info->io_request_pool; |
| 4849 | |
| 4850 | for (i = 0; i < ctrl_info->max_io_slots; i++) { |
| 4851 | kfree(io_request->iu); |
| 4852 | if (!io_request->sg_chain_buffer) |
| 4853 | break; |
| 4854 | dma_free_coherent(dev, sg_chain_buffer_length, |
| 4855 | io_request->sg_chain_buffer, |
| 4856 | io_request->sg_chain_buffer_dma_handle); |
| 4857 | io_request++; |
| 4858 | } |
| 4859 | |
| 4860 | kfree(ctrl_info->io_request_pool); |
| 4861 | ctrl_info->io_request_pool = NULL; |
| 4862 | } |
| 4863 | |
| 4864 | static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) |
| 4865 | { |
| 4866 | ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, |
| 4867 | ctrl_info->error_buffer_length, |
| 4868 | &ctrl_info->error_buffer_dma_handle, |
| 4869 | GFP_KERNEL); |
| 4870 | if (!ctrl_info->error_buffer) |
| 4871 | return -ENOMEM; |
| 4872 | |
| 4873 | return 0; |
| 4874 | } |
| 4875 | |
| 4876 | static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) |
| 4877 | { |
| 4878 | unsigned int i; |
| 4879 | void *sg_chain_buffer; |
| 4880 | size_t sg_chain_buffer_length; |
| 4881 | dma_addr_t sg_chain_buffer_dma_handle; |
| 4882 | struct device *dev; |
| 4883 | struct pqi_io_request *io_request; |
| 4884 | |
| 4885 | ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots, |
| 4886 | sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); |
| 4887 | |
| 4888 | if (!ctrl_info->io_request_pool) { |
| 4889 | dev_err(&ctrl_info->pci_dev->dev, |
| 4890 | "failed to allocate I/O request pool\n"); |
| 4891 | goto error; |
| 4892 | } |
| 4893 | |
| 4894 | dev = &ctrl_info->pci_dev->dev; |
| 4895 | sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; |
| 4896 | io_request = ctrl_info->io_request_pool; |
| 4897 | |
| 4898 | for (i = 0; i < ctrl_info->max_io_slots; i++) { |
| 4899 | io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); |
| 4900 | |
| 4901 | if (!io_request->iu) { |
| 4902 | dev_err(&ctrl_info->pci_dev->dev, |
| 4903 | "failed to allocate IU buffers\n"); |
| 4904 | goto error; |
| 4905 | } |
| 4906 | |
| 4907 | sg_chain_buffer = dma_alloc_coherent(dev, |
| 4908 | sg_chain_buffer_length, &sg_chain_buffer_dma_handle, |
| 4909 | GFP_KERNEL); |
| 4910 | |
| 4911 | if (!sg_chain_buffer) { |
| 4912 | dev_err(&ctrl_info->pci_dev->dev, |
| 4913 | "failed to allocate PQI scatter-gather chain buffers\n"); |
| 4914 | goto error; |
| 4915 | } |
| 4916 | |
| 4917 | io_request->index = i; |
| 4918 | io_request->sg_chain_buffer = sg_chain_buffer; |
| 4919 | io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle; |
| 4920 | io_request++; |
| 4921 | } |
| 4922 | |
| 4923 | return 0; |
| 4924 | |
| 4925 | error: |
| 4926 | pqi_free_all_io_requests(ctrl_info); |
| 4927 | |
| 4928 | return -ENOMEM; |
| 4929 | } |
| 4930 | |
| 4931 | /* |
| 4932 | * Calculate required resources that are sized based on max. outstanding |
| 4933 | * requests and max. transfer size. |
| 4934 | */ |
| 4935 | |
| 4936 | static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) |
| 4937 | { |
| 4938 | u32 max_transfer_size; |
| 4939 | u32 max_sg_entries; |
| 4940 | |
| 4941 | ctrl_info->scsi_ml_can_queue = |
| 4942 | ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; |
| 4943 | ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; |
| 4944 | |
| 4945 | ctrl_info->error_buffer_length = |
| 4946 | ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; |
| 4947 | |
| 4948 | if (reset_devices) |
| 4949 | max_transfer_size = min(ctrl_info->max_transfer_size, |
| 4950 | PQI_MAX_TRANSFER_SIZE_KDUMP); |
| 4951 | else |
| 4952 | max_transfer_size = min(ctrl_info->max_transfer_size, |
| 4953 | PQI_MAX_TRANSFER_SIZE); |
| 4954 | |
| 4955 | max_sg_entries = max_transfer_size / PAGE_SIZE; |
| 4956 | |
| 4957 | /* +1 to cover when the buffer is not page-aligned. */ |
| 4958 | max_sg_entries++; |
| 4959 | |
| 4960 | max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); |
| 4961 | |
| 4962 | max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; |
| 4963 | |
| 4964 | ctrl_info->sg_chain_buffer_length = |
| 4965 | (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + |
| 4966 | PQI_EXTRA_SGL_MEMORY; |
| 4967 | ctrl_info->sg_tablesize = max_sg_entries; |
| 4968 | ctrl_info->max_sectors = max_transfer_size / 512; |
| 4969 | } |
| 4970 | |
| 4971 | static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) |
| 4972 | { |
| 4973 | int num_queue_groups; |
| 4974 | u16 num_elements_per_iq; |
| 4975 | u16 num_elements_per_oq; |
| 4976 | |
| 4977 | if (reset_devices) { |
| 4978 | num_queue_groups = 1; |
| 4979 | } else { |
| 4980 | int num_cpus; |
| 4981 | int max_queue_groups; |
| 4982 | |
| 4983 | max_queue_groups = min(ctrl_info->max_inbound_queues / 2, |
| 4984 | ctrl_info->max_outbound_queues - 1); |
| 4985 | max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); |
| 4986 | |
| 4987 | num_cpus = num_online_cpus(); |
| 4988 | num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); |
| 4989 | num_queue_groups = min(num_queue_groups, max_queue_groups); |
| 4990 | } |
| 4991 | |
| 4992 | ctrl_info->num_queue_groups = num_queue_groups; |
| 4993 | ctrl_info->max_hw_queue_index = num_queue_groups - 1; |
| 4994 | |
| 4995 | /* |
| 4996 | * Make sure that the max. inbound IU length is an even multiple |
| 4997 | * of our inbound element length. |
| 4998 | */ |
| 4999 | ctrl_info->max_inbound_iu_length = |
| 5000 | (ctrl_info->max_inbound_iu_length_per_firmware / |
| 5001 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * |
| 5002 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; |
| 5003 | |
| 5004 | num_elements_per_iq = |
| 5005 | (ctrl_info->max_inbound_iu_length / |
| 5006 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
| 5007 | |
| 5008 | /* Add one because one element in each queue is unusable. */ |
| 5009 | num_elements_per_iq++; |
| 5010 | |
| 5011 | num_elements_per_iq = min(num_elements_per_iq, |
| 5012 | ctrl_info->max_elements_per_iq); |
| 5013 | |
| 5014 | num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; |
| 5015 | num_elements_per_oq = min(num_elements_per_oq, |
| 5016 | ctrl_info->max_elements_per_oq); |
| 5017 | |
| 5018 | ctrl_info->num_elements_per_iq = num_elements_per_iq; |
| 5019 | ctrl_info->num_elements_per_oq = num_elements_per_oq; |
| 5020 | |
| 5021 | ctrl_info->max_sg_per_iu = |
| 5022 | ((ctrl_info->max_inbound_iu_length - |
| 5023 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / |
| 5024 | sizeof(struct pqi_sg_descriptor)) + |
| 5025 | PQI_MAX_EMBEDDED_SG_DESCRIPTORS; |
| 5026 | |
| 5027 | ctrl_info->max_sg_per_r56_iu = |
| 5028 | ((ctrl_info->max_inbound_iu_length - |
| 5029 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / |
| 5030 | sizeof(struct pqi_sg_descriptor)) + |
| 5031 | PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS; |
| 5032 | } |
| 5033 | |
| 5034 | static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor, |
| 5035 | struct scatterlist *sg) |
| 5036 | { |
| 5037 | u64 address = (u64)sg_dma_address(sg); |
| 5038 | unsigned int length = sg_dma_len(sg); |
| 5039 | |
| 5040 | put_unaligned_le64(address, &sg_descriptor->address); |
| 5041 | put_unaligned_le32(length, &sg_descriptor->length); |
| 5042 | put_unaligned_le32(0, &sg_descriptor->flags); |
| 5043 | } |
| 5044 | |
| 5045 | static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor, |
| 5046 | struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request, |
| 5047 | int max_sg_per_iu, bool *chained) |
| 5048 | { |
| 5049 | int i; |
| 5050 | unsigned int num_sg_in_iu; |
| 5051 | |
| 5052 | *chained = false; |
| 5053 | i = 0; |
| 5054 | num_sg_in_iu = 0; |
| 5055 | max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */ |
| 5056 | |
| 5057 | while (1) { |
| 5058 | pqi_set_sg_descriptor(sg_descriptor, sg); |
| 5059 | if (!*chained) |
| 5060 | num_sg_in_iu++; |
| 5061 | i++; |
| 5062 | if (i == sg_count) |
| 5063 | break; |
| 5064 | sg_descriptor++; |
| 5065 | if (i == max_sg_per_iu) { |
| 5066 | put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle, |
| 5067 | &sg_descriptor->address); |
| 5068 | put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor), |
| 5069 | &sg_descriptor->length); |
| 5070 | put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags); |
| 5071 | *chained = true; |
| 5072 | num_sg_in_iu++; |
| 5073 | sg_descriptor = io_request->sg_chain_buffer; |
| 5074 | } |
| 5075 | sg = sg_next(sg); |
| 5076 | } |
| 5077 | |
| 5078 | put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); |
| 5079 | |
| 5080 | return num_sg_in_iu; |
| 5081 | } |
| 5082 | |
| 5083 | static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, |
| 5084 | struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, |
| 5085 | struct pqi_io_request *io_request) |
| 5086 | { |
| 5087 | u16 iu_length; |
| 5088 | int sg_count; |
| 5089 | bool chained; |
| 5090 | unsigned int num_sg_in_iu; |
| 5091 | struct scatterlist *sg; |
| 5092 | struct pqi_sg_descriptor *sg_descriptor; |
| 5093 | |
| 5094 | sg_count = scsi_dma_map(scmd); |
| 5095 | if (sg_count < 0) |
| 5096 | return sg_count; |
| 5097 | |
| 5098 | iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - |
| 5099 | PQI_REQUEST_HEADER_LENGTH; |
| 5100 | |
| 5101 | if (sg_count == 0) |
| 5102 | goto out; |
| 5103 | |
| 5104 | sg = scsi_sglist(scmd); |
| 5105 | sg_descriptor = request->sg_descriptors; |
| 5106 | |
| 5107 | num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, |
| 5108 | ctrl_info->max_sg_per_iu, &chained); |
| 5109 | |
| 5110 | request->partial = chained; |
| 5111 | iu_length += num_sg_in_iu * sizeof(*sg_descriptor); |
| 5112 | |
| 5113 | out: |
| 5114 | put_unaligned_le16(iu_length, &request->header.iu_length); |
| 5115 | |
| 5116 | return 0; |
| 5117 | } |
| 5118 | |
| 5119 | static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info, |
| 5120 | struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd, |
| 5121 | struct pqi_io_request *io_request) |
| 5122 | { |
| 5123 | u16 iu_length; |
| 5124 | int sg_count; |
| 5125 | bool chained; |
| 5126 | unsigned int num_sg_in_iu; |
| 5127 | struct scatterlist *sg; |
| 5128 | struct pqi_sg_descriptor *sg_descriptor; |
| 5129 | |
| 5130 | sg_count = scsi_dma_map(scmd); |
| 5131 | if (sg_count < 0) |
| 5132 | return sg_count; |
| 5133 | |
| 5134 | iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) - |
| 5135 | PQI_REQUEST_HEADER_LENGTH; |
| 5136 | num_sg_in_iu = 0; |
| 5137 | |
| 5138 | if (sg_count == 0) |
| 5139 | goto out; |
| 5140 | |
| 5141 | sg = scsi_sglist(scmd); |
| 5142 | sg_descriptor = request->sg_descriptors; |
| 5143 | |
| 5144 | num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, |
| 5145 | ctrl_info->max_sg_per_iu, &chained); |
| 5146 | |
| 5147 | request->partial = chained; |
| 5148 | iu_length += num_sg_in_iu * sizeof(*sg_descriptor); |
| 5149 | |
| 5150 | out: |
| 5151 | put_unaligned_le16(iu_length, &request->header.iu_length); |
| 5152 | request->num_sg_descriptors = num_sg_in_iu; |
| 5153 | |
| 5154 | return 0; |
| 5155 | } |
| 5156 | |
| 5157 | static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info, |
| 5158 | struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd, |
| 5159 | struct pqi_io_request *io_request) |
| 5160 | { |
| 5161 | u16 iu_length; |
| 5162 | int sg_count; |
| 5163 | bool chained; |
| 5164 | unsigned int num_sg_in_iu; |
| 5165 | struct scatterlist *sg; |
| 5166 | struct pqi_sg_descriptor *sg_descriptor; |
| 5167 | |
| 5168 | sg_count = scsi_dma_map(scmd); |
| 5169 | if (sg_count < 0) |
| 5170 | return sg_count; |
| 5171 | |
| 5172 | iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) - |
| 5173 | PQI_REQUEST_HEADER_LENGTH; |
| 5174 | num_sg_in_iu = 0; |
| 5175 | |
| 5176 | if (sg_count != 0) { |
| 5177 | sg = scsi_sglist(scmd); |
| 5178 | sg_descriptor = request->sg_descriptors; |
| 5179 | |
| 5180 | num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, |
| 5181 | ctrl_info->max_sg_per_r56_iu, &chained); |
| 5182 | |
| 5183 | request->partial = chained; |
| 5184 | iu_length += num_sg_in_iu * sizeof(*sg_descriptor); |
| 5185 | } |
| 5186 | |
| 5187 | put_unaligned_le16(iu_length, &request->header.iu_length); |
| 5188 | request->num_sg_descriptors = num_sg_in_iu; |
| 5189 | |
| 5190 | return 0; |
| 5191 | } |
| 5192 | |
| 5193 | static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, |
| 5194 | struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, |
| 5195 | struct pqi_io_request *io_request) |
| 5196 | { |
| 5197 | u16 iu_length; |
| 5198 | int sg_count; |
| 5199 | bool chained; |
| 5200 | unsigned int num_sg_in_iu; |
| 5201 | struct scatterlist *sg; |
| 5202 | struct pqi_sg_descriptor *sg_descriptor; |
| 5203 | |
| 5204 | sg_count = scsi_dma_map(scmd); |
| 5205 | if (sg_count < 0) |
| 5206 | return sg_count; |
| 5207 | |
| 5208 | iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - |
| 5209 | PQI_REQUEST_HEADER_LENGTH; |
| 5210 | num_sg_in_iu = 0; |
| 5211 | |
| 5212 | if (sg_count == 0) |
| 5213 | goto out; |
| 5214 | |
| 5215 | sg = scsi_sglist(scmd); |
| 5216 | sg_descriptor = request->sg_descriptors; |
| 5217 | |
| 5218 | num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, |
| 5219 | ctrl_info->max_sg_per_iu, &chained); |
| 5220 | |
| 5221 | request->partial = chained; |
| 5222 | iu_length += num_sg_in_iu * sizeof(*sg_descriptor); |
| 5223 | |
| 5224 | out: |
| 5225 | put_unaligned_le16(iu_length, &request->header.iu_length); |
| 5226 | request->num_sg_descriptors = num_sg_in_iu; |
| 5227 | |
| 5228 | return 0; |
| 5229 | } |
| 5230 | |
| 5231 | static void pqi_raid_io_complete(struct pqi_io_request *io_request, |
| 5232 | void *context) |
| 5233 | { |
| 5234 | struct scsi_cmnd *scmd; |
| 5235 | |
| 5236 | scmd = io_request->scmd; |
| 5237 | pqi_free_io_request(io_request); |
| 5238 | scsi_dma_unmap(scmd); |
| 5239 | pqi_scsi_done(scmd); |
| 5240 | } |
| 5241 | |
| 5242 | static int pqi_raid_submit_scsi_cmd_with_io_request( |
| 5243 | struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, |
| 5244 | struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, |
| 5245 | struct pqi_queue_group *queue_group) |
| 5246 | { |
| 5247 | int rc; |
| 5248 | size_t cdb_length; |
| 5249 | struct pqi_raid_path_request *request; |
| 5250 | |
| 5251 | io_request->io_complete_callback = pqi_raid_io_complete; |
| 5252 | io_request->scmd = scmd; |
| 5253 | |
| 5254 | request = io_request->iu; |
| 5255 | memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors)); |
| 5256 | |
| 5257 | request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; |
| 5258 | put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); |
| 5259 | request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; |
| 5260 | put_unaligned_le16(io_request->index, &request->request_id); |
| 5261 | request->error_index = request->request_id; |
| 5262 | memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); |
| 5263 | |
| 5264 | cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); |
| 5265 | memcpy(request->cdb, scmd->cmnd, cdb_length); |
| 5266 | |
| 5267 | switch (cdb_length) { |
| 5268 | case 6: |
| 5269 | case 10: |
| 5270 | case 12: |
| 5271 | case 16: |
| 5272 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; |
| 5273 | break; |
| 5274 | case 20: |
| 5275 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4; |
| 5276 | break; |
| 5277 | case 24: |
| 5278 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8; |
| 5279 | break; |
| 5280 | case 28: |
| 5281 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12; |
| 5282 | break; |
| 5283 | case 32: |
| 5284 | default: |
| 5285 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16; |
| 5286 | break; |
| 5287 | } |
| 5288 | |
| 5289 | switch (scmd->sc_data_direction) { |
| 5290 | case DMA_TO_DEVICE: |
| 5291 | request->data_direction = SOP_READ_FLAG; |
| 5292 | break; |
| 5293 | case DMA_FROM_DEVICE: |
| 5294 | request->data_direction = SOP_WRITE_FLAG; |
| 5295 | break; |
| 5296 | case DMA_NONE: |
| 5297 | request->data_direction = SOP_NO_DIRECTION_FLAG; |
| 5298 | break; |
| 5299 | case DMA_BIDIRECTIONAL: |
| 5300 | request->data_direction = SOP_BIDIRECTIONAL; |
| 5301 | break; |
| 5302 | default: |
| 5303 | dev_err(&ctrl_info->pci_dev->dev, |
| 5304 | "unknown data direction: %d\n", |
| 5305 | scmd->sc_data_direction); |
| 5306 | break; |
| 5307 | } |
| 5308 | |
| 5309 | rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); |
| 5310 | if (rc) { |
| 5311 | pqi_free_io_request(io_request); |
| 5312 | return SCSI_MLQUEUE_HOST_BUSY; |
| 5313 | } |
| 5314 | |
| 5315 | pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); |
| 5316 | |
| 5317 | return 0; |
| 5318 | } |
| 5319 | |
| 5320 | static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, |
| 5321 | struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, |
| 5322 | struct pqi_queue_group *queue_group) |
| 5323 | { |
| 5324 | struct pqi_io_request *io_request; |
| 5325 | |
| 5326 | io_request = pqi_alloc_io_request(ctrl_info); |
| 5327 | |
| 5328 | return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, |
| 5329 | device, scmd, queue_group); |
| 5330 | } |
| 5331 | |
| 5332 | static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) |
| 5333 | { |
| 5334 | struct scsi_cmnd *scmd; |
| 5335 | struct pqi_scsi_dev *device; |
| 5336 | struct pqi_ctrl_info *ctrl_info; |
| 5337 | |
| 5338 | if (!io_request->raid_bypass) |
| 5339 | return false; |
| 5340 | |
| 5341 | scmd = io_request->scmd; |
| 5342 | if ((scmd->result & 0xff) == SAM_STAT_GOOD) |
| 5343 | return false; |
| 5344 | if (host_byte(scmd->result) == DID_NO_CONNECT) |
| 5345 | return false; |
| 5346 | |
| 5347 | device = scmd->device->hostdata; |
| 5348 | if (pqi_device_offline(device) || pqi_device_in_remove(device)) |
| 5349 | return false; |
| 5350 | |
| 5351 | ctrl_info = shost_to_hba(scmd->device->host); |
| 5352 | if (pqi_ctrl_offline(ctrl_info)) |
| 5353 | return false; |
| 5354 | |
| 5355 | return true; |
| 5356 | } |
| 5357 | |
| 5358 | static void pqi_aio_io_complete(struct pqi_io_request *io_request, |
| 5359 | void *context) |
| 5360 | { |
| 5361 | struct scsi_cmnd *scmd; |
| 5362 | |
| 5363 | scmd = io_request->scmd; |
| 5364 | scsi_dma_unmap(scmd); |
| 5365 | if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) { |
| 5366 | set_host_byte(scmd, DID_IMM_RETRY); |
| 5367 | scmd->SCp.this_residual++; |
| 5368 | } |
| 5369 | |
| 5370 | pqi_free_io_request(io_request); |
| 5371 | pqi_scsi_done(scmd); |
| 5372 | } |
| 5373 | |
| 5374 | static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, |
| 5375 | struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, |
| 5376 | struct pqi_queue_group *queue_group) |
| 5377 | { |
| 5378 | return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, |
| 5379 | scmd->cmnd, scmd->cmd_len, queue_group, NULL, false); |
| 5380 | } |
| 5381 | |
| 5382 | static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, |
| 5383 | struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, |
| 5384 | unsigned int cdb_length, struct pqi_queue_group *queue_group, |
| 5385 | struct pqi_encryption_info *encryption_info, bool raid_bypass) |
| 5386 | { |
| 5387 | int rc; |
| 5388 | struct pqi_io_request *io_request; |
| 5389 | struct pqi_aio_path_request *request; |
| 5390 | |
| 5391 | io_request = pqi_alloc_io_request(ctrl_info); |
| 5392 | io_request->io_complete_callback = pqi_aio_io_complete; |
| 5393 | io_request->scmd = scmd; |
| 5394 | io_request->raid_bypass = raid_bypass; |
| 5395 | |
| 5396 | request = io_request->iu; |
| 5397 | memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors)); |
| 5398 | |
| 5399 | request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; |
| 5400 | put_unaligned_le32(aio_handle, &request->nexus_id); |
| 5401 | put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); |
| 5402 | request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; |
| 5403 | put_unaligned_le16(io_request->index, &request->request_id); |
| 5404 | request->error_index = request->request_id; |
| 5405 | if (cdb_length > sizeof(request->cdb)) |
| 5406 | cdb_length = sizeof(request->cdb); |
| 5407 | request->cdb_length = cdb_length; |
| 5408 | memcpy(request->cdb, cdb, cdb_length); |
| 5409 | |
| 5410 | switch (scmd->sc_data_direction) { |
| 5411 | case DMA_TO_DEVICE: |
| 5412 | request->data_direction = SOP_READ_FLAG; |
| 5413 | break; |
| 5414 | case DMA_FROM_DEVICE: |
| 5415 | request->data_direction = SOP_WRITE_FLAG; |
| 5416 | break; |
| 5417 | case DMA_NONE: |
| 5418 | request->data_direction = SOP_NO_DIRECTION_FLAG; |
| 5419 | break; |
| 5420 | case DMA_BIDIRECTIONAL: |
| 5421 | request->data_direction = SOP_BIDIRECTIONAL; |
| 5422 | break; |
| 5423 | default: |
| 5424 | dev_err(&ctrl_info->pci_dev->dev, |
| 5425 | "unknown data direction: %d\n", |
| 5426 | scmd->sc_data_direction); |
| 5427 | break; |
| 5428 | } |
| 5429 | |
| 5430 | if (encryption_info) { |
| 5431 | request->encryption_enable = true; |
| 5432 | put_unaligned_le16(encryption_info->data_encryption_key_index, |
| 5433 | &request->data_encryption_key_index); |
| 5434 | put_unaligned_le32(encryption_info->encrypt_tweak_lower, |
| 5435 | &request->encrypt_tweak_lower); |
| 5436 | put_unaligned_le32(encryption_info->encrypt_tweak_upper, |
| 5437 | &request->encrypt_tweak_upper); |
| 5438 | } |
| 5439 | |
| 5440 | rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); |
| 5441 | if (rc) { |
| 5442 | pqi_free_io_request(io_request); |
| 5443 | return SCSI_MLQUEUE_HOST_BUSY; |
| 5444 | } |
| 5445 | |
| 5446 | pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); |
| 5447 | |
| 5448 | return 0; |
| 5449 | } |
| 5450 | |
| 5451 | static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, |
| 5452 | struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, |
| 5453 | struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, |
| 5454 | struct pqi_scsi_dev_raid_map_data *rmd) |
| 5455 | { |
| 5456 | int rc; |
| 5457 | struct pqi_io_request *io_request; |
| 5458 | struct pqi_aio_r1_path_request *r1_request; |
| 5459 | |
| 5460 | io_request = pqi_alloc_io_request(ctrl_info); |
| 5461 | io_request->io_complete_callback = pqi_aio_io_complete; |
| 5462 | io_request->scmd = scmd; |
| 5463 | io_request->raid_bypass = true; |
| 5464 | |
| 5465 | r1_request = io_request->iu; |
| 5466 | memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors)); |
| 5467 | |
| 5468 | r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO; |
| 5469 | put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id); |
| 5470 | r1_request->num_drives = rmd->num_it_nexus_entries; |
| 5471 | put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1); |
| 5472 | put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2); |
| 5473 | if (rmd->num_it_nexus_entries == 3) |
| 5474 | put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3); |
| 5475 | |
| 5476 | put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length); |
| 5477 | r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; |
| 5478 | put_unaligned_le16(io_request->index, &r1_request->request_id); |
| 5479 | r1_request->error_index = r1_request->request_id; |
| 5480 | if (rmd->cdb_length > sizeof(r1_request->cdb)) |
| 5481 | rmd->cdb_length = sizeof(r1_request->cdb); |
| 5482 | r1_request->cdb_length = rmd->cdb_length; |
| 5483 | memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length); |
| 5484 | |
| 5485 | /* The direction is always write. */ |
| 5486 | r1_request->data_direction = SOP_READ_FLAG; |
| 5487 | |
| 5488 | if (encryption_info) { |
| 5489 | r1_request->encryption_enable = true; |
| 5490 | put_unaligned_le16(encryption_info->data_encryption_key_index, |
| 5491 | &r1_request->data_encryption_key_index); |
| 5492 | put_unaligned_le32(encryption_info->encrypt_tweak_lower, |
| 5493 | &r1_request->encrypt_tweak_lower); |
| 5494 | put_unaligned_le32(encryption_info->encrypt_tweak_upper, |
| 5495 | &r1_request->encrypt_tweak_upper); |
| 5496 | } |
| 5497 | |
| 5498 | rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request); |
| 5499 | if (rc) { |
| 5500 | pqi_free_io_request(io_request); |
| 5501 | return SCSI_MLQUEUE_HOST_BUSY; |
| 5502 | } |
| 5503 | |
| 5504 | pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); |
| 5505 | |
| 5506 | return 0; |
| 5507 | } |
| 5508 | |
| 5509 | static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, |
| 5510 | struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, |
| 5511 | struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, |
| 5512 | struct pqi_scsi_dev_raid_map_data *rmd) |
| 5513 | { |
| 5514 | int rc; |
| 5515 | struct pqi_io_request *io_request; |
| 5516 | struct pqi_aio_r56_path_request *r56_request; |
| 5517 | |
| 5518 | io_request = pqi_alloc_io_request(ctrl_info); |
| 5519 | io_request->io_complete_callback = pqi_aio_io_complete; |
| 5520 | io_request->scmd = scmd; |
| 5521 | io_request->raid_bypass = true; |
| 5522 | |
| 5523 | r56_request = io_request->iu; |
| 5524 | memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors)); |
| 5525 | |
| 5526 | if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51) |
| 5527 | r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO; |
| 5528 | else |
| 5529 | r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO; |
| 5530 | |
| 5531 | put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id); |
| 5532 | put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus); |
| 5533 | put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus); |
| 5534 | if (rmd->raid_level == SA_RAID_6) { |
| 5535 | put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus); |
| 5536 | r56_request->xor_multiplier = rmd->xor_mult; |
| 5537 | } |
| 5538 | put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length); |
| 5539 | r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; |
| 5540 | put_unaligned_le64(rmd->row, &r56_request->row); |
| 5541 | |
| 5542 | put_unaligned_le16(io_request->index, &r56_request->request_id); |
| 5543 | r56_request->error_index = r56_request->request_id; |
| 5544 | |
| 5545 | if (rmd->cdb_length > sizeof(r56_request->cdb)) |
| 5546 | rmd->cdb_length = sizeof(r56_request->cdb); |
| 5547 | r56_request->cdb_length = rmd->cdb_length; |
| 5548 | memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length); |
| 5549 | |
| 5550 | /* The direction is always write. */ |
| 5551 | r56_request->data_direction = SOP_READ_FLAG; |
| 5552 | |
| 5553 | if (encryption_info) { |
| 5554 | r56_request->encryption_enable = true; |
| 5555 | put_unaligned_le16(encryption_info->data_encryption_key_index, |
| 5556 | &r56_request->data_encryption_key_index); |
| 5557 | put_unaligned_le32(encryption_info->encrypt_tweak_lower, |
| 5558 | &r56_request->encrypt_tweak_lower); |
| 5559 | put_unaligned_le32(encryption_info->encrypt_tweak_upper, |
| 5560 | &r56_request->encrypt_tweak_upper); |
| 5561 | } |
| 5562 | |
| 5563 | rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request); |
| 5564 | if (rc) { |
| 5565 | pqi_free_io_request(io_request); |
| 5566 | return SCSI_MLQUEUE_HOST_BUSY; |
| 5567 | } |
| 5568 | |
| 5569 | pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); |
| 5570 | |
| 5571 | return 0; |
| 5572 | } |
| 5573 | |
| 5574 | static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, |
| 5575 | struct scsi_cmnd *scmd) |
| 5576 | { |
| 5577 | u16 hw_queue; |
| 5578 | |
| 5579 | hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); |
| 5580 | if (hw_queue > ctrl_info->max_hw_queue_index) |
| 5581 | hw_queue = 0; |
| 5582 | |
| 5583 | return hw_queue; |
| 5584 | } |
| 5585 | |
| 5586 | static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd) |
| 5587 | { |
| 5588 | if (blk_rq_is_passthrough(scmd->request)) |
| 5589 | return false; |
| 5590 | |
| 5591 | return scmd->SCp.this_residual == 0; |
| 5592 | } |
| 5593 | |
| 5594 | /* |
| 5595 | * This function gets called just before we hand the completed SCSI request |
| 5596 | * back to the SML. |
| 5597 | */ |
| 5598 | |
| 5599 | void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) |
| 5600 | { |
| 5601 | struct pqi_scsi_dev *device; |
| 5602 | |
| 5603 | if (!scmd->device) { |
| 5604 | set_host_byte(scmd, DID_NO_CONNECT); |
| 5605 | return; |
| 5606 | } |
| 5607 | |
| 5608 | device = scmd->device->hostdata; |
| 5609 | if (!device) { |
| 5610 | set_host_byte(scmd, DID_NO_CONNECT); |
| 5611 | return; |
| 5612 | } |
| 5613 | |
| 5614 | atomic_dec(&device->scsi_cmds_outstanding); |
| 5615 | } |
| 5616 | |
| 5617 | static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, |
| 5618 | struct scsi_cmnd *scmd) |
| 5619 | { |
| 5620 | u32 oldest_jiffies; |
| 5621 | u8 lru_index; |
| 5622 | int i; |
| 5623 | int rc; |
| 5624 | struct pqi_scsi_dev *device; |
| 5625 | struct pqi_stream_data *pqi_stream_data; |
| 5626 | struct pqi_scsi_dev_raid_map_data rmd; |
| 5627 | |
| 5628 | if (!ctrl_info->enable_stream_detection) |
| 5629 | return false; |
| 5630 | |
| 5631 | rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); |
| 5632 | if (rc) |
| 5633 | return false; |
| 5634 | |
| 5635 | /* Check writes only. */ |
| 5636 | if (!rmd.is_write) |
| 5637 | return false; |
| 5638 | |
| 5639 | device = scmd->device->hostdata; |
| 5640 | |
| 5641 | /* Check for RAID 5/6 streams. */ |
| 5642 | if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6) |
| 5643 | return false; |
| 5644 | |
| 5645 | /* |
| 5646 | * If controller does not support AIO RAID{5,6} writes, need to send |
| 5647 | * requests down non-AIO path. |
| 5648 | */ |
| 5649 | if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) || |
| 5650 | (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes)) |
| 5651 | return true; |
| 5652 | |
| 5653 | lru_index = 0; |
| 5654 | oldest_jiffies = INT_MAX; |
| 5655 | for (i = 0; i < NUM_STREAMS_PER_LUN; i++) { |
| 5656 | pqi_stream_data = &device->stream_data[i]; |
| 5657 | /* |
| 5658 | * Check for adjacent request or request is within |
| 5659 | * the previous request. |
| 5660 | */ |
| 5661 | if ((pqi_stream_data->next_lba && |
| 5662 | rmd.first_block >= pqi_stream_data->next_lba) && |
| 5663 | rmd.first_block <= pqi_stream_data->next_lba + |
| 5664 | rmd.block_cnt) { |
| 5665 | pqi_stream_data->next_lba = rmd.first_block + |
| 5666 | rmd.block_cnt; |
| 5667 | pqi_stream_data->last_accessed = jiffies; |
| 5668 | return true; |
| 5669 | } |
| 5670 | |
| 5671 | /* unused entry */ |
| 5672 | if (pqi_stream_data->last_accessed == 0) { |
| 5673 | lru_index = i; |
| 5674 | break; |
| 5675 | } |
| 5676 | |
| 5677 | /* Find entry with oldest last accessed time. */ |
| 5678 | if (pqi_stream_data->last_accessed <= oldest_jiffies) { |
| 5679 | oldest_jiffies = pqi_stream_data->last_accessed; |
| 5680 | lru_index = i; |
| 5681 | } |
| 5682 | } |
| 5683 | |
| 5684 | /* Set LRU entry. */ |
| 5685 | pqi_stream_data = &device->stream_data[lru_index]; |
| 5686 | pqi_stream_data->last_accessed = jiffies; |
| 5687 | pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; |
| 5688 | |
| 5689 | return false; |
| 5690 | } |
| 5691 | |
| 5692 | static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) |
| 5693 | { |
| 5694 | int rc; |
| 5695 | struct pqi_ctrl_info *ctrl_info; |
| 5696 | struct pqi_scsi_dev *device; |
| 5697 | u16 hw_queue; |
| 5698 | struct pqi_queue_group *queue_group; |
| 5699 | bool raid_bypassed; |
| 5700 | |
| 5701 | device = scmd->device->hostdata; |
| 5702 | |
| 5703 | if (!device) { |
| 5704 | set_host_byte(scmd, DID_NO_CONNECT); |
| 5705 | pqi_scsi_done(scmd); |
| 5706 | return 0; |
| 5707 | } |
| 5708 | |
| 5709 | atomic_inc(&device->scsi_cmds_outstanding); |
| 5710 | |
| 5711 | ctrl_info = shost_to_hba(shost); |
| 5712 | |
| 5713 | if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) { |
| 5714 | set_host_byte(scmd, DID_NO_CONNECT); |
| 5715 | pqi_scsi_done(scmd); |
| 5716 | return 0; |
| 5717 | } |
| 5718 | |
| 5719 | if (pqi_ctrl_blocked(ctrl_info)) { |
| 5720 | rc = SCSI_MLQUEUE_HOST_BUSY; |
| 5721 | goto out; |
| 5722 | } |
| 5723 | |
| 5724 | /* |
| 5725 | * This is necessary because the SML doesn't zero out this field during |
| 5726 | * error recovery. |
| 5727 | */ |
| 5728 | scmd->result = 0; |
| 5729 | |
| 5730 | hw_queue = pqi_get_hw_queue(ctrl_info, scmd); |
| 5731 | queue_group = &ctrl_info->queue_groups[hw_queue]; |
| 5732 | |
| 5733 | if (pqi_is_logical_device(device)) { |
| 5734 | raid_bypassed = false; |
| 5735 | if (device->raid_bypass_enabled && |
| 5736 | pqi_is_bypass_eligible_request(scmd) && |
| 5737 | !pqi_is_parity_write_stream(ctrl_info, scmd)) { |
| 5738 | rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); |
| 5739 | if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { |
| 5740 | raid_bypassed = true; |
| 5741 | atomic_inc(&device->raid_bypass_cnt); |
| 5742 | } |
| 5743 | } |
| 5744 | if (!raid_bypassed) |
| 5745 | rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); |
| 5746 | } else { |
| 5747 | if (device->aio_enabled) |
| 5748 | rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); |
| 5749 | else |
| 5750 | rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); |
| 5751 | } |
| 5752 | |
| 5753 | out: |
| 5754 | if (rc) |
| 5755 | atomic_dec(&device->scsi_cmds_outstanding); |
| 5756 | |
| 5757 | return rc; |
| 5758 | } |
| 5759 | |
| 5760 | static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, |
| 5761 | struct pqi_queue_group *queue_group) |
| 5762 | { |
| 5763 | unsigned int path; |
| 5764 | unsigned long flags; |
| 5765 | bool list_is_empty; |
| 5766 | |
| 5767 | for (path = 0; path < 2; path++) { |
| 5768 | while (1) { |
| 5769 | spin_lock_irqsave( |
| 5770 | &queue_group->submit_lock[path], flags); |
| 5771 | list_is_empty = |
| 5772 | list_empty(&queue_group->request_list[path]); |
| 5773 | spin_unlock_irqrestore( |
| 5774 | &queue_group->submit_lock[path], flags); |
| 5775 | if (list_is_empty) |
| 5776 | break; |
| 5777 | pqi_check_ctrl_health(ctrl_info); |
| 5778 | if (pqi_ctrl_offline(ctrl_info)) |
| 5779 | return -ENXIO; |
| 5780 | usleep_range(1000, 2000); |
| 5781 | } |
| 5782 | } |
| 5783 | |
| 5784 | return 0; |
| 5785 | } |
| 5786 | |
| 5787 | static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) |
| 5788 | { |
| 5789 | int rc; |
| 5790 | unsigned int i; |
| 5791 | unsigned int path; |
| 5792 | struct pqi_queue_group *queue_group; |
| 5793 | pqi_index_t iq_pi; |
| 5794 | pqi_index_t iq_ci; |
| 5795 | |
| 5796 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
| 5797 | queue_group = &ctrl_info->queue_groups[i]; |
| 5798 | |
| 5799 | rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); |
| 5800 | if (rc) |
| 5801 | return rc; |
| 5802 | |
| 5803 | for (path = 0; path < 2; path++) { |
| 5804 | iq_pi = queue_group->iq_pi_copy[path]; |
| 5805 | |
| 5806 | while (1) { |
| 5807 | iq_ci = readl(queue_group->iq_ci[path]); |
| 5808 | if (iq_ci == iq_pi) |
| 5809 | break; |
| 5810 | pqi_check_ctrl_health(ctrl_info); |
| 5811 | if (pqi_ctrl_offline(ctrl_info)) |
| 5812 | return -ENXIO; |
| 5813 | usleep_range(1000, 2000); |
| 5814 | } |
| 5815 | } |
| 5816 | } |
| 5817 | |
| 5818 | return 0; |
| 5819 | } |
| 5820 | |
| 5821 | static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, |
| 5822 | struct pqi_scsi_dev *device) |
| 5823 | { |
| 5824 | unsigned int i; |
| 5825 | unsigned int path; |
| 5826 | struct pqi_queue_group *queue_group; |
| 5827 | unsigned long flags; |
| 5828 | struct pqi_io_request *io_request; |
| 5829 | struct pqi_io_request *next; |
| 5830 | struct scsi_cmnd *scmd; |
| 5831 | struct pqi_scsi_dev *scsi_device; |
| 5832 | |
| 5833 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
| 5834 | queue_group = &ctrl_info->queue_groups[i]; |
| 5835 | |
| 5836 | for (path = 0; path < 2; path++) { |
| 5837 | spin_lock_irqsave( |
| 5838 | &queue_group->submit_lock[path], flags); |
| 5839 | |
| 5840 | list_for_each_entry_safe(io_request, next, |
| 5841 | &queue_group->request_list[path], |
| 5842 | request_list_entry) { |
| 5843 | |
| 5844 | scmd = io_request->scmd; |
| 5845 | if (!scmd) |
| 5846 | continue; |
| 5847 | |
| 5848 | scsi_device = scmd->device->hostdata; |
| 5849 | if (scsi_device != device) |
| 5850 | continue; |
| 5851 | |
| 5852 | list_del(&io_request->request_list_entry); |
| 5853 | set_host_byte(scmd, DID_RESET); |
| 5854 | pqi_free_io_request(io_request); |
| 5855 | scsi_dma_unmap(scmd); |
| 5856 | pqi_scsi_done(scmd); |
| 5857 | } |
| 5858 | |
| 5859 | spin_unlock_irqrestore( |
| 5860 | &queue_group->submit_lock[path], flags); |
| 5861 | } |
| 5862 | } |
| 5863 | } |
| 5864 | |
| 5865 | #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10 |
| 5866 | |
| 5867 | static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, |
| 5868 | struct pqi_scsi_dev *device, unsigned long timeout_msecs) |
| 5869 | { |
| 5870 | int cmds_outstanding; |
| 5871 | unsigned long start_jiffies; |
| 5872 | unsigned long warning_timeout; |
| 5873 | unsigned long msecs_waiting; |
| 5874 | |
| 5875 | start_jiffies = jiffies; |
| 5876 | warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies; |
| 5877 | |
| 5878 | while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) { |
| 5879 | pqi_check_ctrl_health(ctrl_info); |
| 5880 | if (pqi_ctrl_offline(ctrl_info)) |
| 5881 | return -ENXIO; |
| 5882 | msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies); |
| 5883 | if (msecs_waiting > timeout_msecs) { |
| 5884 | dev_err(&ctrl_info->pci_dev->dev, |
| 5885 | "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n", |
| 5886 | ctrl_info->scsi_host->host_no, device->bus, device->target, |
| 5887 | device->lun, msecs_waiting / 1000, cmds_outstanding); |
| 5888 | return -ETIMEDOUT; |
| 5889 | } |
| 5890 | if (time_after(jiffies, warning_timeout)) { |
| 5891 | dev_warn(&ctrl_info->pci_dev->dev, |
| 5892 | "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n", |
| 5893 | ctrl_info->scsi_host->host_no, device->bus, device->target, |
| 5894 | device->lun, msecs_waiting / 1000, cmds_outstanding); |
| 5895 | warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies; |
| 5896 | } |
| 5897 | usleep_range(1000, 2000); |
| 5898 | } |
| 5899 | |
| 5900 | return 0; |
| 5901 | } |
| 5902 | |
| 5903 | static void pqi_lun_reset_complete(struct pqi_io_request *io_request, |
| 5904 | void *context) |
| 5905 | { |
| 5906 | struct completion *waiting = context; |
| 5907 | |
| 5908 | complete(waiting); |
| 5909 | } |
| 5910 | |
| 5911 | #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10 |
| 5912 | |
| 5913 | static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, |
| 5914 | struct pqi_scsi_dev *device, struct completion *wait) |
| 5915 | { |
| 5916 | int rc; |
| 5917 | unsigned int wait_secs; |
| 5918 | |
| 5919 | wait_secs = 0; |
| 5920 | |
| 5921 | while (1) { |
| 5922 | if (wait_for_completion_io_timeout(wait, |
| 5923 | PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) { |
| 5924 | rc = 0; |
| 5925 | break; |
| 5926 | } |
| 5927 | |
| 5928 | pqi_check_ctrl_health(ctrl_info); |
| 5929 | if (pqi_ctrl_offline(ctrl_info)) { |
| 5930 | rc = -ENXIO; |
| 5931 | break; |
| 5932 | } |
| 5933 | |
| 5934 | wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS; |
| 5935 | |
| 5936 | dev_warn(&ctrl_info->pci_dev->dev, |
| 5937 | "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete\n", |
| 5938 | ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, |
| 5939 | wait_secs); |
| 5940 | } |
| 5941 | |
| 5942 | return rc; |
| 5943 | } |
| 5944 | |
| 5945 | #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30 |
| 5946 | |
| 5947 | static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) |
| 5948 | { |
| 5949 | int rc; |
| 5950 | struct pqi_io_request *io_request; |
| 5951 | DECLARE_COMPLETION_ONSTACK(wait); |
| 5952 | struct pqi_task_management_request *request; |
| 5953 | |
| 5954 | io_request = pqi_alloc_io_request(ctrl_info); |
| 5955 | io_request->io_complete_callback = pqi_lun_reset_complete; |
| 5956 | io_request->context = &wait; |
| 5957 | |
| 5958 | request = io_request->iu; |
| 5959 | memset(request, 0, sizeof(*request)); |
| 5960 | |
| 5961 | request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; |
| 5962 | put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, |
| 5963 | &request->header.iu_length); |
| 5964 | put_unaligned_le16(io_request->index, &request->request_id); |
| 5965 | memcpy(request->lun_number, device->scsi3addr, |
| 5966 | sizeof(request->lun_number)); |
| 5967 | request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; |
| 5968 | if (ctrl_info->tmf_iu_timeout_supported) |
| 5969 | put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout); |
| 5970 | |
| 5971 | pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, |
| 5972 | io_request); |
| 5973 | |
| 5974 | rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); |
| 5975 | if (rc == 0) |
| 5976 | rc = io_request->status; |
| 5977 | |
| 5978 | pqi_free_io_request(io_request); |
| 5979 | |
| 5980 | return rc; |
| 5981 | } |
| 5982 | |
| 5983 | #define PQI_LUN_RESET_RETRIES 3 |
| 5984 | #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000) |
| 5985 | #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000) |
| 5986 | #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000) |
| 5987 | |
| 5988 | static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) |
| 5989 | { |
| 5990 | int reset_rc; |
| 5991 | int wait_rc; |
| 5992 | unsigned int retries; |
| 5993 | unsigned long timeout_msecs; |
| 5994 | |
| 5995 | for (retries = 0;;) { |
| 5996 | reset_rc = pqi_lun_reset(ctrl_info, device); |
| 5997 | if (reset_rc == 0 || ++retries > PQI_LUN_RESET_RETRIES) |
| 5998 | break; |
| 5999 | msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); |
| 6000 | } |
| 6001 | |
| 6002 | timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS : |
| 6003 | PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS; |
| 6004 | |
| 6005 | wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs); |
| 6006 | if (wait_rc && reset_rc == 0) |
| 6007 | reset_rc = wait_rc; |
| 6008 | |
| 6009 | return reset_rc == 0 ? SUCCESS : FAILED; |
| 6010 | } |
| 6011 | |
| 6012 | static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, |
| 6013 | struct pqi_scsi_dev *device) |
| 6014 | { |
| 6015 | int rc; |
| 6016 | |
| 6017 | pqi_ctrl_block_requests(ctrl_info); |
| 6018 | pqi_ctrl_wait_until_quiesced(ctrl_info); |
| 6019 | pqi_fail_io_queued_for_device(ctrl_info, device); |
| 6020 | rc = pqi_wait_until_inbound_queues_empty(ctrl_info); |
| 6021 | if (rc) |
| 6022 | rc = FAILED; |
| 6023 | else |
| 6024 | rc = pqi_lun_reset_with_retries(ctrl_info, device); |
| 6025 | pqi_ctrl_unblock_requests(ctrl_info); |
| 6026 | |
| 6027 | return rc; |
| 6028 | } |
| 6029 | |
| 6030 | static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) |
| 6031 | { |
| 6032 | int rc; |
| 6033 | struct Scsi_Host *shost; |
| 6034 | struct pqi_ctrl_info *ctrl_info; |
| 6035 | struct pqi_scsi_dev *device; |
| 6036 | |
| 6037 | shost = scmd->device->host; |
| 6038 | ctrl_info = shost_to_hba(shost); |
| 6039 | device = scmd->device->hostdata; |
| 6040 | |
| 6041 | mutex_lock(&ctrl_info->lun_reset_mutex); |
| 6042 | |
| 6043 | dev_err(&ctrl_info->pci_dev->dev, |
| 6044 | "resetting scsi %d:%d:%d:%d\n", |
| 6045 | shost->host_no, device->bus, device->target, device->lun); |
| 6046 | |
| 6047 | pqi_check_ctrl_health(ctrl_info); |
| 6048 | if (pqi_ctrl_offline(ctrl_info)) |
| 6049 | rc = FAILED; |
| 6050 | else |
| 6051 | rc = pqi_device_reset(ctrl_info, device); |
| 6052 | |
| 6053 | dev_err(&ctrl_info->pci_dev->dev, |
| 6054 | "reset of scsi %d:%d:%d:%d: %s\n", |
| 6055 | shost->host_no, device->bus, device->target, device->lun, |
| 6056 | rc == SUCCESS ? "SUCCESS" : "FAILED"); |
| 6057 | |
| 6058 | mutex_unlock(&ctrl_info->lun_reset_mutex); |
| 6059 | |
| 6060 | return rc; |
| 6061 | } |
| 6062 | |
| 6063 | static int pqi_slave_alloc(struct scsi_device *sdev) |
| 6064 | { |
| 6065 | struct pqi_scsi_dev *device; |
| 6066 | unsigned long flags; |
| 6067 | struct pqi_ctrl_info *ctrl_info; |
| 6068 | struct scsi_target *starget; |
| 6069 | struct sas_rphy *rphy; |
| 6070 | |
| 6071 | ctrl_info = shost_to_hba(sdev->host); |
| 6072 | |
| 6073 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
| 6074 | |
| 6075 | if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { |
| 6076 | starget = scsi_target(sdev); |
| 6077 | rphy = target_to_rphy(starget); |
| 6078 | device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); |
| 6079 | if (device) { |
| 6080 | device->target = sdev_id(sdev); |
| 6081 | device->lun = sdev->lun; |
| 6082 | device->target_lun_valid = true; |
| 6083 | } |
| 6084 | } else { |
| 6085 | device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), |
| 6086 | sdev_id(sdev), sdev->lun); |
| 6087 | } |
| 6088 | |
| 6089 | if (device) { |
| 6090 | sdev->hostdata = device; |
| 6091 | device->sdev = sdev; |
| 6092 | if (device->queue_depth) { |
| 6093 | device->advertised_queue_depth = device->queue_depth; |
| 6094 | scsi_change_queue_depth(sdev, |
| 6095 | device->advertised_queue_depth); |
| 6096 | } |
| 6097 | if (pqi_is_logical_device(device)) { |
| 6098 | pqi_disable_write_same(sdev); |
| 6099 | } else { |
| 6100 | sdev->allow_restart = 1; |
| 6101 | if (device->device_type == SA_DEVICE_TYPE_NVME) |
| 6102 | pqi_disable_write_same(sdev); |
| 6103 | } |
| 6104 | } |
| 6105 | |
| 6106 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 6107 | |
| 6108 | return 0; |
| 6109 | } |
| 6110 | |
| 6111 | static int pqi_map_queues(struct Scsi_Host *shost) |
| 6112 | { |
| 6113 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); |
| 6114 | |
| 6115 | return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], |
| 6116 | ctrl_info->pci_dev, 0); |
| 6117 | } |
| 6118 | |
| 6119 | static int pqi_slave_configure(struct scsi_device *sdev) |
| 6120 | { |
| 6121 | struct pqi_scsi_dev *device; |
| 6122 | |
| 6123 | device = sdev->hostdata; |
| 6124 | device->devtype = sdev->type; |
| 6125 | |
| 6126 | return 0; |
| 6127 | } |
| 6128 | |
| 6129 | static void pqi_slave_destroy(struct scsi_device *sdev) |
| 6130 | { |
| 6131 | unsigned long flags; |
| 6132 | struct pqi_scsi_dev *device; |
| 6133 | struct pqi_ctrl_info *ctrl_info; |
| 6134 | |
| 6135 | ctrl_info = shost_to_hba(sdev->host); |
| 6136 | |
| 6137 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
| 6138 | |
| 6139 | device = sdev->hostdata; |
| 6140 | if (device) { |
| 6141 | sdev->hostdata = NULL; |
| 6142 | if (!list_empty(&device->scsi_device_list_entry)) |
| 6143 | list_del(&device->scsi_device_list_entry); |
| 6144 | } |
| 6145 | |
| 6146 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 6147 | |
| 6148 | if (device) { |
| 6149 | pqi_dev_info(ctrl_info, "removed", device); |
| 6150 | pqi_free_device(device); |
| 6151 | } |
| 6152 | } |
| 6153 | |
| 6154 | static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) |
| 6155 | { |
| 6156 | struct pci_dev *pci_dev; |
| 6157 | u32 subsystem_vendor; |
| 6158 | u32 subsystem_device; |
| 6159 | cciss_pci_info_struct pciinfo; |
| 6160 | |
| 6161 | if (!arg) |
| 6162 | return -EINVAL; |
| 6163 | |
| 6164 | pci_dev = ctrl_info->pci_dev; |
| 6165 | |
| 6166 | pciinfo.domain = pci_domain_nr(pci_dev->bus); |
| 6167 | pciinfo.bus = pci_dev->bus->number; |
| 6168 | pciinfo.dev_fn = pci_dev->devfn; |
| 6169 | subsystem_vendor = pci_dev->subsystem_vendor; |
| 6170 | subsystem_device = pci_dev->subsystem_device; |
| 6171 | pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor; |
| 6172 | |
| 6173 | if (copy_to_user(arg, &pciinfo, sizeof(pciinfo))) |
| 6174 | return -EFAULT; |
| 6175 | |
| 6176 | return 0; |
| 6177 | } |
| 6178 | |
| 6179 | static int pqi_getdrivver_ioctl(void __user *arg) |
| 6180 | { |
| 6181 | u32 version; |
| 6182 | |
| 6183 | if (!arg) |
| 6184 | return -EINVAL; |
| 6185 | |
| 6186 | version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | |
| 6187 | (DRIVER_RELEASE << 16) | DRIVER_REVISION; |
| 6188 | |
| 6189 | if (copy_to_user(arg, &version, sizeof(version))) |
| 6190 | return -EFAULT; |
| 6191 | |
| 6192 | return 0; |
| 6193 | } |
| 6194 | |
| 6195 | struct ciss_error_info { |
| 6196 | u8 scsi_status; |
| 6197 | int command_status; |
| 6198 | size_t sense_data_length; |
| 6199 | }; |
| 6200 | |
| 6201 | static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, |
| 6202 | struct ciss_error_info *ciss_error_info) |
| 6203 | { |
| 6204 | int ciss_cmd_status; |
| 6205 | size_t sense_data_length; |
| 6206 | |
| 6207 | switch (pqi_error_info->data_out_result) { |
| 6208 | case PQI_DATA_IN_OUT_GOOD: |
| 6209 | ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; |
| 6210 | break; |
| 6211 | case PQI_DATA_IN_OUT_UNDERFLOW: |
| 6212 | ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; |
| 6213 | break; |
| 6214 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: |
| 6215 | ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN; |
| 6216 | break; |
| 6217 | case PQI_DATA_IN_OUT_PROTOCOL_ERROR: |
| 6218 | case PQI_DATA_IN_OUT_BUFFER_ERROR: |
| 6219 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: |
| 6220 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: |
| 6221 | case PQI_DATA_IN_OUT_ERROR: |
| 6222 | ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; |
| 6223 | break; |
| 6224 | case PQI_DATA_IN_OUT_HARDWARE_ERROR: |
| 6225 | case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: |
| 6226 | case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: |
| 6227 | case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: |
| 6228 | case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: |
| 6229 | case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: |
| 6230 | case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: |
| 6231 | case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: |
| 6232 | case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: |
| 6233 | case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: |
| 6234 | ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; |
| 6235 | break; |
| 6236 | case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: |
| 6237 | ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; |
| 6238 | break; |
| 6239 | case PQI_DATA_IN_OUT_ABORTED: |
| 6240 | ciss_cmd_status = CISS_CMD_STATUS_ABORTED; |
| 6241 | break; |
| 6242 | case PQI_DATA_IN_OUT_TIMEOUT: |
| 6243 | ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; |
| 6244 | break; |
| 6245 | default: |
| 6246 | ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; |
| 6247 | break; |
| 6248 | } |
| 6249 | |
| 6250 | sense_data_length = |
| 6251 | get_unaligned_le16(&pqi_error_info->sense_data_length); |
| 6252 | if (sense_data_length == 0) |
| 6253 | sense_data_length = |
| 6254 | get_unaligned_le16(&pqi_error_info->response_data_length); |
| 6255 | if (sense_data_length) |
| 6256 | if (sense_data_length > sizeof(pqi_error_info->data)) |
| 6257 | sense_data_length = sizeof(pqi_error_info->data); |
| 6258 | |
| 6259 | ciss_error_info->scsi_status = pqi_error_info->status; |
| 6260 | ciss_error_info->command_status = ciss_cmd_status; |
| 6261 | ciss_error_info->sense_data_length = sense_data_length; |
| 6262 | } |
| 6263 | |
| 6264 | static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) |
| 6265 | { |
| 6266 | int rc; |
| 6267 | char *kernel_buffer = NULL; |
| 6268 | u16 iu_length; |
| 6269 | size_t sense_data_length; |
| 6270 | IOCTL_Command_struct iocommand; |
| 6271 | struct pqi_raid_path_request request; |
| 6272 | struct pqi_raid_error_info pqi_error_info; |
| 6273 | struct ciss_error_info ciss_error_info; |
| 6274 | |
| 6275 | if (pqi_ctrl_offline(ctrl_info)) |
| 6276 | return -ENXIO; |
| 6277 | if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info)) |
| 6278 | return -EBUSY; |
| 6279 | if (!arg) |
| 6280 | return -EINVAL; |
| 6281 | if (!capable(CAP_SYS_RAWIO)) |
| 6282 | return -EPERM; |
| 6283 | if (copy_from_user(&iocommand, arg, sizeof(iocommand))) |
| 6284 | return -EFAULT; |
| 6285 | if (iocommand.buf_size < 1 && |
| 6286 | iocommand.Request.Type.Direction != XFER_NONE) |
| 6287 | return -EINVAL; |
| 6288 | if (iocommand.Request.CDBLen > sizeof(request.cdb)) |
| 6289 | return -EINVAL; |
| 6290 | if (iocommand.Request.Type.Type != TYPE_CMD) |
| 6291 | return -EINVAL; |
| 6292 | |
| 6293 | switch (iocommand.Request.Type.Direction) { |
| 6294 | case XFER_NONE: |
| 6295 | case XFER_WRITE: |
| 6296 | case XFER_READ: |
| 6297 | case XFER_READ | XFER_WRITE: |
| 6298 | break; |
| 6299 | default: |
| 6300 | return -EINVAL; |
| 6301 | } |
| 6302 | |
| 6303 | if (iocommand.buf_size > 0) { |
| 6304 | kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); |
| 6305 | if (!kernel_buffer) |
| 6306 | return -ENOMEM; |
| 6307 | if (iocommand.Request.Type.Direction & XFER_WRITE) { |
| 6308 | if (copy_from_user(kernel_buffer, iocommand.buf, |
| 6309 | iocommand.buf_size)) { |
| 6310 | rc = -EFAULT; |
| 6311 | goto out; |
| 6312 | } |
| 6313 | } else { |
| 6314 | memset(kernel_buffer, 0, iocommand.buf_size); |
| 6315 | } |
| 6316 | } |
| 6317 | |
| 6318 | memset(&request, 0, sizeof(request)); |
| 6319 | |
| 6320 | request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; |
| 6321 | iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - |
| 6322 | PQI_REQUEST_HEADER_LENGTH; |
| 6323 | memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, |
| 6324 | sizeof(request.lun_number)); |
| 6325 | memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); |
| 6326 | request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; |
| 6327 | |
| 6328 | switch (iocommand.Request.Type.Direction) { |
| 6329 | case XFER_NONE: |
| 6330 | request.data_direction = SOP_NO_DIRECTION_FLAG; |
| 6331 | break; |
| 6332 | case XFER_WRITE: |
| 6333 | request.data_direction = SOP_WRITE_FLAG; |
| 6334 | break; |
| 6335 | case XFER_READ: |
| 6336 | request.data_direction = SOP_READ_FLAG; |
| 6337 | break; |
| 6338 | case XFER_READ | XFER_WRITE: |
| 6339 | request.data_direction = SOP_BIDIRECTIONAL; |
| 6340 | break; |
| 6341 | } |
| 6342 | |
| 6343 | request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; |
| 6344 | |
| 6345 | if (iocommand.buf_size > 0) { |
| 6346 | put_unaligned_le32(iocommand.buf_size, &request.buffer_length); |
| 6347 | |
| 6348 | rc = pqi_map_single(ctrl_info->pci_dev, |
| 6349 | &request.sg_descriptors[0], kernel_buffer, |
| 6350 | iocommand.buf_size, DMA_BIDIRECTIONAL); |
| 6351 | if (rc) |
| 6352 | goto out; |
| 6353 | |
| 6354 | iu_length += sizeof(request.sg_descriptors[0]); |
| 6355 | } |
| 6356 | |
| 6357 | put_unaligned_le16(iu_length, &request.header.iu_length); |
| 6358 | |
| 6359 | if (ctrl_info->raid_iu_timeout_supported) |
| 6360 | put_unaligned_le32(iocommand.Request.Timeout, &request.timeout); |
| 6361 | |
| 6362 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, |
| 6363 | PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info); |
| 6364 | |
| 6365 | if (iocommand.buf_size > 0) |
| 6366 | pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, |
| 6367 | DMA_BIDIRECTIONAL); |
| 6368 | |
| 6369 | memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); |
| 6370 | |
| 6371 | if (rc == 0) { |
| 6372 | pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); |
| 6373 | iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; |
| 6374 | iocommand.error_info.CommandStatus = |
| 6375 | ciss_error_info.command_status; |
| 6376 | sense_data_length = ciss_error_info.sense_data_length; |
| 6377 | if (sense_data_length) { |
| 6378 | if (sense_data_length > |
| 6379 | sizeof(iocommand.error_info.SenseInfo)) |
| 6380 | sense_data_length = |
| 6381 | sizeof(iocommand.error_info.SenseInfo); |
| 6382 | memcpy(iocommand.error_info.SenseInfo, |
| 6383 | pqi_error_info.data, sense_data_length); |
| 6384 | iocommand.error_info.SenseLen = sense_data_length; |
| 6385 | } |
| 6386 | } |
| 6387 | |
| 6388 | if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { |
| 6389 | rc = -EFAULT; |
| 6390 | goto out; |
| 6391 | } |
| 6392 | |
| 6393 | if (rc == 0 && iocommand.buf_size > 0 && |
| 6394 | (iocommand.Request.Type.Direction & XFER_READ)) { |
| 6395 | if (copy_to_user(iocommand.buf, kernel_buffer, |
| 6396 | iocommand.buf_size)) { |
| 6397 | rc = -EFAULT; |
| 6398 | } |
| 6399 | } |
| 6400 | |
| 6401 | out: |
| 6402 | kfree(kernel_buffer); |
| 6403 | |
| 6404 | return rc; |
| 6405 | } |
| 6406 | |
| 6407 | static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, |
| 6408 | void __user *arg) |
| 6409 | { |
| 6410 | int rc; |
| 6411 | struct pqi_ctrl_info *ctrl_info; |
| 6412 | |
| 6413 | ctrl_info = shost_to_hba(sdev->host); |
| 6414 | |
| 6415 | switch (cmd) { |
| 6416 | case CCISS_DEREGDISK: |
| 6417 | case CCISS_REGNEWDISK: |
| 6418 | case CCISS_REGNEWD: |
| 6419 | rc = pqi_scan_scsi_devices(ctrl_info); |
| 6420 | break; |
| 6421 | case CCISS_GETPCIINFO: |
| 6422 | rc = pqi_getpciinfo_ioctl(ctrl_info, arg); |
| 6423 | break; |
| 6424 | case CCISS_GETDRIVVER: |
| 6425 | rc = pqi_getdrivver_ioctl(arg); |
| 6426 | break; |
| 6427 | case CCISS_PASSTHRU: |
| 6428 | rc = pqi_passthru_ioctl(ctrl_info, arg); |
| 6429 | break; |
| 6430 | default: |
| 6431 | rc = -EINVAL; |
| 6432 | break; |
| 6433 | } |
| 6434 | |
| 6435 | return rc; |
| 6436 | } |
| 6437 | |
| 6438 | static ssize_t pqi_firmware_version_show(struct device *dev, |
| 6439 | struct device_attribute *attr, char *buffer) |
| 6440 | { |
| 6441 | struct Scsi_Host *shost; |
| 6442 | struct pqi_ctrl_info *ctrl_info; |
| 6443 | |
| 6444 | shost = class_to_shost(dev); |
| 6445 | ctrl_info = shost_to_hba(shost); |
| 6446 | |
| 6447 | return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); |
| 6448 | } |
| 6449 | |
| 6450 | static ssize_t pqi_driver_version_show(struct device *dev, |
| 6451 | struct device_attribute *attr, char *buffer) |
| 6452 | { |
| 6453 | return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP); |
| 6454 | } |
| 6455 | |
| 6456 | static ssize_t pqi_serial_number_show(struct device *dev, |
| 6457 | struct device_attribute *attr, char *buffer) |
| 6458 | { |
| 6459 | struct Scsi_Host *shost; |
| 6460 | struct pqi_ctrl_info *ctrl_info; |
| 6461 | |
| 6462 | shost = class_to_shost(dev); |
| 6463 | ctrl_info = shost_to_hba(shost); |
| 6464 | |
| 6465 | return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); |
| 6466 | } |
| 6467 | |
| 6468 | static ssize_t pqi_model_show(struct device *dev, |
| 6469 | struct device_attribute *attr, char *buffer) |
| 6470 | { |
| 6471 | struct Scsi_Host *shost; |
| 6472 | struct pqi_ctrl_info *ctrl_info; |
| 6473 | |
| 6474 | shost = class_to_shost(dev); |
| 6475 | ctrl_info = shost_to_hba(shost); |
| 6476 | |
| 6477 | return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); |
| 6478 | } |
| 6479 | |
| 6480 | static ssize_t pqi_vendor_show(struct device *dev, |
| 6481 | struct device_attribute *attr, char *buffer) |
| 6482 | { |
| 6483 | struct Scsi_Host *shost; |
| 6484 | struct pqi_ctrl_info *ctrl_info; |
| 6485 | |
| 6486 | shost = class_to_shost(dev); |
| 6487 | ctrl_info = shost_to_hba(shost); |
| 6488 | |
| 6489 | return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); |
| 6490 | } |
| 6491 | |
| 6492 | static ssize_t pqi_host_rescan_store(struct device *dev, |
| 6493 | struct device_attribute *attr, const char *buffer, size_t count) |
| 6494 | { |
| 6495 | struct Scsi_Host *shost = class_to_shost(dev); |
| 6496 | |
| 6497 | pqi_scan_start(shost); |
| 6498 | |
| 6499 | return count; |
| 6500 | } |
| 6501 | |
| 6502 | static ssize_t pqi_lockup_action_show(struct device *dev, |
| 6503 | struct device_attribute *attr, char *buffer) |
| 6504 | { |
| 6505 | int count = 0; |
| 6506 | unsigned int i; |
| 6507 | |
| 6508 | for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { |
| 6509 | if (pqi_lockup_actions[i].action == pqi_lockup_action) |
| 6510 | count += scnprintf(buffer + count, PAGE_SIZE - count, |
| 6511 | "[%s] ", pqi_lockup_actions[i].name); |
| 6512 | else |
| 6513 | count += scnprintf(buffer + count, PAGE_SIZE - count, |
| 6514 | "%s ", pqi_lockup_actions[i].name); |
| 6515 | } |
| 6516 | |
| 6517 | count += scnprintf(buffer + count, PAGE_SIZE - count, "\n"); |
| 6518 | |
| 6519 | return count; |
| 6520 | } |
| 6521 | |
| 6522 | static ssize_t pqi_lockup_action_store(struct device *dev, |
| 6523 | struct device_attribute *attr, const char *buffer, size_t count) |
| 6524 | { |
| 6525 | unsigned int i; |
| 6526 | char *action_name; |
| 6527 | char action_name_buffer[32]; |
| 6528 | |
| 6529 | strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer)); |
| 6530 | action_name = strstrip(action_name_buffer); |
| 6531 | |
| 6532 | for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { |
| 6533 | if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { |
| 6534 | pqi_lockup_action = pqi_lockup_actions[i].action; |
| 6535 | return count; |
| 6536 | } |
| 6537 | } |
| 6538 | |
| 6539 | return -EINVAL; |
| 6540 | } |
| 6541 | |
| 6542 | static ssize_t pqi_host_enable_stream_detection_show(struct device *dev, |
| 6543 | struct device_attribute *attr, char *buffer) |
| 6544 | { |
| 6545 | struct Scsi_Host *shost = class_to_shost(dev); |
| 6546 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); |
| 6547 | |
| 6548 | return scnprintf(buffer, 10, "%x\n", |
| 6549 | ctrl_info->enable_stream_detection); |
| 6550 | } |
| 6551 | |
| 6552 | static ssize_t pqi_host_enable_stream_detection_store(struct device *dev, |
| 6553 | struct device_attribute *attr, const char *buffer, size_t count) |
| 6554 | { |
| 6555 | struct Scsi_Host *shost = class_to_shost(dev); |
| 6556 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); |
| 6557 | u8 set_stream_detection = 0; |
| 6558 | |
| 6559 | if (kstrtou8(buffer, 0, &set_stream_detection)) |
| 6560 | return -EINVAL; |
| 6561 | |
| 6562 | if (set_stream_detection > 0) |
| 6563 | set_stream_detection = 1; |
| 6564 | |
| 6565 | ctrl_info->enable_stream_detection = set_stream_detection; |
| 6566 | |
| 6567 | return count; |
| 6568 | } |
| 6569 | |
| 6570 | static ssize_t pqi_host_enable_r5_writes_show(struct device *dev, |
| 6571 | struct device_attribute *attr, char *buffer) |
| 6572 | { |
| 6573 | struct Scsi_Host *shost = class_to_shost(dev); |
| 6574 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); |
| 6575 | |
| 6576 | return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes); |
| 6577 | } |
| 6578 | |
| 6579 | static ssize_t pqi_host_enable_r5_writes_store(struct device *dev, |
| 6580 | struct device_attribute *attr, const char *buffer, size_t count) |
| 6581 | { |
| 6582 | struct Scsi_Host *shost = class_to_shost(dev); |
| 6583 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); |
| 6584 | u8 set_r5_writes = 0; |
| 6585 | |
| 6586 | if (kstrtou8(buffer, 0, &set_r5_writes)) |
| 6587 | return -EINVAL; |
| 6588 | |
| 6589 | if (set_r5_writes > 0) |
| 6590 | set_r5_writes = 1; |
| 6591 | |
| 6592 | ctrl_info->enable_r5_writes = set_r5_writes; |
| 6593 | |
| 6594 | return count; |
| 6595 | } |
| 6596 | |
| 6597 | static ssize_t pqi_host_enable_r6_writes_show(struct device *dev, |
| 6598 | struct device_attribute *attr, char *buffer) |
| 6599 | { |
| 6600 | struct Scsi_Host *shost = class_to_shost(dev); |
| 6601 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); |
| 6602 | |
| 6603 | return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes); |
| 6604 | } |
| 6605 | |
| 6606 | static ssize_t pqi_host_enable_r6_writes_store(struct device *dev, |
| 6607 | struct device_attribute *attr, const char *buffer, size_t count) |
| 6608 | { |
| 6609 | struct Scsi_Host *shost = class_to_shost(dev); |
| 6610 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); |
| 6611 | u8 set_r6_writes = 0; |
| 6612 | |
| 6613 | if (kstrtou8(buffer, 0, &set_r6_writes)) |
| 6614 | return -EINVAL; |
| 6615 | |
| 6616 | if (set_r6_writes > 0) |
| 6617 | set_r6_writes = 1; |
| 6618 | |
| 6619 | ctrl_info->enable_r6_writes = set_r6_writes; |
| 6620 | |
| 6621 | return count; |
| 6622 | } |
| 6623 | |
| 6624 | static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL); |
| 6625 | static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL); |
| 6626 | static DEVICE_ATTR(model, 0444, pqi_model_show, NULL); |
| 6627 | static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL); |
| 6628 | static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL); |
| 6629 | static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); |
| 6630 | static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show, |
| 6631 | pqi_lockup_action_store); |
| 6632 | static DEVICE_ATTR(enable_stream_detection, 0644, |
| 6633 | pqi_host_enable_stream_detection_show, |
| 6634 | pqi_host_enable_stream_detection_store); |
| 6635 | static DEVICE_ATTR(enable_r5_writes, 0644, |
| 6636 | pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store); |
| 6637 | static DEVICE_ATTR(enable_r6_writes, 0644, |
| 6638 | pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store); |
| 6639 | |
| 6640 | static struct device_attribute *pqi_shost_attrs[] = { |
| 6641 | &dev_attr_driver_version, |
| 6642 | &dev_attr_firmware_version, |
| 6643 | &dev_attr_model, |
| 6644 | &dev_attr_serial_number, |
| 6645 | &dev_attr_vendor, |
| 6646 | &dev_attr_rescan, |
| 6647 | &dev_attr_lockup_action, |
| 6648 | &dev_attr_enable_stream_detection, |
| 6649 | &dev_attr_enable_r5_writes, |
| 6650 | &dev_attr_enable_r6_writes, |
| 6651 | NULL |
| 6652 | }; |
| 6653 | |
| 6654 | static ssize_t pqi_unique_id_show(struct device *dev, |
| 6655 | struct device_attribute *attr, char *buffer) |
| 6656 | { |
| 6657 | struct pqi_ctrl_info *ctrl_info; |
| 6658 | struct scsi_device *sdev; |
| 6659 | struct pqi_scsi_dev *device; |
| 6660 | unsigned long flags; |
| 6661 | u8 unique_id[16]; |
| 6662 | |
| 6663 | sdev = to_scsi_device(dev); |
| 6664 | ctrl_info = shost_to_hba(sdev->host); |
| 6665 | |
| 6666 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
| 6667 | |
| 6668 | device = sdev->hostdata; |
| 6669 | if (!device) { |
| 6670 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 6671 | return -ENODEV; |
| 6672 | } |
| 6673 | |
| 6674 | if (device->is_physical_device) { |
| 6675 | memset(unique_id, 0, 8); |
| 6676 | memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid)); |
| 6677 | } else { |
| 6678 | memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); |
| 6679 | } |
| 6680 | |
| 6681 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 6682 | |
| 6683 | return scnprintf(buffer, PAGE_SIZE, |
| 6684 | "%02X%02X%02X%02X%02X%02X%02X%02X" |
| 6685 | "%02X%02X%02X%02X%02X%02X%02X%02X\n", |
| 6686 | unique_id[0], unique_id[1], unique_id[2], unique_id[3], |
| 6687 | unique_id[4], unique_id[5], unique_id[6], unique_id[7], |
| 6688 | unique_id[8], unique_id[9], unique_id[10], unique_id[11], |
| 6689 | unique_id[12], unique_id[13], unique_id[14], unique_id[15]); |
| 6690 | } |
| 6691 | |
| 6692 | static ssize_t pqi_lunid_show(struct device *dev, |
| 6693 | struct device_attribute *attr, char *buffer) |
| 6694 | { |
| 6695 | struct pqi_ctrl_info *ctrl_info; |
| 6696 | struct scsi_device *sdev; |
| 6697 | struct pqi_scsi_dev *device; |
| 6698 | unsigned long flags; |
| 6699 | u8 lunid[8]; |
| 6700 | |
| 6701 | sdev = to_scsi_device(dev); |
| 6702 | ctrl_info = shost_to_hba(sdev->host); |
| 6703 | |
| 6704 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
| 6705 | |
| 6706 | device = sdev->hostdata; |
| 6707 | if (!device) { |
| 6708 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 6709 | return -ENODEV; |
| 6710 | } |
| 6711 | |
| 6712 | memcpy(lunid, device->scsi3addr, sizeof(lunid)); |
| 6713 | |
| 6714 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 6715 | |
| 6716 | return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid); |
| 6717 | } |
| 6718 | |
| 6719 | #define MAX_PATHS 8 |
| 6720 | |
| 6721 | static ssize_t pqi_path_info_show(struct device *dev, |
| 6722 | struct device_attribute *attr, char *buf) |
| 6723 | { |
| 6724 | struct pqi_ctrl_info *ctrl_info; |
| 6725 | struct scsi_device *sdev; |
| 6726 | struct pqi_scsi_dev *device; |
| 6727 | unsigned long flags; |
| 6728 | int i; |
| 6729 | int output_len = 0; |
| 6730 | u8 box; |
| 6731 | u8 bay; |
| 6732 | u8 path_map_index; |
| 6733 | char *active; |
| 6734 | u8 phys_connector[2]; |
| 6735 | |
| 6736 | sdev = to_scsi_device(dev); |
| 6737 | ctrl_info = shost_to_hba(sdev->host); |
| 6738 | |
| 6739 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
| 6740 | |
| 6741 | device = sdev->hostdata; |
| 6742 | if (!device) { |
| 6743 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 6744 | return -ENODEV; |
| 6745 | } |
| 6746 | |
| 6747 | bay = device->bay; |
| 6748 | for (i = 0; i < MAX_PATHS; i++) { |
| 6749 | path_map_index = 1 << i; |
| 6750 | if (i == device->active_path_index) |
| 6751 | active = "Active"; |
| 6752 | else if (device->path_map & path_map_index) |
| 6753 | active = "Inactive"; |
| 6754 | else |
| 6755 | continue; |
| 6756 | |
| 6757 | output_len += scnprintf(buf + output_len, |
| 6758 | PAGE_SIZE - output_len, |
| 6759 | "[%d:%d:%d:%d] %20.20s ", |
| 6760 | ctrl_info->scsi_host->host_no, |
| 6761 | device->bus, device->target, |
| 6762 | device->lun, |
| 6763 | scsi_device_type(device->devtype)); |
| 6764 | |
| 6765 | if (device->devtype == TYPE_RAID || |
| 6766 | pqi_is_logical_device(device)) |
| 6767 | goto end_buffer; |
| 6768 | |
| 6769 | memcpy(&phys_connector, &device->phys_connector[i], |
| 6770 | sizeof(phys_connector)); |
| 6771 | if (phys_connector[0] < '0') |
| 6772 | phys_connector[0] = '0'; |
| 6773 | if (phys_connector[1] < '0') |
| 6774 | phys_connector[1] = '0'; |
| 6775 | |
| 6776 | output_len += scnprintf(buf + output_len, |
| 6777 | PAGE_SIZE - output_len, |
| 6778 | "PORT: %.2s ", phys_connector); |
| 6779 | |
| 6780 | box = device->box[i]; |
| 6781 | if (box != 0 && box != 0xFF) |
| 6782 | output_len += scnprintf(buf + output_len, |
| 6783 | PAGE_SIZE - output_len, |
| 6784 | "BOX: %hhu ", box); |
| 6785 | |
| 6786 | if ((device->devtype == TYPE_DISK || |
| 6787 | device->devtype == TYPE_ZBC) && |
| 6788 | pqi_expose_device(device)) |
| 6789 | output_len += scnprintf(buf + output_len, |
| 6790 | PAGE_SIZE - output_len, |
| 6791 | "BAY: %hhu ", bay); |
| 6792 | |
| 6793 | end_buffer: |
| 6794 | output_len += scnprintf(buf + output_len, |
| 6795 | PAGE_SIZE - output_len, |
| 6796 | "%s\n", active); |
| 6797 | } |
| 6798 | |
| 6799 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 6800 | |
| 6801 | return output_len; |
| 6802 | } |
| 6803 | |
| 6804 | static ssize_t pqi_sas_address_show(struct device *dev, |
| 6805 | struct device_attribute *attr, char *buffer) |
| 6806 | { |
| 6807 | struct pqi_ctrl_info *ctrl_info; |
| 6808 | struct scsi_device *sdev; |
| 6809 | struct pqi_scsi_dev *device; |
| 6810 | unsigned long flags; |
| 6811 | u64 sas_address; |
| 6812 | |
| 6813 | sdev = to_scsi_device(dev); |
| 6814 | ctrl_info = shost_to_hba(sdev->host); |
| 6815 | |
| 6816 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
| 6817 | |
| 6818 | device = sdev->hostdata; |
| 6819 | if (!device || !pqi_is_device_with_sas_address(device)) { |
| 6820 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 6821 | return -ENODEV; |
| 6822 | } |
| 6823 | |
| 6824 | sas_address = device->sas_address; |
| 6825 | |
| 6826 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 6827 | |
| 6828 | return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); |
| 6829 | } |
| 6830 | |
| 6831 | static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, |
| 6832 | struct device_attribute *attr, char *buffer) |
| 6833 | { |
| 6834 | struct pqi_ctrl_info *ctrl_info; |
| 6835 | struct scsi_device *sdev; |
| 6836 | struct pqi_scsi_dev *device; |
| 6837 | unsigned long flags; |
| 6838 | |
| 6839 | sdev = to_scsi_device(dev); |
| 6840 | ctrl_info = shost_to_hba(sdev->host); |
| 6841 | |
| 6842 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
| 6843 | |
| 6844 | device = sdev->hostdata; |
| 6845 | if (!device) { |
| 6846 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 6847 | return -ENODEV; |
| 6848 | } |
| 6849 | |
| 6850 | buffer[0] = device->raid_bypass_enabled ? '1' : '0'; |
| 6851 | buffer[1] = '\n'; |
| 6852 | buffer[2] = '\0'; |
| 6853 | |
| 6854 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 6855 | |
| 6856 | return 2; |
| 6857 | } |
| 6858 | |
| 6859 | static ssize_t pqi_raid_level_show(struct device *dev, |
| 6860 | struct device_attribute *attr, char *buffer) |
| 6861 | { |
| 6862 | struct pqi_ctrl_info *ctrl_info; |
| 6863 | struct scsi_device *sdev; |
| 6864 | struct pqi_scsi_dev *device; |
| 6865 | unsigned long flags; |
| 6866 | char *raid_level; |
| 6867 | |
| 6868 | sdev = to_scsi_device(dev); |
| 6869 | ctrl_info = shost_to_hba(sdev->host); |
| 6870 | |
| 6871 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
| 6872 | |
| 6873 | device = sdev->hostdata; |
| 6874 | if (!device) { |
| 6875 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 6876 | return -ENODEV; |
| 6877 | } |
| 6878 | |
| 6879 | if (pqi_is_logical_device(device)) |
| 6880 | raid_level = pqi_raid_level_to_string(device->raid_level); |
| 6881 | else |
| 6882 | raid_level = "N/A"; |
| 6883 | |
| 6884 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 6885 | |
| 6886 | return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level); |
| 6887 | } |
| 6888 | |
| 6889 | static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, |
| 6890 | struct device_attribute *attr, char *buffer) |
| 6891 | { |
| 6892 | struct pqi_ctrl_info *ctrl_info; |
| 6893 | struct scsi_device *sdev; |
| 6894 | struct pqi_scsi_dev *device; |
| 6895 | unsigned long flags; |
| 6896 | int raid_bypass_cnt; |
| 6897 | |
| 6898 | sdev = to_scsi_device(dev); |
| 6899 | ctrl_info = shost_to_hba(sdev->host); |
| 6900 | |
| 6901 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); |
| 6902 | |
| 6903 | device = sdev->hostdata; |
| 6904 | if (!device) { |
| 6905 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 6906 | return -ENODEV; |
| 6907 | } |
| 6908 | |
| 6909 | raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt); |
| 6910 | |
| 6911 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
| 6912 | |
| 6913 | return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt); |
| 6914 | } |
| 6915 | |
| 6916 | static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); |
| 6917 | static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); |
| 6918 | static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); |
| 6919 | static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); |
| 6920 | static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL); |
| 6921 | static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); |
| 6922 | static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL); |
| 6923 | |
| 6924 | static struct device_attribute *pqi_sdev_attrs[] = { |
| 6925 | &dev_attr_lunid, |
| 6926 | &dev_attr_unique_id, |
| 6927 | &dev_attr_path_info, |
| 6928 | &dev_attr_sas_address, |
| 6929 | &dev_attr_ssd_smart_path_enabled, |
| 6930 | &dev_attr_raid_level, |
| 6931 | &dev_attr_raid_bypass_cnt, |
| 6932 | NULL |
| 6933 | }; |
| 6934 | |
| 6935 | static struct scsi_host_template pqi_driver_template = { |
| 6936 | .module = THIS_MODULE, |
| 6937 | .name = DRIVER_NAME_SHORT, |
| 6938 | .proc_name = DRIVER_NAME_SHORT, |
| 6939 | .queuecommand = pqi_scsi_queue_command, |
| 6940 | .scan_start = pqi_scan_start, |
| 6941 | .scan_finished = pqi_scan_finished, |
| 6942 | .this_id = -1, |
| 6943 | .eh_device_reset_handler = pqi_eh_device_reset_handler, |
| 6944 | .ioctl = pqi_ioctl, |
| 6945 | .slave_alloc = pqi_slave_alloc, |
| 6946 | .slave_configure = pqi_slave_configure, |
| 6947 | .slave_destroy = pqi_slave_destroy, |
| 6948 | .map_queues = pqi_map_queues, |
| 6949 | .sdev_attrs = pqi_sdev_attrs, |
| 6950 | .shost_attrs = pqi_shost_attrs, |
| 6951 | }; |
| 6952 | |
| 6953 | static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) |
| 6954 | { |
| 6955 | int rc; |
| 6956 | struct Scsi_Host *shost; |
| 6957 | |
| 6958 | shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); |
| 6959 | if (!shost) { |
| 6960 | dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n"); |
| 6961 | return -ENOMEM; |
| 6962 | } |
| 6963 | |
| 6964 | shost->io_port = 0; |
| 6965 | shost->n_io_port = 0; |
| 6966 | shost->this_id = -1; |
| 6967 | shost->max_channel = PQI_MAX_BUS; |
| 6968 | shost->max_cmd_len = MAX_COMMAND_SIZE; |
| 6969 | shost->max_lun = ~0; |
| 6970 | shost->max_id = ~0; |
| 6971 | shost->max_sectors = ctrl_info->max_sectors; |
| 6972 | shost->can_queue = ctrl_info->scsi_ml_can_queue; |
| 6973 | shost->cmd_per_lun = shost->can_queue; |
| 6974 | shost->sg_tablesize = ctrl_info->sg_tablesize; |
| 6975 | shost->transportt = pqi_sas_transport_template; |
| 6976 | shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); |
| 6977 | shost->unique_id = shost->irq; |
| 6978 | shost->nr_hw_queues = ctrl_info->num_queue_groups; |
| 6979 | shost->host_tagset = 1; |
| 6980 | shost->hostdata[0] = (unsigned long)ctrl_info; |
| 6981 | |
| 6982 | rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); |
| 6983 | if (rc) { |
| 6984 | dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n"); |
| 6985 | goto free_host; |
| 6986 | } |
| 6987 | |
| 6988 | rc = pqi_add_sas_host(shost, ctrl_info); |
| 6989 | if (rc) { |
| 6990 | dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n"); |
| 6991 | goto remove_host; |
| 6992 | } |
| 6993 | |
| 6994 | ctrl_info->scsi_host = shost; |
| 6995 | |
| 6996 | return 0; |
| 6997 | |
| 6998 | remove_host: |
| 6999 | scsi_remove_host(shost); |
| 7000 | free_host: |
| 7001 | scsi_host_put(shost); |
| 7002 | |
| 7003 | return rc; |
| 7004 | } |
| 7005 | |
| 7006 | static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) |
| 7007 | { |
| 7008 | struct Scsi_Host *shost; |
| 7009 | |
| 7010 | pqi_delete_sas_host(ctrl_info); |
| 7011 | |
| 7012 | shost = ctrl_info->scsi_host; |
| 7013 | if (!shost) |
| 7014 | return; |
| 7015 | |
| 7016 | scsi_remove_host(shost); |
| 7017 | scsi_host_put(shost); |
| 7018 | } |
| 7019 | |
| 7020 | static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) |
| 7021 | { |
| 7022 | int rc = 0; |
| 7023 | struct pqi_device_registers __iomem *pqi_registers; |
| 7024 | unsigned long timeout; |
| 7025 | unsigned int timeout_msecs; |
| 7026 | union pqi_reset_register reset_reg; |
| 7027 | |
| 7028 | pqi_registers = ctrl_info->pqi_registers; |
| 7029 | timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; |
| 7030 | timeout = msecs_to_jiffies(timeout_msecs) + jiffies; |
| 7031 | |
| 7032 | while (1) { |
| 7033 | msleep(PQI_RESET_POLL_INTERVAL_MSECS); |
| 7034 | reset_reg.all_bits = readl(&pqi_registers->device_reset); |
| 7035 | if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) |
| 7036 | break; |
| 7037 | pqi_check_ctrl_health(ctrl_info); |
| 7038 | if (pqi_ctrl_offline(ctrl_info)) { |
| 7039 | rc = -ENXIO; |
| 7040 | break; |
| 7041 | } |
| 7042 | if (time_after(jiffies, timeout)) { |
| 7043 | rc = -ETIMEDOUT; |
| 7044 | break; |
| 7045 | } |
| 7046 | } |
| 7047 | |
| 7048 | return rc; |
| 7049 | } |
| 7050 | |
| 7051 | static int pqi_reset(struct pqi_ctrl_info *ctrl_info) |
| 7052 | { |
| 7053 | int rc; |
| 7054 | union pqi_reset_register reset_reg; |
| 7055 | |
| 7056 | if (ctrl_info->pqi_reset_quiesce_supported) { |
| 7057 | rc = sis_pqi_reset_quiesce(ctrl_info); |
| 7058 | if (rc) { |
| 7059 | dev_err(&ctrl_info->pci_dev->dev, |
| 7060 | "PQI reset failed during quiesce with error %d\n", rc); |
| 7061 | return rc; |
| 7062 | } |
| 7063 | } |
| 7064 | |
| 7065 | reset_reg.all_bits = 0; |
| 7066 | reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; |
| 7067 | reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; |
| 7068 | |
| 7069 | writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); |
| 7070 | |
| 7071 | rc = pqi_wait_for_pqi_reset_completion(ctrl_info); |
| 7072 | if (rc) |
| 7073 | dev_err(&ctrl_info->pci_dev->dev, |
| 7074 | "PQI reset failed with error %d\n", rc); |
| 7075 | |
| 7076 | return rc; |
| 7077 | } |
| 7078 | |
| 7079 | static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) |
| 7080 | { |
| 7081 | int rc; |
| 7082 | struct bmic_sense_subsystem_info *sense_info; |
| 7083 | |
| 7084 | sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL); |
| 7085 | if (!sense_info) |
| 7086 | return -ENOMEM; |
| 7087 | |
| 7088 | rc = pqi_sense_subsystem_info(ctrl_info, sense_info); |
| 7089 | if (rc) |
| 7090 | goto out; |
| 7091 | |
| 7092 | memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, |
| 7093 | sizeof(sense_info->ctrl_serial_number)); |
| 7094 | ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; |
| 7095 | |
| 7096 | out: |
| 7097 | kfree(sense_info); |
| 7098 | |
| 7099 | return rc; |
| 7100 | } |
| 7101 | |
| 7102 | static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) |
| 7103 | { |
| 7104 | int rc; |
| 7105 | struct bmic_identify_controller *identify; |
| 7106 | |
| 7107 | identify = kmalloc(sizeof(*identify), GFP_KERNEL); |
| 7108 | if (!identify) |
| 7109 | return -ENOMEM; |
| 7110 | |
| 7111 | rc = pqi_identify_controller(ctrl_info, identify); |
| 7112 | if (rc) |
| 7113 | goto out; |
| 7114 | |
| 7115 | if (get_unaligned_le32(&identify->extra_controller_flags) & |
| 7116 | BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) { |
| 7117 | memcpy(ctrl_info->firmware_version, |
| 7118 | identify->firmware_version_long, |
| 7119 | sizeof(identify->firmware_version_long)); |
| 7120 | } else { |
| 7121 | memcpy(ctrl_info->firmware_version, |
| 7122 | identify->firmware_version_short, |
| 7123 | sizeof(identify->firmware_version_short)); |
| 7124 | ctrl_info->firmware_version |
| 7125 | [sizeof(identify->firmware_version_short)] = '\0'; |
| 7126 | snprintf(ctrl_info->firmware_version + |
| 7127 | strlen(ctrl_info->firmware_version), |
| 7128 | sizeof(ctrl_info->firmware_version) - |
| 7129 | sizeof(identify->firmware_version_short), |
| 7130 | "-%u", |
| 7131 | get_unaligned_le16(&identify->firmware_build_number)); |
| 7132 | } |
| 7133 | |
| 7134 | memcpy(ctrl_info->model, identify->product_id, |
| 7135 | sizeof(identify->product_id)); |
| 7136 | ctrl_info->model[sizeof(identify->product_id)] = '\0'; |
| 7137 | |
| 7138 | memcpy(ctrl_info->vendor, identify->vendor_id, |
| 7139 | sizeof(identify->vendor_id)); |
| 7140 | ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; |
| 7141 | |
| 7142 | out: |
| 7143 | kfree(identify); |
| 7144 | |
| 7145 | return rc; |
| 7146 | } |
| 7147 | |
| 7148 | struct pqi_config_table_section_info { |
| 7149 | struct pqi_ctrl_info *ctrl_info; |
| 7150 | void *section; |
| 7151 | u32 section_offset; |
| 7152 | void __iomem *section_iomem_addr; |
| 7153 | }; |
| 7154 | |
| 7155 | static inline bool pqi_is_firmware_feature_supported( |
| 7156 | struct pqi_config_table_firmware_features *firmware_features, |
| 7157 | unsigned int bit_position) |
| 7158 | { |
| 7159 | unsigned int byte_index; |
| 7160 | |
| 7161 | byte_index = bit_position / BITS_PER_BYTE; |
| 7162 | |
| 7163 | if (byte_index >= le16_to_cpu(firmware_features->num_elements)) |
| 7164 | return false; |
| 7165 | |
| 7166 | return firmware_features->features_supported[byte_index] & |
| 7167 | (1 << (bit_position % BITS_PER_BYTE)) ? true : false; |
| 7168 | } |
| 7169 | |
| 7170 | static inline bool pqi_is_firmware_feature_enabled( |
| 7171 | struct pqi_config_table_firmware_features *firmware_features, |
| 7172 | void __iomem *firmware_features_iomem_addr, |
| 7173 | unsigned int bit_position) |
| 7174 | { |
| 7175 | unsigned int byte_index; |
| 7176 | u8 __iomem *features_enabled_iomem_addr; |
| 7177 | |
| 7178 | byte_index = (bit_position / BITS_PER_BYTE) + |
| 7179 | (le16_to_cpu(firmware_features->num_elements) * 2); |
| 7180 | |
| 7181 | features_enabled_iomem_addr = firmware_features_iomem_addr + |
| 7182 | offsetof(struct pqi_config_table_firmware_features, |
| 7183 | features_supported) + byte_index; |
| 7184 | |
| 7185 | return *((__force u8 *)features_enabled_iomem_addr) & |
| 7186 | (1 << (bit_position % BITS_PER_BYTE)) ? true : false; |
| 7187 | } |
| 7188 | |
| 7189 | static inline void pqi_request_firmware_feature( |
| 7190 | struct pqi_config_table_firmware_features *firmware_features, |
| 7191 | unsigned int bit_position) |
| 7192 | { |
| 7193 | unsigned int byte_index; |
| 7194 | |
| 7195 | byte_index = (bit_position / BITS_PER_BYTE) + |
| 7196 | le16_to_cpu(firmware_features->num_elements); |
| 7197 | |
| 7198 | firmware_features->features_supported[byte_index] |= |
| 7199 | (1 << (bit_position % BITS_PER_BYTE)); |
| 7200 | } |
| 7201 | |
| 7202 | static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, |
| 7203 | u16 first_section, u16 last_section) |
| 7204 | { |
| 7205 | struct pqi_vendor_general_request request; |
| 7206 | |
| 7207 | memset(&request, 0, sizeof(request)); |
| 7208 | |
| 7209 | request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; |
| 7210 | put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, |
| 7211 | &request.header.iu_length); |
| 7212 | put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE, |
| 7213 | &request.function_code); |
| 7214 | put_unaligned_le16(first_section, |
| 7215 | &request.data.config_table_update.first_section); |
| 7216 | put_unaligned_le16(last_section, |
| 7217 | &request.data.config_table_update.last_section); |
| 7218 | |
| 7219 | return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); |
| 7220 | } |
| 7221 | |
| 7222 | static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, |
| 7223 | struct pqi_config_table_firmware_features *firmware_features, |
| 7224 | void __iomem *firmware_features_iomem_addr) |
| 7225 | { |
| 7226 | void *features_requested; |
| 7227 | void __iomem *features_requested_iomem_addr; |
| 7228 | void __iomem *host_max_known_feature_iomem_addr; |
| 7229 | |
| 7230 | features_requested = firmware_features->features_supported + |
| 7231 | le16_to_cpu(firmware_features->num_elements); |
| 7232 | |
| 7233 | features_requested_iomem_addr = firmware_features_iomem_addr + |
| 7234 | (features_requested - (void *)firmware_features); |
| 7235 | |
| 7236 | memcpy_toio(features_requested_iomem_addr, features_requested, |
| 7237 | le16_to_cpu(firmware_features->num_elements)); |
| 7238 | |
| 7239 | if (pqi_is_firmware_feature_supported(firmware_features, |
| 7240 | PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) { |
| 7241 | host_max_known_feature_iomem_addr = |
| 7242 | features_requested_iomem_addr + |
| 7243 | (le16_to_cpu(firmware_features->num_elements) * 2) + |
| 7244 | sizeof(__le16); |
| 7245 | writew(PQI_FIRMWARE_FEATURE_MAXIMUM, |
| 7246 | host_max_known_feature_iomem_addr); |
| 7247 | } |
| 7248 | |
| 7249 | return pqi_config_table_update(ctrl_info, |
| 7250 | PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES, |
| 7251 | PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES); |
| 7252 | } |
| 7253 | |
| 7254 | struct pqi_firmware_feature { |
| 7255 | char *feature_name; |
| 7256 | unsigned int feature_bit; |
| 7257 | bool supported; |
| 7258 | bool enabled; |
| 7259 | void (*feature_status)(struct pqi_ctrl_info *ctrl_info, |
| 7260 | struct pqi_firmware_feature *firmware_feature); |
| 7261 | }; |
| 7262 | |
| 7263 | static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, |
| 7264 | struct pqi_firmware_feature *firmware_feature) |
| 7265 | { |
| 7266 | if (!firmware_feature->supported) { |
| 7267 | dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", |
| 7268 | firmware_feature->feature_name); |
| 7269 | return; |
| 7270 | } |
| 7271 | |
| 7272 | if (firmware_feature->enabled) { |
| 7273 | dev_info(&ctrl_info->pci_dev->dev, |
| 7274 | "%s enabled\n", firmware_feature->feature_name); |
| 7275 | return; |
| 7276 | } |
| 7277 | |
| 7278 | dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", |
| 7279 | firmware_feature->feature_name); |
| 7280 | } |
| 7281 | |
| 7282 | static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, |
| 7283 | struct pqi_firmware_feature *firmware_feature) |
| 7284 | { |
| 7285 | switch (firmware_feature->feature_bit) { |
| 7286 | case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS: |
| 7287 | ctrl_info->enable_r1_writes = firmware_feature->enabled; |
| 7288 | break; |
| 7289 | case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS: |
| 7290 | ctrl_info->enable_r5_writes = firmware_feature->enabled; |
| 7291 | break; |
| 7292 | case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS: |
| 7293 | ctrl_info->enable_r6_writes = firmware_feature->enabled; |
| 7294 | break; |
| 7295 | case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE: |
| 7296 | ctrl_info->soft_reset_handshake_supported = |
| 7297 | firmware_feature->enabled && |
| 7298 | pqi_read_soft_reset_status(ctrl_info); |
| 7299 | break; |
| 7300 | case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT: |
| 7301 | ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled; |
| 7302 | break; |
| 7303 | case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT: |
| 7304 | ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled; |
| 7305 | break; |
| 7306 | case PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN: |
| 7307 | ctrl_info->unique_wwid_in_report_phys_lun_supported = |
| 7308 | firmware_feature->enabled; |
| 7309 | break; |
| 7310 | } |
| 7311 | |
| 7312 | pqi_firmware_feature_status(ctrl_info, firmware_feature); |
| 7313 | } |
| 7314 | |
| 7315 | static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, |
| 7316 | struct pqi_firmware_feature *firmware_feature) |
| 7317 | { |
| 7318 | if (firmware_feature->feature_status) |
| 7319 | firmware_feature->feature_status(ctrl_info, firmware_feature); |
| 7320 | } |
| 7321 | |
| 7322 | static DEFINE_MUTEX(pqi_firmware_features_mutex); |
| 7323 | |
| 7324 | static struct pqi_firmware_feature pqi_firmware_features[] = { |
| 7325 | { |
| 7326 | .feature_name = "Online Firmware Activation", |
| 7327 | .feature_bit = PQI_FIRMWARE_FEATURE_OFA, |
| 7328 | .feature_status = pqi_firmware_feature_status, |
| 7329 | }, |
| 7330 | { |
| 7331 | .feature_name = "Serial Management Protocol", |
| 7332 | .feature_bit = PQI_FIRMWARE_FEATURE_SMP, |
| 7333 | .feature_status = pqi_firmware_feature_status, |
| 7334 | }, |
| 7335 | { |
| 7336 | .feature_name = "Maximum Known Feature", |
| 7337 | .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE, |
| 7338 | .feature_status = pqi_firmware_feature_status, |
| 7339 | }, |
| 7340 | { |
| 7341 | .feature_name = "RAID 0 Read Bypass", |
| 7342 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS, |
| 7343 | .feature_status = pqi_firmware_feature_status, |
| 7344 | }, |
| 7345 | { |
| 7346 | .feature_name = "RAID 1 Read Bypass", |
| 7347 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS, |
| 7348 | .feature_status = pqi_firmware_feature_status, |
| 7349 | }, |
| 7350 | { |
| 7351 | .feature_name = "RAID 5 Read Bypass", |
| 7352 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS, |
| 7353 | .feature_status = pqi_firmware_feature_status, |
| 7354 | }, |
| 7355 | { |
| 7356 | .feature_name = "RAID 6 Read Bypass", |
| 7357 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS, |
| 7358 | .feature_status = pqi_firmware_feature_status, |
| 7359 | }, |
| 7360 | { |
| 7361 | .feature_name = "RAID 0 Write Bypass", |
| 7362 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS, |
| 7363 | .feature_status = pqi_firmware_feature_status, |
| 7364 | }, |
| 7365 | { |
| 7366 | .feature_name = "RAID 1 Write Bypass", |
| 7367 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS, |
| 7368 | .feature_status = pqi_ctrl_update_feature_flags, |
| 7369 | }, |
| 7370 | { |
| 7371 | .feature_name = "RAID 5 Write Bypass", |
| 7372 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS, |
| 7373 | .feature_status = pqi_ctrl_update_feature_flags, |
| 7374 | }, |
| 7375 | { |
| 7376 | .feature_name = "RAID 6 Write Bypass", |
| 7377 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS, |
| 7378 | .feature_status = pqi_ctrl_update_feature_flags, |
| 7379 | }, |
| 7380 | { |
| 7381 | .feature_name = "New Soft Reset Handshake", |
| 7382 | .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, |
| 7383 | .feature_status = pqi_ctrl_update_feature_flags, |
| 7384 | }, |
| 7385 | { |
| 7386 | .feature_name = "RAID IU Timeout", |
| 7387 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT, |
| 7388 | .feature_status = pqi_ctrl_update_feature_flags, |
| 7389 | }, |
| 7390 | { |
| 7391 | .feature_name = "TMF IU Timeout", |
| 7392 | .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT, |
| 7393 | .feature_status = pqi_ctrl_update_feature_flags, |
| 7394 | }, |
| 7395 | { |
| 7396 | .feature_name = "RAID Bypass on encrypted logical volumes on NVMe", |
| 7397 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME, |
| 7398 | .feature_status = pqi_firmware_feature_status, |
| 7399 | }, |
| 7400 | { |
| 7401 | .feature_name = "Unique WWID in Report Physical LUN", |
| 7402 | .feature_bit = PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN, |
| 7403 | .feature_status = pqi_ctrl_update_feature_flags, |
| 7404 | }, |
| 7405 | }; |
| 7406 | |
| 7407 | static void pqi_process_firmware_features( |
| 7408 | struct pqi_config_table_section_info *section_info) |
| 7409 | { |
| 7410 | int rc; |
| 7411 | struct pqi_ctrl_info *ctrl_info; |
| 7412 | struct pqi_config_table_firmware_features *firmware_features; |
| 7413 | void __iomem *firmware_features_iomem_addr; |
| 7414 | unsigned int i; |
| 7415 | unsigned int num_features_supported; |
| 7416 | |
| 7417 | ctrl_info = section_info->ctrl_info; |
| 7418 | firmware_features = section_info->section; |
| 7419 | firmware_features_iomem_addr = section_info->section_iomem_addr; |
| 7420 | |
| 7421 | for (i = 0, num_features_supported = 0; |
| 7422 | i < ARRAY_SIZE(pqi_firmware_features); i++) { |
| 7423 | if (pqi_is_firmware_feature_supported(firmware_features, |
| 7424 | pqi_firmware_features[i].feature_bit)) { |
| 7425 | pqi_firmware_features[i].supported = true; |
| 7426 | num_features_supported++; |
| 7427 | } else { |
| 7428 | pqi_firmware_feature_update(ctrl_info, |
| 7429 | &pqi_firmware_features[i]); |
| 7430 | } |
| 7431 | } |
| 7432 | |
| 7433 | if (num_features_supported == 0) |
| 7434 | return; |
| 7435 | |
| 7436 | for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { |
| 7437 | if (!pqi_firmware_features[i].supported) |
| 7438 | continue; |
| 7439 | pqi_request_firmware_feature(firmware_features, |
| 7440 | pqi_firmware_features[i].feature_bit); |
| 7441 | } |
| 7442 | |
| 7443 | rc = pqi_enable_firmware_features(ctrl_info, firmware_features, |
| 7444 | firmware_features_iomem_addr); |
| 7445 | if (rc) { |
| 7446 | dev_err(&ctrl_info->pci_dev->dev, |
| 7447 | "failed to enable firmware features in PQI configuration table\n"); |
| 7448 | for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { |
| 7449 | if (!pqi_firmware_features[i].supported) |
| 7450 | continue; |
| 7451 | pqi_firmware_feature_update(ctrl_info, |
| 7452 | &pqi_firmware_features[i]); |
| 7453 | } |
| 7454 | return; |
| 7455 | } |
| 7456 | |
| 7457 | for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { |
| 7458 | if (!pqi_firmware_features[i].supported) |
| 7459 | continue; |
| 7460 | if (pqi_is_firmware_feature_enabled(firmware_features, |
| 7461 | firmware_features_iomem_addr, |
| 7462 | pqi_firmware_features[i].feature_bit)) { |
| 7463 | pqi_firmware_features[i].enabled = true; |
| 7464 | } |
| 7465 | pqi_firmware_feature_update(ctrl_info, |
| 7466 | &pqi_firmware_features[i]); |
| 7467 | } |
| 7468 | } |
| 7469 | |
| 7470 | static void pqi_init_firmware_features(void) |
| 7471 | { |
| 7472 | unsigned int i; |
| 7473 | |
| 7474 | for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { |
| 7475 | pqi_firmware_features[i].supported = false; |
| 7476 | pqi_firmware_features[i].enabled = false; |
| 7477 | } |
| 7478 | } |
| 7479 | |
| 7480 | static void pqi_process_firmware_features_section( |
| 7481 | struct pqi_config_table_section_info *section_info) |
| 7482 | { |
| 7483 | mutex_lock(&pqi_firmware_features_mutex); |
| 7484 | pqi_init_firmware_features(); |
| 7485 | pqi_process_firmware_features(section_info); |
| 7486 | mutex_unlock(&pqi_firmware_features_mutex); |
| 7487 | } |
| 7488 | |
| 7489 | /* |
| 7490 | * Reset all controller settings that can be initialized during the processing |
| 7491 | * of the PQI Configuration Table. |
| 7492 | */ |
| 7493 | |
| 7494 | static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info) |
| 7495 | { |
| 7496 | ctrl_info->heartbeat_counter = NULL; |
| 7497 | ctrl_info->soft_reset_status = NULL; |
| 7498 | ctrl_info->soft_reset_handshake_supported = false; |
| 7499 | ctrl_info->enable_r1_writes = false; |
| 7500 | ctrl_info->enable_r5_writes = false; |
| 7501 | ctrl_info->enable_r6_writes = false; |
| 7502 | ctrl_info->raid_iu_timeout_supported = false; |
| 7503 | ctrl_info->tmf_iu_timeout_supported = false; |
| 7504 | ctrl_info->unique_wwid_in_report_phys_lun_supported = false; |
| 7505 | } |
| 7506 | |
| 7507 | static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) |
| 7508 | { |
| 7509 | u32 table_length; |
| 7510 | u32 section_offset; |
| 7511 | bool firmware_feature_section_present; |
| 7512 | void __iomem *table_iomem_addr; |
| 7513 | struct pqi_config_table *config_table; |
| 7514 | struct pqi_config_table_section_header *section; |
| 7515 | struct pqi_config_table_section_info section_info; |
| 7516 | struct pqi_config_table_section_info feature_section_info; |
| 7517 | |
| 7518 | table_length = ctrl_info->config_table_length; |
| 7519 | if (table_length == 0) |
| 7520 | return 0; |
| 7521 | |
| 7522 | config_table = kmalloc(table_length, GFP_KERNEL); |
| 7523 | if (!config_table) { |
| 7524 | dev_err(&ctrl_info->pci_dev->dev, |
| 7525 | "failed to allocate memory for PQI configuration table\n"); |
| 7526 | return -ENOMEM; |
| 7527 | } |
| 7528 | |
| 7529 | /* |
| 7530 | * Copy the config table contents from I/O memory space into the |
| 7531 | * temporary buffer. |
| 7532 | */ |
| 7533 | table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset; |
| 7534 | memcpy_fromio(config_table, table_iomem_addr, table_length); |
| 7535 | |
| 7536 | firmware_feature_section_present = false; |
| 7537 | section_info.ctrl_info = ctrl_info; |
| 7538 | section_offset = get_unaligned_le32(&config_table->first_section_offset); |
| 7539 | |
| 7540 | while (section_offset) { |
| 7541 | section = (void *)config_table + section_offset; |
| 7542 | |
| 7543 | section_info.section = section; |
| 7544 | section_info.section_offset = section_offset; |
| 7545 | section_info.section_iomem_addr = table_iomem_addr + section_offset; |
| 7546 | |
| 7547 | switch (get_unaligned_le16(§ion->section_id)) { |
| 7548 | case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES: |
| 7549 | firmware_feature_section_present = true; |
| 7550 | feature_section_info = section_info; |
| 7551 | break; |
| 7552 | case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: |
| 7553 | if (pqi_disable_heartbeat) |
| 7554 | dev_warn(&ctrl_info->pci_dev->dev, |
| 7555 | "heartbeat disabled by module parameter\n"); |
| 7556 | else |
| 7557 | ctrl_info->heartbeat_counter = |
| 7558 | table_iomem_addr + |
| 7559 | section_offset + |
| 7560 | offsetof(struct pqi_config_table_heartbeat, |
| 7561 | heartbeat_counter); |
| 7562 | break; |
| 7563 | case PQI_CONFIG_TABLE_SECTION_SOFT_RESET: |
| 7564 | ctrl_info->soft_reset_status = |
| 7565 | table_iomem_addr + |
| 7566 | section_offset + |
| 7567 | offsetof(struct pqi_config_table_soft_reset, |
| 7568 | soft_reset_status); |
| 7569 | break; |
| 7570 | } |
| 7571 | |
| 7572 | section_offset = get_unaligned_le16(§ion->next_section_offset); |
| 7573 | } |
| 7574 | |
| 7575 | /* |
| 7576 | * We process the firmware feature section after all other sections |
| 7577 | * have been processed so that the feature bit callbacks can take |
| 7578 | * into account the settings configured by other sections. |
| 7579 | */ |
| 7580 | if (firmware_feature_section_present) |
| 7581 | pqi_process_firmware_features_section(&feature_section_info); |
| 7582 | |
| 7583 | kfree(config_table); |
| 7584 | |
| 7585 | return 0; |
| 7586 | } |
| 7587 | |
| 7588 | /* Switches the controller from PQI mode back into SIS mode. */ |
| 7589 | |
| 7590 | static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) |
| 7591 | { |
| 7592 | int rc; |
| 7593 | |
| 7594 | pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); |
| 7595 | rc = pqi_reset(ctrl_info); |
| 7596 | if (rc) |
| 7597 | return rc; |
| 7598 | rc = sis_reenable_sis_mode(ctrl_info); |
| 7599 | if (rc) { |
| 7600 | dev_err(&ctrl_info->pci_dev->dev, |
| 7601 | "re-enabling SIS mode failed with error %d\n", rc); |
| 7602 | return rc; |
| 7603 | } |
| 7604 | pqi_save_ctrl_mode(ctrl_info, SIS_MODE); |
| 7605 | |
| 7606 | return 0; |
| 7607 | } |
| 7608 | |
| 7609 | /* |
| 7610 | * If the controller isn't already in SIS mode, this function forces it into |
| 7611 | * SIS mode. |
| 7612 | */ |
| 7613 | |
| 7614 | static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) |
| 7615 | { |
| 7616 | if (!sis_is_firmware_running(ctrl_info)) |
| 7617 | return -ENXIO; |
| 7618 | |
| 7619 | if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) |
| 7620 | return 0; |
| 7621 | |
| 7622 | if (sis_is_kernel_up(ctrl_info)) { |
| 7623 | pqi_save_ctrl_mode(ctrl_info, SIS_MODE); |
| 7624 | return 0; |
| 7625 | } |
| 7626 | |
| 7627 | return pqi_revert_to_sis_mode(ctrl_info); |
| 7628 | } |
| 7629 | |
| 7630 | static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) |
| 7631 | { |
| 7632 | int rc; |
| 7633 | u32 product_id; |
| 7634 | |
| 7635 | if (reset_devices) { |
| 7636 | sis_soft_reset(ctrl_info); |
| 7637 | msleep(PQI_POST_RESET_DELAY_SECS * PQI_HZ); |
| 7638 | } else { |
| 7639 | rc = pqi_force_sis_mode(ctrl_info); |
| 7640 | if (rc) |
| 7641 | return rc; |
| 7642 | } |
| 7643 | |
| 7644 | /* |
| 7645 | * Wait until the controller is ready to start accepting SIS |
| 7646 | * commands. |
| 7647 | */ |
| 7648 | rc = sis_wait_for_ctrl_ready(ctrl_info); |
| 7649 | if (rc) |
| 7650 | return rc; |
| 7651 | |
| 7652 | /* |
| 7653 | * Get the controller properties. This allows us to determine |
| 7654 | * whether or not it supports PQI mode. |
| 7655 | */ |
| 7656 | rc = sis_get_ctrl_properties(ctrl_info); |
| 7657 | if (rc) { |
| 7658 | dev_err(&ctrl_info->pci_dev->dev, |
| 7659 | "error obtaining controller properties\n"); |
| 7660 | return rc; |
| 7661 | } |
| 7662 | |
| 7663 | rc = sis_get_pqi_capabilities(ctrl_info); |
| 7664 | if (rc) { |
| 7665 | dev_err(&ctrl_info->pci_dev->dev, |
| 7666 | "error obtaining controller capabilities\n"); |
| 7667 | return rc; |
| 7668 | } |
| 7669 | |
| 7670 | product_id = sis_get_product_id(ctrl_info); |
| 7671 | ctrl_info->product_id = (u8)product_id; |
| 7672 | ctrl_info->product_revision = (u8)(product_id >> 8); |
| 7673 | |
| 7674 | if (reset_devices) { |
| 7675 | if (ctrl_info->max_outstanding_requests > |
| 7676 | PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) |
| 7677 | ctrl_info->max_outstanding_requests = |
| 7678 | PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; |
| 7679 | } else { |
| 7680 | if (ctrl_info->max_outstanding_requests > |
| 7681 | PQI_MAX_OUTSTANDING_REQUESTS) |
| 7682 | ctrl_info->max_outstanding_requests = |
| 7683 | PQI_MAX_OUTSTANDING_REQUESTS; |
| 7684 | } |
| 7685 | |
| 7686 | pqi_calculate_io_resources(ctrl_info); |
| 7687 | |
| 7688 | rc = pqi_alloc_error_buffer(ctrl_info); |
| 7689 | if (rc) { |
| 7690 | dev_err(&ctrl_info->pci_dev->dev, |
| 7691 | "failed to allocate PQI error buffer\n"); |
| 7692 | return rc; |
| 7693 | } |
| 7694 | |
| 7695 | /* |
| 7696 | * If the function we are about to call succeeds, the |
| 7697 | * controller will transition from legacy SIS mode |
| 7698 | * into PQI mode. |
| 7699 | */ |
| 7700 | rc = sis_init_base_struct_addr(ctrl_info); |
| 7701 | if (rc) { |
| 7702 | dev_err(&ctrl_info->pci_dev->dev, |
| 7703 | "error initializing PQI mode\n"); |
| 7704 | return rc; |
| 7705 | } |
| 7706 | |
| 7707 | /* Wait for the controller to complete the SIS -> PQI transition. */ |
| 7708 | rc = pqi_wait_for_pqi_mode_ready(ctrl_info); |
| 7709 | if (rc) { |
| 7710 | dev_err(&ctrl_info->pci_dev->dev, |
| 7711 | "transition to PQI mode failed\n"); |
| 7712 | return rc; |
| 7713 | } |
| 7714 | |
| 7715 | /* From here on, we are running in PQI mode. */ |
| 7716 | ctrl_info->pqi_mode_enabled = true; |
| 7717 | pqi_save_ctrl_mode(ctrl_info, PQI_MODE); |
| 7718 | |
| 7719 | rc = pqi_alloc_admin_queues(ctrl_info); |
| 7720 | if (rc) { |
| 7721 | dev_err(&ctrl_info->pci_dev->dev, |
| 7722 | "failed to allocate admin queues\n"); |
| 7723 | return rc; |
| 7724 | } |
| 7725 | |
| 7726 | rc = pqi_create_admin_queues(ctrl_info); |
| 7727 | if (rc) { |
| 7728 | dev_err(&ctrl_info->pci_dev->dev, |
| 7729 | "error creating admin queues\n"); |
| 7730 | return rc; |
| 7731 | } |
| 7732 | |
| 7733 | rc = pqi_report_device_capability(ctrl_info); |
| 7734 | if (rc) { |
| 7735 | dev_err(&ctrl_info->pci_dev->dev, |
| 7736 | "obtaining device capability failed\n"); |
| 7737 | return rc; |
| 7738 | } |
| 7739 | |
| 7740 | rc = pqi_validate_device_capability(ctrl_info); |
| 7741 | if (rc) |
| 7742 | return rc; |
| 7743 | |
| 7744 | pqi_calculate_queue_resources(ctrl_info); |
| 7745 | |
| 7746 | rc = pqi_enable_msix_interrupts(ctrl_info); |
| 7747 | if (rc) |
| 7748 | return rc; |
| 7749 | |
| 7750 | if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { |
| 7751 | ctrl_info->max_msix_vectors = |
| 7752 | ctrl_info->num_msix_vectors_enabled; |
| 7753 | pqi_calculate_queue_resources(ctrl_info); |
| 7754 | } |
| 7755 | |
| 7756 | rc = pqi_alloc_io_resources(ctrl_info); |
| 7757 | if (rc) |
| 7758 | return rc; |
| 7759 | |
| 7760 | rc = pqi_alloc_operational_queues(ctrl_info); |
| 7761 | if (rc) { |
| 7762 | dev_err(&ctrl_info->pci_dev->dev, |
| 7763 | "failed to allocate operational queues\n"); |
| 7764 | return rc; |
| 7765 | } |
| 7766 | |
| 7767 | pqi_init_operational_queues(ctrl_info); |
| 7768 | |
| 7769 | rc = pqi_request_irqs(ctrl_info); |
| 7770 | if (rc) |
| 7771 | return rc; |
| 7772 | |
| 7773 | rc = pqi_create_queues(ctrl_info); |
| 7774 | if (rc) |
| 7775 | return rc; |
| 7776 | |
| 7777 | pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); |
| 7778 | |
| 7779 | ctrl_info->controller_online = true; |
| 7780 | |
| 7781 | rc = pqi_process_config_table(ctrl_info); |
| 7782 | if (rc) |
| 7783 | return rc; |
| 7784 | |
| 7785 | pqi_start_heartbeat_timer(ctrl_info); |
| 7786 | |
| 7787 | if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { |
| 7788 | rc = pqi_get_advanced_raid_bypass_config(ctrl_info); |
| 7789 | if (rc) { /* Supported features not returned correctly. */ |
| 7790 | dev_err(&ctrl_info->pci_dev->dev, |
| 7791 | "error obtaining advanced RAID bypass configuration\n"); |
| 7792 | return rc; |
| 7793 | } |
| 7794 | ctrl_info->ciss_report_log_flags |= |
| 7795 | CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; |
| 7796 | } |
| 7797 | |
| 7798 | rc = pqi_enable_events(ctrl_info); |
| 7799 | if (rc) { |
| 7800 | dev_err(&ctrl_info->pci_dev->dev, |
| 7801 | "error enabling events\n"); |
| 7802 | return rc; |
| 7803 | } |
| 7804 | |
| 7805 | /* Register with the SCSI subsystem. */ |
| 7806 | rc = pqi_register_scsi(ctrl_info); |
| 7807 | if (rc) |
| 7808 | return rc; |
| 7809 | |
| 7810 | rc = pqi_get_ctrl_product_details(ctrl_info); |
| 7811 | if (rc) { |
| 7812 | dev_err(&ctrl_info->pci_dev->dev, |
| 7813 | "error obtaining product details\n"); |
| 7814 | return rc; |
| 7815 | } |
| 7816 | |
| 7817 | rc = pqi_get_ctrl_serial_number(ctrl_info); |
| 7818 | if (rc) { |
| 7819 | dev_err(&ctrl_info->pci_dev->dev, |
| 7820 | "error obtaining ctrl serial number\n"); |
| 7821 | return rc; |
| 7822 | } |
| 7823 | |
| 7824 | rc = pqi_set_diag_rescan(ctrl_info); |
| 7825 | if (rc) { |
| 7826 | dev_err(&ctrl_info->pci_dev->dev, |
| 7827 | "error enabling multi-lun rescan\n"); |
| 7828 | return rc; |
| 7829 | } |
| 7830 | |
| 7831 | rc = pqi_write_driver_version_to_host_wellness(ctrl_info); |
| 7832 | if (rc) { |
| 7833 | dev_err(&ctrl_info->pci_dev->dev, |
| 7834 | "error updating host wellness\n"); |
| 7835 | return rc; |
| 7836 | } |
| 7837 | |
| 7838 | pqi_schedule_update_time_worker(ctrl_info); |
| 7839 | |
| 7840 | pqi_scan_scsi_devices(ctrl_info); |
| 7841 | |
| 7842 | return 0; |
| 7843 | } |
| 7844 | |
| 7845 | static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) |
| 7846 | { |
| 7847 | unsigned int i; |
| 7848 | struct pqi_admin_queues *admin_queues; |
| 7849 | struct pqi_event_queue *event_queue; |
| 7850 | |
| 7851 | admin_queues = &ctrl_info->admin_queues; |
| 7852 | admin_queues->iq_pi_copy = 0; |
| 7853 | admin_queues->oq_ci_copy = 0; |
| 7854 | writel(0, admin_queues->oq_pi); |
| 7855 | |
| 7856 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
| 7857 | ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; |
| 7858 | ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; |
| 7859 | ctrl_info->queue_groups[i].oq_ci_copy = 0; |
| 7860 | |
| 7861 | writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); |
| 7862 | writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); |
| 7863 | writel(0, ctrl_info->queue_groups[i].oq_pi); |
| 7864 | } |
| 7865 | |
| 7866 | event_queue = &ctrl_info->event_queue; |
| 7867 | writel(0, event_queue->oq_pi); |
| 7868 | event_queue->oq_ci_copy = 0; |
| 7869 | } |
| 7870 | |
| 7871 | static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) |
| 7872 | { |
| 7873 | int rc; |
| 7874 | |
| 7875 | rc = pqi_force_sis_mode(ctrl_info); |
| 7876 | if (rc) |
| 7877 | return rc; |
| 7878 | |
| 7879 | /* |
| 7880 | * Wait until the controller is ready to start accepting SIS |
| 7881 | * commands. |
| 7882 | */ |
| 7883 | rc = sis_wait_for_ctrl_ready_resume(ctrl_info); |
| 7884 | if (rc) |
| 7885 | return rc; |
| 7886 | |
| 7887 | /* |
| 7888 | * Get the controller properties. This allows us to determine |
| 7889 | * whether or not it supports PQI mode. |
| 7890 | */ |
| 7891 | rc = sis_get_ctrl_properties(ctrl_info); |
| 7892 | if (rc) { |
| 7893 | dev_err(&ctrl_info->pci_dev->dev, |
| 7894 | "error obtaining controller properties\n"); |
| 7895 | return rc; |
| 7896 | } |
| 7897 | |
| 7898 | rc = sis_get_pqi_capabilities(ctrl_info); |
| 7899 | if (rc) { |
| 7900 | dev_err(&ctrl_info->pci_dev->dev, |
| 7901 | "error obtaining controller capabilities\n"); |
| 7902 | return rc; |
| 7903 | } |
| 7904 | |
| 7905 | /* |
| 7906 | * If the function we are about to call succeeds, the |
| 7907 | * controller will transition from legacy SIS mode |
| 7908 | * into PQI mode. |
| 7909 | */ |
| 7910 | rc = sis_init_base_struct_addr(ctrl_info); |
| 7911 | if (rc) { |
| 7912 | dev_err(&ctrl_info->pci_dev->dev, |
| 7913 | "error initializing PQI mode\n"); |
| 7914 | return rc; |
| 7915 | } |
| 7916 | |
| 7917 | /* Wait for the controller to complete the SIS -> PQI transition. */ |
| 7918 | rc = pqi_wait_for_pqi_mode_ready(ctrl_info); |
| 7919 | if (rc) { |
| 7920 | dev_err(&ctrl_info->pci_dev->dev, |
| 7921 | "transition to PQI mode failed\n"); |
| 7922 | return rc; |
| 7923 | } |
| 7924 | |
| 7925 | /* From here on, we are running in PQI mode. */ |
| 7926 | ctrl_info->pqi_mode_enabled = true; |
| 7927 | pqi_save_ctrl_mode(ctrl_info, PQI_MODE); |
| 7928 | |
| 7929 | pqi_reinit_queues(ctrl_info); |
| 7930 | |
| 7931 | rc = pqi_create_admin_queues(ctrl_info); |
| 7932 | if (rc) { |
| 7933 | dev_err(&ctrl_info->pci_dev->dev, |
| 7934 | "error creating admin queues\n"); |
| 7935 | return rc; |
| 7936 | } |
| 7937 | |
| 7938 | rc = pqi_create_queues(ctrl_info); |
| 7939 | if (rc) |
| 7940 | return rc; |
| 7941 | |
| 7942 | pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); |
| 7943 | |
| 7944 | ctrl_info->controller_online = true; |
| 7945 | pqi_ctrl_unblock_requests(ctrl_info); |
| 7946 | |
| 7947 | pqi_ctrl_reset_config(ctrl_info); |
| 7948 | |
| 7949 | rc = pqi_process_config_table(ctrl_info); |
| 7950 | if (rc) |
| 7951 | return rc; |
| 7952 | |
| 7953 | pqi_start_heartbeat_timer(ctrl_info); |
| 7954 | |
| 7955 | if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { |
| 7956 | rc = pqi_get_advanced_raid_bypass_config(ctrl_info); |
| 7957 | if (rc) { |
| 7958 | dev_err(&ctrl_info->pci_dev->dev, |
| 7959 | "error obtaining advanced RAID bypass configuration\n"); |
| 7960 | return rc; |
| 7961 | } |
| 7962 | ctrl_info->ciss_report_log_flags |= |
| 7963 | CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; |
| 7964 | } |
| 7965 | |
| 7966 | rc = pqi_enable_events(ctrl_info); |
| 7967 | if (rc) { |
| 7968 | dev_err(&ctrl_info->pci_dev->dev, |
| 7969 | "error enabling events\n"); |
| 7970 | return rc; |
| 7971 | } |
| 7972 | |
| 7973 | rc = pqi_get_ctrl_product_details(ctrl_info); |
| 7974 | if (rc) { |
| 7975 | dev_err(&ctrl_info->pci_dev->dev, |
| 7976 | "error obtaining product details\n"); |
| 7977 | return rc; |
| 7978 | } |
| 7979 | |
| 7980 | rc = pqi_set_diag_rescan(ctrl_info); |
| 7981 | if (rc) { |
| 7982 | dev_err(&ctrl_info->pci_dev->dev, |
| 7983 | "error enabling multi-lun rescan\n"); |
| 7984 | return rc; |
| 7985 | } |
| 7986 | |
| 7987 | rc = pqi_write_driver_version_to_host_wellness(ctrl_info); |
| 7988 | if (rc) { |
| 7989 | dev_err(&ctrl_info->pci_dev->dev, |
| 7990 | "error updating host wellness\n"); |
| 7991 | return rc; |
| 7992 | } |
| 7993 | |
| 7994 | if (pqi_ofa_in_progress(ctrl_info)) |
| 7995 | pqi_ctrl_unblock_scan(ctrl_info); |
| 7996 | |
| 7997 | pqi_scan_scsi_devices(ctrl_info); |
| 7998 | |
| 7999 | return 0; |
| 8000 | } |
| 8001 | |
| 8002 | static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout) |
| 8003 | { |
| 8004 | int rc; |
| 8005 | |
| 8006 | rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, |
| 8007 | PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); |
| 8008 | |
| 8009 | return pcibios_err_to_errno(rc); |
| 8010 | } |
| 8011 | |
| 8012 | static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) |
| 8013 | { |
| 8014 | int rc; |
| 8015 | u64 mask; |
| 8016 | |
| 8017 | rc = pci_enable_device(ctrl_info->pci_dev); |
| 8018 | if (rc) { |
| 8019 | dev_err(&ctrl_info->pci_dev->dev, |
| 8020 | "failed to enable PCI device\n"); |
| 8021 | return rc; |
| 8022 | } |
| 8023 | |
| 8024 | if (sizeof(dma_addr_t) > 4) |
| 8025 | mask = DMA_BIT_MASK(64); |
| 8026 | else |
| 8027 | mask = DMA_BIT_MASK(32); |
| 8028 | |
| 8029 | rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); |
| 8030 | if (rc) { |
| 8031 | dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); |
| 8032 | goto disable_device; |
| 8033 | } |
| 8034 | |
| 8035 | rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); |
| 8036 | if (rc) { |
| 8037 | dev_err(&ctrl_info->pci_dev->dev, |
| 8038 | "failed to obtain PCI resources\n"); |
| 8039 | goto disable_device; |
| 8040 | } |
| 8041 | |
| 8042 | ctrl_info->iomem_base = ioremap(pci_resource_start( |
| 8043 | ctrl_info->pci_dev, 0), |
| 8044 | sizeof(struct pqi_ctrl_registers)); |
| 8045 | if (!ctrl_info->iomem_base) { |
| 8046 | dev_err(&ctrl_info->pci_dev->dev, |
| 8047 | "failed to map memory for controller registers\n"); |
| 8048 | rc = -ENOMEM; |
| 8049 | goto release_regions; |
| 8050 | } |
| 8051 | |
| 8052 | #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 |
| 8053 | |
| 8054 | /* Increase the PCIe completion timeout. */ |
| 8055 | rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, |
| 8056 | PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); |
| 8057 | if (rc) { |
| 8058 | dev_err(&ctrl_info->pci_dev->dev, |
| 8059 | "failed to set PCIe completion timeout\n"); |
| 8060 | goto release_regions; |
| 8061 | } |
| 8062 | |
| 8063 | /* Enable bus mastering. */ |
| 8064 | pci_set_master(ctrl_info->pci_dev); |
| 8065 | |
| 8066 | ctrl_info->registers = ctrl_info->iomem_base; |
| 8067 | ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; |
| 8068 | |
| 8069 | pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); |
| 8070 | |
| 8071 | return 0; |
| 8072 | |
| 8073 | release_regions: |
| 8074 | pci_release_regions(ctrl_info->pci_dev); |
| 8075 | disable_device: |
| 8076 | pci_disable_device(ctrl_info->pci_dev); |
| 8077 | |
| 8078 | return rc; |
| 8079 | } |
| 8080 | |
| 8081 | static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) |
| 8082 | { |
| 8083 | iounmap(ctrl_info->iomem_base); |
| 8084 | pci_release_regions(ctrl_info->pci_dev); |
| 8085 | if (pci_is_enabled(ctrl_info->pci_dev)) |
| 8086 | pci_disable_device(ctrl_info->pci_dev); |
| 8087 | pci_set_drvdata(ctrl_info->pci_dev, NULL); |
| 8088 | } |
| 8089 | |
| 8090 | static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) |
| 8091 | { |
| 8092 | struct pqi_ctrl_info *ctrl_info; |
| 8093 | |
| 8094 | ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), |
| 8095 | GFP_KERNEL, numa_node); |
| 8096 | if (!ctrl_info) |
| 8097 | return NULL; |
| 8098 | |
| 8099 | mutex_init(&ctrl_info->scan_mutex); |
| 8100 | mutex_init(&ctrl_info->lun_reset_mutex); |
| 8101 | mutex_init(&ctrl_info->ofa_mutex); |
| 8102 | |
| 8103 | INIT_LIST_HEAD(&ctrl_info->scsi_device_list); |
| 8104 | spin_lock_init(&ctrl_info->scsi_device_list_lock); |
| 8105 | |
| 8106 | INIT_WORK(&ctrl_info->event_work, pqi_event_worker); |
| 8107 | atomic_set(&ctrl_info->num_interrupts, 0); |
| 8108 | |
| 8109 | INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); |
| 8110 | INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); |
| 8111 | |
| 8112 | timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); |
| 8113 | INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); |
| 8114 | |
| 8115 | INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker); |
| 8116 | INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker); |
| 8117 | |
| 8118 | sema_init(&ctrl_info->sync_request_sem, |
| 8119 | PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); |
| 8120 | init_waitqueue_head(&ctrl_info->block_requests_wait); |
| 8121 | |
| 8122 | ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; |
| 8123 | ctrl_info->irq_mode = IRQ_MODE_NONE; |
| 8124 | ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; |
| 8125 | |
| 8126 | ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID; |
| 8127 | ctrl_info->max_transfer_encrypted_sas_sata = |
| 8128 | PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA; |
| 8129 | ctrl_info->max_transfer_encrypted_nvme = |
| 8130 | PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME; |
| 8131 | ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6; |
| 8132 | ctrl_info->max_write_raid_1_10_2drive = ~0; |
| 8133 | ctrl_info->max_write_raid_1_10_3drive = ~0; |
| 8134 | |
| 8135 | return ctrl_info; |
| 8136 | } |
| 8137 | |
| 8138 | static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) |
| 8139 | { |
| 8140 | kfree(ctrl_info); |
| 8141 | } |
| 8142 | |
| 8143 | static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) |
| 8144 | { |
| 8145 | pqi_free_irqs(ctrl_info); |
| 8146 | pqi_disable_msix_interrupts(ctrl_info); |
| 8147 | } |
| 8148 | |
| 8149 | static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) |
| 8150 | { |
| 8151 | pqi_stop_heartbeat_timer(ctrl_info); |
| 8152 | pqi_free_interrupts(ctrl_info); |
| 8153 | if (ctrl_info->queue_memory_base) |
| 8154 | dma_free_coherent(&ctrl_info->pci_dev->dev, |
| 8155 | ctrl_info->queue_memory_length, |
| 8156 | ctrl_info->queue_memory_base, |
| 8157 | ctrl_info->queue_memory_base_dma_handle); |
| 8158 | if (ctrl_info->admin_queue_memory_base) |
| 8159 | dma_free_coherent(&ctrl_info->pci_dev->dev, |
| 8160 | ctrl_info->admin_queue_memory_length, |
| 8161 | ctrl_info->admin_queue_memory_base, |
| 8162 | ctrl_info->admin_queue_memory_base_dma_handle); |
| 8163 | pqi_free_all_io_requests(ctrl_info); |
| 8164 | if (ctrl_info->error_buffer) |
| 8165 | dma_free_coherent(&ctrl_info->pci_dev->dev, |
| 8166 | ctrl_info->error_buffer_length, |
| 8167 | ctrl_info->error_buffer, |
| 8168 | ctrl_info->error_buffer_dma_handle); |
| 8169 | if (ctrl_info->iomem_base) |
| 8170 | pqi_cleanup_pci_init(ctrl_info); |
| 8171 | pqi_free_ctrl_info(ctrl_info); |
| 8172 | } |
| 8173 | |
| 8174 | static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) |
| 8175 | { |
| 8176 | pqi_cancel_rescan_worker(ctrl_info); |
| 8177 | pqi_cancel_update_time_worker(ctrl_info); |
| 8178 | pqi_unregister_scsi(ctrl_info); |
| 8179 | if (ctrl_info->pqi_mode_enabled) |
| 8180 | pqi_revert_to_sis_mode(ctrl_info); |
| 8181 | pqi_free_ctrl_resources(ctrl_info); |
| 8182 | } |
| 8183 | |
| 8184 | static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) |
| 8185 | { |
| 8186 | pqi_ctrl_block_scan(ctrl_info); |
| 8187 | pqi_scsi_block_requests(ctrl_info); |
| 8188 | pqi_ctrl_block_device_reset(ctrl_info); |
| 8189 | pqi_ctrl_block_requests(ctrl_info); |
| 8190 | pqi_ctrl_wait_until_quiesced(ctrl_info); |
| 8191 | pqi_stop_heartbeat_timer(ctrl_info); |
| 8192 | } |
| 8193 | |
| 8194 | static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) |
| 8195 | { |
| 8196 | pqi_start_heartbeat_timer(ctrl_info); |
| 8197 | pqi_ctrl_unblock_requests(ctrl_info); |
| 8198 | pqi_ctrl_unblock_device_reset(ctrl_info); |
| 8199 | pqi_scsi_unblock_requests(ctrl_info); |
| 8200 | pqi_ctrl_unblock_scan(ctrl_info); |
| 8201 | } |
| 8202 | |
| 8203 | static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size) |
| 8204 | { |
| 8205 | int i; |
| 8206 | u32 sg_count; |
| 8207 | struct device *dev; |
| 8208 | struct pqi_ofa_memory *ofap; |
| 8209 | struct pqi_sg_descriptor *mem_descriptor; |
| 8210 | dma_addr_t dma_handle; |
| 8211 | |
| 8212 | ofap = ctrl_info->pqi_ofa_mem_virt_addr; |
| 8213 | |
| 8214 | sg_count = DIV_ROUND_UP(total_size, chunk_size); |
| 8215 | if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS) |
| 8216 | goto out; |
| 8217 | |
| 8218 | ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL); |
| 8219 | if (!ctrl_info->pqi_ofa_chunk_virt_addr) |
| 8220 | goto out; |
| 8221 | |
| 8222 | dev = &ctrl_info->pci_dev->dev; |
| 8223 | |
| 8224 | for (i = 0; i < sg_count; i++) { |
| 8225 | ctrl_info->pqi_ofa_chunk_virt_addr[i] = |
| 8226 | dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL); |
| 8227 | if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) |
| 8228 | goto out_free_chunks; |
| 8229 | mem_descriptor = &ofap->sg_descriptor[i]; |
| 8230 | put_unaligned_le64((u64)dma_handle, &mem_descriptor->address); |
| 8231 | put_unaligned_le32(chunk_size, &mem_descriptor->length); |
| 8232 | } |
| 8233 | |
| 8234 | put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); |
| 8235 | put_unaligned_le16(sg_count, &ofap->num_memory_descriptors); |
| 8236 | put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated); |
| 8237 | |
| 8238 | return 0; |
| 8239 | |
| 8240 | out_free_chunks: |
| 8241 | while (--i >= 0) { |
| 8242 | mem_descriptor = &ofap->sg_descriptor[i]; |
| 8243 | dma_free_coherent(dev, chunk_size, |
| 8244 | ctrl_info->pqi_ofa_chunk_virt_addr[i], |
| 8245 | get_unaligned_le64(&mem_descriptor->address)); |
| 8246 | } |
| 8247 | kfree(ctrl_info->pqi_ofa_chunk_virt_addr); |
| 8248 | |
| 8249 | out: |
| 8250 | return -ENOMEM; |
| 8251 | } |
| 8252 | |
| 8253 | static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) |
| 8254 | { |
| 8255 | u32 total_size; |
| 8256 | u32 chunk_size; |
| 8257 | u32 min_chunk_size; |
| 8258 | |
| 8259 | if (ctrl_info->ofa_bytes_requested == 0) |
| 8260 | return 0; |
| 8261 | |
| 8262 | total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested); |
| 8263 | min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS); |
| 8264 | min_chunk_size = PAGE_ALIGN(min_chunk_size); |
| 8265 | |
| 8266 | for (chunk_size = total_size; chunk_size >= min_chunk_size;) { |
| 8267 | if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0) |
| 8268 | return 0; |
| 8269 | chunk_size /= 2; |
| 8270 | chunk_size = PAGE_ALIGN(chunk_size); |
| 8271 | } |
| 8272 | |
| 8273 | return -ENOMEM; |
| 8274 | } |
| 8275 | |
| 8276 | static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info) |
| 8277 | { |
| 8278 | struct device *dev; |
| 8279 | struct pqi_ofa_memory *ofap; |
| 8280 | |
| 8281 | dev = &ctrl_info->pci_dev->dev; |
| 8282 | |
| 8283 | ofap = dma_alloc_coherent(dev, sizeof(*ofap), |
| 8284 | &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL); |
| 8285 | if (!ofap) |
| 8286 | return; |
| 8287 | |
| 8288 | ctrl_info->pqi_ofa_mem_virt_addr = ofap; |
| 8289 | |
| 8290 | if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) { |
| 8291 | dev_err(dev, |
| 8292 | "failed to allocate host buffer for Online Firmware Activation\n"); |
| 8293 | dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle); |
| 8294 | ctrl_info->pqi_ofa_mem_virt_addr = NULL; |
| 8295 | return; |
| 8296 | } |
| 8297 | |
| 8298 | put_unaligned_le16(PQI_OFA_VERSION, &ofap->version); |
| 8299 | memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature)); |
| 8300 | } |
| 8301 | |
| 8302 | static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) |
| 8303 | { |
| 8304 | unsigned int i; |
| 8305 | struct device *dev; |
| 8306 | struct pqi_ofa_memory *ofap; |
| 8307 | struct pqi_sg_descriptor *mem_descriptor; |
| 8308 | unsigned int num_memory_descriptors; |
| 8309 | |
| 8310 | ofap = ctrl_info->pqi_ofa_mem_virt_addr; |
| 8311 | if (!ofap) |
| 8312 | return; |
| 8313 | |
| 8314 | dev = &ctrl_info->pci_dev->dev; |
| 8315 | |
| 8316 | if (get_unaligned_le32(&ofap->bytes_allocated) == 0) |
| 8317 | goto out; |
| 8318 | |
| 8319 | mem_descriptor = ofap->sg_descriptor; |
| 8320 | num_memory_descriptors = |
| 8321 | get_unaligned_le16(&ofap->num_memory_descriptors); |
| 8322 | |
| 8323 | for (i = 0; i < num_memory_descriptors; i++) { |
| 8324 | dma_free_coherent(dev, |
| 8325 | get_unaligned_le32(&mem_descriptor[i].length), |
| 8326 | ctrl_info->pqi_ofa_chunk_virt_addr[i], |
| 8327 | get_unaligned_le64(&mem_descriptor[i].address)); |
| 8328 | } |
| 8329 | kfree(ctrl_info->pqi_ofa_chunk_virt_addr); |
| 8330 | |
| 8331 | out: |
| 8332 | dma_free_coherent(dev, sizeof(*ofap), ofap, |
| 8333 | ctrl_info->pqi_ofa_mem_dma_handle); |
| 8334 | ctrl_info->pqi_ofa_mem_virt_addr = NULL; |
| 8335 | } |
| 8336 | |
| 8337 | static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) |
| 8338 | { |
| 8339 | u32 buffer_length; |
| 8340 | struct pqi_vendor_general_request request; |
| 8341 | struct pqi_ofa_memory *ofap; |
| 8342 | |
| 8343 | memset(&request, 0, sizeof(request)); |
| 8344 | |
| 8345 | request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; |
| 8346 | put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, |
| 8347 | &request.header.iu_length); |
| 8348 | put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE, |
| 8349 | &request.function_code); |
| 8350 | |
| 8351 | ofap = ctrl_info->pqi_ofa_mem_virt_addr; |
| 8352 | |
| 8353 | if (ofap) { |
| 8354 | buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) + |
| 8355 | get_unaligned_le16(&ofap->num_memory_descriptors) * |
| 8356 | sizeof(struct pqi_sg_descriptor); |
| 8357 | |
| 8358 | put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle, |
| 8359 | &request.data.ofa_memory_allocation.buffer_address); |
| 8360 | put_unaligned_le32(buffer_length, |
| 8361 | &request.data.ofa_memory_allocation.buffer_length); |
| 8362 | } |
| 8363 | |
| 8364 | return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); |
| 8365 | } |
| 8366 | |
| 8367 | static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) |
| 8368 | { |
| 8369 | ssleep(delay_secs); |
| 8370 | |
| 8371 | return pqi_ctrl_init_resume(ctrl_info); |
| 8372 | } |
| 8373 | |
| 8374 | static void pqi_perform_lockup_action(void) |
| 8375 | { |
| 8376 | switch (pqi_lockup_action) { |
| 8377 | case PANIC: |
| 8378 | panic("FATAL: Smart Family Controller lockup detected"); |
| 8379 | break; |
| 8380 | case REBOOT: |
| 8381 | emergency_restart(); |
| 8382 | break; |
| 8383 | case NONE: |
| 8384 | default: |
| 8385 | break; |
| 8386 | } |
| 8387 | } |
| 8388 | |
| 8389 | static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { |
| 8390 | .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, |
| 8391 | .status = SAM_STAT_CHECK_CONDITION, |
| 8392 | }; |
| 8393 | |
| 8394 | static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) |
| 8395 | { |
| 8396 | unsigned int i; |
| 8397 | struct pqi_io_request *io_request; |
| 8398 | struct scsi_cmnd *scmd; |
| 8399 | |
| 8400 | for (i = 0; i < ctrl_info->max_io_slots; i++) { |
| 8401 | io_request = &ctrl_info->io_request_pool[i]; |
| 8402 | if (atomic_read(&io_request->refcount) == 0) |
| 8403 | continue; |
| 8404 | |
| 8405 | scmd = io_request->scmd; |
| 8406 | if (scmd) { |
| 8407 | set_host_byte(scmd, DID_NO_CONNECT); |
| 8408 | } else { |
| 8409 | io_request->status = -ENXIO; |
| 8410 | io_request->error_info = |
| 8411 | &pqi_ctrl_offline_raid_error_info; |
| 8412 | } |
| 8413 | |
| 8414 | io_request->io_complete_callback(io_request, |
| 8415 | io_request->context); |
| 8416 | } |
| 8417 | } |
| 8418 | |
| 8419 | static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) |
| 8420 | { |
| 8421 | pqi_perform_lockup_action(); |
| 8422 | pqi_stop_heartbeat_timer(ctrl_info); |
| 8423 | pqi_free_interrupts(ctrl_info); |
| 8424 | pqi_cancel_rescan_worker(ctrl_info); |
| 8425 | pqi_cancel_update_time_worker(ctrl_info); |
| 8426 | pqi_ctrl_wait_until_quiesced(ctrl_info); |
| 8427 | pqi_fail_all_outstanding_requests(ctrl_info); |
| 8428 | pqi_ctrl_unblock_requests(ctrl_info); |
| 8429 | } |
| 8430 | |
| 8431 | static void pqi_ctrl_offline_worker(struct work_struct *work) |
| 8432 | { |
| 8433 | struct pqi_ctrl_info *ctrl_info; |
| 8434 | |
| 8435 | ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); |
| 8436 | pqi_take_ctrl_offline_deferred(ctrl_info); |
| 8437 | } |
| 8438 | |
| 8439 | static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) |
| 8440 | { |
| 8441 | if (!ctrl_info->controller_online) |
| 8442 | return; |
| 8443 | |
| 8444 | ctrl_info->controller_online = false; |
| 8445 | ctrl_info->pqi_mode_enabled = false; |
| 8446 | pqi_ctrl_block_requests(ctrl_info); |
| 8447 | if (!pqi_disable_ctrl_shutdown) |
| 8448 | sis_shutdown_ctrl(ctrl_info); |
| 8449 | pci_disable_device(ctrl_info->pci_dev); |
| 8450 | dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); |
| 8451 | schedule_work(&ctrl_info->ctrl_offline_work); |
| 8452 | } |
| 8453 | |
| 8454 | static void pqi_print_ctrl_info(struct pci_dev *pci_dev, |
| 8455 | const struct pci_device_id *id) |
| 8456 | { |
| 8457 | char *ctrl_description; |
| 8458 | |
| 8459 | if (id->driver_data) |
| 8460 | ctrl_description = (char *)id->driver_data; |
| 8461 | else |
| 8462 | ctrl_description = "Microsemi Smart Family Controller"; |
| 8463 | |
| 8464 | dev_info(&pci_dev->dev, "%s found\n", ctrl_description); |
| 8465 | } |
| 8466 | |
| 8467 | static int pqi_pci_probe(struct pci_dev *pci_dev, |
| 8468 | const struct pci_device_id *id) |
| 8469 | { |
| 8470 | int rc; |
| 8471 | int node, cp_node; |
| 8472 | struct pqi_ctrl_info *ctrl_info; |
| 8473 | |
| 8474 | pqi_print_ctrl_info(pci_dev, id); |
| 8475 | |
| 8476 | if (pqi_disable_device_id_wildcards && |
| 8477 | id->subvendor == PCI_ANY_ID && |
| 8478 | id->subdevice == PCI_ANY_ID) { |
| 8479 | dev_warn(&pci_dev->dev, |
| 8480 | "controller not probed because device ID wildcards are disabled\n"); |
| 8481 | return -ENODEV; |
| 8482 | } |
| 8483 | |
| 8484 | if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID) |
| 8485 | dev_warn(&pci_dev->dev, |
| 8486 | "controller device ID matched using wildcards\n"); |
| 8487 | |
| 8488 | node = dev_to_node(&pci_dev->dev); |
| 8489 | if (node == NUMA_NO_NODE) { |
| 8490 | cp_node = cpu_to_node(0); |
| 8491 | if (cp_node == NUMA_NO_NODE) |
| 8492 | cp_node = 0; |
| 8493 | set_dev_node(&pci_dev->dev, cp_node); |
| 8494 | } |
| 8495 | |
| 8496 | ctrl_info = pqi_alloc_ctrl_info(node); |
| 8497 | if (!ctrl_info) { |
| 8498 | dev_err(&pci_dev->dev, |
| 8499 | "failed to allocate controller info block\n"); |
| 8500 | return -ENOMEM; |
| 8501 | } |
| 8502 | |
| 8503 | ctrl_info->pci_dev = pci_dev; |
| 8504 | |
| 8505 | rc = pqi_pci_init(ctrl_info); |
| 8506 | if (rc) |
| 8507 | goto error; |
| 8508 | |
| 8509 | rc = pqi_ctrl_init(ctrl_info); |
| 8510 | if (rc) |
| 8511 | goto error; |
| 8512 | |
| 8513 | return 0; |
| 8514 | |
| 8515 | error: |
| 8516 | pqi_remove_ctrl(ctrl_info); |
| 8517 | |
| 8518 | return rc; |
| 8519 | } |
| 8520 | |
| 8521 | static void pqi_pci_remove(struct pci_dev *pci_dev) |
| 8522 | { |
| 8523 | struct pqi_ctrl_info *ctrl_info; |
| 8524 | |
| 8525 | ctrl_info = pci_get_drvdata(pci_dev); |
| 8526 | if (!ctrl_info) |
| 8527 | return; |
| 8528 | |
| 8529 | pqi_remove_ctrl(ctrl_info); |
| 8530 | } |
| 8531 | |
| 8532 | static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info) |
| 8533 | { |
| 8534 | unsigned int i; |
| 8535 | struct pqi_io_request *io_request; |
| 8536 | struct scsi_cmnd *scmd; |
| 8537 | |
| 8538 | for (i = 0; i < ctrl_info->max_io_slots; i++) { |
| 8539 | io_request = &ctrl_info->io_request_pool[i]; |
| 8540 | if (atomic_read(&io_request->refcount) == 0) |
| 8541 | continue; |
| 8542 | scmd = io_request->scmd; |
| 8543 | WARN_ON(scmd != NULL); /* IO command from SML */ |
| 8544 | WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated*/ |
| 8545 | } |
| 8546 | } |
| 8547 | |
| 8548 | static void pqi_shutdown(struct pci_dev *pci_dev) |
| 8549 | { |
| 8550 | int rc; |
| 8551 | struct pqi_ctrl_info *ctrl_info; |
| 8552 | |
| 8553 | ctrl_info = pci_get_drvdata(pci_dev); |
| 8554 | if (!ctrl_info) { |
| 8555 | dev_err(&pci_dev->dev, |
| 8556 | "cache could not be flushed\n"); |
| 8557 | return; |
| 8558 | } |
| 8559 | |
| 8560 | pqi_wait_until_ofa_finished(ctrl_info); |
| 8561 | |
| 8562 | pqi_scsi_block_requests(ctrl_info); |
| 8563 | pqi_ctrl_block_device_reset(ctrl_info); |
| 8564 | pqi_ctrl_block_requests(ctrl_info); |
| 8565 | pqi_ctrl_wait_until_quiesced(ctrl_info); |
| 8566 | |
| 8567 | /* |
| 8568 | * Write all data in the controller's battery-backed cache to |
| 8569 | * storage. |
| 8570 | */ |
| 8571 | rc = pqi_flush_cache(ctrl_info, SHUTDOWN); |
| 8572 | if (rc) |
| 8573 | dev_err(&pci_dev->dev, |
| 8574 | "unable to flush controller cache\n"); |
| 8575 | |
| 8576 | pqi_crash_if_pending_command(ctrl_info); |
| 8577 | pqi_reset(ctrl_info); |
| 8578 | } |
| 8579 | |
| 8580 | static void pqi_process_lockup_action_param(void) |
| 8581 | { |
| 8582 | unsigned int i; |
| 8583 | |
| 8584 | if (!pqi_lockup_action_param) |
| 8585 | return; |
| 8586 | |
| 8587 | for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { |
| 8588 | if (strcmp(pqi_lockup_action_param, |
| 8589 | pqi_lockup_actions[i].name) == 0) { |
| 8590 | pqi_lockup_action = pqi_lockup_actions[i].action; |
| 8591 | return; |
| 8592 | } |
| 8593 | } |
| 8594 | |
| 8595 | pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", |
| 8596 | DRIVER_NAME_SHORT, pqi_lockup_action_param); |
| 8597 | } |
| 8598 | |
| 8599 | static void pqi_process_module_params(void) |
| 8600 | { |
| 8601 | pqi_process_lockup_action_param(); |
| 8602 | } |
| 8603 | |
| 8604 | static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state) |
| 8605 | { |
| 8606 | struct pqi_ctrl_info *ctrl_info; |
| 8607 | |
| 8608 | ctrl_info = pci_get_drvdata(pci_dev); |
| 8609 | |
| 8610 | pqi_wait_until_ofa_finished(ctrl_info); |
| 8611 | |
| 8612 | pqi_ctrl_block_scan(ctrl_info); |
| 8613 | pqi_scsi_block_requests(ctrl_info); |
| 8614 | pqi_ctrl_block_device_reset(ctrl_info); |
| 8615 | pqi_ctrl_block_requests(ctrl_info); |
| 8616 | pqi_ctrl_wait_until_quiesced(ctrl_info); |
| 8617 | pqi_flush_cache(ctrl_info, SUSPEND); |
| 8618 | pqi_stop_heartbeat_timer(ctrl_info); |
| 8619 | |
| 8620 | pqi_crash_if_pending_command(ctrl_info); |
| 8621 | |
| 8622 | if (state.event == PM_EVENT_FREEZE) |
| 8623 | return 0; |
| 8624 | |
| 8625 | pci_save_state(pci_dev); |
| 8626 | pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); |
| 8627 | |
| 8628 | ctrl_info->controller_online = false; |
| 8629 | ctrl_info->pqi_mode_enabled = false; |
| 8630 | |
| 8631 | return 0; |
| 8632 | } |
| 8633 | |
| 8634 | static __maybe_unused int pqi_resume(struct pci_dev *pci_dev) |
| 8635 | { |
| 8636 | int rc; |
| 8637 | struct pqi_ctrl_info *ctrl_info; |
| 8638 | |
| 8639 | ctrl_info = pci_get_drvdata(pci_dev); |
| 8640 | |
| 8641 | if (pci_dev->current_state != PCI_D0) { |
| 8642 | ctrl_info->max_hw_queue_index = 0; |
| 8643 | pqi_free_interrupts(ctrl_info); |
| 8644 | pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX); |
| 8645 | rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler, |
| 8646 | IRQF_SHARED, DRIVER_NAME_SHORT, |
| 8647 | &ctrl_info->queue_groups[0]); |
| 8648 | if (rc) { |
| 8649 | dev_err(&ctrl_info->pci_dev->dev, |
| 8650 | "irq %u init failed with error %d\n", |
| 8651 | pci_dev->irq, rc); |
| 8652 | return rc; |
| 8653 | } |
| 8654 | pqi_ctrl_unblock_device_reset(ctrl_info); |
| 8655 | pqi_ctrl_unblock_requests(ctrl_info); |
| 8656 | pqi_scsi_unblock_requests(ctrl_info); |
| 8657 | pqi_ctrl_unblock_scan(ctrl_info); |
| 8658 | return 0; |
| 8659 | } |
| 8660 | |
| 8661 | pci_set_power_state(pci_dev, PCI_D0); |
| 8662 | pci_restore_state(pci_dev); |
| 8663 | |
| 8664 | pqi_ctrl_unblock_device_reset(ctrl_info); |
| 8665 | pqi_ctrl_unblock_requests(ctrl_info); |
| 8666 | pqi_scsi_unblock_requests(ctrl_info); |
| 8667 | pqi_ctrl_unblock_scan(ctrl_info); |
| 8668 | |
| 8669 | return pqi_ctrl_init_resume(ctrl_info); |
| 8670 | } |
| 8671 | |
| 8672 | /* Define the PCI IDs for the controllers that we support. */ |
| 8673 | static const struct pci_device_id pqi_pci_id_table[] = { |
| 8674 | { |
| 8675 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8676 | 0x105b, 0x1211) |
| 8677 | }, |
| 8678 | { |
| 8679 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8680 | 0x105b, 0x1321) |
| 8681 | }, |
| 8682 | { |
| 8683 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8684 | 0x152d, 0x8a22) |
| 8685 | }, |
| 8686 | { |
| 8687 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8688 | 0x152d, 0x8a23) |
| 8689 | }, |
| 8690 | { |
| 8691 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8692 | 0x152d, 0x8a24) |
| 8693 | }, |
| 8694 | { |
| 8695 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8696 | 0x152d, 0x8a36) |
| 8697 | }, |
| 8698 | { |
| 8699 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8700 | 0x152d, 0x8a37) |
| 8701 | }, |
| 8702 | { |
| 8703 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8704 | 0x193d, 0x8460) |
| 8705 | }, |
| 8706 | { |
| 8707 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8708 | 0x193d, 0x1104) |
| 8709 | }, |
| 8710 | { |
| 8711 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8712 | 0x193d, 0x1105) |
| 8713 | }, |
| 8714 | { |
| 8715 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8716 | 0x193d, 0x1106) |
| 8717 | }, |
| 8718 | { |
| 8719 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8720 | 0x193d, 0x1107) |
| 8721 | }, |
| 8722 | { |
| 8723 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8724 | 0x193d, 0x8460) |
| 8725 | }, |
| 8726 | { |
| 8727 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8728 | 0x193d, 0x8461) |
| 8729 | }, |
| 8730 | { |
| 8731 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8732 | 0x193d, 0xc460) |
| 8733 | }, |
| 8734 | { |
| 8735 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8736 | 0x193d, 0xc461) |
| 8737 | }, |
| 8738 | { |
| 8739 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8740 | 0x193d, 0xf460) |
| 8741 | }, |
| 8742 | { |
| 8743 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8744 | 0x193d, 0xf461) |
| 8745 | }, |
| 8746 | { |
| 8747 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8748 | 0x1bd4, 0x0045) |
| 8749 | }, |
| 8750 | { |
| 8751 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8752 | 0x1bd4, 0x0046) |
| 8753 | }, |
| 8754 | { |
| 8755 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8756 | 0x1bd4, 0x0047) |
| 8757 | }, |
| 8758 | { |
| 8759 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8760 | 0x1bd4, 0x0048) |
| 8761 | }, |
| 8762 | { |
| 8763 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8764 | 0x1bd4, 0x004a) |
| 8765 | }, |
| 8766 | { |
| 8767 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8768 | 0x1bd4, 0x004b) |
| 8769 | }, |
| 8770 | { |
| 8771 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8772 | 0x1bd4, 0x004c) |
| 8773 | }, |
| 8774 | { |
| 8775 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8776 | 0x1bd4, 0x004f) |
| 8777 | }, |
| 8778 | { |
| 8779 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8780 | 0x1bd4, 0x0051) |
| 8781 | }, |
| 8782 | { |
| 8783 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8784 | 0x1bd4, 0x0052) |
| 8785 | }, |
| 8786 | { |
| 8787 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8788 | 0x1bd4, 0x0053) |
| 8789 | }, |
| 8790 | { |
| 8791 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8792 | 0x1bd4, 0x0054) |
| 8793 | }, |
| 8794 | { |
| 8795 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8796 | 0x19e5, 0xd227) |
| 8797 | }, |
| 8798 | { |
| 8799 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8800 | 0x19e5, 0xd228) |
| 8801 | }, |
| 8802 | { |
| 8803 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8804 | 0x19e5, 0xd229) |
| 8805 | }, |
| 8806 | { |
| 8807 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8808 | 0x19e5, 0xd22a) |
| 8809 | }, |
| 8810 | { |
| 8811 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8812 | 0x19e5, 0xd22b) |
| 8813 | }, |
| 8814 | { |
| 8815 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8816 | 0x19e5, 0xd22c) |
| 8817 | }, |
| 8818 | { |
| 8819 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8820 | PCI_VENDOR_ID_ADAPTEC2, 0x0110) |
| 8821 | }, |
| 8822 | { |
| 8823 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8824 | PCI_VENDOR_ID_ADAPTEC2, 0x0608) |
| 8825 | }, |
| 8826 | { |
| 8827 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8828 | PCI_VENDOR_ID_ADAPTEC2, 0x0800) |
| 8829 | }, |
| 8830 | { |
| 8831 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8832 | PCI_VENDOR_ID_ADAPTEC2, 0x0801) |
| 8833 | }, |
| 8834 | { |
| 8835 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8836 | PCI_VENDOR_ID_ADAPTEC2, 0x0802) |
| 8837 | }, |
| 8838 | { |
| 8839 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8840 | PCI_VENDOR_ID_ADAPTEC2, 0x0803) |
| 8841 | }, |
| 8842 | { |
| 8843 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8844 | PCI_VENDOR_ID_ADAPTEC2, 0x0804) |
| 8845 | }, |
| 8846 | { |
| 8847 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8848 | PCI_VENDOR_ID_ADAPTEC2, 0x0805) |
| 8849 | }, |
| 8850 | { |
| 8851 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8852 | PCI_VENDOR_ID_ADAPTEC2, 0x0806) |
| 8853 | }, |
| 8854 | { |
| 8855 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8856 | PCI_VENDOR_ID_ADAPTEC2, 0x0807) |
| 8857 | }, |
| 8858 | { |
| 8859 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8860 | PCI_VENDOR_ID_ADAPTEC2, 0x0808) |
| 8861 | }, |
| 8862 | { |
| 8863 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8864 | PCI_VENDOR_ID_ADAPTEC2, 0x0809) |
| 8865 | }, |
| 8866 | { |
| 8867 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8868 | PCI_VENDOR_ID_ADAPTEC2, 0x080a) |
| 8869 | }, |
| 8870 | { |
| 8871 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8872 | PCI_VENDOR_ID_ADAPTEC2, 0x0900) |
| 8873 | }, |
| 8874 | { |
| 8875 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8876 | PCI_VENDOR_ID_ADAPTEC2, 0x0901) |
| 8877 | }, |
| 8878 | { |
| 8879 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8880 | PCI_VENDOR_ID_ADAPTEC2, 0x0902) |
| 8881 | }, |
| 8882 | { |
| 8883 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8884 | PCI_VENDOR_ID_ADAPTEC2, 0x0903) |
| 8885 | }, |
| 8886 | { |
| 8887 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8888 | PCI_VENDOR_ID_ADAPTEC2, 0x0904) |
| 8889 | }, |
| 8890 | { |
| 8891 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8892 | PCI_VENDOR_ID_ADAPTEC2, 0x0905) |
| 8893 | }, |
| 8894 | { |
| 8895 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8896 | PCI_VENDOR_ID_ADAPTEC2, 0x0906) |
| 8897 | }, |
| 8898 | { |
| 8899 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8900 | PCI_VENDOR_ID_ADAPTEC2, 0x0907) |
| 8901 | }, |
| 8902 | { |
| 8903 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8904 | PCI_VENDOR_ID_ADAPTEC2, 0x0908) |
| 8905 | }, |
| 8906 | { |
| 8907 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8908 | PCI_VENDOR_ID_ADAPTEC2, 0x090a) |
| 8909 | }, |
| 8910 | { |
| 8911 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8912 | PCI_VENDOR_ID_ADAPTEC2, 0x1200) |
| 8913 | }, |
| 8914 | { |
| 8915 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8916 | PCI_VENDOR_ID_ADAPTEC2, 0x1201) |
| 8917 | }, |
| 8918 | { |
| 8919 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8920 | PCI_VENDOR_ID_ADAPTEC2, 0x1202) |
| 8921 | }, |
| 8922 | { |
| 8923 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8924 | PCI_VENDOR_ID_ADAPTEC2, 0x1280) |
| 8925 | }, |
| 8926 | { |
| 8927 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8928 | PCI_VENDOR_ID_ADAPTEC2, 0x1281) |
| 8929 | }, |
| 8930 | { |
| 8931 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8932 | PCI_VENDOR_ID_ADAPTEC2, 0x1282) |
| 8933 | }, |
| 8934 | { |
| 8935 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8936 | PCI_VENDOR_ID_ADAPTEC2, 0x1300) |
| 8937 | }, |
| 8938 | { |
| 8939 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8940 | PCI_VENDOR_ID_ADAPTEC2, 0x1301) |
| 8941 | }, |
| 8942 | { |
| 8943 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8944 | PCI_VENDOR_ID_ADAPTEC2, 0x1302) |
| 8945 | }, |
| 8946 | { |
| 8947 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8948 | PCI_VENDOR_ID_ADAPTEC2, 0x1303) |
| 8949 | }, |
| 8950 | { |
| 8951 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8952 | PCI_VENDOR_ID_ADAPTEC2, 0x1380) |
| 8953 | }, |
| 8954 | { |
| 8955 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8956 | PCI_VENDOR_ID_ADAPTEC2, 0x1400) |
| 8957 | }, |
| 8958 | { |
| 8959 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8960 | PCI_VENDOR_ID_ADAPTEC2, 0x1402) |
| 8961 | }, |
| 8962 | { |
| 8963 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8964 | PCI_VENDOR_ID_ADAPTEC2, 0x1410) |
| 8965 | }, |
| 8966 | { |
| 8967 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8968 | PCI_VENDOR_ID_ADAPTEC2, 0x1411) |
| 8969 | }, |
| 8970 | { |
| 8971 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8972 | PCI_VENDOR_ID_ADAPTEC2, 0x1412) |
| 8973 | }, |
| 8974 | { |
| 8975 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8976 | PCI_VENDOR_ID_ADAPTEC2, 0x1420) |
| 8977 | }, |
| 8978 | { |
| 8979 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8980 | PCI_VENDOR_ID_ADAPTEC2, 0x1430) |
| 8981 | }, |
| 8982 | { |
| 8983 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8984 | PCI_VENDOR_ID_ADAPTEC2, 0x1440) |
| 8985 | }, |
| 8986 | { |
| 8987 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8988 | PCI_VENDOR_ID_ADAPTEC2, 0x1441) |
| 8989 | }, |
| 8990 | { |
| 8991 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8992 | PCI_VENDOR_ID_ADAPTEC2, 0x1450) |
| 8993 | }, |
| 8994 | { |
| 8995 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 8996 | PCI_VENDOR_ID_ADAPTEC2, 0x1452) |
| 8997 | }, |
| 8998 | { |
| 8999 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9000 | PCI_VENDOR_ID_ADAPTEC2, 0x1460) |
| 9001 | }, |
| 9002 | { |
| 9003 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9004 | PCI_VENDOR_ID_ADAPTEC2, 0x1461) |
| 9005 | }, |
| 9006 | { |
| 9007 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9008 | PCI_VENDOR_ID_ADAPTEC2, 0x1462) |
| 9009 | }, |
| 9010 | { |
| 9011 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9012 | PCI_VENDOR_ID_ADAPTEC2, 0x1470) |
| 9013 | }, |
| 9014 | { |
| 9015 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9016 | PCI_VENDOR_ID_ADAPTEC2, 0x1471) |
| 9017 | }, |
| 9018 | { |
| 9019 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9020 | PCI_VENDOR_ID_ADAPTEC2, 0x1472) |
| 9021 | }, |
| 9022 | { |
| 9023 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9024 | PCI_VENDOR_ID_ADAPTEC2, 0x1480) |
| 9025 | }, |
| 9026 | { |
| 9027 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9028 | PCI_VENDOR_ID_ADAPTEC2, 0x1490) |
| 9029 | }, |
| 9030 | { |
| 9031 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9032 | PCI_VENDOR_ID_ADAPTEC2, 0x1491) |
| 9033 | }, |
| 9034 | { |
| 9035 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9036 | PCI_VENDOR_ID_ADAPTEC2, 0x14a0) |
| 9037 | }, |
| 9038 | { |
| 9039 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9040 | PCI_VENDOR_ID_ADAPTEC2, 0x14a1) |
| 9041 | }, |
| 9042 | { |
| 9043 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9044 | PCI_VENDOR_ID_ADAPTEC2, 0x14b0) |
| 9045 | }, |
| 9046 | { |
| 9047 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9048 | PCI_VENDOR_ID_ADAPTEC2, 0x14b1) |
| 9049 | }, |
| 9050 | { |
| 9051 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9052 | PCI_VENDOR_ID_ADAPTEC2, 0x14c0) |
| 9053 | }, |
| 9054 | { |
| 9055 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9056 | PCI_VENDOR_ID_ADAPTEC2, 0x14c1) |
| 9057 | }, |
| 9058 | { |
| 9059 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9060 | PCI_VENDOR_ID_ADAPTEC2, 0x14d0) |
| 9061 | }, |
| 9062 | { |
| 9063 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9064 | PCI_VENDOR_ID_ADAPTEC2, 0x14e0) |
| 9065 | }, |
| 9066 | { |
| 9067 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9068 | PCI_VENDOR_ID_ADAPTEC2, 0x14f0) |
| 9069 | }, |
| 9070 | { |
| 9071 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9072 | PCI_VENDOR_ID_ADVANTECH, 0x8312) |
| 9073 | }, |
| 9074 | { |
| 9075 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9076 | PCI_VENDOR_ID_DELL, 0x1fe0) |
| 9077 | }, |
| 9078 | { |
| 9079 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9080 | PCI_VENDOR_ID_HP, 0x0600) |
| 9081 | }, |
| 9082 | { |
| 9083 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9084 | PCI_VENDOR_ID_HP, 0x0601) |
| 9085 | }, |
| 9086 | { |
| 9087 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9088 | PCI_VENDOR_ID_HP, 0x0602) |
| 9089 | }, |
| 9090 | { |
| 9091 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9092 | PCI_VENDOR_ID_HP, 0x0603) |
| 9093 | }, |
| 9094 | { |
| 9095 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9096 | PCI_VENDOR_ID_HP, 0x0609) |
| 9097 | }, |
| 9098 | { |
| 9099 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9100 | PCI_VENDOR_ID_HP, 0x0650) |
| 9101 | }, |
| 9102 | { |
| 9103 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9104 | PCI_VENDOR_ID_HP, 0x0651) |
| 9105 | }, |
| 9106 | { |
| 9107 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9108 | PCI_VENDOR_ID_HP, 0x0652) |
| 9109 | }, |
| 9110 | { |
| 9111 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9112 | PCI_VENDOR_ID_HP, 0x0653) |
| 9113 | }, |
| 9114 | { |
| 9115 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9116 | PCI_VENDOR_ID_HP, 0x0654) |
| 9117 | }, |
| 9118 | { |
| 9119 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9120 | PCI_VENDOR_ID_HP, 0x0655) |
| 9121 | }, |
| 9122 | { |
| 9123 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9124 | PCI_VENDOR_ID_HP, 0x0700) |
| 9125 | }, |
| 9126 | { |
| 9127 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9128 | PCI_VENDOR_ID_HP, 0x0701) |
| 9129 | }, |
| 9130 | { |
| 9131 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9132 | PCI_VENDOR_ID_HP, 0x1001) |
| 9133 | }, |
| 9134 | { |
| 9135 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9136 | PCI_VENDOR_ID_HP, 0x1002) |
| 9137 | }, |
| 9138 | { |
| 9139 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9140 | PCI_VENDOR_ID_HP, 0x1100) |
| 9141 | }, |
| 9142 | { |
| 9143 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9144 | PCI_VENDOR_ID_HP, 0x1101) |
| 9145 | }, |
| 9146 | { |
| 9147 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9148 | 0x1590, 0x0294) |
| 9149 | }, |
| 9150 | { |
| 9151 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9152 | 0x1590, 0x02db) |
| 9153 | }, |
| 9154 | { |
| 9155 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9156 | 0x1590, 0x02dc) |
| 9157 | }, |
| 9158 | { |
| 9159 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9160 | 0x1590, 0x032e) |
| 9161 | }, |
| 9162 | { |
| 9163 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9164 | 0x1d8d, 0x0800) |
| 9165 | }, |
| 9166 | { |
| 9167 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9168 | 0x1d8d, 0x0908) |
| 9169 | }, |
| 9170 | { |
| 9171 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9172 | 0x1d8d, 0x0806) |
| 9173 | }, |
| 9174 | { |
| 9175 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9176 | 0x1d8d, 0x0916) |
| 9177 | }, |
| 9178 | { |
| 9179 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9180 | PCI_VENDOR_ID_GIGABYTE, 0x1000) |
| 9181 | }, |
| 9182 | { |
| 9183 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
| 9184 | PCI_ANY_ID, PCI_ANY_ID) |
| 9185 | }, |
| 9186 | { 0 } |
| 9187 | }; |
| 9188 | |
| 9189 | MODULE_DEVICE_TABLE(pci, pqi_pci_id_table); |
| 9190 | |
| 9191 | static struct pci_driver pqi_pci_driver = { |
| 9192 | .name = DRIVER_NAME_SHORT, |
| 9193 | .id_table = pqi_pci_id_table, |
| 9194 | .probe = pqi_pci_probe, |
| 9195 | .remove = pqi_pci_remove, |
| 9196 | .shutdown = pqi_shutdown, |
| 9197 | #if defined(CONFIG_PM) |
| 9198 | .suspend = pqi_suspend, |
| 9199 | .resume = pqi_resume, |
| 9200 | #endif |
| 9201 | }; |
| 9202 | |
| 9203 | static int __init pqi_init(void) |
| 9204 | { |
| 9205 | int rc; |
| 9206 | |
| 9207 | pr_info(DRIVER_NAME "\n"); |
| 9208 | |
| 9209 | pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions); |
| 9210 | if (!pqi_sas_transport_template) |
| 9211 | return -ENODEV; |
| 9212 | |
| 9213 | pqi_process_module_params(); |
| 9214 | |
| 9215 | rc = pci_register_driver(&pqi_pci_driver); |
| 9216 | if (rc) |
| 9217 | sas_release_transport(pqi_sas_transport_template); |
| 9218 | |
| 9219 | return rc; |
| 9220 | } |
| 9221 | |
| 9222 | static void __exit pqi_cleanup(void) |
| 9223 | { |
| 9224 | pci_unregister_driver(&pqi_pci_driver); |
| 9225 | sas_release_transport(pqi_sas_transport_template); |
| 9226 | } |
| 9227 | |
| 9228 | module_init(pqi_init); |
| 9229 | module_exit(pqi_cleanup); |
| 9230 | |
| 9231 | static void __attribute__((unused)) verify_structures(void) |
| 9232 | { |
| 9233 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
| 9234 | sis_host_to_ctrl_doorbell) != 0x20); |
| 9235 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
| 9236 | sis_interrupt_mask) != 0x34); |
| 9237 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
| 9238 | sis_ctrl_to_host_doorbell) != 0x9c); |
| 9239 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
| 9240 | sis_ctrl_to_host_doorbell_clear) != 0xa0); |
| 9241 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
| 9242 | sis_driver_scratch) != 0xb0); |
| 9243 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
| 9244 | sis_product_identifier) != 0xb4); |
| 9245 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
| 9246 | sis_firmware_status) != 0xbc); |
| 9247 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
| 9248 | sis_mailbox) != 0x1000); |
| 9249 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
| 9250 | pqi_registers) != 0x4000); |
| 9251 | |
| 9252 | BUILD_BUG_ON(offsetof(struct pqi_iu_header, |
| 9253 | iu_type) != 0x0); |
| 9254 | BUILD_BUG_ON(offsetof(struct pqi_iu_header, |
| 9255 | iu_length) != 0x2); |
| 9256 | BUILD_BUG_ON(offsetof(struct pqi_iu_header, |
| 9257 | response_queue_id) != 0x4); |
| 9258 | BUILD_BUG_ON(offsetof(struct pqi_iu_header, |
| 9259 | driver_flags) != 0x6); |
| 9260 | BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); |
| 9261 | |
| 9262 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, |
| 9263 | status) != 0x0); |
| 9264 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, |
| 9265 | service_response) != 0x1); |
| 9266 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, |
| 9267 | data_present) != 0x2); |
| 9268 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, |
| 9269 | reserved) != 0x3); |
| 9270 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, |
| 9271 | residual_count) != 0x4); |
| 9272 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, |
| 9273 | data_length) != 0x8); |
| 9274 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, |
| 9275 | reserved1) != 0xa); |
| 9276 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, |
| 9277 | data) != 0xc); |
| 9278 | BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c); |
| 9279 | |
| 9280 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
| 9281 | data_in_result) != 0x0); |
| 9282 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
| 9283 | data_out_result) != 0x1); |
| 9284 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
| 9285 | reserved) != 0x2); |
| 9286 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
| 9287 | status) != 0x5); |
| 9288 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
| 9289 | status_qualifier) != 0x6); |
| 9290 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
| 9291 | sense_data_length) != 0x8); |
| 9292 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
| 9293 | response_data_length) != 0xa); |
| 9294 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
| 9295 | data_in_transferred) != 0xc); |
| 9296 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
| 9297 | data_out_transferred) != 0x10); |
| 9298 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, |
| 9299 | data) != 0x14); |
| 9300 | BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114); |
| 9301 | |
| 9302 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9303 | signature) != 0x0); |
| 9304 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9305 | function_and_status_code) != 0x8); |
| 9306 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9307 | max_admin_iq_elements) != 0x10); |
| 9308 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9309 | max_admin_oq_elements) != 0x11); |
| 9310 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9311 | admin_iq_element_length) != 0x12); |
| 9312 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9313 | admin_oq_element_length) != 0x13); |
| 9314 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9315 | max_reset_timeout) != 0x14); |
| 9316 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9317 | legacy_intx_status) != 0x18); |
| 9318 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9319 | legacy_intx_mask_set) != 0x1c); |
| 9320 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9321 | legacy_intx_mask_clear) != 0x20); |
| 9322 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9323 | device_status) != 0x40); |
| 9324 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9325 | admin_iq_pi_offset) != 0x48); |
| 9326 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9327 | admin_oq_ci_offset) != 0x50); |
| 9328 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9329 | admin_iq_element_array_addr) != 0x58); |
| 9330 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9331 | admin_oq_element_array_addr) != 0x60); |
| 9332 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9333 | admin_iq_ci_addr) != 0x68); |
| 9334 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9335 | admin_oq_pi_addr) != 0x70); |
| 9336 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9337 | admin_iq_num_elements) != 0x78); |
| 9338 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9339 | admin_oq_num_elements) != 0x79); |
| 9340 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9341 | admin_queue_int_msg_num) != 0x7a); |
| 9342 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9343 | device_error) != 0x80); |
| 9344 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9345 | error_details) != 0x88); |
| 9346 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9347 | device_reset) != 0x90); |
| 9348 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, |
| 9349 | power_action) != 0x94); |
| 9350 | BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100); |
| 9351 | |
| 9352 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9353 | header.iu_type) != 0); |
| 9354 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9355 | header.iu_length) != 2); |
| 9356 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9357 | header.driver_flags) != 6); |
| 9358 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9359 | request_id) != 8); |
| 9360 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9361 | function_code) != 10); |
| 9362 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9363 | data.report_device_capability.buffer_length) != 44); |
| 9364 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9365 | data.report_device_capability.sg_descriptor) != 48); |
| 9366 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9367 | data.create_operational_iq.queue_id) != 12); |
| 9368 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9369 | data.create_operational_iq.element_array_addr) != 16); |
| 9370 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9371 | data.create_operational_iq.ci_addr) != 24); |
| 9372 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9373 | data.create_operational_iq.num_elements) != 32); |
| 9374 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9375 | data.create_operational_iq.element_length) != 34); |
| 9376 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9377 | data.create_operational_iq.queue_protocol) != 36); |
| 9378 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9379 | data.create_operational_oq.queue_id) != 12); |
| 9380 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9381 | data.create_operational_oq.element_array_addr) != 16); |
| 9382 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9383 | data.create_operational_oq.pi_addr) != 24); |
| 9384 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9385 | data.create_operational_oq.num_elements) != 32); |
| 9386 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9387 | data.create_operational_oq.element_length) != 34); |
| 9388 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9389 | data.create_operational_oq.queue_protocol) != 36); |
| 9390 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9391 | data.create_operational_oq.int_msg_num) != 40); |
| 9392 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9393 | data.create_operational_oq.coalescing_count) != 42); |
| 9394 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9395 | data.create_operational_oq.min_coalescing_time) != 44); |
| 9396 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9397 | data.create_operational_oq.max_coalescing_time) != 48); |
| 9398 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, |
| 9399 | data.delete_operational_queue.queue_id) != 12); |
| 9400 | BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); |
| 9401 | BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, |
| 9402 | data.create_operational_iq) != 64 - 11); |
| 9403 | BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, |
| 9404 | data.create_operational_oq) != 64 - 11); |
| 9405 | BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, |
| 9406 | data.delete_operational_queue) != 64 - 11); |
| 9407 | |
| 9408 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
| 9409 | header.iu_type) != 0); |
| 9410 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
| 9411 | header.iu_length) != 2); |
| 9412 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
| 9413 | header.driver_flags) != 6); |
| 9414 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
| 9415 | request_id) != 8); |
| 9416 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
| 9417 | function_code) != 10); |
| 9418 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
| 9419 | status) != 11); |
| 9420 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
| 9421 | data.create_operational_iq.status_descriptor) != 12); |
| 9422 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
| 9423 | data.create_operational_iq.iq_pi_offset) != 16); |
| 9424 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
| 9425 | data.create_operational_oq.status_descriptor) != 12); |
| 9426 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, |
| 9427 | data.create_operational_oq.oq_ci_offset) != 16); |
| 9428 | BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64); |
| 9429 | |
| 9430 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
| 9431 | header.iu_type) != 0); |
| 9432 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
| 9433 | header.iu_length) != 2); |
| 9434 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
| 9435 | header.response_queue_id) != 4); |
| 9436 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
| 9437 | header.driver_flags) != 6); |
| 9438 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
| 9439 | request_id) != 8); |
| 9440 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
| 9441 | nexus_id) != 10); |
| 9442 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
| 9443 | buffer_length) != 12); |
| 9444 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
| 9445 | lun_number) != 16); |
| 9446 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
| 9447 | protocol_specific) != 24); |
| 9448 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
| 9449 | error_index) != 27); |
| 9450 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
| 9451 | cdb) != 32); |
| 9452 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
| 9453 | timeout) != 60); |
| 9454 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
| 9455 | sg_descriptors) != 64); |
| 9456 | BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != |
| 9457 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
| 9458 | |
| 9459 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
| 9460 | header.iu_type) != 0); |
| 9461 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
| 9462 | header.iu_length) != 2); |
| 9463 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
| 9464 | header.response_queue_id) != 4); |
| 9465 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
| 9466 | header.driver_flags) != 6); |
| 9467 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
| 9468 | request_id) != 8); |
| 9469 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
| 9470 | nexus_id) != 12); |
| 9471 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
| 9472 | buffer_length) != 16); |
| 9473 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
| 9474 | data_encryption_key_index) != 22); |
| 9475 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
| 9476 | encrypt_tweak_lower) != 24); |
| 9477 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
| 9478 | encrypt_tweak_upper) != 28); |
| 9479 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
| 9480 | cdb) != 32); |
| 9481 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
| 9482 | error_index) != 48); |
| 9483 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
| 9484 | num_sg_descriptors) != 50); |
| 9485 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
| 9486 | cdb_length) != 51); |
| 9487 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
| 9488 | lun_number) != 52); |
| 9489 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, |
| 9490 | sg_descriptors) != 64); |
| 9491 | BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != |
| 9492 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
| 9493 | |
| 9494 | BUILD_BUG_ON(offsetof(struct pqi_io_response, |
| 9495 | header.iu_type) != 0); |
| 9496 | BUILD_BUG_ON(offsetof(struct pqi_io_response, |
| 9497 | header.iu_length) != 2); |
| 9498 | BUILD_BUG_ON(offsetof(struct pqi_io_response, |
| 9499 | request_id) != 8); |
| 9500 | BUILD_BUG_ON(offsetof(struct pqi_io_response, |
| 9501 | error_index) != 10); |
| 9502 | |
| 9503 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
| 9504 | header.iu_type) != 0); |
| 9505 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
| 9506 | header.iu_length) != 2); |
| 9507 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
| 9508 | header.response_queue_id) != 4); |
| 9509 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
| 9510 | request_id) != 8); |
| 9511 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
| 9512 | data.report_event_configuration.buffer_length) != 12); |
| 9513 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
| 9514 | data.report_event_configuration.sg_descriptors) != 16); |
| 9515 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
| 9516 | data.set_event_configuration.global_event_oq_id) != 10); |
| 9517 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
| 9518 | data.set_event_configuration.buffer_length) != 12); |
| 9519 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, |
| 9520 | data.set_event_configuration.sg_descriptors) != 16); |
| 9521 | |
| 9522 | BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, |
| 9523 | max_inbound_iu_length) != 6); |
| 9524 | BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, |
| 9525 | max_outbound_iu_length) != 14); |
| 9526 | BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16); |
| 9527 | |
| 9528 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
| 9529 | data_length) != 0); |
| 9530 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
| 9531 | iq_arbitration_priority_support_bitmask) != 8); |
| 9532 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
| 9533 | maximum_aw_a) != 9); |
| 9534 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
| 9535 | maximum_aw_b) != 10); |
| 9536 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
| 9537 | maximum_aw_c) != 11); |
| 9538 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
| 9539 | max_inbound_queues) != 16); |
| 9540 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
| 9541 | max_elements_per_iq) != 18); |
| 9542 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
| 9543 | max_iq_element_length) != 24); |
| 9544 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
| 9545 | min_iq_element_length) != 26); |
| 9546 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
| 9547 | max_outbound_queues) != 30); |
| 9548 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
| 9549 | max_elements_per_oq) != 32); |
| 9550 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
| 9551 | intr_coalescing_time_granularity) != 34); |
| 9552 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
| 9553 | max_oq_element_length) != 36); |
| 9554 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
| 9555 | min_oq_element_length) != 38); |
| 9556 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, |
| 9557 | iu_layer_descriptors) != 64); |
| 9558 | BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576); |
| 9559 | |
| 9560 | BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, |
| 9561 | event_type) != 0); |
| 9562 | BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, |
| 9563 | oq_id) != 2); |
| 9564 | BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4); |
| 9565 | |
| 9566 | BUILD_BUG_ON(offsetof(struct pqi_event_config, |
| 9567 | num_event_descriptors) != 2); |
| 9568 | BUILD_BUG_ON(offsetof(struct pqi_event_config, |
| 9569 | descriptors) != 4); |
| 9570 | |
| 9571 | BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != |
| 9572 | ARRAY_SIZE(pqi_supported_event_types)); |
| 9573 | |
| 9574 | BUILD_BUG_ON(offsetof(struct pqi_event_response, |
| 9575 | header.iu_type) != 0); |
| 9576 | BUILD_BUG_ON(offsetof(struct pqi_event_response, |
| 9577 | header.iu_length) != 2); |
| 9578 | BUILD_BUG_ON(offsetof(struct pqi_event_response, |
| 9579 | event_type) != 8); |
| 9580 | BUILD_BUG_ON(offsetof(struct pqi_event_response, |
| 9581 | event_id) != 10); |
| 9582 | BUILD_BUG_ON(offsetof(struct pqi_event_response, |
| 9583 | additional_event_id) != 12); |
| 9584 | BUILD_BUG_ON(offsetof(struct pqi_event_response, |
| 9585 | data) != 16); |
| 9586 | BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32); |
| 9587 | |
| 9588 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, |
| 9589 | header.iu_type) != 0); |
| 9590 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, |
| 9591 | header.iu_length) != 2); |
| 9592 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, |
| 9593 | event_type) != 8); |
| 9594 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, |
| 9595 | event_id) != 10); |
| 9596 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, |
| 9597 | additional_event_id) != 12); |
| 9598 | BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16); |
| 9599 | |
| 9600 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
| 9601 | header.iu_type) != 0); |
| 9602 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
| 9603 | header.iu_length) != 2); |
| 9604 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
| 9605 | request_id) != 8); |
| 9606 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
| 9607 | nexus_id) != 10); |
| 9608 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
| 9609 | timeout) != 14); |
| 9610 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
| 9611 | lun_number) != 16); |
| 9612 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
| 9613 | protocol_specific) != 24); |
| 9614 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
| 9615 | outbound_queue_id_to_manage) != 26); |
| 9616 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
| 9617 | request_id_to_manage) != 28); |
| 9618 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, |
| 9619 | task_management_function) != 30); |
| 9620 | BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32); |
| 9621 | |
| 9622 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, |
| 9623 | header.iu_type) != 0); |
| 9624 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, |
| 9625 | header.iu_length) != 2); |
| 9626 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, |
| 9627 | request_id) != 8); |
| 9628 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, |
| 9629 | nexus_id) != 10); |
| 9630 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, |
| 9631 | additional_response_info) != 12); |
| 9632 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, |
| 9633 | response_code) != 15); |
| 9634 | BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16); |
| 9635 | |
| 9636 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
| 9637 | configured_logical_drive_count) != 0); |
| 9638 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
| 9639 | configuration_signature) != 1); |
| 9640 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
| 9641 | firmware_version_short) != 5); |
| 9642 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
| 9643 | extended_logical_unit_count) != 154); |
| 9644 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
| 9645 | firmware_build_number) != 190); |
| 9646 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
| 9647 | vendor_id) != 200); |
| 9648 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
| 9649 | product_id) != 208); |
| 9650 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
| 9651 | extra_controller_flags) != 286); |
| 9652 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
| 9653 | controller_mode) != 292); |
| 9654 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
| 9655 | spare_part_number) != 293); |
| 9656 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
| 9657 | firmware_version_long) != 325); |
| 9658 | |
| 9659 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, |
| 9660 | phys_bay_in_box) != 115); |
| 9661 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, |
| 9662 | device_type) != 120); |
| 9663 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, |
| 9664 | redundant_path_present_map) != 1736); |
| 9665 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, |
| 9666 | active_path_number) != 1738); |
| 9667 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, |
| 9668 | alternate_paths_phys_connector) != 1739); |
| 9669 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, |
| 9670 | alternate_paths_phys_box_on_port) != 1755); |
| 9671 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, |
| 9672 | current_queue_depth_limit) != 1796); |
| 9673 | BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560); |
| 9674 | |
| 9675 | BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4); |
| 9676 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, |
| 9677 | page_code) != 0); |
| 9678 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, |
| 9679 | subpage_code) != 1); |
| 9680 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, |
| 9681 | buffer_length) != 2); |
| 9682 | |
| 9683 | BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4); |
| 9684 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, |
| 9685 | page_code) != 0); |
| 9686 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, |
| 9687 | subpage_code) != 1); |
| 9688 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, |
| 9689 | page_length) != 2); |
| 9690 | |
| 9691 | BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage) |
| 9692 | != 18); |
| 9693 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
| 9694 | header) != 0); |
| 9695 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
| 9696 | firmware_read_support) != 4); |
| 9697 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
| 9698 | driver_read_support) != 5); |
| 9699 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
| 9700 | firmware_write_support) != 6); |
| 9701 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
| 9702 | driver_write_support) != 7); |
| 9703 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
| 9704 | max_transfer_encrypted_sas_sata) != 8); |
| 9705 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
| 9706 | max_transfer_encrypted_nvme) != 10); |
| 9707 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
| 9708 | max_write_raid_5_6) != 12); |
| 9709 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
| 9710 | max_write_raid_1_10_2drive) != 14); |
| 9711 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, |
| 9712 | max_write_raid_1_10_3drive) != 16); |
| 9713 | |
| 9714 | BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255); |
| 9715 | BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255); |
| 9716 | BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % |
| 9717 | PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); |
| 9718 | BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH % |
| 9719 | PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); |
| 9720 | BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560); |
| 9721 | BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH % |
| 9722 | PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); |
| 9723 | BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560); |
| 9724 | BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH % |
| 9725 | PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); |
| 9726 | |
| 9727 | BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS); |
| 9728 | BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= |
| 9729 | PQI_MAX_OUTSTANDING_REQUESTS_KDUMP); |
| 9730 | } |