// SPDX-License-Identifier: GPL-2.0
/*
 * driver for Microsemi PQI-based storage controllers
 * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
 * Copyright (c) 2016-2018 Microsemi Corporation
 * Copyright (c) 2016 PMC-Sierra, Inc.
 *
 * Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.2.16-012"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		2
#define DRIVER_RELEASE		16
#define DRIVER_REVISION		12

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
	u32 bytes_requested);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_secs);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

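/*
 * Example usage, assuming the standard module-parameter interface:
 *
 *	modprobe smartpqi lockup_action=reboot
 *
 * or at runtime:
 *
 *	echo panic > /sys/module/smartpqi/parameters/lockup_action
 */
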
static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first,
	"Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep,
	"Hide the virtual SEP for direct attached drives.");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_device_reset = true;
}

static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_device_reset;
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

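/*
 * Wait until the controller stops blocking new requests. Returns the
 * number of milliseconds left of the caller's timeout budget (or
 * timeout_msecs unchanged when NO_TIMEOUT was passed), so the caller
 * can charge the time spent waiting against its own deadline.
 */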
static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = true;
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = false;
}

static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_ofa;
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
	return device->in_remove;
}

static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_shutdown = true;
}

static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_shutdown;
}

static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;
	if (pqi_ctrl_in_ofa(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_work_sync(&ctrl_info->event_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->soft_reset_status)
		return 0;

	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
	u8 clear)
{
	u8 status;

	if (!ctrl_info->soft_reset_status)
		return;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~clear;
	writeb(status, ctrl_info->soft_reset_status);
}

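/*
 * Map a single kernel buffer for DMA and describe it in one PQI SG
 * descriptor, marked as the last (and only) element with CISS_SG_LAST.
 * A NULL or zero-length buffer, or a DMA_NONE direction, is treated as
 * "nothing to map" and succeeds without touching the descriptor.
 */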
static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

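/*
 * Build a RAID-path request IU for the given BMIC/CISS command: fill in
 * the IU header, LUN, and a command-specific CDB, derive the DMA
 * direction from the command, and map the caller's buffer into the
 * first SG descriptor. On success the caller owns the mapping and must
 * release it with pqi_pci_unmap() once the request completes.
 */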
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
		else
			cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

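/*
 * Grab a free I/O request structure from the controller's preallocated
 * pool. next_io_request_slot is only a starting hint and is read and
 * written without locking ("benignly racy"); the atomic refcount on
 * each slot is what actually arbitrates ownership, so the worst a race
 * can cost is an extra trip around the pool.
 */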
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, scsi3addr, buffer,
		buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		error_info, timeout_msecs);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL, NO_TIMEOUT);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info, NO_TIMEOUT);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA		(1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

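/*
 * Send the current host time to the controller, BCD-encoded as
 * hour/minute/second, a reserved byte, then month, day, century, and
 * year-within-century, bracketed by the same "<HW>" ... "DW" "ZZ" tag
 * layout used for the driver-version wellness update above.
 */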
static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
		buffer_length);
}

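/*
 * Issue a CISS report (physical or logical) LUNs command twice: first
 * with just a header to learn the list length, then with a buffer sized
 * for the full list. If the list grew between the two reads, the
 * full-size read is retried with the larger length until a consistent
 * snapshot is obtained.
 */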
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

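/*
 * Fetch both the physical and the logical LUN lists, then append one
 * zeroed entry to the logical list so the controller LUN itself can be
 * carried alongside the logical devices during the device scan.
 */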
static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

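/*
 * Derive bus/target/lun from the 8-byte SCSI address: the controller
 * LUN and logical volumes encode their IDs in the first 4 bytes
 * (external RAID volumes use a separate bus and carry the target in
 * bits 16-29), while non-controller physical devices are deferred to
 * the SAS transport layer for addressing.
 */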
static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

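/*
 * Sanity-check a RAID map returned by the controller: it must be at
 * least large enough to hold the header, and the layout map count must
 * match what the volume's RAID level requires (2 for RAID-1, 3 for
 * RAID-1 ADM, and a non-zero row size for RAID 50/60).
 */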
static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

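/*
 * Read the RAID map for a volume in up to two passes: fetch a
 * fixed-size map first, and if the structure_size reported by the
 * controller is larger, re-read with a buffer of the reported size.
 * The second read must come back with the same size it advertised.
 */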
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map),
		0, NULL, NO_TIMEOUT);
	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {

		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size,
			0, NULL, NO_TIMEOUT);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"requested %u bytes, received %u bytes\n",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			rc = -EINVAL;
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS		4
#define RAID_BYPASS_CONFIGURED		0x1
#define RAID_BYPASS_ENABLED		0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0)
		device->raid_bypass_enabled = true;

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return rc;
	}

	scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
	scsi_sanitize_inquiry_string(&id_phys->model[8], 16);

	memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
	memcpy(device->model, &id_phys->model[8], sizeof(device->model));

	device->box_index = id_phys->box_index;
	device->phys_box_on_bus = id_phys->phys_box_on_bus;
	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;

	return 0;
}

static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	if (device->is_expander_smp_device)
		return 0;

	if (pqi_is_logical_device(device))
		rc = pqi_get_logical_device_info(ctrl_info, device);
	else
		rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);

	return rc;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

#define PQI_PENDING_IO_TIMEOUT_SECS	20

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	pqi_device_remove_start(device);

	rc = pqi_device_wait_for_pending_io(ctrl_info, device, PQI_PENDING_IO_TIMEOUT_SECS);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun,
			atomic_read(&device->scsi_cmds_outstanding));

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
		if (device->bus == bus && device->target == target && device->lun == lun)
			return device;

	return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return "Enclosure SMP ";

	return scsi_device_type(device->devtype);
}

#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx", device->sas_address);

	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		pqi_device_type(device),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}

/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION ||
		existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) &&
		new_device->volume_status == CISS_LV_OK)
		existing_device->rescan = true;

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->is_expander_smp_device =
		new_device->is_expander_smp_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	existing_device->box_index = new_device->box_index;
	existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
	existing_device->phy_connected_dev_type =
		new_device->phy_connected_dev_type;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_to_mirror = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;
	existing_device->raid_bypass_configured =
		new_device->raid_bypass_configured;
	existing_device->raid_bypass_enabled =
		new_device->raid_bypass_enabled;
	existing_device->device_offline = false;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}

static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return device->sas_port != NULL;

	return device->sdev != NULL;
}

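/*
 * Reconcile the driver's internal device list with the list returned by
 * the latest scan: every existing device is presumed gone until it is
 * matched against a newly reported device, after which the leftovers
 * are queued for removal and the unmatched new devices for addition.
 * Only the list surgery itself happens under the device list spinlock;
 * the actual add/remove/rescan work is deferred until it is dropped.
 */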
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	LIST_HEAD(add_list);
	LIST_HEAD(delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock. That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del_init(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	if (pqi_ctrl_in_ofa(ctrl_info))
		pqi_ctrl_ofa_done(ctrl_info);

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		}
		list_del(&device->delete_list_entry);
		if (pqi_is_device_added(device)) {
			pqi_remove_device(ctrl_info, device);
		} else {
			if (!device->volume_offline)
				pqi_dev_info(ctrl_info, "removed", device);
			pqi_free_device(device);
		}
	}

	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev) {
			if (device->queue_depth !=
				device->advertised_queue_depth) {
				device->advertised_queue_depth = device->queue_depth;
				scsi_change_queue_depth(device->sdev,
					device->advertised_queue_depth);
			}
			if (device->rescan) {
				scsi_rescan_device(&device->sdev->sdev_gendev);
				device->rescan = false;
			}
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (!pqi_is_device_added(device)) {
			rc = pqi_add_device(ctrl_info, device);
			if (rc == 0) {
				pqi_dev_info(ctrl_info, "added", device);
			} else {
6c223761
KB
1893 dev_warn(&ctrl_info->pci_dev->dev,
1894 "scsi %d:%d:%d:%d addition failed, device not added\n",
1895 ctrl_info->scsi_host->host_no,
1896 device->bus, device->target,
1897 device->lun);
1898 pqi_fixup_botched_add(ctrl_info, device);
6c223761
KB
1899 }
1900 }
6c223761
KB
1901 }
1902}

static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	/*
	 * Only support the HBA controller itself as a RAID
	 * controller. If it's a RAID controller other than
	 * the HBA itself (an external RAID controller, for
	 * example), we don't support it.
	 */
	if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
	    !pqi_is_hba_lunid(device->scsi3addr))
		return false;

	return true;
}

static inline bool pqi_skip_device(u8 *scsi3addr)
{
	/* Ignore all masked devices. */
	if (MASKED_DEVICE(scsi3addr))
		return true;

	return false;
}

static inline void pqi_mask_device(u8 *scsi3addr)
{
	scsi3addr[3] |= 0xc0;
}
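
/*
 * Note: setting the top two bits of byte 3 of the LUN address marks the
 * device as masked; pqi_skip_device() above then hides it from the OS on
 * subsequent scans via the MASKED_DEVICE() test on that same byte. (The
 * exact bit test lives in MASKED_DEVICE() in the header; this description
 * is inferred from the two helpers here.)
 */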

static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
{
	switch (device->device_type) {
	case SA_DEVICE_TYPE_SAS:
	case SA_DEVICE_TYPE_EXPANDER_SMP:
	case SA_DEVICE_TYPE_SES:
		return true;
	}

	return false;
}

static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device ||
		!pqi_skip_device(device->scsi3addr);
}

static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	int rc;
	LIST_HEAD(new_device_list_head);
	struct report_phys_lun_extended *physdev_list = NULL;
	struct report_log_lun_extended *logdev_list = NULL;
	struct report_phys_lun_extended_entry *phys_lun_ext_entry;
	struct report_log_lun_extended_entry *log_lun_ext_entry;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 num_physicals;
	u32 num_logicals;
	struct pqi_scsi_dev **new_device_list = NULL;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	unsigned int num_new_devices;
	unsigned int num_valid_devices;
	bool is_physical_device;
	u8 *scsi3addr;
	unsigned int physical_index;
	unsigned int logical_index;
	static char *out_of_memory_msg =
		"failed to allocate memory, device discovery stopped";

	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
	if (rc)
		goto out;

	if (physdev_list)
		num_physicals =
			get_unaligned_be32(&physdev_list->header.list_length)
			/ sizeof(physdev_list->lun_entries[0]);
	else
		num_physicals = 0;

	if (logdev_list)
		num_logicals =
			get_unaligned_be32(&logdev_list->header.list_length)
			/ sizeof(logdev_list->lun_entries[0]);
	else
		num_logicals = 0;

	if (num_physicals) {
		/*
		 * We need this buffer for calls to pqi_get_physical_disk_info()
		 * below. We allocate it here instead of inside
		 * pqi_get_physical_disk_info() because it's a fairly large
		 * buffer.
		 */
		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
		if (!id_phys) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}

		if (pqi_hide_vsep) {
			for (i = num_physicals - 1; i >= 0; i--) {
				phys_lun_ext_entry =
					&physdev_list->lun_entries[i];
				if (CISS_GET_DRIVE_NUMBER(
					phys_lun_ext_entry->lunid) ==
						PQI_VSEP_CISS_BTL) {
					pqi_mask_device(
						phys_lun_ext_entry->lunid);
					break;
				}
			}
		}
	}

	num_new_devices = num_physicals + num_logicals;

	new_device_list = kmalloc_array(num_new_devices,
		sizeof(*new_device_list),
		GFP_KERNEL);
	if (!new_device_list) {
		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_new_devices; i++) {
		device = kzalloc(sizeof(*device), GFP_KERNEL);
		if (!device) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
		list_add_tail(&device->new_device_list_entry,
			&new_device_list_head);
	}

	device = NULL;
	num_valid_devices = 0;
	physical_index = 0;
	logical_index = 0;

	for (i = 0; i < num_new_devices; i++) {

		if ((!pqi_expose_ld_first && i < num_physicals) ||
			(pqi_expose_ld_first && i >= num_logicals)) {
			is_physical_device = true;
			phys_lun_ext_entry =
				&physdev_list->lun_entries[physical_index++];
			log_lun_ext_entry = NULL;
			scsi3addr = phys_lun_ext_entry->lunid;
		} else {
			is_physical_device = false;
			phys_lun_ext_entry = NULL;
			log_lun_ext_entry =
				&logdev_list->lun_entries[logical_index++];
			scsi3addr = log_lun_ext_entry->lunid;
		}

		if (is_physical_device && pqi_skip_device(scsi3addr))
			continue;

		if (device)
			device = list_next_entry(device, new_device_list_entry);
		else
			device = list_first_entry(&new_device_list_head,
				struct pqi_scsi_dev, new_device_list_entry);

		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
		device->is_physical_device = is_physical_device;
		if (is_physical_device) {
			device->device_type = phys_lun_ext_entry->device_type;
			if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
				device->is_expander_smp_device = true;
		} else {
			device->is_external_raid_device =
				pqi_is_external_raid_addr(scsi3addr);
		}

		if (!pqi_is_supported_device(device))
			continue;

		/* Gather information about the device. */
		rc = pqi_get_device_info(ctrl_info, device, id_phys);
		if (rc == -ENOMEM) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			goto out;
		}
		if (rc) {
			if (device->is_physical_device)
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping physical device %016llx\n",
					get_unaligned_be64(
						&phys_lun_ext_entry->wwid));
			else
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping logical device %08x%08x\n",
					*((u32 *)&device->scsi3addr),
					*((u32 *)&device->scsi3addr[4]));
			rc = 0;
			continue;
		}

		pqi_assign_bus_target_lun(device);

		if (device->is_physical_device) {
			device->wwid = phys_lun_ext_entry->wwid;
			if ((phys_lun_ext_entry->device_flags &
				CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
				phys_lun_ext_entry->aio_handle) {
				device->aio_enabled = true;
				device->aio_handle =
					phys_lun_ext_entry->aio_handle;
			}
		} else {
			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
				sizeof(device->volume_id));
		}

		if (pqi_is_device_with_sas_address(device))
			device->sas_address = get_unaligned_be64(&device->wwid);

		new_device_list[num_valid_devices++] = device;
	}

	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);

out:
	list_for_each_entry_safe(device, next, &new_device_list_head,
		new_device_list_entry) {
		if (device->keep_device)
			continue;
		list_del(&device->new_device_list_entry);
		pqi_free_device(device);
	}

	kfree(new_device_list);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);

	return rc;
}

static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int rc = 0;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	if (!mutex_trylock(&ctrl_info->scan_mutex)) {
		pqi_schedule_rescan_worker_delayed(ctrl_info);
		rc = -EINPROGRESS;
	} else {
		rc = pqi_update_scsi_devices(ctrl_info);
		if (rc)
			pqi_schedule_rescan_worker_delayed(ctrl_info);
		mutex_unlock(&ctrl_info->scan_mutex);
	}

	return rc;
}

static void pqi_scan_start(struct Scsi_Host *shost)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(shost);
	if (pqi_ctrl_in_ofa(ctrl_info))
		return;

	pqi_scan_scsi_devices(ctrl_info);
}

/* Returns TRUE if scan is finished. */
static int pqi_scan_finished(struct Scsi_Host *shost,
	unsigned long elapsed_time)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_priv(shost);

	return !mutex_is_locked(&ctrl_info->scan_mutex);
}

static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->scan_mutex);
	mutex_unlock(&ctrl_info->scan_mutex);
}

static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}

static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}
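
/*
 * The lock/unlock pairs above are intentional: the scan, LUN reset, and
 * OFA paths each hold their mutex for the entire operation, so acquiring
 * and immediately releasing it blocks the caller until the in-flight
 * operation completes. The mutex acts as a simple "wait for completion"
 * barrier without needing a separate completion object.
 */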

static inline void pqi_set_encryption_info(
	struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
	u64 first_block)
{
	u32 volume_blk_size;

	/*
	 * Set the encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is equal to the LBA.
	 * For other block sizes, tweak value is (LBA * block size) / 512.
	 */
	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;

	encryption_info->data_encryption_key_index =
		get_unaligned_le16(&raid_map->data_encryption_key_index);
	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
}
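
/*
 * Worked example (values illustrative): with a 4096-byte volume block
 * size, an I/O starting at LBA 100 yields a tweak of
 * (100 * 4096) / 512 = 800, split into encrypt_tweak_lower = 800 and
 * encrypt_tweak_upper = 0. With a 512-byte block size the tweak is
 * simply the LBA itself.
 */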

/*
 * Attempt to perform RAID bypass mapping for a logical volume I/O.
 */

#define PQI_RAID_BYPASS_INELIGIBLE	1

static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	struct raid_map *raid_map;
	bool is_write = false;
	u32 map_index;
	u64 first_block;
	u64 last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row;
	u64 last_row;
	u32 first_row_offset;
	u32 last_row_offset;
	u32 first_column;
	u32 last_column;
	u64 r0_first_row;
	u64 r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row;
	u64 r5or6_last_row;
	u32 r5or6_first_row_offset;
	u32 r5or6_last_row_offset;
	u32 r5or6_first_column;
	u32 r5or6_last_column;
	u16 data_disks_per_row;
	u32 total_disks_per_row;
	u16 layout_map_count;
	u32 stripesize;
	u16 strip_size;
	u32 first_group;
	u32 last_group;
	u32 current_group;
	u32 map_row;
	u32 aio_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_length;
	int offload_to_mirror;
	struct pqi_encryption_info *encryption_info_ptr;
	struct pqi_encryption_info encryption_info;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif

	/* Check for valid opcode, get LBA and block count. */
	switch (scmd->cmnd[0]) {
	case WRITE_6:
		is_write = true;
		fallthrough;
	case READ_6:
		first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
			(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
		block_cnt = (u32)scmd->cmnd[4];
		if (block_cnt == 0)
			block_cnt = 256;
		break;
	case WRITE_10:
		is_write = true;
		fallthrough;
	case READ_10:
		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case WRITE_12:
		is_write = true;
		fallthrough;
	case READ_12:
		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	case WRITE_16:
		is_write = true;
		fallthrough;
	case READ_16:
		first_block = get_unaligned_be64(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
		break;
	default:
		/* Process via normal I/O path. */
		return PQI_RAID_BYPASS_INELIGIBLE;
	}
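
	/*
	 * For example (illustrative values): a READ_6 CDB of
	 * 08 01 02 03 10 00 decodes above to first_block = 0x010203 = 66051
	 * and block_cnt = 0x10 = 16; a transfer length byte of 0 means
	 * 256 blocks, per the READ(6) definition.
	 */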

	/* Check for write to non-RAID-0. */
	if (is_write && device->raid_level != SA_RAID_0)
		return PQI_RAID_BYPASS_INELIGIBLE;

	if (unlikely(block_cnt == 0))
		return PQI_RAID_BYPASS_INELIGIBLE;

	last_block = first_block + block_cnt - 1;
	raid_map = device->raid_map;

	/* Check for invalid block or wraparound. */
	if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
		last_block < first_block)
		return PQI_RAID_BYPASS_INELIGIBLE;

	data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
	strip_size = get_unaligned_le16(&raid_map->strip_size);
	layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);

	/* Calculate stripe information for the request. */
	blocks_per_row = data_disks_per_row * strip_size;
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
	last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	do_div(tmpdiv, strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	do_div(tmpdiv, strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
	last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif
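
	/*
	 * Worked example (values illustrative): with strip_size = 128 and
	 * data_disks_per_row = 4, blocks_per_row = 512. An I/O at
	 * first_block = 1000 for 16 blocks lands in first_row = last_row = 1
	 * (row offsets 488..503), so first_column = last_column = 488/128 = 3
	 * and the request stays on a single drive.
	 */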

	/* If this isn't a single row/column then give to the controller. */
	if (first_row != last_row || first_column != last_column)
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Proceeding with driver mapping. */
	total_disks_per_row = data_disks_per_row +
		get_unaligned_le16(&raid_map->metadata_disks_per_row);
	map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
		get_unaligned_le16(&raid_map->row_cnt);
	map_index = (map_row * total_disks_per_row) + first_column;

	/* RAID 1 */
	if (device->raid_level == SA_RAID_1) {
		if (device->offload_to_mirror)
			map_index += data_disks_per_row;
		device->offload_to_mirror = !device->offload_to_mirror;
	} else if (device->raid_level == SA_RAID_ADM) {
		/* RAID ADM */
		/*
		 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
		 * divisible by 3.
		 */
		offload_to_mirror = device->offload_to_mirror;
		if (offload_to_mirror == 0) {
			/* use physical disk in the first mirrored group. */
			map_index %= data_disks_per_row;
		} else {
			do {
				/*
				 * Determine mirror group that map_index
				 * indicates.
				 */
				current_group = map_index / data_disks_per_row;

				if (offload_to_mirror != current_group) {
					if (current_group <
						layout_map_count - 1) {
						/*
						 * Select raid index from
						 * next group.
						 */
						map_index += data_disks_per_row;
						current_group++;
					} else {
						/*
						 * Select raid index from first
						 * group.
						 */
						map_index %= data_disks_per_row;
						current_group = 0;
					}
				}
			} while (offload_to_mirror != current_group);
		}

		/* Set mirror group to use next time. */
		offload_to_mirror =
			(offload_to_mirror >= layout_map_count - 1) ?
				0 : offload_to_mirror + 1;
		device->offload_to_mirror = offload_to_mirror;
		/*
		 * Avoid direct use of device->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of layout_map_count - 1.
		 */
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) && layout_map_count > 1) {
		/* RAID 50/60 */
		/* Verify first and last block are in same RAID group */
		r5or6_blocks_per_row = strip_size * data_disks_per_row;
		stripesize = r5or6_blocks_per_row * layout_map_count;
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_group = do_div(tmpdiv, stripesize);
		tmpdiv = first_group;
		do_div(tmpdiv, r5or6_blocks_per_row);
		first_group = tmpdiv;
		tmpdiv = last_block;
		last_group = do_div(tmpdiv, stripesize);
		tmpdiv = last_group;
		do_div(tmpdiv, r5or6_blocks_per_row);
		last_group = tmpdiv;
#else
		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
#endif
		if (first_group != last_group)
			return PQI_RAID_BYPASS_INELIGIBLE;

		/* Verify request is in a single row of RAID 5/6 */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		do_div(tmpdiv, stripesize);
		first_row = r5or6_first_row = r0_first_row = tmpdiv;
		tmpdiv = last_block;
		do_div(tmpdiv, stripesize);
		r5or6_last_row = r0_last_row = tmpdiv;
#else
		first_row = r5or6_first_row = r0_first_row =
			first_block / stripesize;
		r5or6_last_row = r0_last_row = last_block / stripesize;
#endif
		if (r5or6_first_row != r5or6_last_row)
			return PQI_RAID_BYPASS_INELIGIBLE;

		/* Verify request is in a single column */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = first_row_offset;
		first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
		r5or6_first_row_offset = first_row_offset;
		tmpdiv = last_block;
		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = r5or6_last_row_offset;
		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
		tmpdiv = r5or6_first_row_offset;
		do_div(tmpdiv, strip_size);
		first_column = r5or6_first_column = tmpdiv;
		tmpdiv = r5or6_last_row_offset;
		do_div(tmpdiv, strip_size);
		r5or6_last_column = tmpdiv;
#else
		first_row_offset = r5or6_first_row_offset =
			(u32)((first_block % stripesize) %
			r5or6_blocks_per_row);

		r5or6_last_row_offset =
			(u32)((last_block % stripesize) %
			r5or6_blocks_per_row);

		first_column = r5or6_first_row_offset / strip_size;
		r5or6_first_column = first_column;
		r5or6_last_column = r5or6_last_row_offset / strip_size;
#endif
		if (r5or6_first_column != r5or6_last_column)
			return PQI_RAID_BYPASS_INELIGIBLE;

		/* Request is eligible */
		map_row =
			((u32)(first_row >> raid_map->parity_rotation_shift)) %
			get_unaligned_le16(&raid_map->row_cnt);

		map_index = (first_group *
			(get_unaligned_le16(&raid_map->row_cnt) *
			total_disks_per_row)) +
			(map_row * total_disks_per_row) + first_column;
	}

	aio_handle = raid_map->disk_data[map_index].aio_handle;
	disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
		first_row * strip_size +
		(first_row_offset - first_column * strip_size);
	disk_block_cnt = block_cnt;

	/* Handle differing logical/physical block sizes. */
	if (raid_map->phys_blk_shift) {
		disk_block <<= raid_map->phys_blk_shift;
		disk_block_cnt <<= raid_map->phys_blk_shift;
	}

	if (unlikely(disk_block_cnt > 0xffff))
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Build the new CDB for the physical disk I/O. */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		put_unaligned_be64(disk_block, &cdb[2]);
		put_unaligned_be32(disk_block_cnt, &cdb[10]);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_length = 16;
	} else {
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		put_unaligned_be32((u32)disk_block, &cdb[2]);
		cdb[6] = 0;
		put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
		cdb[9] = 0;
		cdb_length = 10;
	}
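
	/*
	 * For example (illustrative values): disk_block = 0x12345678 with
	 * disk_block_cnt = 8 fits the 10-byte form and produces
	 * 28 00 12 34 56 78 00 00 08 00 (READ_10); only an LBA that does
	 * not fit in 32 bits forces the 16-byte CDB.
	 */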

	if (get_unaligned_le16(&raid_map->flags) &
		RAID_MAP_ENCRYPTION_ENABLED) {
		pqi_set_encryption_info(&encryption_info, raid_map,
			first_block);
		encryption_info_ptr = &encryption_info;
	} else {
		encryption_info_ptr = NULL;
	}

	return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
		cdb, cdb_length, queue_group, encryption_info_ptr, true);
}

#define PQI_STATUS_IDLE		0x0

#define PQI_CREATE_ADMIN_QUEUE_PAIR	1
#define PQI_DELETE_ADMIN_QUEUE_PAIR	2

#define PQI_DEVICE_STATE_POWER_ON_AND_RESET		0x0
#define PQI_DEVICE_STATE_STATUS_AVAILABLE		0x1
#define PQI_DEVICE_STATE_ALL_REGISTERS_READY		0x2
#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY		0x3
#define PQI_DEVICE_STATE_ERROR				0x4

#define PQI_MODE_READY_TIMEOUT_SECS		30
#define PQI_MODE_READY_POLL_INTERVAL_MSECS	1

static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_device_registers __iomem *pqi_registers;
	unsigned long timeout;
	u64 signature;
	u8 status;

	pqi_registers = ctrl_info->pqi_registers;
	timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;

	while (1) {
		signature = readq(&pqi_registers->signature);
		if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
			sizeof(signature)) == 0)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI signature\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	while (1) {
		status = readb(&pqi_registers->function_and_status_code);
		if (status == PQI_STATUS_IDLE)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI IDLE\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	while (1) {
		if (readl(&pqi_registers->device_status) ==
			PQI_DEVICE_STATE_ALL_REGISTERS_READY)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI all registers ready\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	return 0;
}

static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
{
	struct pqi_scsi_dev *device;

	device = io_request->scmd->device->hostdata;
	device->raid_bypass_enabled = false;
	device->aio_enabled = false;
}

static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;

	device = sdev->hostdata;
	if (device->device_offline)
		return;

	device->device_offline = true;
	ctrl_info = shost_to_hba(sdev->host);
	pqi_schedule_rescan_worker(ctrl_info);
	dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
		path, ctrl_info->scsi_host->host_no, device->bus,
		device->target, device->lun);
}

static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
{
	u8 scsi_status;
	u8 host_byte;
	struct scsi_cmnd *scmd;
	struct pqi_raid_error_info *error_info;
	size_t sense_data_length;
	int residual_count;
	int xfer_count;
	struct scsi_sense_hdr sshdr;

	scmd = io_request->scmd;
	if (!scmd)
		return;

	error_info = io_request->error_info;
	scsi_status = error_info->status;
	host_byte = DID_OK;

	switch (error_info->data_out_result) {
	case PQI_DATA_IN_OUT_GOOD:
		break;
	case PQI_DATA_IN_OUT_UNDERFLOW:
		xfer_count =
			get_unaligned_le32(&error_info->data_out_transferred);
		residual_count = scsi_bufflen(scmd) - xfer_count;
		scsi_set_resid(scmd, residual_count);
		if (xfer_count < scmd->underflow)
			host_byte = DID_SOFT_ERROR;
		break;
	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
	case PQI_DATA_IN_OUT_ABORTED:
		host_byte = DID_ABORT;
		break;
	case PQI_DATA_IN_OUT_TIMEOUT:
		host_byte = DID_TIME_OUT;
		break;
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
	case PQI_DATA_IN_OUT_ERROR:
	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
	default:
		host_byte = DID_ERROR;
		break;
	}

	sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&error_info->response_data_length);
	if (sense_data_length) {
		if (sense_data_length > sizeof(error_info->data))
			sense_data_length = sizeof(error_info->data);

		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			scsi_normalize_sense(error_info->data,
				sense_data_length, &sshdr) &&
				sshdr.sense_key == HARDWARE_ERROR &&
				sshdr.asc == 0x3e) {
			struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
			struct pqi_scsi_dev *device = scmd->device->hostdata;

			switch (sshdr.ascq) {
			case 0x1: /* LOGICAL UNIT FAILURE */
				if (printk_ratelimit())
					scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
						ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
				pqi_take_device_offline(scmd->device, "RAID");
				host_byte = DID_NO_CONNECT;
				break;

			default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
				if (printk_ratelimit())
					scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
						sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
				break;
			}
		}

		if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
			sense_data_length = SCSI_SENSE_BUFFERSIZE;
		memcpy(scmd->sense_buffer, error_info->data,
			sense_data_length);
	}

	scmd->result = scsi_status;
	set_host_byte(scmd, host_byte);
}

static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
{
	u8 scsi_status;
	u8 host_byte;
	struct scsi_cmnd *scmd;
	struct pqi_aio_error_info *error_info;
	size_t sense_data_length;
	int residual_count;
	int xfer_count;
	bool device_offline;

	scmd = io_request->scmd;
	error_info = io_request->error_info;
	host_byte = DID_OK;
	sense_data_length = 0;
	device_offline = false;

	switch (error_info->service_response) {
	case PQI_AIO_SERV_RESPONSE_COMPLETE:
		scsi_status = error_info->status;
		break;
	case PQI_AIO_SERV_RESPONSE_FAILURE:
		switch (error_info->status) {
		case PQI_AIO_STATUS_IO_ABORTED:
			scsi_status = SAM_STAT_TASK_ABORTED;
			break;
		case PQI_AIO_STATUS_UNDERRUN:
			scsi_status = SAM_STAT_GOOD;
			residual_count = get_unaligned_le32(
				&error_info->residual_count);
			scsi_set_resid(scmd, residual_count);
			xfer_count = scsi_bufflen(scmd) - residual_count;
			if (xfer_count < scmd->underflow)
				host_byte = DID_SOFT_ERROR;
			break;
		case PQI_AIO_STATUS_OVERRUN:
			scsi_status = SAM_STAT_GOOD;
			break;
		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
			pqi_aio_path_disabled(io_request);
			scsi_status = SAM_STAT_GOOD;
			io_request->status = -EAGAIN;
			break;
		case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
		case PQI_AIO_STATUS_INVALID_DEVICE:
			if (!io_request->raid_bypass) {
				device_offline = true;
				pqi_take_device_offline(scmd->device, "AIO");
				host_byte = DID_NO_CONNECT;
			}
			scsi_status = SAM_STAT_CHECK_CONDITION;
			break;
		case PQI_AIO_STATUS_IO_ERROR:
		default:
			scsi_status = SAM_STAT_CHECK_CONDITION;
			break;
		}
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
		scsi_status = SAM_STAT_GOOD;
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
	default:
		scsi_status = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (error_info->data_present) {
		sense_data_length =
			get_unaligned_le16(&error_info->data_length);
		if (sense_data_length) {
			if (sense_data_length > sizeof(error_info->data))
				sense_data_length = sizeof(error_info->data);
			if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
				sense_data_length = SCSI_SENSE_BUFFERSIZE;
			memcpy(scmd->sense_buffer, error_info->data,
				sense_data_length);
		}
	}

	if (device_offline && sense_data_length == 0)
		scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
			0x3e, 0x1);

	scmd->result = scsi_status;
	set_host_byte(scmd, host_byte);
}

static void pqi_process_io_error(unsigned int iu_type,
	struct pqi_io_request *io_request)
{
	switch (iu_type) {
	case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
		pqi_process_raid_io_error(io_request);
		break;
	case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
		pqi_process_aio_io_error(io_request);
		break;
	}
}

static int pqi_interpret_task_management_response(
	struct pqi_task_management_response *response)
{
	int rc;

	switch (response->response_code) {
	case SOP_TMF_COMPLETE:
	case SOP_TMF_FUNCTION_SUCCEEDED:
		rc = 0;
		break;
	case SOP_TMF_REJECTED:
		rc = -EAGAIN;
		break;
	default:
		rc = -EIO;
		break;
	}

	return rc;
}

static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
{
	pqi_take_ctrl_offline(ctrl_info);
}

static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
{
	int num_responses;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	struct pqi_io_request *io_request;
	struct pqi_io_response *response;
	u16 request_id;

	num_responses = 0;
	oq_ci = queue_group->oq_ci_copy;

	while (1) {
		oq_pi = readl(queue_group->oq_pi);
		if (oq_pi >= ctrl_info->num_elements_per_oq) {
			pqi_invalid_response(ctrl_info);
			dev_err(&ctrl_info->pci_dev->dev,
				"I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
				oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
			return -1;
		}
		if (oq_pi == oq_ci)
			break;

		num_responses++;
		response = queue_group->oq_element_array +
			(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);

		request_id = get_unaligned_le16(&response->request_id);
		if (request_id >= ctrl_info->max_io_slots) {
			pqi_invalid_response(ctrl_info);
			dev_err(&ctrl_info->pci_dev->dev,
				"request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
				request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
			return -1;
		}

		io_request = &ctrl_info->io_request_pool[request_id];
		if (atomic_read(&io_request->refcount) == 0) {
			pqi_invalid_response(ctrl_info);
			dev_err(&ctrl_info->pci_dev->dev,
				"request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
				request_id, oq_pi, oq_ci);
			return -1;
		}

		switch (response->header.iu_type) {
		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
			if (io_request->scmd)
				io_request->scmd->result = 0;
			fallthrough;
		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
			break;
		case PQI_RESPONSE_IU_VENDOR_GENERAL:
			io_request->status =
				get_unaligned_le16(
					&((struct pqi_vendor_general_response *)
						response)->status);
			break;
		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
			io_request->status =
				pqi_interpret_task_management_response(
					(void *)response);
			break;
		case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
			pqi_aio_path_disabled(io_request);
			io_request->status = -EAGAIN;
			break;
		case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
		case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
			io_request->error_info = ctrl_info->error_buffer +
				(get_unaligned_le16(&response->error_index) *
				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
			pqi_process_io_error(response->header.iu_type, io_request);
			break;
		default:
			pqi_invalid_response(ctrl_info);
			dev_err(&ctrl_info->pci_dev->dev,
				"unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
				response->header.iu_type, oq_pi, oq_ci);
			return -1;
		}

		io_request->io_complete_callback(io_request, io_request->context);

		/*
		 * Note that the I/O request structure CANNOT BE TOUCHED after
		 * returning from the I/O completion callback!
		 */
		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
	}

	if (num_responses) {
		queue_group->oq_ci_copy = oq_ci;
		writel(oq_ci, queue_group->oq_ci);
	}

	return num_responses;
}

static inline unsigned int pqi_num_elements_free(unsigned int pi,
	unsigned int ci, unsigned int elements_in_queue)
{
	unsigned int num_elements_used;

	if (pi >= ci)
		num_elements_used = pi - ci;
	else
		num_elements_used = elements_in_queue - ci + pi;

	return elements_in_queue - num_elements_used - 1;
}
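
/*
 * Worked example (values illustrative): for a 16-element queue with
 * pi = 3 and ci = 12, the used count wraps: 16 - 12 + 3 = 7 elements in
 * flight, leaving 16 - 7 - 1 = 8 free. One slot is always held back so
 * that a full queue never reaches pi == ci, which is reserved to mean
 * "empty".
 */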

static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event_acknowledge_request *iu, size_t iu_length)
{
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;
	unsigned long flags;
	void *next_element;
	struct pqi_queue_group *queue_group;

	queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
	put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);

	while (1) {
		spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);

		iq_pi = queue_group->iq_pi_copy[RAID_PATH];
		iq_ci = readl(queue_group->iq_ci[RAID_PATH]);

		if (pqi_num_elements_free(iq_pi, iq_ci,
			ctrl_info->num_elements_per_iq))
			break;

		spin_unlock_irqrestore(
			&queue_group->submit_lock[RAID_PATH], flags);

		if (pqi_ctrl_offline(ctrl_info))
			return;
	}

	next_element = queue_group->iq_element_array[RAID_PATH] +
		(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	memcpy(next_element, iu, iu_length);

	iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
	queue_group->iq_pi_copy[RAID_PATH] = iq_pi;

	/*
	 * This write notifies the controller that an IU is available to be
	 * processed.
	 */
	writel(iq_pi, queue_group->iq_pi[RAID_PATH]);

	spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
}

static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event *event)
{
	struct pqi_event_acknowledge_request request;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	request.event_type = event->event_type;
	request.event_id = event->event_id;
	request.additional_event_id = event->additional_event_id;

	pqi_send_event_ack(ctrl_info, &request, sizeof(request));
}

#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS		30
#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS	1

static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
	struct pqi_ctrl_info *ctrl_info)
{
	unsigned long timeout;
	u8 status;

	timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;

	while (1) {
		status = pqi_read_soft_reset_status(ctrl_info);
		if (status & PQI_SOFT_RESET_INITIATE)
			return RESET_INITIATE_DRIVER;

		if (status & PQI_SOFT_RESET_ABORT)
			return RESET_ABORT;

		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for soft reset status\n");
			return RESET_TIMEDOUT;
		}

		if (!sis_is_firmware_running(ctrl_info))
			return RESET_NORESPONSE;

		ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
	}
}

static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
	enum pqi_soft_reset_status reset_status)
{
	int rc;

	switch (reset_status) {
	case RESET_INITIATE_DRIVER:
	case RESET_TIMEDOUT:
		dev_info(&ctrl_info->pci_dev->dev,
			"resetting controller %u\n", ctrl_info->ctrl_id);
		sis_soft_reset(ctrl_info);
		fallthrough;
	case RESET_INITIATE_FIRMWARE:
		rc = pqi_ofa_ctrl_restart(ctrl_info);
		pqi_ofa_free_host_buffer(ctrl_info);
		dev_info(&ctrl_info->pci_dev->dev,
			"Online Firmware Activation for controller %u: %s\n",
			ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
		break;
	case RESET_ABORT:
		pqi_ofa_ctrl_unquiesce(ctrl_info);
		dev_info(&ctrl_info->pci_dev->dev,
			"Online Firmware Activation for controller %u: %s\n",
			ctrl_info->ctrl_id, "ABORTED");
		break;
	case RESET_NORESPONSE:
		pqi_ofa_free_host_buffer(ctrl_info);
		pqi_take_ctrl_offline(ctrl_info);
		break;
	}
}

static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event *event)
{
	u16 event_id;
	enum pqi_soft_reset_status status;

	event_id = get_unaligned_le16(&event->event_id);

	mutex_lock(&ctrl_info->ofa_mutex);

	if (event_id == PQI_EVENT_OFA_QUIESCE) {
		dev_info(&ctrl_info->pci_dev->dev,
			"received Online Firmware Activation quiesce event for controller %u\n",
			ctrl_info->ctrl_id);
		pqi_ofa_ctrl_quiesce(ctrl_info);
		pqi_acknowledge_event(ctrl_info, event);
		if (ctrl_info->soft_reset_handshake_supported) {
			status = pqi_poll_for_soft_reset_status(ctrl_info);
			pqi_process_soft_reset(ctrl_info, status);
		} else {
			pqi_process_soft_reset(ctrl_info,
				RESET_INITIATE_FIRMWARE);
		}
	} else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
		pqi_acknowledge_event(ctrl_info, event);
		pqi_ofa_setup_host_buffer(ctrl_info,
			le32_to_cpu(event->ofa_bytes_requested));
		pqi_ofa_host_memory_update(ctrl_info);
	} else if (event_id == PQI_EVENT_OFA_CANCELLED) {
		pqi_ofa_free_host_buffer(ctrl_info);
		pqi_acknowledge_event(ctrl_info, event);
		dev_info(&ctrl_info->pci_dev->dev,
			"Online Firmware Activation (%u) cancel reason: %u\n",
			ctrl_info->ctrl_id, event->ofa_cancel_reason);
	}

	mutex_unlock(&ctrl_info->ofa_mutex);
}

static void pqi_event_worker(struct work_struct *work)
{
	unsigned int i;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_event *event;

	ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);

	pqi_ctrl_busy(ctrl_info);
	pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
	if (pqi_ctrl_offline(ctrl_info))
		goto out;

	pqi_schedule_rescan_worker_delayed(ctrl_info);

	event = ctrl_info->events;
	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
		if (event->pending) {
			event->pending = false;
			if (event->event_type == PQI_EVENT_TYPE_OFA) {
				pqi_ctrl_unbusy(ctrl_info);
				pqi_ofa_process_event(ctrl_info, event);
				return;
			}
			pqi_acknowledge_event(ctrl_info, event);
		}
		event++;
	}

out:
	pqi_ctrl_unbusy(ctrl_info);
}

#define PQI_HEARTBEAT_TIMER_INTERVAL	(10 * PQI_HZ)

static void pqi_heartbeat_timer_handler(struct timer_list *t)
{
	int num_interrupts;
	u32 heartbeat_count;
	struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
		heartbeat_timer);

	pqi_check_ctrl_health(ctrl_info);
	if (pqi_ctrl_offline(ctrl_info))
		return;

	num_interrupts = atomic_read(&ctrl_info->num_interrupts);
	heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);

	if (num_interrupts == ctrl_info->previous_num_interrupts) {
		if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
			dev_err(&ctrl_info->pci_dev->dev,
				"no heartbeat detected - last heartbeat count: %u\n",
				heartbeat_count);
			pqi_take_ctrl_offline(ctrl_info);
			return;
		}
	} else {
		ctrl_info->previous_num_interrupts = num_interrupts;
	}

	ctrl_info->previous_heartbeat_count = heartbeat_count;
	mod_timer(&ctrl_info->heartbeat_timer,
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
}
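
/*
 * The handler above declares the controller dead only when both liveness
 * signals stall across one 10-second interval: no new interrupts were
 * delivered AND the firmware heartbeat counter stopped advancing. An idle
 * but healthy controller keeps incrementing the heartbeat counter, so it
 * is never taken offline merely for lack of I/O.
 */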

static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return;

	ctrl_info->previous_num_interrupts =
		atomic_read(&ctrl_info->num_interrupts);
	ctrl_info->previous_heartbeat_count =
		pqi_read_heartbeat_counter(ctrl_info);

	ctrl_info->heartbeat_timer.expires =
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
	add_timer(&ctrl_info->heartbeat_timer);
}

static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
	del_timer_sync(&ctrl_info->heartbeat_timer);
}

static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
	int index;

	for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
		if (event_type == pqi_supported_event_types[index])
			return index;

	return -1;
}

static inline bool pqi_is_supported_event(unsigned int event_type)
{
	return pqi_event_type_to_event_index(event_type) != -1;
}

static void pqi_ofa_capture_event_payload(struct pqi_event *event,
	struct pqi_event_response *response)
{
	u16 event_id;

	event_id = get_unaligned_le16(&event->event_id);

	if (event->event_type == PQI_EVENT_TYPE_OFA) {
		if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
			event->ofa_bytes_requested =
				response->data.ofa_memory_allocation.bytes_requested;
		} else if (event_id == PQI_EVENT_OFA_CANCELLED) {
			event->ofa_cancel_reason =
				response->data.ofa_cancelled.reason;
		}
	}
}

static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
{
	int num_events;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	struct pqi_event_queue *event_queue;
	struct pqi_event_response *response;
	struct pqi_event *event;
	int event_index;

	event_queue = &ctrl_info->event_queue;
	num_events = 0;
	oq_ci = event_queue->oq_ci_copy;

	while (1) {
		oq_pi = readl(event_queue->oq_pi);
		if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
			pqi_invalid_response(ctrl_info);
			dev_err(&ctrl_info->pci_dev->dev,
				"event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
				oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
			return -1;
		}

		if (oq_pi == oq_ci)
			break;

		num_events++;
		response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);

		event_index =
			pqi_event_type_to_event_index(response->event_type);

		if (event_index >= 0 && response->request_acknowledge) {
			event = &ctrl_info->events[event_index];
			event->pending = true;
			event->event_type = response->event_type;
			event->event_id = response->event_id;
			event->additional_event_id = response->additional_event_id;
			if (event->event_type == PQI_EVENT_TYPE_OFA)
				pqi_ofa_capture_event_payload(event, response);
		}

		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
	}

	if (num_events) {
		event_queue->oq_ci_copy = oq_ci;
		writel(oq_ci, event_queue->oq_ci);
		schedule_work(&ctrl_info->event_work);
	}

	return num_events;
}

#define PQI_LEGACY_INTX_MASK	0x1

static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
	bool enable_intx)
{
	u32 intx_mask;
	struct pqi_device_registers __iomem *pqi_registers;
	volatile void __iomem *register_addr;

	pqi_registers = ctrl_info->pqi_registers;

	if (enable_intx)
		register_addr = &pqi_registers->legacy_intx_mask_clear;
	else
		register_addr = &pqi_registers->legacy_intx_mask_set;

	intx_mask = readl(register_addr);
	intx_mask |= PQI_LEGACY_INTX_MASK;
	writel(intx_mask, register_addr);
}

static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_irq_mode new_mode)
{
	switch (ctrl_info->irq_mode) {
	case IRQ_MODE_MSIX:
		switch (new_mode) {
		case IRQ_MODE_MSIX:
			break;
		case IRQ_MODE_INTX:
			pqi_configure_legacy_intx(ctrl_info, true);
			sis_enable_intx(ctrl_info);
			break;
		case IRQ_MODE_NONE:
			break;
		}
		break;
	case IRQ_MODE_INTX:
		switch (new_mode) {
		case IRQ_MODE_MSIX:
			pqi_configure_legacy_intx(ctrl_info, false);
			sis_enable_msix(ctrl_info);
			break;
		case IRQ_MODE_INTX:
			break;
		case IRQ_MODE_NONE:
			pqi_configure_legacy_intx(ctrl_info, false);
			break;
		}
		break;
	case IRQ_MODE_NONE:
		switch (new_mode) {
		case IRQ_MODE_MSIX:
			sis_enable_msix(ctrl_info);
			break;
		case IRQ_MODE_INTX:
			pqi_configure_legacy_intx(ctrl_info, true);
			sis_enable_intx(ctrl_info);
			break;
		case IRQ_MODE_NONE:
			break;
		}
		break;
	}

	ctrl_info->irq_mode = new_mode;
}
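
/*
 * Summary of the transitions above (rows: current mode, columns: new mode):
 *
 *         MSIX                  INTX                     NONE
 * MSIX    no-op                 unmask INTx + enable     no-op
 * INTX    mask INTx + MSI-X     no-op                    mask INTx
 * NONE    enable MSI-X          unmask INTx + enable     no-op
 *
 * The legacy INTx mask register is only touched when legacy interrupts
 * are entering or leaving the picture.
 */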

#define PQI_LEGACY_INTX_PENDING		0x1

static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
{
	bool valid_irq;
	u32 intx_status;

	switch (ctrl_info->irq_mode) {
	case IRQ_MODE_MSIX:
		valid_irq = true;
		break;
	case IRQ_MODE_INTX:
		intx_status =
			readl(&ctrl_info->pqi_registers->legacy_intx_status);
		if (intx_status & PQI_LEGACY_INTX_PENDING)
			valid_irq = true;
		else
			valid_irq = false;
		break;
	case IRQ_MODE_NONE:
	default:
		valid_irq = false;
		break;
	}

	return valid_irq;
}

static irqreturn_t pqi_irq_handler(int irq, void *data)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_queue_group *queue_group;
	int num_io_responses_handled;
	int num_events_handled;

	queue_group = data;
	ctrl_info = queue_group->ctrl_info;

	if (!pqi_is_valid_irq(ctrl_info))
		return IRQ_NONE;

	num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
	if (num_io_responses_handled < 0)
		goto out;

	if (irq == ctrl_info->event_irq) {
		num_events_handled = pqi_process_event_intr(ctrl_info);
		if (num_events_handled < 0)
			goto out;
	} else {
		num_events_handled = 0;
	}

	if (num_io_responses_handled + num_events_handled > 0)
		atomic_inc(&ctrl_info->num_interrupts);

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
	pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);

out:
	return IRQ_HANDLED;
}

static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
{
	struct pci_dev *pci_dev = ctrl_info->pci_dev;
	int i;
	int rc;

	ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);

	for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
		rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
			DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
		if (rc) {
			dev_err(&pci_dev->dev,
				"irq %u init failed with error %d\n",
				pci_irq_vector(pci_dev, i), rc);
			return rc;
		}
		ctrl_info->num_msix_vectors_initialized++;
	}

	return 0;
}

static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
{
	int i;

	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
		free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
			&ctrl_info->queue_groups[i]);

	ctrl_info->num_msix_vectors_initialized = 0;
}

static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	int num_vectors_enabled;

	num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
		PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
		PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (num_vectors_enabled < 0) {
		dev_err(&ctrl_info->pci_dev->dev,
			"MSI-X init failed with error %d\n",
			num_vectors_enabled);
		return num_vectors_enabled;
	}

	ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
	ctrl_info->irq_mode = IRQ_MODE_MSIX;
	return 0;
}

static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->num_msix_vectors_enabled) {
		pci_free_irq_vectors(ctrl_info->pci_dev);
		ctrl_info->num_msix_vectors_enabled = 0;
	}
}

static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	size_t alloc_length;
	size_t element_array_length_per_iq;
	size_t element_array_length_per_oq;
	void *element_array;
	void __iomem *next_queue_index;
	void *aligned_pointer;
	unsigned int num_inbound_queues;
	unsigned int num_outbound_queues;
	unsigned int num_queue_indexes;
	struct pqi_queue_group *queue_group;

	element_array_length_per_iq =
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
		ctrl_info->num_elements_per_iq;
	element_array_length_per_oq =
		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
		ctrl_info->num_elements_per_oq;
	num_inbound_queues = ctrl_info->num_queue_groups * 2;
	num_outbound_queues = ctrl_info->num_queue_groups;
	num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;

	aligned_pointer = NULL;

	for (i = 0; i < num_inbound_queues; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		aligned_pointer += element_array_length_per_iq;
	}

	for (i = 0; i < num_outbound_queues; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		aligned_pointer += element_array_length_per_oq;
	}

	aligned_pointer = PTR_ALIGN(aligned_pointer,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
		PQI_EVENT_OQ_ELEMENT_LENGTH;

	for (i = 0; i < num_queue_indexes; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		aligned_pointer += sizeof(pqi_index_t);
	}

	alloc_length = (size_t)aligned_pointer +
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3587
e1d213bd
KB
3588 alloc_length += PQI_EXTRA_SGL_MEMORY;
3589
6c223761 3590 ctrl_info->queue_memory_base =
750afb08
LC
3591 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3592 &ctrl_info->queue_memory_base_dma_handle,
3593 GFP_KERNEL);
6c223761 3594
d87d5474 3595 if (!ctrl_info->queue_memory_base)
6c223761 3596 return -ENOMEM;
6c223761
KB
3597
3598 ctrl_info->queue_memory_length = alloc_length;
3599
3600 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3601 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3602
3603 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3604 queue_group = &ctrl_info->queue_groups[i];
3605 queue_group->iq_element_array[RAID_PATH] = element_array;
3606 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3607 ctrl_info->queue_memory_base_dma_handle +
3608 (element_array - ctrl_info->queue_memory_base);
3609 element_array += element_array_length_per_iq;
3610 element_array = PTR_ALIGN(element_array,
3611 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3612 queue_group->iq_element_array[AIO_PATH] = element_array;
3613 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3614 ctrl_info->queue_memory_base_dma_handle +
3615 (element_array - ctrl_info->queue_memory_base);
3616 element_array += element_array_length_per_iq;
3617 element_array = PTR_ALIGN(element_array,
3618 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3619 }
3620
3621 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3622 queue_group = &ctrl_info->queue_groups[i];
3623 queue_group->oq_element_array = element_array;
3624 queue_group->oq_element_array_bus_addr =
3625 ctrl_info->queue_memory_base_dma_handle +
3626 (element_array - ctrl_info->queue_memory_base);
3627 element_array += element_array_length_per_oq;
3628 element_array = PTR_ALIGN(element_array,
3629 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3630 }
3631
3632 ctrl_info->event_queue.oq_element_array = element_array;
3633 ctrl_info->event_queue.oq_element_array_bus_addr =
3634 ctrl_info->queue_memory_base_dma_handle +
3635 (element_array - ctrl_info->queue_memory_base);
3636 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3637 PQI_EVENT_OQ_ELEMENT_LENGTH;
3638
dac12fbc 3639 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
6c223761
KB
3640 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3641
3642 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3643 queue_group = &ctrl_info->queue_groups[i];
3644 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3645 queue_group->iq_ci_bus_addr[RAID_PATH] =
3646 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
3647 (next_queue_index -
3648 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
3649 next_queue_index += sizeof(pqi_index_t);
3650 next_queue_index = PTR_ALIGN(next_queue_index,
3651 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3652 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3653 queue_group->iq_ci_bus_addr[AIO_PATH] =
3654 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
3655 (next_queue_index -
3656 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
3657 next_queue_index += sizeof(pqi_index_t);
3658 next_queue_index = PTR_ALIGN(next_queue_index,
3659 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3660 queue_group->oq_pi = next_queue_index;
3661 queue_group->oq_pi_bus_addr =
3662 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
3663 (next_queue_index -
3664 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
3665 next_queue_index += sizeof(pqi_index_t);
3666 next_queue_index = PTR_ALIGN(next_queue_index,
3667 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3668 }
3669
3670 ctrl_info->event_queue.oq_pi = next_queue_index;
3671 ctrl_info->event_queue.oq_pi_bus_addr =
3672 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
3673 (next_queue_index -
3674 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
3675
3676 return 0;
3677}
3678
static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
	u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;

	/*
	 * Initialize the backpointers to the controller structure in
	 * each operational queue group structure.
	 */
	for (i = 0; i < ctrl_info->num_queue_groups; i++)
		ctrl_info->queue_groups[i].ctrl_info = ctrl_info;

	/*
	 * Assign IDs to all operational queues. Note that the IDs
	 * assigned to operational IQs are independent of the IDs
	 * assigned to operational OQs.
	 */
	ctrl_info->event_queue.oq_id = next_oq_id++;
	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
		ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
		ctrl_info->queue_groups[i].oq_id = next_oq_id++;
	}

	/*
	 * Assign MSI-X table entry indexes to all queues. Note that the
	 * interrupt for the event queue is shared with the first queue group.
	 */
	ctrl_info->event_queue.int_msg_num = 0;
	for (i = 0; i < ctrl_info->num_queue_groups; i++)
		ctrl_info->queue_groups[i].int_msg_num = i;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
	}
}

static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
{
	size_t alloc_length;
	struct pqi_admin_queues_aligned *admin_queues_aligned;
	struct pqi_admin_queues *admin_queues;

	alloc_length = sizeof(struct pqi_admin_queues_aligned) +
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;

	ctrl_info->admin_queue_memory_base =
		dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
			&ctrl_info->admin_queue_memory_base_dma_handle,
			GFP_KERNEL);

	if (!ctrl_info->admin_queue_memory_base)
		return -ENOMEM;

	ctrl_info->admin_queue_memory_length = alloc_length;

	admin_queues = &ctrl_info->admin_queues;
	admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	admin_queues->iq_element_array =
		&admin_queues_aligned->iq_element_array;
	admin_queues->oq_element_array =
		&admin_queues_aligned->oq_element_array;
	admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
	admin_queues->oq_pi =
		(pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;

	admin_queues->iq_element_array_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		(admin_queues->iq_element_array -
		ctrl_info->admin_queue_memory_base);
	admin_queues->oq_element_array_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		(admin_queues->oq_element_array -
		ctrl_info->admin_queue_memory_base);
	admin_queues->iq_ci_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		((void *)admin_queues->iq_ci -
		ctrl_info->admin_queue_memory_base);
	admin_queues->oq_pi_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		((void __iomem *)admin_queues->oq_pi -
		(void __iomem *)ctrl_info->admin_queue_memory_base);

	return 0;
}

#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES		PQI_HZ
#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS	1

static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_device_registers __iomem *pqi_registers;
	struct pqi_admin_queues *admin_queues;
	unsigned long timeout;
	u8 status;
	u32 reg;

	pqi_registers = ctrl_info->pqi_registers;
	admin_queues = &ctrl_info->admin_queues;

	writeq((u64)admin_queues->iq_element_array_bus_addr,
		&pqi_registers->admin_iq_element_array_addr);
	writeq((u64)admin_queues->oq_element_array_bus_addr,
		&pqi_registers->admin_oq_element_array_addr);
	writeq((u64)admin_queues->iq_ci_bus_addr,
		&pqi_registers->admin_iq_ci_addr);
	writeq((u64)admin_queues->oq_pi_bus_addr,
		&pqi_registers->admin_oq_pi_addr);

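	/*
	 * The num-elements register packs three fields: the admin IQ element
	 * count in bits 0-7, the admin OQ element count in bits 8-15, and the
	 * MSI-X message number starting at bit 16.
	 */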
	reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
		(PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
		(admin_queues->int_msg_num << 16);
	writel(reg, &pqi_registers->admin_iq_num_elements);
	writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
		&pqi_registers->function_and_status_code);

	timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
	while (1) {
		status = readb(&pqi_registers->function_and_status_code);
		if (status == PQI_STATUS_IDLE)
			break;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
	}

	/*
	 * The offset registers are not initialized to the correct
	 * offsets until *after* the create admin queue pair command
	 * completes successfully.
	 */
	admin_queues->iq_pi = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		readq(&pqi_registers->admin_iq_pi_offset);
	admin_queues->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		readq(&pqi_registers->admin_oq_ci_offset);

	return 0;
}

static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_request *request)
{
	struct pqi_admin_queues *admin_queues;
	void *next_element;
	pqi_index_t iq_pi;

	admin_queues = &ctrl_info->admin_queues;
	iq_pi = admin_queues->iq_pi_copy;

	next_element = admin_queues->iq_element_array +
		(iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);

	memcpy(next_element, request, sizeof(*request));

	iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
	admin_queues->iq_pi_copy = iq_pi;

	/*
	 * This write notifies the controller that an IU is available to be
	 * processed.
	 */
	writel(iq_pi, admin_queues->iq_pi);
}

#define PQI_ADMIN_REQUEST_TIMEOUT_SECS	60

static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_response *response)
{
	struct pqi_admin_queues *admin_queues;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	unsigned long timeout;

	admin_queues = &ctrl_info->admin_queues;
	oq_ci = admin_queues->oq_ci_copy;

	timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;

	while (1) {
		oq_pi = readl(admin_queues->oq_pi);
		if (oq_pi != oq_ci)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for admin response\n");
			return -ETIMEDOUT;
		}
		if (!sis_is_firmware_running(ctrl_info))
			return -ENXIO;
		usleep_range(1000, 2000);
	}

	memcpy(response, admin_queues->oq_element_array +
		(oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));

	oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
	admin_queues->oq_ci_copy = oq_ci;
	writel(oq_ci, admin_queues->oq_ci);

	return 0;
}

static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request)
{
	struct pqi_io_request *next;
	void *next_element;
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;
	size_t iu_length;
	unsigned long flags;
	unsigned int num_elements_needed;
	unsigned int num_elements_to_end_of_queue;
	size_t copy_count;
	struct pqi_iu_header *request;

	spin_lock_irqsave(&queue_group->submit_lock[path], flags);

	if (io_request) {
		io_request->queue_group = queue_group;
		list_add_tail(&io_request->request_list_entry,
			&queue_group->request_list[path]);
	}

	iq_pi = queue_group->iq_pi_copy[path];

	list_for_each_entry_safe(io_request, next,
		&queue_group->request_list[path], request_list_entry) {

		request = io_request->iu;

		iu_length = get_unaligned_le16(&request->iu_length) +
			PQI_REQUEST_HEADER_LENGTH;
		num_elements_needed =
			DIV_ROUND_UP(iu_length,
				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

		iq_ci = readl(queue_group->iq_ci[path]);

		if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
			ctrl_info->num_elements_per_iq))
			break;

		put_unaligned_le16(queue_group->oq_id,
			&request->response_queue_id);

		next_element = queue_group->iq_element_array[path] +
			(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

		num_elements_to_end_of_queue =
			ctrl_info->num_elements_per_iq - iq_pi;

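		/*
		 * An IU that needs multiple elements may also wrap past the
		 * end of the element array; in that case it is copied in two
		 * pieces, with the tail landing back at element 0. This is
		 * the inbound spanning that pqi_validate_device_capability()
		 * insists on.
		 */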
		if (num_elements_needed <= num_elements_to_end_of_queue) {
			memcpy(next_element, request, iu_length);
		} else {
			copy_count = num_elements_to_end_of_queue *
				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
			memcpy(next_element, request, copy_count);
			memcpy(queue_group->iq_element_array[path],
				(u8 *)request + copy_count,
				iu_length - copy_count);
		}

		iq_pi = (iq_pi + num_elements_needed) %
			ctrl_info->num_elements_per_iq;

		list_del(&io_request->request_list_entry);
	}

	if (iq_pi != queue_group->iq_pi_copy[path]) {
		queue_group->iq_pi_copy[path] = iq_pi;
		/*
		 * This write notifies the controller that one or more IUs are
		 * available to be processed.
		 */
		writel(iq_pi, queue_group->iq_pi[path]);
	}

	spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
}

#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS	10

static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
	struct completion *wait)
{
	int rc;

	while (1) {
		if (wait_for_completion_io_timeout(wait,
			PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
			rc = 0;
			break;
		}

		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info)) {
			rc = -ENXIO;
			break;
		}
	}

	return rc;
}

static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct completion *waiting = context;

	complete(waiting);
}

static int pqi_process_raid_io_error_synchronous(
	struct pqi_raid_error_info *error_info)
{
	int rc = -EIO;

	switch (error_info->data_out_result) {
	case PQI_DATA_IN_OUT_GOOD:
		if (error_info->status == SAM_STAT_GOOD)
			rc = 0;
		break;
	case PQI_DATA_IN_OUT_UNDERFLOW:
		if (error_info->status == SAM_STAT_GOOD ||
			error_info->status == SAM_STAT_CHECK_CONDITION)
			rc = 0;
		break;
	case PQI_DATA_IN_OUT_ABORTED:
		rc = PQI_CMD_STATUS_ABORTED;
		break;
	}

	return rc;
}

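/*
 * Build and submit a request on the RAID path of the default queue group and
 * wait for it to complete. Concurrency is throttled by sync_request_sem, and
 * any time spent blocked on that semaphore is charged against the caller's
 * timeout.
 */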
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
	int rc = 0;
	struct pqi_io_request *io_request;
	unsigned long start_jiffies;
	unsigned long msecs_blocked;
	size_t iu_length;
	DECLARE_COMPLETION_ONSTACK(wait);

	/*
	 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
	 * are mutually exclusive.
	 */

	if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
		if (down_interruptible(&ctrl_info->sync_request_sem))
			return -ERESTARTSYS;
	} else {
		if (timeout_msecs == NO_TIMEOUT) {
			down(&ctrl_info->sync_request_sem);
		} else {
			start_jiffies = jiffies;
			if (down_timeout(&ctrl_info->sync_request_sem,
				msecs_to_jiffies(timeout_msecs)))
				return -ETIMEDOUT;
			msecs_blocked =
				jiffies_to_msecs(jiffies - start_jiffies);
			if (msecs_blocked >= timeout_msecs) {
				rc = -ETIMEDOUT;
				goto out;
			}
			timeout_msecs -= msecs_blocked;
		}
	}

	pqi_ctrl_busy(ctrl_info);
	timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
	if (timeout_msecs == 0) {
		pqi_ctrl_unbusy(ctrl_info);
		rc = -ETIMEDOUT;
		goto out;
	}

	if (pqi_ctrl_offline(ctrl_info)) {
		pqi_ctrl_unbusy(ctrl_info);
		rc = -ENXIO;
		goto out;
	}

	atomic_inc(&ctrl_info->sync_cmds_outstanding);

	io_request = pqi_alloc_io_request(ctrl_info);

	put_unaligned_le16(io_request->index,
		&(((struct pqi_raid_path_request *)request)->request_id));

	if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
		((struct pqi_raid_path_request *)request)->error_index =
			((struct pqi_raid_path_request *)request)->request_id;

	iu_length = get_unaligned_le16(&request->iu_length) +
		PQI_REQUEST_HEADER_LENGTH;
	memcpy(io_request->iu, request, iu_length);

	io_request->io_complete_callback = pqi_raid_synchronous_complete;
	io_request->context = &wait;

	pqi_start_io(ctrl_info,
		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
		io_request);

	pqi_ctrl_unbusy(ctrl_info);

	if (timeout_msecs == NO_TIMEOUT) {
		pqi_wait_for_completion_io(ctrl_info, &wait);
	} else {
		if (!wait_for_completion_io_timeout(&wait,
			msecs_to_jiffies(timeout_msecs))) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"command timed out\n");
			rc = -ETIMEDOUT;
		}
	}

	if (error_info) {
		if (io_request->error_info)
			memcpy(error_info, io_request->error_info,
				sizeof(*error_info));
		else
			memset(error_info, 0, sizeof(*error_info));
	} else if (rc == 0 && io_request->error_info) {
		rc = pqi_process_raid_io_error_synchronous(
			io_request->error_info);
	}

	pqi_free_io_request(io_request);

	atomic_dec(&ctrl_info->sync_cmds_outstanding);
out:
	up(&ctrl_info->sync_request_sem);

	return rc;
}

static int pqi_validate_admin_response(
	struct pqi_general_admin_response *response, u8 expected_function_code)
{
	if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
		return -EINVAL;

	if (get_unaligned_le16(&response->header.iu_length) !=
		PQI_GENERAL_ADMIN_IU_LENGTH)
		return -EINVAL;

	if (response->function_code != expected_function_code)
		return -EINVAL;

	if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
		return -EINVAL;

	return 0;
}

static int pqi_submit_admin_request_synchronous(
	struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_request *request,
	struct pqi_general_admin_response *response)
{
	int rc;

	pqi_submit_admin_request(ctrl_info, request);

	rc = pqi_poll_for_admin_response(ctrl_info, response);

	if (rc == 0)
		rc = pqi_validate_admin_response(response,
			request->function_code);

	return rc;
}

static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;
	struct pqi_device_capability *capability;
	struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;

	capability = kmalloc(sizeof(*capability), GFP_KERNEL);
	if (!capability)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code =
		PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
	put_unaligned_le32(sizeof(*capability),
		&request.data.report_device_capability.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor,
		capability, sizeof(*capability),
		DMA_FROM_DEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);

	pqi_pci_unmap(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor, 1,
		DMA_FROM_DEVICE);

	if (rc)
		goto out;

	if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
		rc = -EIO;
		goto out;
	}

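	/*
	 * The controller reports queue element lengths in units of 16 bytes;
	 * convert them to bytes here.
	 */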
	ctrl_info->max_inbound_queues =
		get_unaligned_le16(&capability->max_inbound_queues);
	ctrl_info->max_elements_per_iq =
		get_unaligned_le16(&capability->max_elements_per_iq);
	ctrl_info->max_iq_element_length =
		get_unaligned_le16(&capability->max_iq_element_length)
		* 16;
	ctrl_info->max_outbound_queues =
		get_unaligned_le16(&capability->max_outbound_queues);
	ctrl_info->max_elements_per_oq =
		get_unaligned_le16(&capability->max_elements_per_oq);
	ctrl_info->max_oq_element_length =
		get_unaligned_le16(&capability->max_oq_element_length)
		* 16;

	sop_iu_layer_descriptor =
		&capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];

	ctrl_info->max_inbound_iu_length_per_firmware =
		get_unaligned_le16(
			&sop_iu_layer_descriptor->max_inbound_iu_length);
	ctrl_info->inbound_spanning_supported =
		sop_iu_layer_descriptor->inbound_spanning_supported;
	ctrl_info->outbound_spanning_supported =
		sop_iu_layer_descriptor->outbound_spanning_supported;

out:
	kfree(capability);

	return rc;
}

static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->max_iq_element_length <
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. inbound queue element length of %d is less than the required length of %d\n",
			ctrl_info->max_iq_element_length,
			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (ctrl_info->max_oq_element_length <
		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. outbound queue element length of %d is less than the required length of %d\n",
			ctrl_info->max_oq_element_length,
			PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (ctrl_info->max_inbound_iu_length_per_firmware <
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. inbound IU length of %u is less than the min. required length of %d\n",
			ctrl_info->max_inbound_iu_length_per_firmware,
			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (!ctrl_info->inbound_spanning_supported) {
		dev_err(&ctrl_info->pci_dev->dev,
			"the controller does not support inbound spanning\n");
		return -EINVAL;
	}

	if (ctrl_info->outbound_spanning_supported) {
		dev_err(&ctrl_info->pci_dev->dev,
			"the controller supports outbound spanning but this driver does not\n");
		return -EINVAL;
	}

	return 0;
}

static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_event_queue *event_queue;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	event_queue = &ctrl_info->event_queue;

	/*
	 * Create OQ (Outbound Queue - device to host queue) to dedicate
	 * to events.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(event_queue->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(event_queue->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc)
		return rc;

	event_queue->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	return 0;
}

static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
	unsigned int group_number)
{
	int rc;
	struct pqi_queue_group *queue_group;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	queue_group = &ctrl_info->queue_groups[group_number];

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * RAID path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[RAID_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64(
		(u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound RAID queue\n");
		return rc;
	}

	queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * Advanced I/O (AIO) path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64(
		(u64)queue_group->iq_element_array_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound AIO queue\n");
		return rc;
	}

	queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Designate the 2nd IQ as the AIO path. By default, all IQs are
	 * assumed to be for RAID path I/O unless we change the queue's
	 * property.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.change_operational_iq_properties.queue_id);
	put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
		&request.data.change_operational_iq_properties.vendor_specific);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error changing queue property\n");
		return rc;
	}

	/*
	 * Create OQ (Outbound Queue - device to host queue).
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(queue_group->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_oq,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(queue_group->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating outbound queue\n");
		return rc;
	}

	queue_group->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	return 0;
}

static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;

	rc = pqi_create_event_queue(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating event queue\n");
		return rc;
	}

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		rc = pqi_create_queue_group(ctrl_info, i);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"error creating queue group number %u/%u\n",
				i, ctrl_info->num_queue_groups);
			return rc;
		}
	}

	return 0;
}

#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH	\
	(offsetof(struct pqi_event_config, descriptors) + \
	(PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))

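/*
 * Event configuration is a read-modify-write cycle: fetch the current
 * configuration from the controller, point every supported event type at
 * the event queue (or at queue ID 0 to disable delivery), then write the
 * table back.
 */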
static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
	bool enable_events)
{
	int rc;
	unsigned int i;
	struct pqi_event_config *event_config;
	struct pqi_event_descriptor *event_descriptor;
	struct pqi_general_management_request request;

	event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		GFP_KERNEL);
	if (!event_config)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		DMA_FROM_DEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		DMA_FROM_DEVICE);

	if (rc)
		goto out;

	for (i = 0; i < event_config->num_event_descriptors; i++) {
		event_descriptor = &event_config->descriptors[i];
		if (enable_events &&
			pqi_is_supported_event(event_descriptor->event_type))
			put_unaligned_le16(ctrl_info->event_queue.oq_id,
				&event_descriptor->oq_id);
		else
			put_unaligned_le16(0, &event_descriptor->oq_id);
	}

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		DMA_TO_DEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		DMA_TO_DEVICE);

out:
	kfree(event_config);

	return rc;
}

static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
{
	return pqi_configure_events(ctrl_info, true);
}

static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
{
	return pqi_configure_events(ctrl_info, false);
}

static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct device *dev;
	size_t sg_chain_buffer_length;
	struct pqi_io_request *io_request;

	if (!ctrl_info->io_request_pool)
		return;

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		kfree(io_request->iu);
		if (!io_request->sg_chain_buffer)
			break;
		dma_free_coherent(dev, sg_chain_buffer_length,
			io_request->sg_chain_buffer,
			io_request->sg_chain_buffer_dma_handle);
		io_request++;
	}

	kfree(ctrl_info->io_request_pool);
	ctrl_info->io_request_pool = NULL;
}

static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
		ctrl_info->error_buffer_length,
		&ctrl_info->error_buffer_dma_handle,
		GFP_KERNEL);
	if (!ctrl_info->error_buffer)
		return -ENOMEM;

	return 0;
}

static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	void *sg_chain_buffer;
	size_t sg_chain_buffer_length;
	dma_addr_t sg_chain_buffer_dma_handle;
	struct device *dev;
	struct pqi_io_request *io_request;

	ctrl_info->io_request_pool =
		kcalloc(ctrl_info->max_io_slots,
			sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);

	if (!ctrl_info->io_request_pool) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate I/O request pool\n");
		goto error;
	}

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request->iu =
			kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);

		if (!io_request->iu) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate IU buffers\n");
			goto error;
		}

		sg_chain_buffer = dma_alloc_coherent(dev,
			sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
			GFP_KERNEL);

		if (!sg_chain_buffer) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate PQI scatter-gather chain buffers\n");
			goto error;
		}

		io_request->index = i;
		io_request->sg_chain_buffer = sg_chain_buffer;
		io_request->sg_chain_buffer_dma_handle =
			sg_chain_buffer_dma_handle;
		io_request++;
	}

	return 0;

error:
	pqi_free_all_io_requests(ctrl_info);

	return -ENOMEM;
}

/*
 * Calculate required resources that are sized based on max. outstanding
 * requests and max. transfer size.
 */

static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	u32 max_transfer_size;
	u32 max_sg_entries;

	ctrl_info->scsi_ml_can_queue =
		ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
	ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;

	ctrl_info->error_buffer_length =
		ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;

	if (reset_devices)
		max_transfer_size = min(ctrl_info->max_transfer_size,
			PQI_MAX_TRANSFER_SIZE_KDUMP);
	else
		max_transfer_size = min(ctrl_info->max_transfer_size,
			PQI_MAX_TRANSFER_SIZE);

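	/*
	 * Worst case, every SG entry maps a single page; e.g. with 4 KiB
	 * pages, a 1 MiB transfer needs 256 entries, plus the extra entry
	 * added below for a buffer that is not page-aligned.
	 */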
	max_sg_entries = max_transfer_size / PAGE_SIZE;

	/* +1 to cover when the buffer is not page-aligned. */
	max_sg_entries++;

	max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);

	max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;

	ctrl_info->sg_chain_buffer_length =
		(max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
		PQI_EXTRA_SGL_MEMORY;
	ctrl_info->sg_tablesize = max_sg_entries;
	ctrl_info->max_sectors = max_transfer_size / 512;
}

static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
{
	int num_queue_groups;
	u16 num_elements_per_iq;
	u16 num_elements_per_oq;

	if (reset_devices) {
		num_queue_groups = 1;
	} else {
		int num_cpus;
		int max_queue_groups;

		max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
			ctrl_info->max_outbound_queues - 1);
		max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);

		num_cpus = num_online_cpus();
		num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
		num_queue_groups = min(num_queue_groups, max_queue_groups);
	}

	ctrl_info->num_queue_groups = num_queue_groups;
	ctrl_info->max_hw_queue_index = num_queue_groups - 1;

	/*
	 * Make sure that the max. inbound IU length is an even multiple
	 * of our inbound element length.
	 */
	ctrl_info->max_inbound_iu_length =
		(ctrl_info->max_inbound_iu_length_per_firmware /
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;

	num_elements_per_iq =
		(ctrl_info->max_inbound_iu_length /
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	/* Add one because one element in each queue is unusable. */
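	/*
	 * (An element is unusable because a queue whose producer and consumer
	 * indexes are equal reads as empty, so the ring can never be filled
	 * completely.)
	 */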
	num_elements_per_iq++;

	num_elements_per_iq = min(num_elements_per_iq,
		ctrl_info->max_elements_per_iq);

	num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
	num_elements_per_oq = min(num_elements_per_oq,
		ctrl_info->max_elements_per_oq);

	ctrl_info->num_elements_per_iq = num_elements_per_iq;
	ctrl_info->num_elements_per_oq = num_elements_per_oq;

	ctrl_info->max_sg_per_iu =
		((ctrl_info->max_inbound_iu_length -
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
		sizeof(struct pqi_sg_descriptor)) +
		PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
}

static inline void pqi_set_sg_descriptor(
	struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
{
	u64 address = (u64)sg_dma_address(sg);
	unsigned int length = sg_dma_len(sg);

	put_unaligned_le64(address, &sg_descriptor->address);
	put_unaligned_le32(length, &sg_descriptor->length);
	put_unaligned_le32(0, &sg_descriptor->flags);
}

static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	int i;
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	unsigned int max_sg_per_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;
	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
	chained = false;
	num_sg_in_iu = 0;
	i = 0;

4829 pqi_set_sg_descriptor(sg_descriptor, sg);
4830 if (!chained)
4831 num_sg_in_iu++;
4832 i++;
4833 if (i == sg_count)
4834 break;
4835 sg_descriptor++;
4836 if (i == max_sg_per_iu) {
4837 put_unaligned_le64(
4838 (u64)io_request->sg_chain_buffer_dma_handle,
4839 &sg_descriptor->address);
4840 put_unaligned_le32((sg_count - num_sg_in_iu)
4841 * sizeof(*sg_descriptor),
4842 &sg_descriptor->length);
4843 put_unaligned_le32(CISS_SG_CHAIN,
4844 &sg_descriptor->flags);
4845 chained = true;
4846 num_sg_in_iu++;
4847 sg_descriptor = io_request->sg_chain_buffer;
4848 }
4849 sg = sg_next(sg);
4850 }
4851
4852 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4853 request->partial = chained;
4854 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4855
4856out:
4857 put_unaligned_le16(iu_length, &request->header.iu_length);
4858
4859 return 0;
4860}
4861
static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	int i;
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	unsigned int max_sg_per_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;
	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
	chained = false;
	i = 0;

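	/* Same embedded-then-chained descriptor scheme as the RAID path. */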
	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		if (!chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			put_unaligned_le64(
				(u64)io_request->sg_chain_buffer_dma_handle,
				&sg_descriptor->address);
			put_unaligned_le32((sg_count - num_sg_in_iu)
				* sizeof(*sg_descriptor),
				&sg_descriptor->length);
			put_unaligned_le32(CISS_SG_CHAIN,
				&sg_descriptor->flags);
			chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}
		sg = sg_next(sg);
	}

	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);
	request->num_sg_descriptors = num_sg_in_iu;

	return 0;
}

static void pqi_raid_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	pqi_free_io_request(io_request);
	scsi_dma_unmap(scmd);
	pqi_scsi_done(scmd);
}

static int pqi_raid_submit_scsi_cmd_with_io_request(
	struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	int rc;
	size_t cdb_length;
	struct pqi_raid_path_request *request;

	io_request->io_complete_callback = pqi_raid_io_complete;
	io_request->scmd = scmd;

	request = io_request->iu;
	memset(request, 0,
		offsetof(struct pqi_raid_path_request, sg_descriptors));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le16(io_request->index, &request->request_id);
	request->error_index = request->request_id;
	memcpy(request->lun_number, device->scsi3addr,
		sizeof(request->lun_number));

	cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
	memcpy(request->cdb, scmd->cmnd, cdb_length);

	switch (cdb_length) {
	case 6:
	case 10:
	case 12:
	case 16:
		/* No bytes in the Additional CDB bytes field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_0;
		break;
	case 20:
		/* 4 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_4;
		break;
	case 24:
		/* 8 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_8;
		break;
	case 28:
		/* 12 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_12;
		break;
	case 32:
	default:
		/* 16 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_16;
		break;
	}

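	/*
	 * SOP expresses the direction from the controller's point of view:
	 * a host-to-device transfer (DMA_TO_DEVICE) is a controller "read",
	 * and a device-to-host transfer is a controller "write".
	 */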
	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		break;
	}

	rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);

	return 0;
}

static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	struct pqi_io_request *io_request;

	io_request = pqi_alloc_io_request(ctrl_info);

	return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
		device, scmd, queue_group);
}

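/*
 * RAID bypass (AIO path) requests that fail for a retryable reason are not
 * completed back to the SML with an error; pqi_aio_io_complete() parks them
 * on a retry list instead, and the helpers below re-drive them through the
 * normal RAID path.
 */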
static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
{
	if (!pqi_ctrl_blocked(ctrl_info))
		schedule_work(&ctrl_info->raid_bypass_retry_work);
}

static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
{
	struct scsi_cmnd *scmd;
	struct pqi_scsi_dev *device;
	struct pqi_ctrl_info *ctrl_info;

	if (!io_request->raid_bypass)
		return false;

	scmd = io_request->scmd;
	if ((scmd->result & 0xff) == SAM_STAT_GOOD)
		return false;
	if (host_byte(scmd->result) == DID_NO_CONNECT)
		return false;

	device = scmd->device->hostdata;
	if (pqi_device_offline(device))
		return false;

	ctrl_info = shost_to_hba(scmd->device->host);
	if (pqi_ctrl_offline(ctrl_info))
		return false;

	return true;
}

static inline void pqi_add_to_raid_bypass_retry_list(
	struct pqi_ctrl_info *ctrl_info,
	struct pqi_io_request *io_request, bool at_head)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
	if (at_head)
		list_add(&io_request->request_list_entry,
			&ctrl_info->raid_bypass_retry_list);
	else
		list_add_tail(&io_request->request_list_entry,
			&ctrl_info->raid_bypass_retry_list);
	spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
}

static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	pqi_free_io_request(io_request);
	pqi_scsi_done(scmd);
}

static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
{
	struct scsi_cmnd *scmd;
	struct pqi_ctrl_info *ctrl_info;

	io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
	scmd = io_request->scmd;
	scmd->result = 0;
	ctrl_info = shost_to_hba(scmd->device->host);

	pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
	pqi_schedule_bypass_retry(ctrl_info);
}

static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
{
	struct scsi_cmnd *scmd;
	struct pqi_scsi_dev *device;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_queue_group *queue_group;

	scmd = io_request->scmd;
	device = scmd->device->hostdata;
	if (pqi_device_in_reset(device)) {
		pqi_free_io_request(io_request);
		set_host_byte(scmd, DID_RESET);
		pqi_scsi_done(scmd);
		return 0;
	}

	ctrl_info = shost_to_hba(scmd->device->host);
	queue_group = io_request->queue_group;

	pqi_reinit_io_request(io_request);

	return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
		device, scmd, queue_group);
}

static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
	struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;
	struct pqi_io_request *io_request;

	spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
	io_request = list_first_entry_or_null(
		&ctrl_info->raid_bypass_retry_list,
		struct pqi_io_request, request_list_entry);
	if (io_request)
		list_del(&io_request->request_list_entry);
	spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);

	return io_request;
}

static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_io_request *io_request;

	pqi_ctrl_busy(ctrl_info);

	while (1) {
		if (pqi_ctrl_blocked(ctrl_info))
			break;
		io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
		if (!io_request)
			break;
		rc = pqi_retry_raid_bypass(io_request);
		if (rc) {
			pqi_add_to_raid_bypass_retry_list(ctrl_info,
				io_request, true);
			pqi_schedule_bypass_retry(ctrl_info);
			break;
		}
	}

	pqi_ctrl_unbusy(ctrl_info);
}

static void pqi_raid_bypass_retry_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info,
		raid_bypass_retry_work);
	pqi_retry_raid_bypass_requests(ctrl_info);
}

static void pqi_clear_all_queued_raid_bypass_retries(
	struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
	INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
	spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
}

static void pqi_aio_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	scsi_dma_unmap(scmd);
	if (io_request->status == -EAGAIN)
		set_host_byte(scmd, DID_IMM_RETRY);
	else if (pqi_raid_bypass_retry_needed(io_request)) {
		pqi_queue_raid_bypass_retry(io_request);
		return;
	}
	pqi_free_io_request(io_request);
	pqi_scsi_done(scmd);
}

static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
		scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
}

5223static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5224 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5225 unsigned int cdb_length, struct pqi_queue_group *queue_group,
376fb880 5226 struct pqi_encryption_info *encryption_info, bool raid_bypass)
6c223761
KB
5227{
5228 int rc;
5229 struct pqi_io_request *io_request;
5230 struct pqi_aio_path_request *request;
5231
5232 io_request = pqi_alloc_io_request(ctrl_info);
5233 io_request->io_complete_callback = pqi_aio_io_complete;
5234 io_request->scmd = scmd;
376fb880 5235 io_request->raid_bypass = raid_bypass;
6c223761
KB
5236
5237 request = io_request->iu;
5238 memset(request, 0,
5239 offsetof(struct pqi_raid_path_request, sg_descriptors));
5240
5241 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5242 put_unaligned_le32(aio_handle, &request->nexus_id);
5243 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5244 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5245 put_unaligned_le16(io_request->index, &request->request_id);
5246 request->error_index = request->request_id;
5247 if (cdb_length > sizeof(request->cdb))
5248 cdb_length = sizeof(request->cdb);
5249 request->cdb_length = cdb_length;
5250 memcpy(request->cdb, cdb, cdb_length);
5251
5252 switch (scmd->sc_data_direction) {
5253 case DMA_TO_DEVICE:
5254 request->data_direction = SOP_READ_FLAG;
5255 break;
5256 case DMA_FROM_DEVICE:
5257 request->data_direction = SOP_WRITE_FLAG;
5258 break;
5259 case DMA_NONE:
5260 request->data_direction = SOP_NO_DIRECTION_FLAG;
5261 break;
5262 case DMA_BIDIRECTIONAL:
5263 request->data_direction = SOP_BIDIRECTIONAL;
5264 break;
5265 default:
5266 dev_err(&ctrl_info->pci_dev->dev,
5267 "unknown data direction: %d\n",
5268 scmd->sc_data_direction);
5269 break;
5270 }
5271
5272 if (encryption_info) {
5273 request->encryption_enable = true;
5274 put_unaligned_le16(encryption_info->data_encryption_key_index,
5275 &request->data_encryption_key_index);
5276 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5277 &request->encrypt_tweak_lower);
5278 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5279 &request->encrypt_tweak_upper);
5280 }
5281
5282 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5283 if (rc) {
5284 pqi_free_io_request(io_request);
5285 return SCSI_MLQUEUE_HOST_BUSY;
5286 }
5287
5288 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5289
5290 return 0;
5291}
5292
5293static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5294 struct scsi_cmnd *scmd)
5295{
5296 u16 hw_queue;
5297
5298 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
5299 if (hw_queue > ctrl_info->max_hw_queue_index)
5300 hw_queue = 0;
5301
5302 return hw_queue;
5303}
5304
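/*
 * Editor's sketch (not part of the driver): blk_mq_unique_tag() packs the
 * hardware-queue index into the upper 16 bits of the unique tag and the
 * per-queue tag into the lower 16 bits; pqi_get_hw_queue() above keeps only
 * the queue index.  A minimal illustration of the decomposition, assuming
 * <linux/blk-mq.h> is available:
 */
static inline void pqi_example_decode_unique_tag(struct scsi_cmnd *scmd,
	u16 *hw_queue, u16 *tag)
{
	u32 unique_tag = blk_mq_unique_tag(scmd->request);

	*hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);	/* high 16 bits */
	*tag = blk_mq_unique_tag_to_tag(unique_tag);		/* low 16 bits */
}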
5305/*
5306 * This function is called just before the completed SCSI request is
5307 * handed back to the SML (the SCSI midlayer).
5308 */
5309
5310void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5311{
5312 struct pqi_scsi_dev *device;
5313
5314 if (!scmd->device) {
5315 set_host_byte(scmd, DID_NO_CONNECT);
5316 return;
5317 }
5318
5319 device = scmd->device->hostdata;
5320 if (!device) {
5321 set_host_byte(scmd, DID_NO_CONNECT);
5322 return;
5323 }
5324
5325 atomic_dec(&device->scsi_cmds_outstanding);
5326}
5327
5328static int pqi_scsi_queue_command(struct Scsi_Host *shost,
5329 struct scsi_cmnd *scmd)
5330{
5331 int rc;
5332 struct pqi_ctrl_info *ctrl_info;
5333 struct pqi_scsi_dev *device;
5334 u16 hw_queue;
5335 struct pqi_queue_group *queue_group;
5336 bool raid_bypassed;
5337
5338 device = scmd->device->hostdata;
5339 ctrl_info = shost_to_hba(shost);
5340
5341 if (!device) {
5342 set_host_byte(scmd, DID_NO_CONNECT);
5343 pqi_scsi_done(scmd);
5344 return 0;
5345 }
5346
5347 atomic_inc(&device->scsi_cmds_outstanding);
5348
5349 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
5350 set_host_byte(scmd, DID_NO_CONNECT);
5351 pqi_scsi_done(scmd);
5352 return 0;
5353 }
5354
5355 pqi_ctrl_busy(ctrl_info);
5356 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
5357 pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
5358 rc = SCSI_MLQUEUE_HOST_BUSY;
5359 goto out;
5360 }
5361
5362 /*
5363 * This is necessary because the SML doesn't zero out this field during
5364 * error recovery.
5365 */
5366 scmd->result = 0;
5367
5368 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5369 queue_group = &ctrl_info->queue_groups[hw_queue];
5370
5371 if (pqi_is_logical_device(device)) {
5372 raid_bypassed = false;
5373 if (device->raid_bypass_enabled &&
5374 !blk_rq_is_passthrough(scmd->request)) {
5375 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
5376 scmd, queue_group);
5377 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
5378 raid_bypassed = true;
5379 atomic_inc(&device->raid_bypass_cnt);
5380 }
5381 }
5382 if (!raid_bypassed)
5383 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5384 } else {
5385 if (device->aio_enabled)
5386 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5387 else
5388 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
5389 }
5390
5391out:
5392 pqi_ctrl_unbusy(ctrl_info);
5393 if (rc)
5394 atomic_dec(&device->scsi_cmds_outstanding);
5395
5396 return rc;
5397}
5398
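/*
 * Editor's note: the submission logic above reduces to a small decision
 * table: logical volumes normally take the RAID path and reach AIO only via
 * the RAID bypass fast path (falling back to RAID when bypass submission
 * fails); physical devices take AIO whenever a valid handle exists.  A
 * hypothetical condensed form (the pqi_example_* names are not driver
 * symbols):
 */
enum pqi_example_io_path { PQI_EXAMPLE_RAID, PQI_EXAMPLE_AIO };

static inline enum pqi_example_io_path pqi_example_select_path(
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
{
	if (pqi_is_logical_device(device))
		return (device->raid_bypass_enabled &&
			!blk_rq_is_passthrough(scmd->request)) ?
			PQI_EXAMPLE_AIO : PQI_EXAMPLE_RAID;

	return device->aio_enabled ? PQI_EXAMPLE_AIO : PQI_EXAMPLE_RAID;
}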
5399static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
5400 struct pqi_queue_group *queue_group)
5401{
5402 unsigned int path;
5403 unsigned long flags;
5404 bool list_is_empty;
5405
5406 for (path = 0; path < 2; path++) {
5407 while (1) {
5408 spin_lock_irqsave(
5409 &queue_group->submit_lock[path], flags);
5410 list_is_empty =
5411 list_empty(&queue_group->request_list[path]);
5412 spin_unlock_irqrestore(
5413 &queue_group->submit_lock[path], flags);
5414 if (list_is_empty)
5415 break;
5416 pqi_check_ctrl_health(ctrl_info);
5417 if (pqi_ctrl_offline(ctrl_info))
5418 return -ENXIO;
5419 usleep_range(1000, 2000);
5420 }
5421 }
5422
5423 return 0;
5424}
5425
5426static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5427{
5428 int rc;
5429 unsigned int i;
5430 unsigned int path;
5431 struct pqi_queue_group *queue_group;
5432 pqi_index_t iq_pi;
5433 pqi_index_t iq_ci;
5434
5435 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5436 queue_group = &ctrl_info->queue_groups[i];
5437
5438 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5439 if (rc)
5440 return rc;
5441
5442 for (path = 0; path < 2; path++) {
5443 iq_pi = queue_group->iq_pi_copy[path];
5444
5445 while (1) {
5446 iq_ci = readl(queue_group->iq_ci[path]);
5447 if (iq_ci == iq_pi)
5448 break;
5449 pqi_check_ctrl_health(ctrl_info);
5450 if (pqi_ctrl_offline(ctrl_info))
5451 return -ENXIO;
5452 usleep_range(1000, 2000);
5453 }
5454 }
5455 }
5456
5457 return 0;
5458}
5459
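/*
 * Editor's note: an inbound queue is considered drained when the driver's
 * cached producer index (iq_pi_copy) matches the consumer index the
 * controller posts back to host memory (iq_ci).  Illustrative sketch only:
 */
static inline bool pqi_example_iq_drained(struct pqi_queue_group *queue_group,
	unsigned int path)
{
	return readl(queue_group->iq_ci[path]) == queue_group->iq_pi_copy[path];
}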
5460static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5461 struct pqi_scsi_dev *device)
5462{
5463 unsigned int i;
5464 unsigned int path;
5465 struct pqi_queue_group *queue_group;
5466 unsigned long flags;
5467 struct pqi_io_request *io_request;
5468 struct pqi_io_request *next;
5469 struct scsi_cmnd *scmd;
5470 struct pqi_scsi_dev *scsi_device;
5471
5472 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5473 queue_group = &ctrl_info->queue_groups[i];
5474
5475 for (path = 0; path < 2; path++) {
5476 spin_lock_irqsave(
5477 &queue_group->submit_lock[path], flags);
5478
5479 list_for_each_entry_safe(io_request, next,
5480 &queue_group->request_list[path],
5481 request_list_entry) {
5482 scmd = io_request->scmd;
5483 if (!scmd)
5484 continue;
5485
5486 scsi_device = scmd->device->hostdata;
5487 if (scsi_device != device)
5488 continue;
5489
5490 list_del(&io_request->request_list_entry);
5491 set_host_byte(scmd, DID_RESET);
5492 pqi_scsi_done(scmd);
5493 }
5494
5495 spin_unlock_irqrestore(
5496 &queue_group->submit_lock[path], flags);
5497 }
5498 }
5499}
5500
5501static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
5502{
5503 unsigned int i;
5504 unsigned int path;
5505 struct pqi_queue_group *queue_group;
5506 unsigned long flags;
5507 struct pqi_io_request *io_request;
5508 struct pqi_io_request *next;
5509 struct scsi_cmnd *scmd;
5510
5511 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5512 queue_group = &ctrl_info->queue_groups[i];
5513
5514 for (path = 0; path < 2; path++) {
5515 spin_lock_irqsave(&queue_group->submit_lock[path],
5516 flags);
5517
5518 list_for_each_entry_safe(io_request, next,
5519 &queue_group->request_list[path],
5520 request_list_entry) {
5521
5522 scmd = io_request->scmd;
5523 if (!scmd)
5524 continue;
5525
5526 list_del(&io_request->request_list_entry);
5527 set_host_byte(scmd, DID_RESET);
5528 pqi_scsi_done(scmd);
5529 }
5530
5531 spin_unlock_irqrestore(
5532 &queue_group->submit_lock[path], flags);
5533 }
5534 }
5535}
5536
5537static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5538 struct pqi_scsi_dev *device, unsigned long timeout_secs)
5539{
5540 unsigned long timeout;
5541
5542 timeout = (timeout_secs * PQI_HZ) + jiffies;
5543
5544 while (atomic_read(&device->scsi_cmds_outstanding)) {
5545 pqi_check_ctrl_health(ctrl_info);
5546 if (pqi_ctrl_offline(ctrl_info))
5547 return -ENXIO;
5548 if (timeout_secs != NO_TIMEOUT) {
5549 if (time_after(jiffies, timeout)) {
5550 dev_err(&ctrl_info->pci_dev->dev,
5551 "timed out waiting for pending IO\n");
5552 return -ETIMEDOUT;
5553 }
5554 }
5555 usleep_range(1000, 2000);
5556 }
5557
5558 return 0;
5559}
5560
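/*
 * Editor's note: both wait loops above follow the usual jiffies idiom:
 * compute a deadline once, then test it with time_after() so that jiffies
 * wraparound is handled, treating NO_TIMEOUT as "poll forever".  A generic
 * sketch of the pattern (pqi_example_poll is not a driver symbol):
 */
static int pqi_example_poll(bool (*done)(void *), void *context,
	unsigned long timeout_secs)
{
	unsigned long timeout = (timeout_secs * PQI_HZ) + jiffies;

	while (!done(context)) {
		if (timeout_secs != NO_TIMEOUT &&
			time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(1000, 2000);
	}

	return 0;
}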
5561static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5562 unsigned long timeout_secs)
5563{
5564 bool io_pending;
5565 unsigned long flags;
5566 unsigned long timeout;
5567 struct pqi_scsi_dev *device;
5568
5569 timeout = (timeout_secs * PQI_HZ) + jiffies;
5570 while (1) {
5571 io_pending = false;
5572
5573 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5574 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5575 scsi_device_list_entry) {
5576 if (atomic_read(&device->scsi_cmds_outstanding)) {
5577 io_pending = true;
5578 break;
5579 }
5580 }
5581 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5582 flags);
5583
5584 if (!io_pending)
5585 break;
5586
5587 pqi_check_ctrl_health(ctrl_info);
5588 if (pqi_ctrl_offline(ctrl_info))
5589 return -ENXIO;
5590
5591 if (timeout_secs != NO_TIMEOUT) {
5592 if (time_after(jiffies, timeout)) {
5593 dev_err(&ctrl_info->pci_dev->dev,
5594 "timed out waiting for pending IO\n");
5595 return -ETIMEDOUT;
5596 }
5597 }
5598 usleep_range(1000, 2000);
5599 }
5600
5601 return 0;
5602}
5603
5604static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
5605{
5606 while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
5607 pqi_check_ctrl_health(ctrl_info);
5608 if (pqi_ctrl_offline(ctrl_info))
5609 return -ENXIO;
5610 usleep_range(1000, 2000);
5611 }
5612
5613 return 0;
5614}
5615
5616static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
5617 void *context)
5618{
5619 struct completion *waiting = context;
5620
5621 complete(waiting);
5622}
5623
5624#define PQI_LUN_RESET_TIMEOUT_SECS 30
5625#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
5626
5627static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5628 struct pqi_scsi_dev *device, struct completion *wait)
5629{
5630 int rc;
5631
5632 while (1) {
5633 if (wait_for_completion_io_timeout(wait,
5634 PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
5635 rc = 0;
5636 break;
5637 }
5638
5639 pqi_check_ctrl_health(ctrl_info);
5640 if (pqi_ctrl_offline(ctrl_info)) {
5641 rc = -ENXIO;
5642 break;
5643 }
5644 }
5645
5646 return rc;
5647}
5648
5649static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
5650 struct pqi_scsi_dev *device)
5651{
5652 int rc;
5653 struct pqi_io_request *io_request;
5654 DECLARE_COMPLETION_ONSTACK(wait);
5655 struct pqi_task_management_request *request;
5656
5657 io_request = pqi_alloc_io_request(ctrl_info);
5658 io_request->io_complete_callback = pqi_lun_reset_complete;
5659 io_request->context = &wait;
5660
5661 request = io_request->iu;
5662 memset(request, 0, sizeof(*request));
5663
5664 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5665 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5666 &request->header.iu_length);
5667 put_unaligned_le16(io_request->index, &request->request_id);
5668 memcpy(request->lun_number, device->scsi3addr,
5669 sizeof(request->lun_number));
5670 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5671 if (ctrl_info->tmf_iu_timeout_supported)
5672 put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS,
5673 &request->timeout);
5674
5675 pqi_start_io(ctrl_info,
5676 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5677 io_request);
5678
5679 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5680 if (rc == 0)
5681 rc = io_request->status;
5682
5683 pqi_free_io_request(io_request);
5684
5685 return rc;
5686}
5687
5688/* Performs a reset at the LUN level. */
5689
5690#define PQI_LUN_RESET_RETRIES 3
5691#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000
5692#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120
5693
5694static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5695 struct pqi_scsi_dev *device)
5696{
5697 int rc;
5698 unsigned int retries;
5699 unsigned long timeout_secs;
5700
5701 for (retries = 0;;) {
5702 rc = pqi_lun_reset(ctrl_info, device);
5703 if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
5704 break;
5705 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
5706 }
5707
5708 timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;
5709
5710 rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);
5711
5712 return rc == 0 ? SUCCESS : FAILED;
5713}
5714
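/*
 * Editor's note: _pqi_device_reset() above makes at most
 * 1 + PQI_LUN_RESET_RETRIES reset attempts, sleeping
 * PQI_LUN_RESET_RETRY_INTERVAL_MSECS between attempts, and bounds the
 * subsequent pending-I/O wait (120s) only when every attempt failed.  A
 * hypothetical condensed form of the same retry policy:
 */
static int pqi_example_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned int attempt;
	int rc = -EIO;	/* overwritten on the first attempt */

	for (attempt = 0; attempt <= PQI_LUN_RESET_RETRIES; attempt++) {
		rc = pqi_lun_reset(ctrl_info, device);
		if (rc == 0)
			break;
		msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
	}

	return rc;
}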
5715static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5716 struct pqi_scsi_dev *device)
5717{
5718 int rc;
5719
5720 mutex_lock(&ctrl_info->lun_reset_mutex);
5721
5722 pqi_ctrl_block_requests(ctrl_info);
5723 pqi_ctrl_wait_until_quiesced(ctrl_info);
5724 pqi_fail_io_queued_for_device(ctrl_info, device);
5725 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5726 pqi_device_reset_start(device);
5727 pqi_ctrl_unblock_requests(ctrl_info);
5728
5729 if (rc)
5730 rc = FAILED;
5731 else
5732 rc = _pqi_device_reset(ctrl_info, device);
5733
5734 pqi_device_reset_done(device);
5735
5736 mutex_unlock(&ctrl_info->lun_reset_mutex);
5737
5738 return rc;
5739}
5740
5741static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5742{
5743 int rc;
5744 struct Scsi_Host *shost;
5745 struct pqi_ctrl_info *ctrl_info;
5746 struct pqi_scsi_dev *device;
5747
5748 shost = scmd->device->host;
5749 ctrl_info = shost_to_hba(shost);
5750 device = scmd->device->hostdata;
5751
5752 dev_err(&ctrl_info->pci_dev->dev,
5753 "resetting scsi %d:%d:%d:%d\n",
5754 shost->host_no, device->bus, device->target, device->lun);
5755
5756 pqi_check_ctrl_health(ctrl_info);
5757 if (pqi_ctrl_offline(ctrl_info) ||
5758 pqi_device_reset_blocked(ctrl_info)) {
5759 rc = FAILED;
5760 goto out;
5761 }
5762
5763 pqi_wait_until_ofa_finished(ctrl_info);
5764
5765 atomic_inc(&ctrl_info->sync_cmds_outstanding);
5766 rc = pqi_device_reset(ctrl_info, device);
5767 atomic_dec(&ctrl_info->sync_cmds_outstanding);
5768
5769out:
5770 dev_err(&ctrl_info->pci_dev->dev,
5771 "reset of scsi %d:%d:%d:%d: %s\n",
5772 shost->host_no, device->bus, device->target, device->lun,
5773 rc == SUCCESS ? "SUCCESS" : "FAILED");
5774
5775 return rc;
5776}
5777
5778static int pqi_slave_alloc(struct scsi_device *sdev)
5779{
5780 struct pqi_scsi_dev *device;
5781 unsigned long flags;
5782 struct pqi_ctrl_info *ctrl_info;
5783 struct scsi_target *starget;
5784 struct sas_rphy *rphy;
5785
5786 ctrl_info = shost_to_hba(sdev->host);
5787
5788 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5789
5790 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5791 starget = scsi_target(sdev);
5792 rphy = target_to_rphy(starget);
5793 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5794 if (device) {
5795 device->target = sdev_id(sdev);
5796 device->lun = sdev->lun;
5797 device->target_lun_valid = true;
5798 }
5799 } else {
5800 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5801 sdev_id(sdev), sdev->lun);
5802 }
5803
5804 if (device) {
5805 sdev->hostdata = device;
5806 device->sdev = sdev;
5807 if (device->queue_depth) {
5808 device->advertised_queue_depth = device->queue_depth;
5809 scsi_change_queue_depth(sdev,
5810 device->advertised_queue_depth);
5811 }
5812 if (pqi_is_logical_device(device))
5813 pqi_disable_write_same(sdev);
5814 else
5815 sdev->allow_restart = 1;
5816 }
5817
5818 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5819
5820 return 0;
5821}
5822
5823static int pqi_map_queues(struct Scsi_Host *shost)
5824{
5825 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5826
5827 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
5828 ctrl_info->pci_dev, 0);
5829}
5830
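/*
 * Editor's note: blk_mq_pci_map_queues() above derives the CPU-to-queue map
 * from the PCI device's MSI-X affinity masks (vector offset 0 here), so each
 * hardware queue is serviced by the CPUs whose interrupt vector it owns.
 */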
5831static int pqi_slave_configure(struct scsi_device *sdev)
5832{
5833 struct pqi_scsi_dev *device;
5834
5835 device = sdev->hostdata;
5836 device->devtype = sdev->type;
5837
5838 return 0;
5839}
5840
5841static void pqi_slave_destroy(struct scsi_device *sdev)
5842{
5843 unsigned long flags;
5844 struct pqi_scsi_dev *device;
5845 struct pqi_ctrl_info *ctrl_info;
5846
5847 ctrl_info = shost_to_hba(sdev->host);
5848
5849 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5850
5851 device = sdev->hostdata;
5852 if (device) {
5853 sdev->hostdata = NULL;
5854 if (!list_empty(&device->scsi_device_list_entry))
5855 list_del(&device->scsi_device_list_entry);
5856 }
5857
5858 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5859
5860 if (device) {
5861 pqi_dev_info(ctrl_info, "removed", device);
5862 pqi_free_device(device);
5863 }
5864}
5865
5866static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5867{
5868 struct pci_dev *pci_dev;
5869 u32 subsystem_vendor;
5870 u32 subsystem_device;
5871 cciss_pci_info_struct pciinfo;
5872
5873 if (!arg)
5874 return -EINVAL;
5875
5876 pci_dev = ctrl_info->pci_dev;
5877
5878 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5879 pciinfo.bus = pci_dev->bus->number;
5880 pciinfo.dev_fn = pci_dev->devfn;
5881 subsystem_vendor = pci_dev->subsystem_vendor;
5882 subsystem_device = pci_dev->subsystem_device;
5883 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
5884
5885 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5886 return -EFAULT;
5887
5888 return 0;
5889}
5890
5891static int pqi_getdrivver_ioctl(void __user *arg)
5892{
5893 u32 version;
5894
5895 if (!arg)
5896 return -EINVAL;
5897
5898 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5899 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5900
5901 if (copy_to_user(arg, &version, sizeof(version)))
5902 return -EFAULT;
5903
5904 return 0;
5905}
5906
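/*
 * Editor's sketch: CCISS_GETDRIVVER packs the version word as
 * major:4 | minor:4 | release:8 | revision:16, from the most significant
 * bits down.  A caller would unpack it like this (illustration only):
 */
static inline void pqi_example_unpack_drivver(u32 version, u8 *major,
	u8 *minor, u8 *release, u16 *revision)
{
	*major = (version >> 28) & 0xf;
	*minor = (version >> 24) & 0xf;
	*release = (version >> 16) & 0xff;
	*revision = version & 0xffff;
}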
5907struct ciss_error_info {
5908 u8 scsi_status;
5909 int command_status;
5910 size_t sense_data_length;
5911};
5912
5913static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5914 struct ciss_error_info *ciss_error_info)
5915{
5916 int ciss_cmd_status;
5917 size_t sense_data_length;
5918
5919 switch (pqi_error_info->data_out_result) {
5920 case PQI_DATA_IN_OUT_GOOD:
5921 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5922 break;
5923 case PQI_DATA_IN_OUT_UNDERFLOW:
5924 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5925 break;
5926 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5927 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5928 break;
5929 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5930 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5931 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5932 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5933 case PQI_DATA_IN_OUT_ERROR:
5934 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5935 break;
5936 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5937 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5938 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5939 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5940 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5941 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5942 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5943 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5944 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5945 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5946 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5947 break;
5948 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5949 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5950 break;
5951 case PQI_DATA_IN_OUT_ABORTED:
5952 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5953 break;
5954 case PQI_DATA_IN_OUT_TIMEOUT:
5955 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5956 break;
5957 default:
5958 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5959 break;
5960 }
5961
5962 sense_data_length =
5963 get_unaligned_le16(&pqi_error_info->sense_data_length);
5964 if (sense_data_length == 0)
5965 sense_data_length =
5966 get_unaligned_le16(&pqi_error_info->response_data_length);
5967 if (sense_data_length)
5968 if (sense_data_length > sizeof(pqi_error_info->data))
5969 sense_data_length = sizeof(pqi_error_info->data);
5970
5971 ciss_error_info->scsi_status = pqi_error_info->status;
5972 ciss_error_info->command_status = ciss_cmd_status;
5973 ciss_error_info->sense_data_length = sense_data_length;
5974}
5975
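/*
 * Editor's note: the sense-length logic above prefers sense_data_length,
 * falls back to response_data_length when the former is zero, and clamps
 * the result to the inline data buffer.  Equivalent sketch:
 */
static inline size_t pqi_example_sense_length(
	struct pqi_raid_error_info *error_info)
{
	size_t length = get_unaligned_le16(&error_info->sense_data_length);

	if (length == 0)
		length = get_unaligned_le16(&error_info->response_data_length);

	return min(length, sizeof(error_info->data));
}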
5976static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5977{
5978 int rc;
5979 char *kernel_buffer = NULL;
5980 u16 iu_length;
5981 size_t sense_data_length;
5982 IOCTL_Command_struct iocommand;
5983 struct pqi_raid_path_request request;
5984 struct pqi_raid_error_info pqi_error_info;
5985 struct ciss_error_info ciss_error_info;
5986
5987 if (pqi_ctrl_offline(ctrl_info))
5988 return -ENXIO;
5989 if (!arg)
5990 return -EINVAL;
5991 if (!capable(CAP_SYS_RAWIO))
5992 return -EPERM;
5993 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5994 return -EFAULT;
5995 if (iocommand.buf_size < 1 &&
5996 iocommand.Request.Type.Direction != XFER_NONE)
5997 return -EINVAL;
5998 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5999 return -EINVAL;
6000 if (iocommand.Request.Type.Type != TYPE_CMD)
6001 return -EINVAL;
6002
6003 switch (iocommand.Request.Type.Direction) {
6004 case XFER_NONE:
6005 case XFER_WRITE:
6006 case XFER_READ:
6007 case XFER_READ | XFER_WRITE:
6008 break;
6009 default:
6010 return -EINVAL;
6011 }
6012
6013 if (iocommand.buf_size > 0) {
6014 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6015 if (!kernel_buffer)
6016 return -ENOMEM;
6017 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6018 if (copy_from_user(kernel_buffer, iocommand.buf,
6019 iocommand.buf_size)) {
6020 rc = -EFAULT;
6021 goto out;
6022 }
6023 } else {
6024 memset(kernel_buffer, 0, iocommand.buf_size);
6025 }
6026 }
6027
6028 memset(&request, 0, sizeof(request));
6029
6030 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6031 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6032 PQI_REQUEST_HEADER_LENGTH;
6033 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6034 sizeof(request.lun_number));
6035 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6036 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6037
6038 switch (iocommand.Request.Type.Direction) {
6039 case XFER_NONE:
6040 request.data_direction = SOP_NO_DIRECTION_FLAG;
6041 break;
6042 case XFER_WRITE:
6043 request.data_direction = SOP_WRITE_FLAG;
6044 break;
6045 case XFER_READ:
6046 request.data_direction = SOP_READ_FLAG;
6047 break;
6048 case XFER_READ | XFER_WRITE:
6049 request.data_direction = SOP_BIDIRECTIONAL;
6050 break;
6051 }
6052
6053 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6054
6055 if (iocommand.buf_size > 0) {
6056 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6057
6058 rc = pqi_map_single(ctrl_info->pci_dev,
6059 &request.sg_descriptors[0], kernel_buffer,
6060 iocommand.buf_size, DMA_BIDIRECTIONAL);
6061 if (rc)
6062 goto out;
6063
6064 iu_length += sizeof(request.sg_descriptors[0]);
6065 }
6066
6067 put_unaligned_le16(iu_length, &request.header.iu_length);
6068
6069 if (ctrl_info->raid_iu_timeout_supported)
6070 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6071
6072 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6073 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
6074
6075 if (iocommand.buf_size > 0)
6076 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6077 DMA_BIDIRECTIONAL);
6078
6079 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6080
6081 if (rc == 0) {
6082 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6083 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6084 iocommand.error_info.CommandStatus =
6085 ciss_error_info.command_status;
6086 sense_data_length = ciss_error_info.sense_data_length;
6087 if (sense_data_length) {
6088 if (sense_data_length >
6089 sizeof(iocommand.error_info.SenseInfo))
6090 sense_data_length =
6091 sizeof(iocommand.error_info.SenseInfo);
6092 memcpy(iocommand.error_info.SenseInfo,
6093 pqi_error_info.data, sense_data_length);
6094 iocommand.error_info.SenseLen = sense_data_length;
6095 }
6096 }
6097
6098 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6099 rc = -EFAULT;
6100 goto out;
6101 }
6102
6103 if (rc == 0 && iocommand.buf_size > 0 &&
6104 (iocommand.Request.Type.Direction & XFER_READ)) {
6105 if (copy_to_user(iocommand.buf, kernel_buffer,
6106 iocommand.buf_size)) {
6107 rc = -EFAULT;
6108 }
6109 }
6110
6111out:
6112 kfree(kernel_buffer);
6113
6114 return rc;
6115}
6116
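/*
 * Editor's sketch: a hypothetical user-space caller of CCISS_PASSTHRU,
 * issuing a 6-byte INQUIRY through the handler above.  The device path,
 * LUN addressing, and error handling are illustrative only, so the example
 * is compiled out of the kernel build.
 */
#if 0	/* user-space example, not kernel code */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cciss_ioctl.h>

static int example_cciss_inquiry(const char *dev_path, unsigned char *buf,
	unsigned short buf_len)
{
	int fd;
	int rc;
	IOCTL_Command_struct cmd;

	fd = open(dev_path, O_RDWR);
	if (fd < 0)
		return -1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.Request.Type.Type = TYPE_CMD;
	cmd.Request.Type.Direction = XFER_READ;
	cmd.Request.CDBLen = 6;
	cmd.Request.CDB[0] = 0x12;	/* INQUIRY */
	cmd.Request.CDB[4] = buf_len & 0xff;
	cmd.buf_size = buf_len;
	cmd.buf = buf;

	rc = ioctl(fd, CCISS_PASSTHRU, &cmd);
	close(fd);

	return rc;
}
#endif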
6117static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6118 void __user *arg)
6119{
6120 int rc;
6121 struct pqi_ctrl_info *ctrl_info;
6122
6123 ctrl_info = shost_to_hba(sdev->host);
6124
6125 if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
6126 return -EBUSY;
6127
6128 switch (cmd) {
6129 case CCISS_DEREGDISK:
6130 case CCISS_REGNEWDISK:
6131 case CCISS_REGNEWD:
6132 rc = pqi_scan_scsi_devices(ctrl_info);
6133 break;
6134 case CCISS_GETPCIINFO:
6135 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6136 break;
6137 case CCISS_GETDRIVVER:
6138 rc = pqi_getdrivver_ioctl(arg);
6139 break;
6140 case CCISS_PASSTHRU:
6141 rc = pqi_passthru_ioctl(ctrl_info, arg);
6142 break;
6143 default:
6144 rc = -EINVAL;
6145 break;
6146 }
6147
6148 return rc;
6149}
6150
6151static ssize_t pqi_firmware_version_show(struct device *dev,
6152 struct device_attribute *attr, char *buffer)
6153{
6154 struct Scsi_Host *shost;
6155 struct pqi_ctrl_info *ctrl_info;
6156
6157 shost = class_to_shost(dev);
6158 ctrl_info = shost_to_hba(shost);
6159
6160 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6161}
6162
6163static ssize_t pqi_driver_version_show(struct device *dev,
6164 struct device_attribute *attr, char *buffer)
6165{
6166 return snprintf(buffer, PAGE_SIZE, "%s\n",
6167 DRIVER_VERSION BUILD_TIMESTAMP);
6168}
6169
6170static ssize_t pqi_serial_number_show(struct device *dev,
6171 struct device_attribute *attr, char *buffer)
6172{
6173 struct Scsi_Host *shost;
6174 struct pqi_ctrl_info *ctrl_info;
6175
6176 shost = class_to_shost(dev);
6177 ctrl_info = shost_to_hba(shost);
6178
6179 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6180}
6181
6182static ssize_t pqi_model_show(struct device *dev,
6183 struct device_attribute *attr, char *buffer)
6184{
6185 struct Scsi_Host *shost;
6186 struct pqi_ctrl_info *ctrl_info;
6187
6188 shost = class_to_shost(dev);
6189 ctrl_info = shost_to_hba(shost);
6190
6191 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6192}
6193
6194static ssize_t pqi_vendor_show(struct device *dev,
6195 struct device_attribute *attr, char *buffer)
6196{
6197 struct Scsi_Host *shost;
6198 struct pqi_ctrl_info *ctrl_info;
6199
6200 shost = class_to_shost(dev);
6201 ctrl_info = shost_to_hba(shost);
6202
6203 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6204}
6205
6206static ssize_t pqi_host_rescan_store(struct device *dev,
6207 struct device_attribute *attr, const char *buffer, size_t count)
6208{
6209 struct Scsi_Host *shost = class_to_shost(dev);
6210
6211 pqi_scan_start(shost);
6212
6213 return count;
6214}
6215
6216static ssize_t pqi_lockup_action_show(struct device *dev,
6217 struct device_attribute *attr, char *buffer)
6218{
6219 int count = 0;
6220 unsigned int i;
6221
6222 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6223 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6224 count += scnprintf(buffer + count, PAGE_SIZE - count,
6225 "[%s] ", pqi_lockup_actions[i].name);
6226 else
6227 count += scnprintf(buffer + count, PAGE_SIZE - count,
6228 "%s ", pqi_lockup_actions[i].name);
6229 }
6230
6231 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6232
6233 return count;
6234}
6235
6236static ssize_t pqi_lockup_action_store(struct device *dev,
6237 struct device_attribute *attr, const char *buffer, size_t count)
6238{
6239 unsigned int i;
6240 char *action_name;
6241 char action_name_buffer[32];
6242
6243 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6244 action_name = strstrip(action_name_buffer);
6245
6246 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6247 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6248 pqi_lockup_action = pqi_lockup_actions[i].action;
6249 return count;
6250 }
6251 }
6252
6253 return -EINVAL;
6254}
6255
6256static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6257static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6258static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6259static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6260static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
6261static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6262static DEVICE_ATTR(lockup_action, 0644,
6263 pqi_lockup_action_show, pqi_lockup_action_store);
6264
6265static struct device_attribute *pqi_shost_attrs[] = {
6266 &dev_attr_driver_version,
6267 &dev_attr_firmware_version,
6268 &dev_attr_model,
6269 &dev_attr_serial_number,
6270 &dev_attr_vendor,
6271 &dev_attr_rescan,
6272 &dev_attr_lockup_action,
6273 NULL
6274};
6275
6276static ssize_t pqi_unique_id_show(struct device *dev,
6277 struct device_attribute *attr, char *buffer)
6278{
6279 struct pqi_ctrl_info *ctrl_info;
6280 struct scsi_device *sdev;
6281 struct pqi_scsi_dev *device;
6282 unsigned long flags;
6283 u8 unique_id[16];
6284
6285 sdev = to_scsi_device(dev);
6286 ctrl_info = shost_to_hba(sdev->host);
6287
6288 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6289
6290 device = sdev->hostdata;
6291 if (!device) {
6292 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6293 return -ENODEV;
6294 }
6295
6296 if (device->is_physical_device) {
6297 memset(unique_id, 0, 8);
6298 memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
6299 } else {
6300 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
6301 }
6302
6303 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6304
6305 return snprintf(buffer, PAGE_SIZE,
6306 "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
6307 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
6308 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
6309 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
6310 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
6311}
6312
6313static ssize_t pqi_lunid_show(struct device *dev,
6314 struct device_attribute *attr, char *buffer)
6315{
6316 struct pqi_ctrl_info *ctrl_info;
6317 struct scsi_device *sdev;
6318 struct pqi_scsi_dev *device;
6319 unsigned long flags;
6320 u8 lunid[8];
6321
6322 sdev = to_scsi_device(dev);
6323 ctrl_info = shost_to_hba(sdev->host);
6324
6325 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6326
6327 device = sdev->hostdata;
6328 if (!device) {
6329 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6330 return -ENODEV;
6331 }
6332
6333 memcpy(lunid, device->scsi3addr, sizeof(lunid));
6334
6335 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6336
6337 return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
6338}
6339
6340#define MAX_PATHS 8
6341
6342static ssize_t pqi_path_info_show(struct device *dev,
6343 struct device_attribute *attr, char *buf)
6344{
6345 struct pqi_ctrl_info *ctrl_info;
6346 struct scsi_device *sdev;
6347 struct pqi_scsi_dev *device;
6348 unsigned long flags;
6349 int i;
6350 int output_len = 0;
6351 u8 box;
6352 u8 bay;
6353 u8 path_map_index;
6354 char *active;
6355 u8 phys_connector[2];
6356
6357 sdev = to_scsi_device(dev);
6358 ctrl_info = shost_to_hba(sdev->host);
6359
6360 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6361
6362 device = sdev->hostdata;
6363 if (!device) {
6364 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6365 return -ENODEV;
6366 }
6367
6368 bay = device->bay;
6369 for (i = 0; i < MAX_PATHS; i++) {
6370 path_map_index = 1 << i;
6371 if (i == device->active_path_index)
6372 active = "Active";
6373 else if (device->path_map & path_map_index)
6374 active = "Inactive";
6375 else
6376 continue;
6377
6378 output_len += scnprintf(buf + output_len,
6379 PAGE_SIZE - output_len,
6380 "[%d:%d:%d:%d] %20.20s ",
6381 ctrl_info->scsi_host->host_no,
6382 device->bus, device->target,
6383 device->lun,
6384 scsi_device_type(device->devtype));
6385
6386 if (device->devtype == TYPE_RAID ||
6387 pqi_is_logical_device(device))
6388 goto end_buffer;
6389
6390 memcpy(&phys_connector, &device->phys_connector[i],
6391 sizeof(phys_connector));
6392 if (phys_connector[0] < '0')
6393 phys_connector[0] = '0';
6394 if (phys_connector[1] < '0')
6395 phys_connector[1] = '0';
6396
6397 output_len += scnprintf(buf + output_len,
6398 PAGE_SIZE - output_len,
6399 "PORT: %.2s ", phys_connector);
6400
6401 box = device->box[i];
6402 if (box != 0 && box != 0xFF)
6403 output_len += scnprintf(buf + output_len,
6404 PAGE_SIZE - output_len,
6405 "BOX: %hhu ", box);
6406
6407 if ((device->devtype == TYPE_DISK ||
6408 device->devtype == TYPE_ZBC) &&
6409 pqi_expose_device(device))
6410 output_len += scnprintf(buf + output_len,
6411 PAGE_SIZE - output_len,
6412 "BAY: %hhu ", bay);
6413
6414end_buffer:
6415 output_len += scnprintf(buf + output_len,
6416 PAGE_SIZE - output_len,
6417 "%s\n", active);
6418 }
6419
6420 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6421
6422 return output_len;
6423}
6424
6425static ssize_t pqi_sas_address_show(struct device *dev,
6426 struct device_attribute *attr, char *buffer)
6427{
6428 struct pqi_ctrl_info *ctrl_info;
6429 struct scsi_device *sdev;
6430 struct pqi_scsi_dev *device;
6431 unsigned long flags;
6432 u64 sas_address;
6433
6434 sdev = to_scsi_device(dev);
6435 ctrl_info = shost_to_hba(sdev->host);
6436
6437 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6438
6439 device = sdev->hostdata;
6440 if (!device || !pqi_is_device_with_sas_address(device)) {
6441 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6442 return -ENODEV;
6443 }
6444
6445 sas_address = device->sas_address;
6446
6447 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6448
6449 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
6450}
6451
6452static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
6453 struct device_attribute *attr, char *buffer)
6454{
6455 struct pqi_ctrl_info *ctrl_info;
6456 struct scsi_device *sdev;
6457 struct pqi_scsi_dev *device;
6458 unsigned long flags;
6459
6460 sdev = to_scsi_device(dev);
6461 ctrl_info = shost_to_hba(sdev->host);
6462
6463 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6464
6465 device = sdev->hostdata;
6466 if (!device) {
6467 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6468 return -ENODEV;
6469 }
6470
6471 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
6472 buffer[1] = '\n';
6473 buffer[2] = '\0';
6474
6475 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6476
6477 return 2;
6478}
6479
6480static ssize_t pqi_raid_level_show(struct device *dev,
6481 struct device_attribute *attr, char *buffer)
6482{
6483 struct pqi_ctrl_info *ctrl_info;
6484 struct scsi_device *sdev;
6485 struct pqi_scsi_dev *device;
6486 unsigned long flags;
6487 char *raid_level;
6488
6489 sdev = to_scsi_device(dev);
6490 ctrl_info = shost_to_hba(sdev->host);
6491
6492 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6493
6494 device = sdev->hostdata;
6495 if (!device) {
6496 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6497 return -ENODEV;
6498 }
6499
6500 if (pqi_is_logical_device(device))
6501 raid_level = pqi_raid_level_to_string(device->raid_level);
6502 else
6503 raid_level = "N/A";
6504
6505 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6506
6507 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
6508}
6509
6510static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
6511 struct device_attribute *attr, char *buffer)
6512{
6513 struct pqi_ctrl_info *ctrl_info;
6514 struct scsi_device *sdev;
6515 struct pqi_scsi_dev *device;
6516 unsigned long flags;
6517 int raid_bypass_cnt;
6518
6519 sdev = to_scsi_device(dev);
6520 ctrl_info = shost_to_hba(sdev->host);
6521
6522 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6523
6524 device = sdev->hostdata;
6525 if (!device) {
6526 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6527 return -ENODEV;
6528 }
6529
6530 raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
6531
6532 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6533
6534 return snprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
6535}
6536
6537static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
6538static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
6539static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
6540static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
6541static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
6542static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
6543static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
6544
6545static struct device_attribute *pqi_sdev_attrs[] = {
6546 &dev_attr_lunid,
6547 &dev_attr_unique_id,
6548 &dev_attr_path_info,
6549 &dev_attr_sas_address,
6550 &dev_attr_ssd_smart_path_enabled,
6551 &dev_attr_raid_level,
6552 &dev_attr_raid_bypass_cnt,
6553 NULL
6554};
6555
6556static struct scsi_host_template pqi_driver_template = {
6557 .module = THIS_MODULE,
6558 .name = DRIVER_NAME_SHORT,
6559 .proc_name = DRIVER_NAME_SHORT,
6560 .queuecommand = pqi_scsi_queue_command,
6561 .scan_start = pqi_scan_start,
6562 .scan_finished = pqi_scan_finished,
6563 .this_id = -1,
6564 .eh_device_reset_handler = pqi_eh_device_reset_handler,
6565 .ioctl = pqi_ioctl,
6566 .slave_alloc = pqi_slave_alloc,
6567 .slave_configure = pqi_slave_configure,
6568 .slave_destroy = pqi_slave_destroy,
6569 .map_queues = pqi_map_queues,
6570 .sdev_attrs = pqi_sdev_attrs,
6571 .shost_attrs = pqi_shost_attrs,
6572};
6573
6574static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
6575{
6576 int rc;
6577 struct Scsi_Host *shost;
6578
6579 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
6580 if (!shost) {
6581 dev_err(&ctrl_info->pci_dev->dev,
6582 "scsi_host_alloc failed for controller %u\n",
6583 ctrl_info->ctrl_id);
6584 return -ENOMEM;
6585 }
6586
6587 shost->io_port = 0;
6588 shost->n_io_port = 0;
6589 shost->this_id = -1;
6590 shost->max_channel = PQI_MAX_BUS;
6591 shost->max_cmd_len = MAX_COMMAND_SIZE;
6592 shost->max_lun = ~0;
6593 shost->max_id = ~0;
6594 shost->max_sectors = ctrl_info->max_sectors;
6595 shost->can_queue = ctrl_info->scsi_ml_can_queue;
6596 shost->cmd_per_lun = shost->can_queue;
6597 shost->sg_tablesize = ctrl_info->sg_tablesize;
6598 shost->transportt = pqi_sas_transport_template;
6599 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
6600 shost->unique_id = shost->irq;
6601 shost->nr_hw_queues = ctrl_info->num_queue_groups;
6602 shost->host_tagset = 1;
6603 shost->hostdata[0] = (unsigned long)ctrl_info;
6604
6605 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
6606 if (rc) {
6607 dev_err(&ctrl_info->pci_dev->dev,
6608 "scsi_add_host failed for controller %u\n",
6609 ctrl_info->ctrl_id);
6610 goto free_host;
6611 }
6612
6613 rc = pqi_add_sas_host(shost, ctrl_info);
6614 if (rc) {
6615 dev_err(&ctrl_info->pci_dev->dev,
6616 "add SAS host failed for controller %u\n",
6617 ctrl_info->ctrl_id);
6618 goto remove_host;
6619 }
6620
6621 ctrl_info->scsi_host = shost;
6622
6623 return 0;
6624
6625remove_host:
6626 scsi_remove_host(shost);
6627free_host:
6628 scsi_host_put(shost);
6629
6630 return rc;
6631}
6632
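/*
 * Editor's note: setting shost->host_tagset above makes the block layer
 * size a single tag space of can_queue tags shared by all nr_hw_queues, so
 * a block-layer tag maps one-to-one onto the controller-wide pool of
 * io_request slots regardless of which hardware queue issued it.
 * Illustrative invariant check (pqi_example_* is not a driver symbol):
 */
static inline bool pqi_example_tag_in_pool(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd)
{
	u16 tag = blk_mq_unique_tag_to_tag(blk_mq_unique_tag(scmd->request));

	return tag < ctrl_info->scsi_ml_can_queue;
}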
6633static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
6634{
6635 struct Scsi_Host *shost;
6636
6637 pqi_delete_sas_host(ctrl_info);
6638
6639 shost = ctrl_info->scsi_host;
6640 if (!shost)
6641 return;
6642
6643 scsi_remove_host(shost);
6644 scsi_host_put(shost);
6645}
6646
6647static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
6648{
6649 int rc = 0;
6650 struct pqi_device_registers __iomem *pqi_registers;
6651 unsigned long timeout;
6652 unsigned int timeout_msecs;
6653 union pqi_reset_register reset_reg;
6654
6655 pqi_registers = ctrl_info->pqi_registers;
6656 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
6657 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
6658
6659 while (1) {
6660 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
6661 reset_reg.all_bits = readl(&pqi_registers->device_reset);
6662 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
6663 break;
6664 pqi_check_ctrl_health(ctrl_info);
6665 if (pqi_ctrl_offline(ctrl_info)) {
6666 rc = -ENXIO;
6667 break;
6668 }
6669 if (time_after(jiffies, timeout)) {
6670 rc = -ETIMEDOUT;
6671 break;
6672 }
6673 }
6674
6675 return rc;
6676}
6677
6678static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
6679{
6680 int rc;
6681 union pqi_reset_register reset_reg;
6682
6683 if (ctrl_info->pqi_reset_quiesce_supported) {
6684 rc = sis_pqi_reset_quiesce(ctrl_info);
6685 if (rc) {
6686 dev_err(&ctrl_info->pci_dev->dev,
6687 "PQI reset failed during quiesce with error %d\n",
6688 rc);
6689 return rc;
6690 }
6691 }
6692
6693 reset_reg.all_bits = 0;
6694 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
6695 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
6696
6697 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
6698
6699 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
6700 if (rc)
6701 dev_err(&ctrl_info->pci_dev->dev,
6702 "PQI reset failed with error %d\n", rc);
6703
6704 return rc;
6705}
6706
6707static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
6708{
6709 int rc;
6710 struct bmic_sense_subsystem_info *sense_info;
6711
6712 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
6713 if (!sense_info)
6714 return -ENOMEM;
6715
6716 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
6717 if (rc)
6718 goto out;
6719
6720 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
6721 sizeof(sense_info->ctrl_serial_number));
6722 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
6723
6724out:
6725 kfree(sense_info);
6726
6727 return rc;
6728}
6729
6730static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
6731{
6732 int rc;
6733 struct bmic_identify_controller *identify;
6734
6735 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
6736 if (!identify)
6737 return -ENOMEM;
6738
6739 rc = pqi_identify_controller(ctrl_info, identify);
6740 if (rc)
6741 goto out;
6742
6743 memcpy(ctrl_info->firmware_version, identify->firmware_version,
6744 sizeof(identify->firmware_version));
6745 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
6746 snprintf(ctrl_info->firmware_version +
6747 strlen(ctrl_info->firmware_version),
6748 sizeof(ctrl_info->firmware_version),
6749 "-%u", get_unaligned_le16(&identify->firmware_build_number));
6750
6751 memcpy(ctrl_info->model, identify->product_id,
6752 sizeof(identify->product_id));
6753 ctrl_info->model[sizeof(identify->product_id)] = '\0';
6754
6755 memcpy(ctrl_info->vendor, identify->vendor_id,
6756 sizeof(identify->vendor_id));
6757 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
6758
6759out:
6760 kfree(identify);
6761
6762 return rc;
6763}
6764
6765struct pqi_config_table_section_info {
6766 struct pqi_ctrl_info *ctrl_info;
6767 void *section;
6768 u32 section_offset;
6769 void __iomem *section_iomem_addr;
6770};
6771
6772static inline bool pqi_is_firmware_feature_supported(
6773 struct pqi_config_table_firmware_features *firmware_features,
6774 unsigned int bit_position)
6775{
6776 unsigned int byte_index;
6777
6778 byte_index = bit_position / BITS_PER_BYTE;
6779
6780 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
6781 return false;
6782
6783 return firmware_features->features_supported[byte_index] &
6784 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6785}
6786
6787static inline bool pqi_is_firmware_feature_enabled(
6788 struct pqi_config_table_firmware_features *firmware_features,
6789 void __iomem *firmware_features_iomem_addr,
6790 unsigned int bit_position)
6791{
6792 unsigned int byte_index;
6793 u8 __iomem *features_enabled_iomem_addr;
6794
6795 byte_index = (bit_position / BITS_PER_BYTE) +
6796 (le16_to_cpu(firmware_features->num_elements) * 2);
6797
6798 features_enabled_iomem_addr = firmware_features_iomem_addr +
6799 offsetof(struct pqi_config_table_firmware_features,
6800 features_supported) + byte_index;
6801
6802 return *((__force u8 *)features_enabled_iomem_addr) &
6803 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
6804}
6805
6806static inline void pqi_request_firmware_feature(
6807 struct pqi_config_table_firmware_features *firmware_features,
6808 unsigned int bit_position)
6809{
6810 unsigned int byte_index;
6811
6812 byte_index = (bit_position / BITS_PER_BYTE) +
6813 le16_to_cpu(firmware_features->num_elements);
6814
6815 firmware_features->features_supported[byte_index] |=
6816 (1 << (bit_position % BITS_PER_BYTE));
6817}
6818
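/*
 * Editor's note: the firmware-features section holds three byte arrays of
 * num_elements bytes each, laid out back to back after the header:
 * "supported" at offset 0 within features_supported[], host-"requested" at
 * num_elements, and firmware-"enabled" at 2 * num_elements, which is
 * exactly the byte_index arithmetic used by the helpers above.  Sketch of
 * the shared offset computation (region is 0, 1 or 2):
 */
static inline unsigned int pqi_example_feature_byte_index(
	struct pqi_config_table_firmware_features *firmware_features,
	unsigned int bit_position, unsigned int region)
{
	return (region * le16_to_cpu(firmware_features->num_elements)) +
		(bit_position / BITS_PER_BYTE);
}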
6819static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
6820 u16 first_section, u16 last_section)
6821{
6822 struct pqi_vendor_general_request request;
6823
6824 memset(&request, 0, sizeof(request));
6825
6826 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
6827 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
6828 &request.header.iu_length);
6829 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
6830 &request.function_code);
6831 put_unaligned_le16(first_section,
6832 &request.data.config_table_update.first_section);
6833 put_unaligned_le16(last_section,
6834 &request.data.config_table_update.last_section);
6835
6836 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6837 0, NULL, NO_TIMEOUT);
6838}
6839
6840static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
6841 struct pqi_config_table_firmware_features *firmware_features,
6842 void __iomem *firmware_features_iomem_addr)
6843{
6844 void *features_requested;
6845 void __iomem *features_requested_iomem_addr;
6846
6847 features_requested = firmware_features->features_supported +
6848 le16_to_cpu(firmware_features->num_elements);
6849
6850 features_requested_iomem_addr = firmware_features_iomem_addr +
6851 (features_requested - (void *)firmware_features);
6852
6853 memcpy_toio(features_requested_iomem_addr, features_requested,
6854 le16_to_cpu(firmware_features->num_elements));
6855
6856 return pqi_config_table_update(ctrl_info,
6857 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
6858 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
6859}
6860
6861struct pqi_firmware_feature {
6862 char *feature_name;
6863 unsigned int feature_bit;
6864 bool supported;
6865 bool enabled;
6866 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
6867 struct pqi_firmware_feature *firmware_feature);
6868};
6869
6870static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
6871 struct pqi_firmware_feature *firmware_feature)
6872{
6873 if (!firmware_feature->supported) {
6874 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
6875 firmware_feature->feature_name);
6876 return;
6877 }
6878
6879 if (firmware_feature->enabled) {
6880 dev_info(&ctrl_info->pci_dev->dev,
6881 "%s enabled\n", firmware_feature->feature_name);
6882 return;
6883 }
6884
6885 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
6886 firmware_feature->feature_name);
6887}
6888
6889static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
6890 struct pqi_firmware_feature *firmware_feature)
6891{
6892 switch (firmware_feature->feature_bit) {
6893 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
6894 ctrl_info->soft_reset_handshake_supported =
6895 firmware_feature->enabled;
6896 break;
6897 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
6898 ctrl_info->raid_iu_timeout_supported =
6899 firmware_feature->enabled;
6900 break;
6901 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
6902 ctrl_info->tmf_iu_timeout_supported =
6903 firmware_feature->enabled;
6904 break;
6905 }
6906
6907 pqi_firmware_feature_status(ctrl_info, firmware_feature);
6908}
6909
b212c251
KB
6910static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
6911 struct pqi_firmware_feature *firmware_feature)
6912{
6913 if (firmware_feature->feature_status)
6914 firmware_feature->feature_status(ctrl_info, firmware_feature);
6915}
6916
6917static DEFINE_MUTEX(pqi_firmware_features_mutex);
6918
6919static struct pqi_firmware_feature pqi_firmware_features[] = {
6920 {
6921 .feature_name = "Online Firmware Activation",
6922 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
6923 .feature_status = pqi_firmware_feature_status,
6924 },
6925 {
6926 .feature_name = "Serial Management Protocol",
6927 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
6928 .feature_status = pqi_firmware_feature_status,
6929 },
6930 {
6931 .feature_name = "New Soft Reset Handshake",
6932 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
6933 .feature_status = pqi_ctrl_update_feature_flags,
6934 },
6935 {
6936 .feature_name = "RAID IU Timeout",
6937 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
6938 .feature_status = pqi_ctrl_update_feature_flags,
6939 },
6940 {
6941 .feature_name = "TMF IU Timeout",
6942 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
6943 .feature_status = pqi_ctrl_update_feature_flags,
6944 },
6945};
6946
6947static void pqi_process_firmware_features(
6948 struct pqi_config_table_section_info *section_info)
6949{
6950 int rc;
6951 struct pqi_ctrl_info *ctrl_info;
6952 struct pqi_config_table_firmware_features *firmware_features;
6953 void __iomem *firmware_features_iomem_addr;
6954 unsigned int i;
6955 unsigned int num_features_supported;
6956
6957 ctrl_info = section_info->ctrl_info;
6958 firmware_features = section_info->section;
6959 firmware_features_iomem_addr = section_info->section_iomem_addr;
6960
6961 for (i = 0, num_features_supported = 0;
6962 i < ARRAY_SIZE(pqi_firmware_features); i++) {
6963 if (pqi_is_firmware_feature_supported(firmware_features,
6964 pqi_firmware_features[i].feature_bit)) {
6965 pqi_firmware_features[i].supported = true;
6966 num_features_supported++;
6967 } else {
6968 pqi_firmware_feature_update(ctrl_info,
6969 &pqi_firmware_features[i]);
6970 }
6971 }
6972
6973 if (num_features_supported == 0)
6974 return;
6975
6976 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6977 if (!pqi_firmware_features[i].supported)
6978 continue;
6979 pqi_request_firmware_feature(firmware_features,
6980 pqi_firmware_features[i].feature_bit);
6981 }
6982
6983 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
6984 firmware_features_iomem_addr);
6985 if (rc) {
6986 dev_err(&ctrl_info->pci_dev->dev,
6987 "failed to enable firmware features in PQI configuration table\n");
6988 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6989 if (!pqi_firmware_features[i].supported)
6990 continue;
6991 pqi_firmware_feature_update(ctrl_info,
6992 &pqi_firmware_features[i]);
6993 }
6994 return;
6995 }
6996
6997 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
6998 if (!pqi_firmware_features[i].supported)
6999 continue;
7000 if (pqi_is_firmware_feature_enabled(firmware_features,
7001 firmware_features_iomem_addr,
7002 pqi_firmware_features[i].feature_bit)) {
7003 pqi_firmware_features[i].enabled = true;
7004 }
7005 pqi_firmware_feature_update(ctrl_info,
7006 &pqi_firmware_features[i]);
7007 }
7008}
7009
7010static void pqi_init_firmware_features(void)
7011{
7012 unsigned int i;
7013
7014 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7015 pqi_firmware_features[i].supported = false;
7016 pqi_firmware_features[i].enabled = false;
7017 }
7018}
7019
7020static void pqi_process_firmware_features_section(
7021 struct pqi_config_table_section_info *section_info)
7022{
7023 mutex_lock(&pqi_firmware_features_mutex);
7024 pqi_init_firmware_features();
7025 pqi_process_firmware_features(section_info);
7026 mutex_unlock(&pqi_firmware_features_mutex);
7027}
7028
7029static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
7030{
7031 u32 table_length;
7032 u32 section_offset;
7033 void __iomem *table_iomem_addr;
7034 struct pqi_config_table *config_table;
7035 struct pqi_config_table_section_header *section;
7036 struct pqi_config_table_section_info section_info;
7037
7038 table_length = ctrl_info->config_table_length;
7039 if (table_length == 0)
7040 return 0;
7041
7042 config_table = kmalloc(table_length, GFP_KERNEL);
7043 if (!config_table) {
7044 dev_err(&ctrl_info->pci_dev->dev,
7045 "failed to allocate memory for PQI configuration table\n");
7046 return -ENOMEM;
7047 }
7048
7049 /*
7050 * Copy the config table contents from I/O memory space into the
7051 * temporary buffer.
7052 */
7053 table_iomem_addr = ctrl_info->iomem_base +
7054 ctrl_info->config_table_offset;
7055 memcpy_fromio(config_table, table_iomem_addr, table_length);
7056
7057 section_info.ctrl_info = ctrl_info;
7058 section_offset =
7059 get_unaligned_le32(&config_table->first_section_offset);
7060
7061 while (section_offset) {
7062 section = (void *)config_table + section_offset;
7063
7064 section_info.section = section;
7065 section_info.section_offset = section_offset;
7066 section_info.section_iomem_addr =
7067 table_iomem_addr + section_offset;
7068
7069 switch (get_unaligned_le16(&section->section_id)) {
7070 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
7071 pqi_process_firmware_features_section(&section_info);
7072 break;
7073 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
7074 if (pqi_disable_heartbeat)
7075 dev_warn(&ctrl_info->pci_dev->dev,
7076 "heartbeat disabled by module parameter\n");
7077 else
7078 ctrl_info->heartbeat_counter =
7079 table_iomem_addr +
7080 section_offset +
7081 offsetof(
7082 struct pqi_config_table_heartbeat,
7083 heartbeat_counter);
98f87667 7084 break;
4fd22c13
MR
7085 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
7086 ctrl_info->soft_reset_status =
7087 table_iomem_addr +
7088 section_offset +
7089 offsetof(struct pqi_config_table_soft_reset,
7090 soft_reset_status);
7091 break;
98f87667
KB
7092 }
7093
7094 section_offset =
7095 get_unaligned_le16(&section->next_section_offset);
7096 }
7097
7098 kfree(config_table);
7099
7100 return 0;
7101}
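
/*
 * Note: the PQI configuration table is a singly linked list of sections in
 * BAR 0; each section header carries a 16-bit section_id and the byte
 * offset of the next section (0 terminates the walk). The table is
 * snapshotted into kernel memory first so the loop parses ordinary RAM,
 * but section_iomem_addr still points back into I/O memory because the
 * heartbeat counter and soft reset status must be read live from the
 * controller, not from the snapshot.
 */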

/* Switches the controller from PQI mode back into SIS mode. */

static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
	rc = pqi_reset(ctrl_info);
	if (rc)
		return rc;
	rc = sis_reenable_sis_mode(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"re-enabling SIS mode failed with error %d\n", rc);
		return rc;
	}
	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);

	return 0;
}

/*
 * If the controller isn't already in SIS mode, this function forces it into
 * SIS mode.
 */

static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
{
	if (!sis_is_firmware_running(ctrl_info))
		return -ENXIO;

	if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
		return 0;

	if (sis_is_kernel_up(ctrl_info)) {
		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
		return 0;
	}

	return pqi_revert_to_sis_mode(ctrl_info);
}

#define PQI_POST_RESET_DELAY_B4_MSGU_READY	5000

static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	if (reset_devices) {
		sis_soft_reset(ctrl_info);
		msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
	} else {
		rc = pqi_force_sis_mode(ctrl_info);
		if (rc)
			return rc;
	}

	/*
	 * Wait until the controller is ready to start accepting SIS
	 * commands.
	 */
	rc = sis_wait_for_ctrl_ready(ctrl_info);
	if (rc)
		return rc;

	/*
	 * Get the controller properties. This allows us to determine
	 * whether or not it supports PQI mode.
	 */
	rc = sis_get_ctrl_properties(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller properties\n");
		return rc;
	}

	rc = sis_get_pqi_capabilities(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller capabilities\n");
		return rc;
	}

	if (reset_devices) {
		if (ctrl_info->max_outstanding_requests >
			PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
			ctrl_info->max_outstanding_requests =
				PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
	} else {
		if (ctrl_info->max_outstanding_requests >
			PQI_MAX_OUTSTANDING_REQUESTS)
			ctrl_info->max_outstanding_requests =
				PQI_MAX_OUTSTANDING_REQUESTS;
	}

	pqi_calculate_io_resources(ctrl_info);

	rc = pqi_alloc_error_buffer(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate PQI error buffer\n");
		return rc;
	}

	/*
	 * If the function we are about to call succeeds, the
	 * controller will transition from legacy SIS mode
	 * into PQI mode.
	 */
	rc = sis_init_base_struct_addr(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing PQI mode\n");
		return rc;
	}

	/* Wait for the controller to complete the SIS -> PQI transition. */
	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"transition to PQI mode failed\n");
		return rc;
	}

	/* From here on, we are running in PQI mode. */
	ctrl_info->pqi_mode_enabled = true;
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);

	rc = pqi_alloc_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate admin queues\n");
		return rc;
	}

	rc = pqi_create_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating admin queues\n");
		return rc;
	}

	rc = pqi_report_device_capability(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"obtaining device capability failed\n");
		return rc;
	}

	rc = pqi_validate_device_capability(ctrl_info);
	if (rc)
		return rc;

	pqi_calculate_queue_resources(ctrl_info);

	rc = pqi_enable_msix_interrupts(ctrl_info);
	if (rc)
		return rc;

	if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
		ctrl_info->max_msix_vectors =
			ctrl_info->num_msix_vectors_enabled;
		pqi_calculate_queue_resources(ctrl_info);
	}

	rc = pqi_alloc_io_resources(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_alloc_operational_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate operational queues\n");
		return rc;
	}

	pqi_init_operational_queues(ctrl_info);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_create_queues(ctrl_info);
	if (rc)
		return rc;

	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);

	ctrl_info->controller_online = true;

	rc = pqi_process_config_table(ctrl_info);
	if (rc)
		return rc;

	pqi_start_heartbeat_timer(ctrl_info);

	rc = pqi_enable_events(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling events\n");
		return rc;
	}

	/* Register with the SCSI subsystem. */
	rc = pqi_register_scsi(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_get_ctrl_product_details(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining product details\n");
		return rc;
	}

	rc = pqi_get_ctrl_serial_number(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining ctrl serial number\n");
		return rc;
	}

	rc = pqi_set_diag_rescan(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling multi-lun rescan\n");
		return rc;
	}

	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error updating host wellness\n");
		return rc;
	}

	pqi_schedule_update_time_worker(ctrl_info);

	pqi_scan_scsi_devices(ctrl_info);

	return 0;
}
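
/*
 * Note: the ordering in pqi_ctrl_init() matters -- admin queues must exist
 * before device capabilities can be queried, capabilities bound the
 * operational queue sizing, and MSI-X allocation can shrink the number of
 * queue groups, which is why pqi_calculate_queue_resources() may run twice.
 * The kdump path (reset_devices) soft-resets the controller first and caps
 * outstanding requests to fit the crash kernel's reduced memory.
 */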

static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_admin_queues *admin_queues;
	struct pqi_event_queue *event_queue;

	admin_queues = &ctrl_info->admin_queues;
	admin_queues->iq_pi_copy = 0;
	admin_queues->oq_ci_copy = 0;
	writel(0, admin_queues->oq_pi);

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
		ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
		ctrl_info->queue_groups[i].oq_ci_copy = 0;

		writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
		writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
		writel(0, ctrl_info->queue_groups[i].oq_pi);
	}

	event_queue = &ctrl_info->event_queue;
	writel(0, event_queue->oq_pi);
	event_queue->oq_ci_copy = 0;
}
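
/*
 * Note: each queue has two views of its indices -- the driver's cached
 * copies (iq_pi_copy/oq_ci_copy) and the shared values the hardware reads,
 * written via writel(). After a reset both sides must agree on zero,
 * otherwise the first post-resume doorbell would point the firmware at
 * stale queue elements.
 */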

static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	rc = pqi_force_sis_mode(ctrl_info);
	if (rc)
		return rc;

	/*
	 * Wait until the controller is ready to start accepting SIS
	 * commands.
	 */
	rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
	if (rc)
		return rc;

	/*
	 * Get the controller properties. This allows us to determine
	 * whether or not it supports PQI mode.
	 */
	rc = sis_get_ctrl_properties(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller properties\n");
		return rc;
	}

	rc = sis_get_pqi_capabilities(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller capabilities\n");
		return rc;
	}

	/*
	 * If the function we are about to call succeeds, the
	 * controller will transition from legacy SIS mode
	 * into PQI mode.
	 */
	rc = sis_init_base_struct_addr(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing PQI mode\n");
		return rc;
	}

	/* Wait for the controller to complete the SIS -> PQI transition. */
	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"transition to PQI mode failed\n");
		return rc;
	}

	/* From here on, we are running in PQI mode. */
	ctrl_info->pqi_mode_enabled = true;
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);

	pqi_reinit_queues(ctrl_info);

	rc = pqi_create_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating admin queues\n");
		return rc;
	}

	rc = pqi_create_queues(ctrl_info);
	if (rc)
		return rc;

	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);

	ctrl_info->controller_online = true;
	pqi_ctrl_unblock_requests(ctrl_info);

	rc = pqi_process_config_table(ctrl_info);
	if (rc)
		return rc;

	pqi_start_heartbeat_timer(ctrl_info);

	rc = pqi_enable_events(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling events\n");
		return rc;
	}

	rc = pqi_get_ctrl_product_details(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining product details\n");
		return rc;
	}

	rc = pqi_set_diag_rescan(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling multi-lun rescan\n");
		return rc;
	}

	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error updating host wellness\n");
		return rc;
	}

	pqi_schedule_update_time_worker(ctrl_info);

	pqi_scan_scsi_devices(ctrl_info);

	return 0;
}

static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
	u16 timeout)
{
	int rc;

	rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
		PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);

	return pcibios_err_to_errno(rc);
}
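
/*
 * Note: the timeout argument lands in the completion timeout field of
 * PCI_EXP_DEVCTL2. The only value this driver programs is 0x6 (see
 * PCI_EXP_COMP_TIMEOUT_65_TO_210_MS in pqi_pci_init()), which, per the
 * macro's name, selects the 65 ms - 210 ms range -- longer than a typical
 * power-on default, so slow controller responses are not misreported as
 * PCIe completion timeouts.
 */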

static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	u64 mask;

	rc = pci_enable_device(ctrl_info->pci_dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to enable PCI device\n");
		return rc;
	}

	if (sizeof(dma_addr_t) > 4)
		mask = DMA_BIT_MASK(64);
	else
		mask = DMA_BIT_MASK(32);

	rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
		goto disable_device;
	}

	rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to obtain PCI resources\n");
		goto disable_device;
	}

	ctrl_info->iomem_base = ioremap(pci_resource_start(
		ctrl_info->pci_dev, 0),
		sizeof(struct pqi_ctrl_registers));
	if (!ctrl_info->iomem_base) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to map memory for controller registers\n");
		rc = -ENOMEM;
		goto release_regions;
	}

#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS	0x6

	/* Increase the PCIe completion timeout. */
	rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
		PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to set PCIe completion timeout\n");
		goto release_regions;
	}

	/* Enable bus mastering. */
	pci_set_master(ctrl_info->pci_dev);

	ctrl_info->registers = ctrl_info->iomem_base;
	ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;

	pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);

	return 0;

release_regions:
	pci_release_regions(ctrl_info->pci_dev);
disable_device:
	pci_disable_device(ctrl_info->pci_dev);

	return rc;
}
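
/*
 * Note: the DMA mask choice above keys off sizeof(dma_addr_t) rather than
 * a per-device probe: if the kernel's DMA addresses are wider than 32 bits,
 * the 64-bit mask is requested, otherwise the 32-bit one. Only BAR 0 is
 * mapped, and only sizeof(struct pqi_ctrl_registers) of it -- every
 * host/controller register access in the driver goes through this single
 * window.
 */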

static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	iounmap(ctrl_info->iomem_base);
	pci_release_regions(ctrl_info->pci_dev);
	if (pci_is_enabled(ctrl_info->pci_dev))
		pci_disable_device(ctrl_info->pci_dev);
	pci_set_drvdata(ctrl_info->pci_dev, NULL);
}

static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
		GFP_KERNEL, numa_node);
	if (!ctrl_info)
		return NULL;

	mutex_init(&ctrl_info->scan_mutex);
	mutex_init(&ctrl_info->lun_reset_mutex);
	mutex_init(&ctrl_info->ofa_mutex);

	INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
	spin_lock_init(&ctrl_info->scsi_device_list_lock);

	INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
	atomic_set(&ctrl_info->num_interrupts, 0);
	atomic_set(&ctrl_info->sync_cmds_outstanding, 0);

	INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
	INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);

	timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
	INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);

	sema_init(&ctrl_info->sync_request_sem,
		PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
	init_waitqueue_head(&ctrl_info->block_requests_wait);

	INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
	spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
	INIT_WORK(&ctrl_info->raid_bypass_retry_work,
		pqi_raid_bypass_retry_worker);

	ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
	ctrl_info->irq_mode = IRQ_MODE_NONE;
	ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;

	return ctrl_info;
}

static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
{
	kfree(ctrl_info);
}

static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	pqi_free_irqs(ctrl_info);
	pqi_disable_msix_interrupts(ctrl_info);
}

static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	if (ctrl_info->queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->queue_memory_length,
			ctrl_info->queue_memory_base,
			ctrl_info->queue_memory_base_dma_handle);
	if (ctrl_info->admin_queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->admin_queue_memory_length,
			ctrl_info->admin_queue_memory_base,
			ctrl_info->admin_queue_memory_base_dma_handle);
	pqi_free_all_io_requests(ctrl_info);
	if (ctrl_info->error_buffer)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->error_buffer_length,
			ctrl_info->error_buffer,
			ctrl_info->error_buffer_dma_handle);
	if (ctrl_info->iomem_base)
		pqi_cleanup_pci_init(ctrl_info);
	pqi_free_ctrl_info(ctrl_info);
}

static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_unregister_scsi(ctrl_info);
	if (ctrl_info->pqi_mode_enabled)
		pqi_revert_to_sis_mode(ctrl_info);
	pqi_free_ctrl_resources(ctrl_info);
}

static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_wait_until_lun_reset_finished(ctrl_info);
	pqi_wait_until_scan_finished(ctrl_info);
	pqi_ctrl_ofa_start(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
	pqi_fail_io_queued_for_all_devices(ctrl_info);
	pqi_wait_until_inbound_queues_empty(ctrl_info);
	pqi_stop_heartbeat_timer(ctrl_info);
	ctrl_info->pqi_mode_enabled = false;
	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
}

static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_ofa_free_host_buffer(ctrl_info);
	ctrl_info->pqi_mode_enabled = true;
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
	ctrl_info->controller_online = true;
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_start_heartbeat_timer(ctrl_info);
	pqi_schedule_update_time_worker(ctrl_info);
	pqi_clear_soft_reset_status(ctrl_info,
		PQI_SOFT_RESET_ABORT);
	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
	u32 total_size, u32 chunk_size)
{
	u32 sg_count;
	u32 size;
	int i;
	struct pqi_sg_descriptor *mem_descriptor = NULL;
	struct device *dev;
	struct pqi_ofa_memory *ofap;

	dev = &ctrl_info->pci_dev->dev;

	/* Round up: how many chunk_size pieces are needed to cover total_size? */
	sg_count = (total_size + chunk_size - 1);
	sg_count /= chunk_size;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	if (sg_count * chunk_size < total_size)
		goto out;

	ctrl_info->pqi_ofa_chunk_virt_addr =
		kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
	if (!ctrl_info->pqi_ofa_chunk_virt_addr)
		goto out;

	for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
		dma_addr_t dma_handle;

		ctrl_info->pqi_ofa_chunk_virt_addr[i] =
			dma_alloc_coherent(dev, chunk_size, &dma_handle,
				GFP_KERNEL);

		if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
			break;

		mem_descriptor = &ofap->sg_descriptor[i];
		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
		put_unaligned_le32(chunk_size, &mem_descriptor->length);
	}

	if (!size || size < total_size)
		goto out_free_chunks;

	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
	put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
	put_unaligned_le32(size, &ofap->bytes_allocated);

	return 0;

out_free_chunks:
	while (--i >= 0) {
		mem_descriptor = &ofap->sg_descriptor[i];
		dma_free_coherent(dev, chunk_size,
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor->address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	put_unaligned_le32(0, &ofap->bytes_allocated);
	return -ENOMEM;
}

static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	u32 total_size;
	u32 min_chunk_size;
	u32 chunk_sz;

	total_size = le32_to_cpu(
		ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
	min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;

	for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
		if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
			return 0;

	return -ENOMEM;
}
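
/*
 * Note: the loop above trades contiguity for reliability. It first tries a
 * single physically contiguous allocation of the full size and, on failure,
 * halves the chunk size until the allocation succeeds or the descriptor
 * budget would be exceeded. For example (values illustrative only), a
 * 1 MiB request would be attempted as 1 x 1 MiB, then 2 x 512 KiB, then
 * 4 x 256 KiB, and so on -- min_chunk_size caps the fragmentation at
 * PQI_OFA_MAX_SG_DESCRIPTORS chunks.
 */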

static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
	u32 bytes_requested)
{
	struct pqi_ofa_memory *pqi_ofa_memory;
	struct device *dev;

	dev = &ctrl_info->pci_dev->dev;
	pqi_ofa_memory = dma_alloc_coherent(dev,
		PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
		&ctrl_info->pqi_ofa_mem_dma_handle,
		GFP_KERNEL);

	if (!pqi_ofa_memory)
		return;

	put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
	memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
		sizeof(pqi_ofa_memory->signature));
	pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);

	ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;

	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
		dev_err(dev, "failed to allocate host buffer of size = %u\n",
			bytes_requested);
	}
}

static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	struct pqi_sg_descriptor *mem_descriptor;
	struct pqi_ofa_memory *ofap;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	if (!ofap)
		return;

	if (!ofap->bytes_allocated)
		goto out;

	mem_descriptor = ofap->sg_descriptor;

	for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
		i++) {
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			get_unaligned_le32(&mem_descriptor[i].length),
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor[i].address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	dma_free_coherent(&ctrl_info->pci_dev->dev,
		PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
		ctrl_info->pqi_ofa_mem_dma_handle);
	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
}

static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_vendor_general_request request;
	size_t size;
	struct pqi_ofa_memory *ofap;

	memset(&request, 0, sizeof(request));

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
		&request.function_code);

	if (ofap) {
		size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
			get_unaligned_le16(&ofap->num_memory_descriptors) *
			sizeof(struct pqi_sg_descriptor);

		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
			&request.data.ofa_memory_allocation.buffer_address);
		put_unaligned_le32(size,
			&request.data.ofa_memory_allocation.buffer_length);
	}

	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);
}
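
/*
 * Note: buffer_length deliberately covers only the descriptor header plus
 * the descriptors actually in use -- offsetof(struct pqi_ofa_memory,
 * sg_descriptor) + n * sizeof(struct pqi_sg_descriptor) -- not the full
 * PQI_OFA_MEMORY_DESCRIPTOR_LENGTH allocation. A NULL ofap (no host
 * buffer) still sends the update, so the firmware learns that no memory
 * was granted.
 */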

static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
{
	msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
	return pqi_ctrl_init_resume(ctrl_info);
}

static void pqi_perform_lockup_action(void)
{
	switch (pqi_lockup_action) {
	case PANIC:
		panic("FATAL: Smart Family Controller lockup detected");
		break;
	case REBOOT:
		emergency_restart();
		break;
	case NONE:
	default:
		break;
	}
}

static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
	.status = SAM_STAT_CHECK_CONDITION,
};

static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;

		scmd = io_request->scmd;
		if (scmd) {
			set_host_byte(scmd, DID_NO_CONNECT);
		} else {
			io_request->status = -ENXIO;
			io_request->error_info =
				&pqi_ctrl_offline_raid_error_info;
		}

		io_request->io_complete_callback(io_request,
			io_request->context);
	}
}

static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
{
	pqi_perform_lockup_action();
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_fail_all_outstanding_requests(ctrl_info);
	pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
}

static void pqi_ctrl_offline_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
	pqi_take_ctrl_offline_deferred(ctrl_info);
}
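
/*
 * Note: taking a controller offline is split in two. The immediate half
 * (pqi_take_ctrl_offline() below) only flips state, blocks new requests,
 * and shuts the hardware down. The work that may sleep -- waiting for
 * quiesce and failing every outstanding request with DID_NO_CONNECT -- is
 * deferred to pqi_ctrl_offline_worker() via ctrl_offline_work.
 */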

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->controller_online)
		return;

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;
	pqi_ctrl_block_requests(ctrl_info);
	if (!pqi_disable_ctrl_shutdown)
		sis_shutdown_ctrl(ctrl_info);
	pci_disable_device(ctrl_info->pci_dev);
	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
	schedule_work(&ctrl_info->ctrl_offline_work);
}

static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	char *ctrl_description;

	if (id->driver_data)
		ctrl_description = (char *)id->driver_data;
	else
		ctrl_description = "Microsemi Smart Family Controller";

	dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}

static int pqi_pci_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	int rc;
	int node, cp_node;
	struct pqi_ctrl_info *ctrl_info;

	pqi_print_ctrl_info(pci_dev, id);

	if (pqi_disable_device_id_wildcards &&
		id->subvendor == PCI_ANY_ID &&
		id->subdevice == PCI_ANY_ID) {
		dev_warn(&pci_dev->dev,
			"controller not probed because device ID wildcards are disabled\n");
		return -ENODEV;
	}

	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
		dev_warn(&pci_dev->dev,
			"controller device ID matched using wildcards\n");

	node = dev_to_node(&pci_dev->dev);
	if (node == NUMA_NO_NODE) {
		cp_node = cpu_to_node(0);
		if (cp_node == NUMA_NO_NODE)
			cp_node = 0;
		set_dev_node(&pci_dev->dev, cp_node);
	}

	ctrl_info = pqi_alloc_ctrl_info(node);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"failed to allocate controller info block\n");
		return -ENOMEM;
	}

	ctrl_info->pci_dev = pci_dev;

	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}

static void pqi_pci_remove(struct pci_dev *pci_dev)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info)
		return;

	pqi_remove_ctrl(ctrl_info);
}

static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;
		scmd = io_request->scmd;
		WARN_ON(scmd != NULL);	/* IO command from SML */
		WARN_ON(scmd == NULL);	/* non-IO cmd or driver-initiated */
	}
}

static void pqi_shutdown(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"cache could not be flushed\n");
		return;
	}

	pqi_disable_events(ctrl_info);
	pqi_wait_until_ofa_finished(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_event_worker(ctrl_info);

	pqi_ctrl_shutdown_start(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
	if (rc) {
		dev_err(&pci_dev->dev,
			"wait for pending I/O failed\n");
		return;
	}

	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_wait_until_lun_reset_finished(ctrl_info);

	/*
	 * Write all data in the controller's battery-backed cache to
	 * storage.
	 */
	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
	if (rc)
		dev_err(&pci_dev->dev,
			"unable to flush controller cache\n");

	pqi_ctrl_block_requests(ctrl_info);

	rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
	if (rc) {
		dev_err(&pci_dev->dev,
			"wait for pending sync cmds failed\n");
		return;
	}

	pqi_crash_if_pending_command(ctrl_info);
	pqi_reset(ctrl_info);
}

static void pqi_process_lockup_action_param(void)
{
	unsigned int i;

	if (!pqi_lockup_action_param)
		return;

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (strcmp(pqi_lockup_action_param,
			pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return;
		}
	}

	pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
		DRIVER_NAME_SHORT, pqi_lockup_action_param);
}

static void pqi_process_module_params(void)
{
	pqi_process_lockup_action_param();
}

static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	pqi_disable_events(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_wait_until_scan_finished(ctrl_info);
	pqi_wait_until_lun_reset_finished(ctrl_info);
	pqi_wait_until_ofa_finished(ctrl_info);
	pqi_flush_cache(ctrl_info, SUSPEND);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_wait_until_inbound_queues_empty(ctrl_info);
	pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
	pqi_stop_heartbeat_timer(ctrl_info);

	if (state.event == PM_EVENT_FREEZE)
		return 0;

	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;

	return 0;
}

static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	if (pci_dev->current_state != PCI_D0) {
		ctrl_info->max_hw_queue_index = 0;
		pqi_free_interrupts(ctrl_info);
		pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
		rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
			IRQF_SHARED, DRIVER_NAME_SHORT,
			&ctrl_info->queue_groups[0]);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"irq %u init failed with error %d\n",
				pci_dev->irq, rc);
			return rc;
		}
		pqi_start_heartbeat_timer(ctrl_info);
		pqi_ctrl_unblock_requests(ctrl_info);
		return 0;
	}

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	return pqi_ctrl_init_resume(ctrl_info);
}
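
/*
 * Note: pqi_resume() has two paths. When the device is still in a
 * low-power state (current_state != PCI_D0), the driver falls back to a
 * single shared legacy INTx vector on queue group 0 and simply restarts
 * the heartbeat and unblocks requests; otherwise it restores PCI state
 * and replays the full SIS -> PQI bring-up via pqi_ctrl_init_resume().
 */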

/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x105b, 0x1211)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x105b, 0x1321)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x152d, 0x8a22)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x152d, 0x8a23)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x152d, 0x8a24)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x152d, 0x8a36)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x152d, 0x8a37)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0x1104)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0x1105)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0x1106)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0x1107)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0x8460)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0x8461)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0xc460)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0xc461)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0xf460)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0xf461)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0045)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0046)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0047)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0048)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x004a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x004b)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x004c)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x004f)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x19e5, 0xd227)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x19e5, 0xd228)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x19e5, 0xd229)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x19e5, 0xd22a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x19e5, 0xd22b)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x19e5, 0xd22c)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0110)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0608)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0800)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0801)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0802)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0803)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0804)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0805)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0806)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0807)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0808)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0809)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x080a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0900)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0901)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0902)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0903)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0904)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0905)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0906)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0907)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0908)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x090a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1200)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1201)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1202)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1280)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1281)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1282)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1300)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1301)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1302)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1303)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1380)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADVANTECH, 0x8312)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_DELL, 0x1fe0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0600)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0601)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0602)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0603)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0609)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0650)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0651)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0652)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0653)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0654)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0655)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0700)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0701)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x1001)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x1100)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x1101)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1d8d, 0x0800)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1d8d, 0x0908)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1d8d, 0x0806)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1d8d, 0x0916)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_GIGABYTE, 0x1000)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_ANY_ID, PCI_ANY_ID)
	},
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
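
/*
 * Note: the table above is order-sensitive -- PCI ID matching returns the
 * first hit, so the catch-all { PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 * PCI_ANY_ID, PCI_ANY_ID } entry must stay last, after every explicit
 * subvendor/subdevice pair. That entry is also what the
 * pqi_disable_device_id_wildcards module parameter guards against in
 * pqi_pci_probe().
 */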

static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
	.suspend = pqi_suspend,
	.resume = pqi_resume,
#endif
};

static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");

	pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	pqi_process_module_params();

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}

static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);

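/*
 * Note: verify_structures() is never called; it exists purely so the
 * BUILD_BUG_ON() assertions below are compiled. Each one pins a field of
 * an on-the-wire or memory-mapped structure to the byte offset the
 * controller firmware expects, turning any accidental layout change
 * (padding, reordering, wrong type width) into a build failure instead of
 * runtime protocol corruption.
 */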
8594static void __attribute__((unused)) verify_structures(void)
8595{
8596 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8597 sis_host_to_ctrl_doorbell) != 0x20);
8598 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8599 sis_interrupt_mask) != 0x34);
8600 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8601 sis_ctrl_to_host_doorbell) != 0x9c);
8602 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8603 sis_ctrl_to_host_doorbell_clear) != 0xa0);
ff6abb73
KB
8604 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8605 sis_driver_scratch) != 0xb0);
6c223761
KB
8606 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8607 sis_firmware_status) != 0xbc);
8608 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8609 sis_mailbox) != 0x1000);
8610 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
8611 pqi_registers) != 0x4000);
8612
8613 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8614 iu_type) != 0x0);
8615 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8616 iu_length) != 0x2);
8617 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8618 response_queue_id) != 0x4);
8619 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
8620 work_area) != 0x6);
8621 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
8622
8623 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8624 status) != 0x0);
8625 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8626 service_response) != 0x1);
8627 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8628 data_present) != 0x2);
8629 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8630 reserved) != 0x3);
8631 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8632 residual_count) != 0x4);
8633 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8634 data_length) != 0x8);
8635 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8636 reserved1) != 0xa);
8637 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
8638 data) != 0xc);
8639 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
8640
8641 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8642 data_in_result) != 0x0);
8643 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8644 data_out_result) != 0x1);
8645 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8646 reserved) != 0x2);
8647 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8648 status) != 0x5);
8649 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8650 status_qualifier) != 0x6);
8651 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8652 sense_data_length) != 0x8);
8653 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8654 response_data_length) != 0xa);
8655 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8656 data_in_transferred) != 0xc);
8657 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8658 data_out_transferred) != 0x10);
8659 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
8660 data) != 0x14);
8661 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
8662
8663 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8664 signature) != 0x0);
8665 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8666 function_and_status_code) != 0x8);
8667 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8668 max_admin_iq_elements) != 0x10);
8669 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8670 max_admin_oq_elements) != 0x11);
8671 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8672 admin_iq_element_length) != 0x12);
8673 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8674 admin_oq_element_length) != 0x13);
8675 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8676 max_reset_timeout) != 0x14);
8677 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8678 legacy_intx_status) != 0x18);
8679 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8680 legacy_intx_mask_set) != 0x1c);
8681 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8682 legacy_intx_mask_clear) != 0x20);
8683 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8684 device_status) != 0x40);
8685 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8686 admin_iq_pi_offset) != 0x48);
8687 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8688 admin_oq_ci_offset) != 0x50);
8689 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8690 admin_iq_element_array_addr) != 0x58);
8691 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8692 admin_oq_element_array_addr) != 0x60);
8693 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8694 admin_iq_ci_addr) != 0x68);
8695 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8696 admin_oq_pi_addr) != 0x70);
8697 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8698 admin_iq_num_elements) != 0x78);
8699 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8700 admin_oq_num_elements) != 0x79);
8701 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8702 admin_queue_int_msg_num) != 0x7a);
8703 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8704 device_error) != 0x80);
8705 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8706 error_details) != 0x88);
8707 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8708 device_reset) != 0x90);
8709 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
8710 power_action) != 0x94);
8711 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
8712
8713 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8714 header.iu_type) != 0);
8715 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8716 header.iu_length) != 2);
8717 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8718 header.work_area) != 6);
8719 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8720 request_id) != 8);
8721 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8722 function_code) != 10);
8723 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8724 data.report_device_capability.buffer_length) != 44);
8725 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8726 data.report_device_capability.sg_descriptor) != 48);
8727 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8728 data.create_operational_iq.queue_id) != 12);
8729 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8730 data.create_operational_iq.element_array_addr) != 16);
8731 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8732 data.create_operational_iq.ci_addr) != 24);
8733 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8734 data.create_operational_iq.num_elements) != 32);
8735 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8736 data.create_operational_iq.element_length) != 34);
8737 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8738 data.create_operational_iq.queue_protocol) != 36);
8739 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8740 data.create_operational_oq.queue_id) != 12);
8741 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8742 data.create_operational_oq.element_array_addr) != 16);
8743 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8744 data.create_operational_oq.pi_addr) != 24);
8745 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8746 data.create_operational_oq.num_elements) != 32);
8747 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8748 data.create_operational_oq.element_length) != 34);
8749 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8750 data.create_operational_oq.queue_protocol) != 36);
8751 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8752 data.create_operational_oq.int_msg_num) != 40);
8753 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8754 data.create_operational_oq.coalescing_count) != 42);
8755 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8756 data.create_operational_oq.min_coalescing_time) != 44);
8757 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8758 data.create_operational_oq.max_coalescing_time) != 48);
8759 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
8760 data.delete_operational_queue.queue_id) != 12);
8761 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
c593642c 8762 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
6c223761 8763 data.create_operational_iq) != 64 - 11);
c593642c 8764 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
6c223761 8765 data.create_operational_oq) != 64 - 11);
c593642c 8766 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
6c223761
KB
8767 data.delete_operational_queue) != 64 - 11);
8768
8769 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8770 header.iu_type) != 0);
8771 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8772 header.iu_length) != 2);
8773 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8774 header.work_area) != 6);
8775 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8776 request_id) != 8);
8777 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8778 function_code) != 10);
8779 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8780 status) != 11);
8781 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8782 data.create_operational_iq.status_descriptor) != 12);
8783 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8784 data.create_operational_iq.iq_pi_offset) != 16);
8785 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8786 data.create_operational_oq.status_descriptor) != 12);
8787 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
8788 data.create_operational_oq.oq_ci_offset) != 16);
8789 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
8790
8791 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8792 header.iu_type) != 0);
8793 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8794 header.iu_length) != 2);
8795 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8796 header.response_queue_id) != 4);
8797 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8798 header.work_area) != 6);
8799 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8800 request_id) != 8);
8801 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8802 nexus_id) != 10);
8803 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8804 buffer_length) != 12);
8805 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8806 lun_number) != 16);
8807 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8808 protocol_specific) != 24);
8809 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8810 error_index) != 27);
8811 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8812 cdb) != 32);
21432010 8813 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8814 timeout) != 60);
6c223761
KB
8815 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
8816 sg_descriptors) != 64);
8817 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
8818 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
8819
8820 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8821 header.iu_type) != 0);
8822 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8823 header.iu_length) != 2);
8824 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8825 header.response_queue_id) != 4);
8826 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8827 header.work_area) != 6);
8828 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8829 request_id) != 8);
8830 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8831 nexus_id) != 12);
8832 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8833 buffer_length) != 16);
8834 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8835 data_encryption_key_index) != 22);
8836 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8837 encrypt_tweak_lower) != 24);
8838 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8839 encrypt_tweak_upper) != 28);
8840 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8841 cdb) != 32);
8842 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8843 error_index) != 48);
8844 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8845 num_sg_descriptors) != 50);
8846 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8847 cdb_length) != 51);
8848 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8849 lun_number) != 52);
8850 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
8851 sg_descriptors) != 64);
8852 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
8853 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
8854
8855 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8856 header.iu_type) != 0);
8857 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8858 header.iu_length) != 2);
8859 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8860 request_id) != 8);
8861 BUILD_BUG_ON(offsetof(struct pqi_io_response,
8862 error_index) != 10);
8863
8864 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8865 header.iu_type) != 0);
8866 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8867 header.iu_length) != 2);
8868 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8869 header.response_queue_id) != 4);
8870 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8871 request_id) != 8);
8872 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8873 data.report_event_configuration.buffer_length) != 12);
8874 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8875 data.report_event_configuration.sg_descriptors) != 16);
8876 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8877 data.set_event_configuration.global_event_oq_id) != 10);
8878 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8879 data.set_event_configuration.buffer_length) != 12);
8880 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
8881 data.set_event_configuration.sg_descriptors) != 16);
8882
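	/*
	 * Editor's reconstruction (consistent with the offsets asserted
	 * above, but not copied from smartpqi.h): the report and set
	 * variants overlay a union that starts right after the 2-byte
	 * request_id at offset 8, with only the set variant using bytes
	 * 10-11 for global_event_oq_id:
	 *
	 *	union {
	 *		struct {
	 *			u8 reserved[2];                   // 10
	 *			__le32 buffer_length;             // 12
	 *			struct pqi_sg_descriptor sg_descriptors[1]; // 16
	 *		} report_event_configuration;
	 *		struct {
	 *			__le16 global_event_oq_id;        // 10
	 *			__le32 buffer_length;             // 12
	 *			struct pqi_sg_descriptor sg_descriptors[1]; // 16
	 *		} set_event_configuration;
	 *	} data;	// array lengths illustrative
	 */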
8883 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
8884 max_inbound_iu_length) != 6);
8885 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
8886 max_outbound_iu_length) != 14);
8887 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
8888
8889 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8890 data_length) != 0);
8891 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8892 iq_arbitration_priority_support_bitmask) != 8);
8893 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8894 maximum_aw_a) != 9);
8895 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8896 maximum_aw_b) != 10);
8897 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8898 maximum_aw_c) != 11);
8899 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8900 max_inbound_queues) != 16);
8901 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8902 max_elements_per_iq) != 18);
8903 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8904 max_iq_element_length) != 24);
8905 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8906 min_iq_element_length) != 26);
8907 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8908 max_outbound_queues) != 30);
8909 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8910 max_elements_per_oq) != 32);
8911 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8912 intr_coalescing_time_granularity) != 34);
8913 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8914 max_oq_element_length) != 36);
8915 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8916 min_oq_element_length) != 38);
8917 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
8918 iu_layer_descriptors) != 64);
8919 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
8920
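	/*
	 * Editor's note: the 576-byte total is consistent with a 64-byte
	 * fixed part followed by an array of 16-byte IU layer descriptors,
	 * i.e. room for (576 - 64) / 16 = 32 layers.  A hedged spot check
	 * of that arithmetic:
	 *
	 *	BUILD_BUG_ON(64 + 32 * sizeof(struct pqi_iu_layer_descriptor)
	 *		!= sizeof(struct pqi_device_capability));
	 */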
8921 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
8922 event_type) != 0);
8923 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
8924 oq_id) != 2);
8925 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
8926
8927 BUILD_BUG_ON(offsetof(struct pqi_event_config,
8928 num_event_descriptors) != 2);
8929 BUILD_BUG_ON(offsetof(struct pqi_event_config,
8930 descriptors) != 4);
8931
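	/*
	 * Editor's note: pqi_event_config is a 4-byte fixed part followed
	 * by num_event_descriptors packed 4-byte entries, so the table can
	 * be walked in place.  Hedged sketch of that walk:
	 *
	 *	for (i = 0; i < event_config->num_event_descriptors; i++) {
	 *		descriptor = &event_config->descriptors[i];
	 *		// descriptor->event_type, descriptor->oq_id ...
	 *	}
	 */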
8932 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
8933 ARRAY_SIZE(pqi_supported_event_types));
8934
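	/*
	 * Editor's note: this keeps the event-type table and its count
	 * macro in lockstep; adding an entry to one without the other
	 * breaks the build.  The same idiom in generic form (names
	 * hypothetical):
	 *
	 *	static const unsigned int my_types[] = { 1, 2, 3 };
	 *	#define MY_NUM_TYPES 3
	 *	BUILD_BUG_ON(MY_NUM_TYPES != ARRAY_SIZE(my_types));
	 */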
8935 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8936 header.iu_type) != 0);
8937 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8938 header.iu_length) != 2);
8939 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8940 event_type) != 8);
8941 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8942 event_id) != 10);
8943 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8944 additional_event_id) != 12);
8945 BUILD_BUG_ON(offsetof(struct pqi_event_response,
8946 data) != 16);
8947 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
8948
8949 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
8950 header.iu_type) != 0);
8951 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
8952 header.iu_length) != 2);
8953 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
8954 event_type) != 8);
8955 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
8956 event_id) != 10);
8957 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
8958 additional_event_id) != 12);
8959 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
8960
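	/*
	 * Editor's note: event_type, event_id and additional_event_id are
	 * deliberately asserted at the same offsets (8, 10, 12) in both the
	 * event response and the acknowledge request, so acknowledging an
	 * event is a field-for-field echo.  Hedged sketch of that echo:
	 *
	 *	request.event_type = event->event_type;
	 *	request.event_id = event->event_id;
	 *	request.additional_event_id = event->additional_event_id;
	 */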
8961 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8962 header.iu_type) != 0);
8963 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8964 header.iu_length) != 2);
8965 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8966 request_id) != 8);
8967 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8968 nexus_id) != 10);
8969 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8970 timeout) != 14);
8971 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8972 lun_number) != 16);
8973 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8974 protocol_specific) != 24);
8975 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8976 outbound_queue_id_to_manage) != 26);
8977 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8978 request_id_to_manage) != 28);
8979 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
8980 task_management_function) != 30);
8981 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
8982
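	/*
	 * Editor's sketch of a LUN reset issued through this IU; the field
	 * names come from the checks above, while the constants are
	 * assumptions rather than verified smartpqi.h definitions:
	 *
	 *	memset(&request, 0, sizeof(request));
	 *	request.header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
	 *	request.header.iu_length =
	 *		cpu_to_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH);
	 *	memcpy(request.lun_number, device->scsi3addr,
	 *		sizeof(request.lun_number));
	 *	request.task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
	 */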
8983 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8984 header.iu_type) != 0);
8985 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8986 header.iu_length) != 2);
8987 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8988 request_id) != 8);
8989 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8990 nexus_id) != 10);
8991 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8992 additional_response_info) != 12);
8993 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
8994 response_code) != 15);
8995 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
8996
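	/*
	 * Editor's note: response_code at byte 15 is the field the
	 * completion path inspects to decide whether the task management
	 * function succeeded.  Hedged sketch (constant name assumed):
	 *
	 *	if (response->response_code == SOP_TMF_COMPLETE)
	 *		rc = 0;
	 */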
8997 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
8998 configured_logical_drive_count) != 0);
8999 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9000 configuration_signature) != 1);
9001 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9002 firmware_version) != 5);
9003 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9004 extended_logical_unit_count) != 154);
9005 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9006 firmware_build_number) != 190);
9007 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9008 controller_mode) != 292);
9009
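	/*
	 * Editor's note: the BMIC structures mirror legacy Smart Array
	 * firmware layouts, so only the sparse set of fields the driver
	 * actually dereferences is pinned here (offsets 0, 1, 5, 154, 190,
	 * 292) rather than every byte of the buffer.  Hedged sketch of one
	 * such access:
	 *
	 *	memcpy(ctrl_info->firmware_version,
	 *		identify->firmware_version,
	 *		sizeof(identify->firmware_version));
	 */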
9010 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9011 phys_bay_in_box) != 115);
9012 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9013 device_type) != 120);
9014 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9015 redundant_path_present_map) != 1736);
9016 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9017 active_path_number) != 1738);
9018 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9019 alternate_paths_phys_connector) != 1739);
9020 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9021 alternate_paths_phys_box_on_port) != 1755);
9022 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9023 current_queue_depth_limit) != 1796);
9024 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
9025
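	/*
	 * Editor's note: the full 2560-byte size is asserted because the
	 * firmware DMAs the entire structure back, so the receive buffer
	 * must be allocated at exactly this size.  Hedged sketch:
	 *
	 *	buffer = kmalloc(sizeof(struct bmic_identify_physical_device),
	 *		GFP_KERNEL);
	 */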
9026 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
9027 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
9028 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
9029 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
9030 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
9031 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
9032 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
9033 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
9034 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
9035 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
9036 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
9037 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
9038
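	/*
	 * Editor's reading of the 1048560 ceiling: PQI carries queue
	 * element lengths in a 16-bit field expressed in 16-byte units, so
	 * the largest encodable length is 65535 * 16 = 1048560 bytes.
	 * Spot check of that arithmetic:
	 *
	 *	BUILD_BUG_ON(65535 * 16 != 1048560);
	 */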
9039 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
9040 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
9041 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
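	/*
	 * Editor's note: together these guarantee that reserving
	 * PQI_RESERVED_IO_SLOTS for internal requests still leaves at
	 * least one slot for regular I/O, even in the shrunken kdump
	 * configuration:
	 *
	 *	// usable = PQI_MAX_OUTSTANDING_REQUESTS_KDUMP -
	 *	//		PQI_RESERVED_IO_SLOTS;  asserted > 0
	 */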
 9042}
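/*
 * Editor's appendix (illustrative, not part of the driver): the function
 * above is a compile-time contract between the driver's C structs and the
 * PQI on-wire format.  The same technique, reduced to a minimal
 * self-contained user-space program (all names hypothetical); it compiles
 * and runs standalone once lifted out of the #if 0 guard:
 */
#if 0
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* a toy wire header whose layout must never change */
struct wire_header {
	uint8_t  type;		/* byte 0 */
	uint8_t  flags;		/* byte 1 */
	uint16_t length;	/* bytes 2-3 */
	uint32_t tag;		/* bytes 4-7 */
};

/* fail the build, not the protocol, if the layout ever drifts */
static_assert(offsetof(struct wire_header, type) == 0, "type moved");
static_assert(offsetof(struct wire_header, length) == 2, "length moved");
static_assert(offsetof(struct wire_header, tag) == 4, "tag moved");
static_assert(sizeof(struct wire_header) == 8, "header resized");

int main(void)
{
	printf("wire_header layout verified at compile time\n");
	return 0;
}
#endif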