scsi: smartpqi: Fix rmmod stack trace
drivers/scsi/smartpqi/smartpqi_init.c
// SPDX-License-Identifier: GPL-2.0
/*
 * driver for Microchip PQI-based storage controllers
 * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
 * Copyright (c) 2016-2018 Microsemi Corporation
 * Copyright (c) 2016 PMC-Sierra, Inc.
 *
 * Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION "2.1.12-055"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
#define DRIVER_RELEASE 12
#define DRIVER_REVISION 55

#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
	DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT "smartpqi"

#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))

#define PQI_POST_RESET_DELAY_SECS 5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10

MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
	DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_msecs);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action action;
	char *name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-6",
	"RAID-1(Triple)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0	0
#define SA_RAID_4	1
#define SA_RAID_1	2	/* also used for RAID 10 */
#define SA_RAID_5	3	/* also used for RAID 50 */
#define SA_RAID_51	4
#define SA_RAID_6	5	/* also used for RAID 60 */
#define SA_RAID_TRIPLE	6	/* also used for RAID 1+0 Triple */
#define SA_RAID_MAX	SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN	0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

#define PQI_DRIVER_SCRATCH_PQI_MODE			0x1
#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED		0x2

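/*
 * The SIS driver scratch register holds a small amount of driver state kept
 * on the controller itself: bit 0 records whether the controller was left in
 * PQI or SIS mode, and bit 1 records whether firmware triage is supported.
 * The helpers below read-modify-write that register through
 * sis_read_driver_scratch()/sis_write_driver_scratch().
 */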
static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (mode == PQI_MODE)
		driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
{
	return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
}

static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (is_supported)
		driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = true;
	mutex_lock(&ctrl_info->scan_mutex);
}

static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = false;
	mutex_unlock(&ctrl_info->scan_mutex);
}

static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->scan_blocked;
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;
	unsigned int num_loops;
	int msecs_sleep;

	shost = ctrl_info->scsi_host;

	scsi_block_requests(shost);

	num_loops = 0;
	msecs_sleep = 20;
	while (scsi_host_busy(shost)) {
		num_loops++;
		if (num_loops == 10)
			msecs_sleep = 500;
		msleep(msecs_sleep);
	}
}

static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
}

static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	if (!pqi_ctrl_blocked(ctrl_info))
		return;

	atomic_inc(&ctrl_info->num_blocked_threads);
	wait_event(ctrl_info->block_requests_wait,
		!pqi_ctrl_blocked(ctrl_info));
	atomic_dec(&ctrl_info->num_blocked_threads);
}

#define PQI_QUIESCE_WARNING_TIMEOUT_SECS	10

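/*
 * Wait for in-flight driver activity to quiesce: active requesters bump
 * num_busy_threads (pqi_ctrl_busy()) and threads waiting on a blocked
 * controller are counted in num_blocked_threads, so the loop below exits
 * once no thread is busy without also being blocked. A warning is logged if
 * this takes longer than PQI_QUIESCE_WARNING_TIMEOUT_SECS.
 */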
static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long start_jiffies;
	unsigned long warning_timeout;
	bool displayed_warning;

	displayed_warning = false;
	start_jiffies = jiffies;
	warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;

	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads)) {
		if (time_after(jiffies, warning_timeout)) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"waiting %u seconds for driver activity to quiesce\n",
				jiffies_to_msecs(jiffies - start_jiffies) / 1000);
			displayed_warning = true;
			warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
		}
		usleep_range(1000, 2000);
	}

	if (displayed_warning)
		dev_warn(&ctrl_info->pci_dev->dev,
			"driver activity quiesced after waiting for %u seconds\n",
			jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->ofa_mutex);
}

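/*
 * ofa_mutex is held for the duration of an Online Firmware Activation
 * (pqi_ctrl_ofa_start()/pqi_ctrl_ofa_done() above). Taking it and
 * immediately releasing it therefore blocks the caller until any OFA in
 * progress has finished.
 */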
static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
	return mutex_is_locked(&ctrl_info->ofa_mutex);
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
	return device->in_remove;
}

static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
	int index;

	for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
		if (event_type == pqi_supported_event_types[index])
			return index;

	return -1;
}

static inline bool pqi_is_supported_event(unsigned int event_type)
{
	return pqi_event_type_to_event_index(event_type) != -1;
}

static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
	unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	u8 status;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~PQI_SOFT_RESET_ABORT;
	writeb(status, ctrl_info->soft_reset_status);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case TEST_UNIT_READY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = TEST_UNIT_READY;
		break;
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS) {
			if (ctrl_info->rpl_extended_format_4_5_supported)
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
			else
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
		} else {
			cdb[1] = ctrl_info->ciss_report_log_flags;
		}
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
	case BMIC_SENSE_FEATURE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

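/*
 * I/O request slots are claimed lock-free: the first slot whose refcount
 * transitions from 0 to 1 is taken, and next_io_request_slot is only a hint
 * for where to start searching, so the unlocked updates below are benign
 * races.
 */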
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
		buffer, buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL);
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

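/*
 * The BMIC sense-feature AIO limits are reported in units of 1 KiB; a value
 * of zero means "no limit", which is translated here to ~0 (UINT_MAX).
 */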
static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
{
	u32 bytes;

	bytes = get_unaligned_le16(limit);
	if (bytes == 0)
		bytes = ~0;
	else
		bytes *= 1024;

	return bytes;
}

#pragma pack(1)

struct bmic_sense_feature_buffer {
	struct bmic_sense_feature_buffer_header header;
	struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
};

#pragma pack()

#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH	\
	offsetofend(struct bmic_sense_feature_buffer, \
		aio_subpage.max_write_raid_1_10_3drive)

#define MINIMUM_AIO_SUBPAGE_LENGTH	\
	(offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
		max_write_raid_1_10_3drive) - \
		sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))

static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;
	struct bmic_sense_feature_buffer *buffer;

	buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
		buffer, sizeof(*buffer), 0, &dir);
	if (rc)
		goto error;

	request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
	request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	if (rc)
		goto error;

	if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->header.buffer_length) <
			MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
		buffer->aio_subpage.header.page_code !=
			BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->aio_subpage.header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
			MINIMUM_AIO_SUBPAGE_LENGTH) {
		goto error;
	}

	ctrl_info->max_transfer_encrypted_sas_sata =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_sas_sata);

	ctrl_info->max_transfer_encrypted_nvme =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_nvme);

	ctrl_info->max_write_raid_5_6 =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_5_6);

	ctrl_info->max_write_raid_1_10_2drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_2drive);

	ctrl_info->max_write_raid_1_10_3drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_3drive);

error:
	kfree(buffer);

	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA	(1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

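/*
 * The BMIC host wellness buffers built below are tag-delimited: "<HW>"
 * starts the buffer, "DV" and "TD" introduce the driver-version and time
 * payloads, and the "DW" (dont_write) and "ZZ" (end) tags close it out.
 */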
#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8 start_tag[4];
	u8 driver_version_tag[2];
	__le16 driver_version_length;
	char driver_version[32];
	u8 dont_write_tag[2];
	u8 end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8 start_tag[4];
	u8 time_tag[2];
	__le16 time_length;
	u8 time[8];
	u8 dont_write_tag[2];
	u8 end_tag[2];
};

#pragma pack()

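/*
 * The 8-byte wellness time payload is BCD encoded as: hour, minute, second,
 * 0, month, day, century, year within century.
 */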
static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
	size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length =
		get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

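/*
 * Fetch the physical LUN list. Controllers that do not return extended
 * format 4 (16-byte WWIDs) report extended format 2 (8-byte WWIDs); that
 * list is converted below into the 16-byte WWID layout the rest of the
 * driver expects, with the 8-byte WWID copied into the upper half of the
 * 16-byte field.
 */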
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	int rc;
	unsigned int i;
	u8 rpl_response_format;
	u32 num_physicals;
	size_t rpl_16byte_wwid_list_length;
	void *rpl_list;
	struct report_lun_header *rpl_header;
	struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
	struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;

	rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
	if (rc)
		return rc;

	if (ctrl_info->rpl_extended_format_4_5_supported) {
		rpl_header = rpl_list;
		rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
		if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
			*buffer = rpl_list;
			return 0;
		} else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
			dev_err(&ctrl_info->pci_dev->dev,
				"RPL returned unsupported data format %u\n",
				rpl_response_format);
			return -EINVAL;
		} else {
			dev_warn(&ctrl_info->pci_dev->dev,
				"RPL returned extended format 2 instead of 4\n");
		}
	}

	rpl_8byte_wwid_list = rpl_list;
	num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);
	rpl_16byte_wwid_list_length = sizeof(struct report_lun_header) + (num_physicals * sizeof(struct report_phys_lun_16byte_wwid));

	rpl_16byte_wwid_list = kmalloc(rpl_16byte_wwid_list_length, GFP_KERNEL);
	if (!rpl_16byte_wwid_list)
		return -ENOMEM;

	put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
		&rpl_16byte_wwid_list->header.list_length);
	rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;

	for (i = 0; i < num_physicals; i++) {
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
		memset(&rpl_16byte_wwid_list->lun_entries[i].wwid, 0, 8);
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
		rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
		rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
		rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
		rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
		rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
	}

	kfree(rpl_8byte_wwid_list);
	*buffer = rpl_16byte_wwid_list;

	return 0;
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_16byte_wwid_list **physdev_list,
	struct report_log_lun_list **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_list *internal_logdev_list;
	struct report_log_lun_list *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_list *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_TRIPLE) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(Triple) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

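/*
 * The RAID map is fetched twice when necessary: an initial read using the
 * default structure size, then, if the controller reports a larger
 * structure_size, a second read with a buffer of exactly that size.
 */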
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {

		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size, 0, NULL);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"requested %u bytes, received %u bytes\n",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			rc = -EINVAL;
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	if (!ctrl_info->lv_drive_type_mix_valid) {
		device->max_transfer_encrypted = ~0;
		return;
	}

	switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
	case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_sas_sata;
		break;
	case LV_DRIVE_TYPE_MIX_NVME_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_nvme;
		break;
	case LV_DRIVE_TYPE_MIX_UNKNOWN:
	case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
	default:
		device->max_transfer_encrypted =
			min(ctrl_info->max_transfer_encrypted_sas_sata,
				ctrl_info->max_transfer_encrypted_nvme);
		break;
	}
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS	4
#define RAID_BYPASS_CONFIGURED	0x1
#define RAID_BYPASS_ENABLED	0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0) {
		device->raid_bypass_enabled = true;
		if (get_unaligned_le16(&device->raid_map->flags) &
			RAID_MAP_ENCRYPTION_ENABLED)
			pqi_set_max_transfer_encrypted(ctrl_info, device);
	}

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

#define PQI_DEVICE_PHY_MAP_SUPPORTED	0x10

static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return rc;
	}

	scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
	scsi_sanitize_inquiry_string(&id_phys->model[8], 16);

	memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
	memcpy(device->model, &id_phys->model[8], sizeof(device->model));

	device->box_index = id_phys->box_index;
	device->phys_box_on_bus = id_phys->phys_box_on_bus;
	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;

	memcpy(&device->page_83_identifier, &id_phys->page_83_identifier,
		sizeof(device->page_83_identifier));

	if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
		id_phys->phy_count)
		device->phy_id =
			id_phys->phy_to_phy_map[device->active_path_index];
	else
		device->phy_id = 0xFF;

	return 0;
}

static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}

/*
 * Prevent adding drive to OS for some corner cases such as a drive
 * undergoing a sanitize operation. Some OSes will continue to poll
 * the drive until the sanitize completes, which can take hours,
 * resulting in long bootup delays. Commands such as TUR, READ_CAP
 * are allowed, but READ/WRITE cause check condition. So the OS
 * cannot check/read the partition table.
 * Note: devices that have completed sanitize must be re-enabled
 * using the management utility.
 */
static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	u8 scsi_status;
	int rc;
	enum dma_data_direction dir;
	char *buffer;
	int buffer_length = 64;
	size_t sense_data_length;
	struct scsi_sense_hdr sshdr;
	struct pqi_raid_path_request request;
	struct pqi_raid_error_info error_info;
	bool offline = false; /* Assume keep online */

	/* Do not check controllers. */
	if (pqi_is_hba_lunid(device->scsi3addr))
		return false;

	/* Do not check LVs. */
	if (pqi_is_logical_device(device))
		return false;

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return false; /* Assume not offline */

	/* Check for SANITIZE in progress using TUR */
	rc = pqi_build_raid_path_request(ctrl_info, &request,
		TEST_UNIT_READY, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		goto out; /* Assume not offline */

	memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number));

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info);

	if (rc)
		goto out; /* Assume not offline */

	scsi_status = error_info.status;
	sense_data_length = get_unaligned_le16(&error_info.sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&error_info.response_data_length);
	if (sense_data_length) {
		if (sense_data_length > sizeof(error_info.data))
			sense_data_length = sizeof(error_info.data);

		/*
		 * Check for sanitize in progress: asc:0x04, ascq: 0x1b
		 */
		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			scsi_normalize_sense(error_info.data,
				sense_data_length, &sshdr) &&
			sshdr.sense_key == NOT_READY &&
			sshdr.asc == 0x04 &&
			sshdr.ascq == 0x1b) {
			device->device_offline = true;
			offline = true;
			goto out; /* Keep device offline */
		}
	}

out:
	kfree(buffer);
	return offline;
}

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	if (device->is_expander_smp_device)
		return 0;

	if (pqi_is_logical_device(device))
		rc = pqi_get_logical_device_info(ctrl_info, device);
	else
		rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);

	return rc;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1831 unknown_state_str, device->volume_status);
1832 status = unknown_state_buffer;
1833 break;
1834 }
1835
1836 dev_info(&ctrl_info->pci_dev->dev,
1837 "scsi %d:%d:%d:%d %s\n",
1838 ctrl_info->scsi_host->host_no,
1839 device->bus, device->target, device->lun, status);
1840}
1841
6c223761
KB
1842static void pqi_rescan_worker(struct work_struct *work)
1843{
1844 struct pqi_ctrl_info *ctrl_info;
1845
1846 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1847 rescan_work);
1848
1849 pqi_scan_scsi_devices(ctrl_info);
1850}
1851
1852static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1853 struct pqi_scsi_dev *device)
1854{
1855 int rc;
1856
1857 if (pqi_is_logical_device(device))
1858 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1859 device->target, device->lun);
1860 else
1861 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1862
1863 return rc;
1864}
1865
18ff5f08 1866#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000)
1e46731e 1867
583891c9 1868static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
6c223761 1869{
1e46731e
MR
1870 int rc;
1871
18ff5f08
KB
1872 rc = pqi_device_wait_for_pending_io(ctrl_info, device,
1873 PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
1e46731e
MR
1874 if (rc)
1875 dev_err(&ctrl_info->pci_dev->dev,
4d15ad38 1876 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
1e46731e
MR
1877 ctrl_info->scsi_host->host_no, device->bus,
1878 device->target, device->lun,
1879 atomic_read(&device->scsi_cmds_outstanding));
1880
6c223761
KB
1881 if (pqi_is_logical_device(device))
1882 scsi_remove_device(device->sdev);
1883 else
1884 pqi_remove_sas_device(device);
819225b0
DB
1885
1886 pqi_device_remove_start(device);
6c223761
KB
1887}
1888
1889/* Assumes the SCSI device list lock is held. */
1890
1891static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1892 int bus, int target, int lun)
1893{
1894 struct pqi_scsi_dev *device;
1895
4d15ad38
KB
1896 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1897 if (device->bus == bus && device->target == target && device->lun == lun)
6c223761
KB
1898 return device;
1899
1900 return NULL;
1901}
1902
583891c9 1903static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
6c223761
KB
1904{
1905 if (dev1->is_physical_device != dev2->is_physical_device)
1906 return false;
1907
1908 if (dev1->is_physical_device)
28ca6d87 1909 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;
6c223761 1910
583891c9 1911 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
6c223761
KB
1912}
1913
1914enum pqi_find_result {
1915 DEVICE_NOT_FOUND,
1916 DEVICE_CHANGED,
1917 DEVICE_SAME,
1918};
1919
1920static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
4d15ad38 1921 struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
6c223761
KB
1922{
1923 struct pqi_scsi_dev *device;
1924
4d15ad38
KB
1925 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1926 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
6c223761
KB
1927 *matching_device = device;
1928 if (pqi_device_equal(device_to_find, device)) {
1929 if (device_to_find->volume_offline)
1930 return DEVICE_CHANGED;
1931 return DEVICE_SAME;
1932 }
1933 return DEVICE_CHANGED;
1934 }
1935 }
1936
1937 return DEVICE_NOT_FOUND;
1938}
1939
3d46a59a
DB
1940static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1941{
1942 if (device->is_expander_smp_device)
1943 return "Enclosure SMP ";
1944
1945 return scsi_device_type(device->devtype);
1946}
1947
6de783f6
KB
1948#define PQI_DEV_INFO_BUFFER_LENGTH 128
1949
6c223761
KB
1950static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1951 char *action, struct pqi_scsi_dev *device)
1952{
6de783f6
KB
1953 ssize_t count;
1954 char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1955
a4256252 1956 count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
6de783f6
KB
1957 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1958
1959 if (device->target_lun_valid)
181aea89 1960 count += scnprintf(buffer + count,
6de783f6
KB
1961 PQI_DEV_INFO_BUFFER_LENGTH - count,
1962 "%d:%d",
1963 device->target,
1964 device->lun);
1965 else
181aea89 1966 count += scnprintf(buffer + count,
6de783f6
KB
1967 PQI_DEV_INFO_BUFFER_LENGTH - count,
1968 "-:-");
1969
1970 if (pqi_is_logical_device(device))
181aea89 1971 count += scnprintf(buffer + count,
6de783f6
KB
1972 PQI_DEV_INFO_BUFFER_LENGTH - count,
1973 " %08x%08x",
1974 *((u32 *)&device->scsi3addr),
1975 *((u32 *)&device->scsi3addr[4]));
1976 else
181aea89 1977 count += scnprintf(buffer + count,
6de783f6 1978 PQI_DEV_INFO_BUFFER_LENGTH - count,
28ca6d87
MM
1979 " %016llx%016llx",
1980 get_unaligned_be64(&device->wwid[0]),
1981 get_unaligned_be64(&device->wwid[8]));
6de783f6 1982
181aea89 1983 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
6de783f6 1984 " %s %.8s %.16s ",
3d46a59a 1985 pqi_device_type(device),
6c223761 1986 device->vendor,
6de783f6
KB
1987 device->model);
1988
1989 if (pqi_is_logical_device(device)) {
1990 if (device->devtype == TYPE_DISK)
181aea89 1991 count += scnprintf(buffer + count,
6de783f6
KB
1992 PQI_DEV_INFO_BUFFER_LENGTH - count,
1993 "SSDSmartPathCap%c En%c %-12s",
588a63fe
KB
1994 device->raid_bypass_configured ? '+' : '-',
1995 device->raid_bypass_enabled ? '+' : '-',
6de783f6
KB
1996 pqi_raid_level_to_string(device->raid_level));
1997 } else {
181aea89 1998 count += scnprintf(buffer + count,
6de783f6
KB
1999 PQI_DEV_INFO_BUFFER_LENGTH - count,
2000 "AIO%c", device->aio_enabled ? '+' : '-');
2001 if (device->devtype == TYPE_DISK ||
2002 device->devtype == TYPE_ZBC)
181aea89 2003 count += scnprintf(buffer + count,
6de783f6
KB
2004 PQI_DEV_INFO_BUFFER_LENGTH - count,
2005 " qd=%-6d", device->queue_depth);
2006 }
2007
2008 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
6c223761
KB
2009}
2010
2011/* Assumes the SCSI device list lock is held. */
2012
2013static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
2014 struct pqi_scsi_dev *new_device)
2015{
6c223761
KB
2016 existing_device->device_type = new_device->device_type;
2017 existing_device->bus = new_device->bus;
2018 if (new_device->target_lun_valid) {
2019 existing_device->target = new_device->target;
2020 existing_device->lun = new_device->lun;
2021 existing_device->target_lun_valid = true;
2022 }
2023
244ca45e
MR
2024 if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION ||
2025 existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) &&
2026 new_device->volume_status == CISS_LV_OK)
2027 existing_device->rescan = true;
2028
6c223761
KB
2029 /* By definition, the scsi3addr and wwid fields are already the same. */
2030
2031 existing_device->is_physical_device = new_device->is_physical_device;
bd10cf0b
KB
2032 existing_device->is_external_raid_device =
2033 new_device->is_external_raid_device;
3d46a59a
DB
2034 existing_device->is_expander_smp_device =
2035 new_device->is_expander_smp_device;
6c223761
KB
2036 existing_device->aio_enabled = new_device->aio_enabled;
2037 memcpy(existing_device->vendor, new_device->vendor,
2038 sizeof(existing_device->vendor));
2039 memcpy(existing_device->model, new_device->model,
2040 sizeof(existing_device->model));
2041 existing_device->sas_address = new_device->sas_address;
2042 existing_device->raid_level = new_device->raid_level;
2043 existing_device->queue_depth = new_device->queue_depth;
2044 existing_device->aio_handle = new_device->aio_handle;
2045 existing_device->volume_status = new_device->volume_status;
2046 existing_device->active_path_index = new_device->active_path_index;
ec504b23 2047 existing_device->phy_id = new_device->phy_id;
6c223761
KB
2048 existing_device->path_map = new_device->path_map;
2049 existing_device->bay = new_device->bay;
2d2ad4bc
GW
2050 existing_device->box_index = new_device->box_index;
2051 existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
583891c9 2052 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
6c223761
KB
2053 memcpy(existing_device->box, new_device->box,
2054 sizeof(existing_device->box));
2055 memcpy(existing_device->phys_connector, new_device->phys_connector,
2056 sizeof(existing_device->phys_connector));
7a012c23 2057 existing_device->next_bypass_group = 0;
6c223761
KB
2058 kfree(existing_device->raid_map);
2059 existing_device->raid_map = new_device->raid_map;
588a63fe
KB
2060 existing_device->raid_bypass_configured =
2061 new_device->raid_bypass_configured;
2062 existing_device->raid_bypass_enabled =
2063 new_device->raid_bypass_enabled;
a9a68101 2064 existing_device->device_offline = false;
6c223761
KB
2065
2066 /* To prevent this from being freed later. */
2067 new_device->raid_map = NULL;
2068}
2069
2070static inline void pqi_free_device(struct pqi_scsi_dev *device)
2071{
2072 if (device) {
2073 kfree(device->raid_map);
2074 kfree(device);
2075 }
2076}
2077
2078/*
2079 * Called when exposing a new device to the OS fails in order to re-adjust
2080 * our internal SCSI device list to match the SCSI ML's view.
2081 */
2082
2083static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
2084 struct pqi_scsi_dev *device)
2085{
2086 unsigned long flags;
2087
2088 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2089 list_del(&device->scsi_device_list_entry);
2090 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2091
2092 /* Allow the device structure to be freed later. */
2093 device->keep_device = false;
2094}
2095
3d46a59a
DB
2096static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
2097{
2098 if (device->is_expander_smp_device)
2099 return device->sas_port != NULL;
2100
2101 return device->sdev != NULL;
2102}
2103
6c223761
KB
2104static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
2105 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
2106{
2107 int rc;
2108 unsigned int i;
2109 unsigned long flags;
2110 enum pqi_find_result find_result;
2111 struct pqi_scsi_dev *device;
2112 struct pqi_scsi_dev *next;
2113 struct pqi_scsi_dev *matching_device;
8a994a04
KB
2114 LIST_HEAD(add_list);
2115 LIST_HEAD(delete_list);
6c223761
KB
2116
2117 /*
2118 * The idea here is to do as little work as possible while holding the
2119 * spinlock. That's why we go to great pains to defer anything other
2120 * than updating the internal device list until after we release the
2121 * spinlock.
2122 */
2123
2124 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2125
2126 /* Assume that all devices in the existing list have gone away. */
4d15ad38 2127 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
6c223761
KB
2128 device->device_gone = true;
2129
2130 for (i = 0; i < num_new_devices; i++) {
2131 device = new_device_list[i];
2132
2133 find_result = pqi_scsi_find_entry(ctrl_info, device,
694c5d5b 2134 &matching_device);
6c223761
KB
2135
2136 switch (find_result) {
2137 case DEVICE_SAME:
2138 /*
2139 * The newly found device is already in the existing
2140 * device list.
2141 */
2142 device->new_device = false;
2143 matching_device->device_gone = false;
2144 pqi_scsi_update_device(matching_device, device);
2145 break;
2146 case DEVICE_NOT_FOUND:
2147 /*
2148 * The newly found device is NOT in the existing device
2149 * list.
2150 */
2151 device->new_device = true;
2152 break;
2153 case DEVICE_CHANGED:
2154 /*
2155 * The original device has gone away and we need to add
2156 * the new device.
2157 */
2158 device->new_device = true;
2159 break;
6c223761
KB
2160 }
2161 }
2162
2163 /* Process all devices that have gone away. */
2164 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2165 scsi_device_list_entry) {
2166 if (device->device_gone) {
819225b0 2167 list_del(&device->scsi_device_list_entry);
6c223761
KB
2168 list_add_tail(&device->delete_list_entry, &delete_list);
2169 }
2170 }
2171
2172 /* Process all new devices. */
2173 for (i = 0; i < num_new_devices; i++) {
2174 device = new_device_list[i];
2175 if (!device->new_device)
2176 continue;
2177 if (device->volume_offline)
2178 continue;
2179 list_add_tail(&device->scsi_device_list_entry,
2180 &ctrl_info->scsi_device_list);
2181 list_add_tail(&device->add_list_entry, &add_list);
2182 /* To prevent this device structure from being freed later. */
2183 device->keep_device = true;
2184 }
2185
6c223761
KB
2186 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2187
2790cd4d
KB
2188 /*
2189 * If OFA is in progress and there are devices that need to be deleted,
2190 * allow any pending reset operations to continue and unblock any SCSI
2191 * requests before removal.
2192 */
2193 if (pqi_ofa_in_progress(ctrl_info)) {
2194 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
2195 if (pqi_is_device_added(device))
2196 pqi_device_remove_start(device);
2197 pqi_ctrl_unblock_device_reset(ctrl_info);
2198 pqi_scsi_unblock_requests(ctrl_info);
2199 }
4fd22c13 2200
6c223761 2201 /* Remove all devices that have gone away. */
4d15ad38 2202 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
6c223761
KB
2203 if (device->volume_offline) {
2204 pqi_dev_info(ctrl_info, "offline", device);
2205 pqi_show_volume_status(ctrl_info, device);
4d15ad38 2206 } else {
819225b0 2207 pqi_dev_info(ctrl_info, "removed", device);
4d15ad38 2208 }
819225b0
DB
2209 if (pqi_is_device_added(device))
2210 pqi_remove_device(ctrl_info, device);
2211 list_del(&device->delete_list_entry);
2212 pqi_free_device(device);
6c223761
KB
2213 }
2214
2215 /*
2216 * Notify the SCSI ML if the queue depth of any existing device has
2217 * changed.
2218 */
583891c9
KB
2219 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
2220 if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
2221 device->advertised_queue_depth = device->queue_depth;
2222 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
244ca45e
MR
2223 if (device->rescan) {
2224 scsi_rescan_device(&device->sdev->sdev_gendev);
2225 device->rescan = false;
2226 }
6c223761
KB
2227 }
2228 }
2229
2230 /* Expose any new devices. */
2231 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
3d46a59a 2232 if (!pqi_is_device_added(device)) {
6c223761 2233 rc = pqi_add_device(ctrl_info, device);
ce143793
KB
2234 if (rc == 0) {
2235 pqi_dev_info(ctrl_info, "added", device);
2236 } else {
6c223761
KB
2237 dev_warn(&ctrl_info->pci_dev->dev,
2238 "scsi %d:%d:%d:%d addition failed, device not added\n",
2239 ctrl_info->scsi_host->host_no,
2240 device->bus, device->target,
2241 device->lun);
2242 pqi_fixup_botched_add(ctrl_info, device);
6c223761
KB
2243 }
2244 }
6c223761
KB
2245 }
2246}
2247
ce143793 2248static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
6c223761 2249{
ce143793
KB
2250 /*
2251 * Only support the HBA controller itself as a RAID
2252 * controller. If it's a RAID controller other than
2253 * the HBA itself (an external RAID controller, for
2254 * example), we don't support it.
2255 */
2256 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2257 !pqi_is_hba_lunid(device->scsi3addr))
583891c9 2258 return false;
6c223761 2259
ce143793 2260 return true;
6c223761
KB
2261}
2262
94086f5b 2263static inline bool pqi_skip_device(u8 *scsi3addr)
6c223761 2264{
94086f5b
KB
2265 /* Ignore all masked devices. */
2266 if (MASKED_DEVICE(scsi3addr))
6c223761 2267 return true;
6c223761
KB
2268
2269 return false;
2270}
2271
522bc026
DC
2272static inline void pqi_mask_device(u8 *scsi3addr)
2273{
2274 scsi3addr[3] |= 0xc0;
2275}
2276
3d46a59a
DB
2277static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
2278{
ce143793
KB
2279 switch (device->device_type) {
2280 case SA_DEVICE_TYPE_SAS:
2281 case SA_DEVICE_TYPE_EXPANDER_SMP:
2282 case SA_DEVICE_TYPE_SES:
3d46a59a
DB
2283 return true;
2284 }
2285
2286 return false;
2287}
2288
cd128244
DC
2289static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2290{
583891c9 2291 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
cd128244
DC
2292}
2293
7a84a821 2294static inline void pqi_set_physical_device_wwid(struct pqi_ctrl_info *ctrl_info,
28ca6d87 2295 struct pqi_scsi_dev *device, struct report_phys_lun_16byte_wwid *phys_lun)
7a84a821
KB
2296{
2297 if (ctrl_info->unique_wwid_in_report_phys_lun_supported ||
28ca6d87 2298 ctrl_info->rpl_extended_format_4_5_supported ||
7a84a821 2299 pqi_is_device_with_sas_address(device))
28ca6d87 2300 memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
7a84a821 2301 else
28ca6d87 2302 memcpy(&device->wwid[8], device->page_83_identifier, 8);
cd128244
DC
2303}
2304
6c223761
KB
2305static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2306{
2307 int i;
2308 int rc;
8a994a04 2309 LIST_HEAD(new_device_list_head);
28ca6d87
MM
2310 struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
2311 struct report_log_lun_list *logdev_list = NULL;
2312 struct report_phys_lun_16byte_wwid *phys_lun;
2313 struct report_log_lun *log_lun;
6c223761
KB
2314 struct bmic_identify_physical_device *id_phys = NULL;
2315 u32 num_physicals;
2316 u32 num_logicals;
2317 struct pqi_scsi_dev **new_device_list = NULL;
2318 struct pqi_scsi_dev *device;
2319 struct pqi_scsi_dev *next;
2320 unsigned int num_new_devices;
2321 unsigned int num_valid_devices;
2322 bool is_physical_device;
2323 u8 *scsi3addr;
5e6a9760
GW
2324 unsigned int physical_index;
2325 unsigned int logical_index;
6c223761 2326 static char *out_of_memory_msg =
6de783f6 2327 "failed to allocate memory, device discovery stopped";
6c223761 2328
6c223761
KB
2329 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2330 if (rc)
2331 goto out;
2332
2333 if (physdev_list)
2334 num_physicals =
2335 get_unaligned_be32(&physdev_list->header.list_length)
2336 / sizeof(physdev_list->lun_entries[0]);
2337 else
2338 num_physicals = 0;
2339
2340 if (logdev_list)
2341 num_logicals =
2342 get_unaligned_be32(&logdev_list->header.list_length)
2343 / sizeof(logdev_list->lun_entries[0]);
2344 else
2345 num_logicals = 0;
2346
2347 if (num_physicals) {
2348 /*
2349 * We need this buffer for calls to pqi_get_device_info()
2350 * below. We allocate it here instead of inside
2351 * pqi_get_device_info() because it's a fairly large
2352 * buffer.
2353 */
2354 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2355 if (!id_phys) {
2356 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2357 out_of_memory_msg);
2358 rc = -ENOMEM;
2359 goto out;
2360 }
522bc026 2361
694c5d5b 2362 if (pqi_hide_vsep) {
522bc026 2363 for (i = num_physicals - 1; i >= 0; i--) {
28ca6d87
MM
2364 phys_lun = &physdev_list->lun_entries[i];
2365 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
2366 pqi_mask_device(phys_lun->lunid);
522bc026
DC
2367 break;
2368 }
2369 }
2370 }
6c223761
KB
2371 }
2372
f6cc2a77
KB
2373 if (num_logicals &&
2374 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2375 ctrl_info->lv_drive_type_mix_valid = true;
2376
6c223761
KB
2377 num_new_devices = num_physicals + num_logicals;
2378
6da2ec56
KC
2379 new_device_list = kmalloc_array(num_new_devices,
2380 sizeof(*new_device_list),
2381 GFP_KERNEL);
6c223761
KB
2382 if (!new_device_list) {
2383 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2384 rc = -ENOMEM;
2385 goto out;
2386 }
2387
2388 for (i = 0; i < num_new_devices; i++) {
2389 device = kzalloc(sizeof(*device), GFP_KERNEL);
2390 if (!device) {
2391 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2392 out_of_memory_msg);
2393 rc = -ENOMEM;
2394 goto out;
2395 }
2396 list_add_tail(&device->new_device_list_entry,
2397 &new_device_list_head);
2398 }
2399
2400 device = NULL;
2401 num_valid_devices = 0;
5e6a9760
GW
2402 physical_index = 0;
2403 logical_index = 0;
6c223761
KB
2404
2405 for (i = 0; i < num_new_devices; i++) {
2406
5e6a9760
GW
2407 if ((!pqi_expose_ld_first && i < num_physicals) ||
2408 (pqi_expose_ld_first && i >= num_logicals)) {
6c223761 2409 is_physical_device = true;
28ca6d87
MM
2410 phys_lun = &physdev_list->lun_entries[physical_index++];
2411 log_lun = NULL;
2412 scsi3addr = phys_lun->lunid;
6c223761
KB
2413 } else {
2414 is_physical_device = false;
28ca6d87
MM
2415 phys_lun = NULL;
2416 log_lun = &logdev_list->lun_entries[logical_index++];
2417 scsi3addr = log_lun->lunid;
6c223761
KB
2418 }
2419
94086f5b 2420 if (is_physical_device && pqi_skip_device(scsi3addr))
6c223761
KB
2421 continue;
2422
2423 if (device)
2424 device = list_next_entry(device, new_device_list_entry);
2425 else
2426 device = list_first_entry(&new_device_list_head,
2427 struct pqi_scsi_dev, new_device_list_entry);
2428
2429 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2430 device->is_physical_device = is_physical_device;
3d46a59a 2431 if (is_physical_device) {
28ca6d87 2432 device->device_type = phys_lun->device_type;
ce143793 2433 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
3d46a59a
DB
2434 device->is_expander_smp_device = true;
2435 } else {
bd10cf0b
KB
2436 device->is_external_raid_device =
2437 pqi_is_external_raid_addr(scsi3addr);
3d46a59a 2438 }
6c223761 2439
ce143793
KB
2440 if (!pqi_is_supported_device(device))
2441 continue;
2442
be76f906
DB
2443 /* Do not present disks that the OS cannot fully probe */
2444 if (pqi_keep_device_offline(ctrl_info, device))
2445 continue;
2446
6c223761 2447 /* Gather information about the device. */
ce143793 2448 rc = pqi_get_device_info(ctrl_info, device, id_phys);
6c223761
KB
2449 if (rc == -ENOMEM) {
2450 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2451 out_of_memory_msg);
2452 goto out;
2453 }
2454 if (rc) {
6de783f6
KB
2455 if (device->is_physical_device)
2456 dev_warn(&ctrl_info->pci_dev->dev,
28ca6d87
MM
2457 "obtaining device info failed, skipping physical device %016llx%016llx\n",
2458 get_unaligned_be64(&phys_lun->wwid[0]),
2459 get_unaligned_be64(&phys_lun->wwid[8]));
6de783f6
KB
2460 else
2461 dev_warn(&ctrl_info->pci_dev->dev,
2462 "obtaining device info failed, skipping logical device %08x%08x\n",
2463 *((u32 *)&device->scsi3addr),
2464 *((u32 *)&device->scsi3addr[4]));
6c223761
KB
2465 rc = 0;
2466 continue;
2467 }
2468
6c223761
KB
2469 pqi_assign_bus_target_lun(device);
2470
6c223761 2471 if (device->is_physical_device) {
28ca6d87
MM
2472 pqi_set_physical_device_wwid(ctrl_info, device, phys_lun);
2473 if ((phys_lun->device_flags &
694c5d5b 2474 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
28ca6d87 2475 phys_lun->aio_handle) {
583891c9
KB
2476 device->aio_enabled = true;
2477 device->aio_handle =
28ca6d87 2478 phys_lun->aio_handle;
3d46a59a 2479 }
6c223761 2480 } else {
28ca6d87 2481 memcpy(device->volume_id, log_lun->volume_id,
6c223761
KB
2482 sizeof(device->volume_id));
2483 }
2484
3d46a59a 2485 if (pqi_is_device_with_sas_address(device))
28ca6d87 2486 device->sas_address = get_unaligned_be64(&device->wwid[8]);
6c223761
KB
2487
2488 new_device_list[num_valid_devices++] = device;
2489 }
2490
2491 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2492
2493out:
2494 list_for_each_entry_safe(device, next, &new_device_list_head,
2495 new_device_list_entry) {
2496 if (device->keep_device)
2497 continue;
2498 list_del(&device->new_device_list_entry);
2499 pqi_free_device(device);
2500 }
2501
2502 kfree(new_device_list);
2503 kfree(physdev_list);
2504 kfree(logdev_list);
2505 kfree(id_phys);
2506
2507 return rc;
2508}
2509
819225b0
DB
2510static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2511{
2512 unsigned long flags;
2513 struct pqi_scsi_dev *device;
2514 struct pqi_scsi_dev *next;
2515
819225b0
DB
2516 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2517 scsi_device_list_entry) {
2518 if (pqi_is_device_added(device))
2519 pqi_remove_device(ctrl_info, device);
c4ff687d 2520 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
819225b0
DB
2521 list_del(&device->scsi_device_list_entry);
2522 pqi_free_device(device);
c4ff687d 2523 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
819225b0 2524 }
819225b0
DB
2525}
2526
6c223761
KB
2527static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2528{
66f1c2b4
KB
2529 int rc;
2530 int mutex_acquired;
6c223761
KB
2531
2532 if (pqi_ctrl_offline(ctrl_info))
2533 return -ENXIO;
2534
66f1c2b4
KB
2535 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2536
2537 if (!mutex_acquired) {
2538 if (pqi_ctrl_scan_blocked(ctrl_info))
2539 return -EBUSY;
5f310425 2540 pqi_schedule_rescan_worker_delayed(ctrl_info);
66f1c2b4 2541 return -EINPROGRESS;
530dd8a7 2542 }
6c223761 2543
66f1c2b4
KB
2544 rc = pqi_update_scsi_devices(ctrl_info);
2545 if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2546 pqi_schedule_rescan_worker_delayed(ctrl_info);
2547
2548 mutex_unlock(&ctrl_info->scan_mutex);
2549
6c223761
KB
2550 return rc;
2551}
2552
2553static void pqi_scan_start(struct Scsi_Host *shost)
2554{
4fd22c13
MR
2555 struct pqi_ctrl_info *ctrl_info;
2556
2557 ctrl_info = shost_to_hba(shost);
4fd22c13
MR
2558
2559 pqi_scan_scsi_devices(ctrl_info);
6c223761
KB
2560}
2561
2562/* Returns TRUE if scan is finished. */
2563
2564static int pqi_scan_finished(struct Scsi_Host *shost,
2565 unsigned long elapsed_time)
2566{
2567 struct pqi_ctrl_info *ctrl_info;
2568
2569 ctrl_info = shost_priv(shost);
2570
2571 return !mutex_is_locked(&ctrl_info->scan_mutex);
2572}
2573
583891c9
KB
2574static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2575 struct raid_map *raid_map, u64 first_block)
6c223761
KB
2576{
2577 u32 volume_blk_size;
2578
2579 /*
2580 * Set the encryption tweak values based on logical block address.
2581 * If the block size is 512, the tweak value is equal to the LBA.
2582 * For other block sizes, tweak value is (LBA * block size) / 512.
2583 */
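	/*
	 * Worked example (editorial illustration, not part of the upstream
	 * comment): for a volume formatted with 4096-byte blocks, an I/O
	 * starting at LBA 100 yields first_block = (100 * 4096) / 512 = 800,
	 * i.e. the tweak always counts 512-byte units regardless of the
	 * volume block size.
	 */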
2584 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2585 if (volume_blk_size != 512)
2586 first_block = (first_block * volume_blk_size) / 512;
2587
2588 encryption_info->data_encryption_key_index =
2589 get_unaligned_le16(&raid_map->data_encryption_key_index);
2590 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2591 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2592}
2593
2594/*
588a63fe 2595 * Attempt to perform RAID bypass mapping for a logical volume I/O.
6c223761
KB
2596 */
2597
6702d2c4
DB
2598static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2599 struct pqi_scsi_dev_raid_map_data *rmd)
281a817f
DB
2600{
2601 bool is_supported = true;
2602
2603 switch (rmd->raid_level) {
2604 case SA_RAID_0:
2605 break;
2606 case SA_RAID_1:
f6cc2a77
KB
2607 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2608 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2609 is_supported = false;
2610 break;
7a012c23 2611 case SA_RAID_TRIPLE:
f6cc2a77
KB
2612 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2613 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
281a817f
DB
2614 is_supported = false;
2615 break;
2616 case SA_RAID_5:
f6cc2a77
KB
2617 if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2618 rmd->data_length > ctrl_info->max_write_raid_5_6))
6702d2c4
DB
2619 is_supported = false;
2620 break;
281a817f 2621 case SA_RAID_6:
f6cc2a77
KB
2622 if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2623 rmd->data_length > ctrl_info->max_write_raid_5_6))
281a817f
DB
2624 is_supported = false;
2625 break;
281a817f
DB
2626 default:
2627 is_supported = false;
f6cc2a77 2628 break;
281a817f
DB
2629 }
2630
2631 return is_supported;
2632}
2633
6c223761
KB
2634#define PQI_RAID_BYPASS_INELIGIBLE 1
2635
281a817f 2636static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
583891c9 2637 struct pqi_scsi_dev_raid_map_data *rmd)
6c223761 2638{
6c223761
KB
2639 /* Check for valid opcode, get LBA and block count. */
2640 switch (scmd->cmnd[0]) {
2641 case WRITE_6:
281a817f 2642 rmd->is_write = true;
df561f66 2643 fallthrough;
6c223761 2644 case READ_6:
281a817f 2645 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
e018ef57 2646 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
281a817f
DB
2647 rmd->block_cnt = (u32)scmd->cmnd[4];
2648 if (rmd->block_cnt == 0)
2649 rmd->block_cnt = 256;
6c223761
KB
2650 break;
2651 case WRITE_10:
281a817f 2652 rmd->is_write = true;
df561f66 2653 fallthrough;
6c223761 2654 case READ_10:
281a817f
DB
2655 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2656 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
6c223761
KB
2657 break;
2658 case WRITE_12:
281a817f 2659 rmd->is_write = true;
df561f66 2660 fallthrough;
6c223761 2661 case READ_12:
281a817f
DB
2662 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2663 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
6c223761
KB
2664 break;
2665 case WRITE_16:
281a817f 2666 rmd->is_write = true;
df561f66 2667 fallthrough;
6c223761 2668 case READ_16:
281a817f
DB
2669 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2670 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
6c223761
KB
2671 break;
2672 default:
2673 /* Process via normal I/O path. */
2674 return PQI_RAID_BYPASS_INELIGIBLE;
2675 }
2676
281a817f 2677 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
6c223761 2678
281a817f
DB
2679 return 0;
2680}
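/*
 * Worked example (editorial illustration, values assumed): a READ_10 CDB
 * of 28 00 00 00 10 00 00 00 08 00 decodes above to first_block = 0x1000
 * (big-endian bytes 2-5) and block_cnt = 8 (big-endian bytes 7-8), with
 * rmd->is_write left false; a WRITE_10 (opcode 0x2a) with the same bytes
 * additionally sets rmd->is_write = true.
 */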
6c223761 2681
281a817f 2682static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
583891c9 2683 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
281a817f
DB
2684{
2685#if BITS_PER_LONG == 32
2686 u64 tmpdiv;
2687#endif
2688
2689 rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
6c223761
KB
2690
2691 /* Check for invalid block or wraparound. */
281a817f
DB
2692 if (rmd->last_block >=
2693 get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2694 rmd->last_block < rmd->first_block)
6c223761
KB
2695 return PQI_RAID_BYPASS_INELIGIBLE;
2696
281a817f 2697 rmd->data_disks_per_row =
583891c9 2698 get_unaligned_le16(&raid_map->data_disks_per_row);
281a817f
DB
2699 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2700 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
6c223761
KB
2701
2702 /* Calculate stripe information for the request. */
281a817f 2703 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
667298ce
DB
2704 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2705 return PQI_RAID_BYPASS_INELIGIBLE;
6c223761 2706#if BITS_PER_LONG == 32
281a817f
DB
2707 tmpdiv = rmd->first_block;
2708 do_div(tmpdiv, rmd->blocks_per_row);
2709 rmd->first_row = tmpdiv;
2710 tmpdiv = rmd->last_block;
2711 do_div(tmpdiv, rmd->blocks_per_row);
2712 rmd->last_row = tmpdiv;
2713 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2714 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2715 tmpdiv = rmd->first_row_offset;
2716 do_div(tmpdiv, rmd->strip_size);
2717 rmd->first_column = tmpdiv;
2718 tmpdiv = rmd->last_row_offset;
2719 do_div(tmpdiv, rmd->strip_size);
2720 rmd->last_column = tmpdiv;
6c223761 2721#else
281a817f
DB
2722 rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2723 rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2724 rmd->first_row_offset = (u32)(rmd->first_block -
583891c9 2725 (rmd->first_row * rmd->blocks_per_row));
281a817f 2726 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
583891c9 2727 rmd->blocks_per_row));
281a817f
DB
2728 rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2729 rmd->last_column = rmd->last_row_offset / rmd->strip_size;
6c223761
KB
2730#endif
2731
2732 /* If this isn't a single row/column then give to the controller. */
281a817f 2733 if (rmd->first_row != rmd->last_row ||
583891c9 2734 rmd->first_column != rmd->last_column)
6c223761
KB
2735 return PQI_RAID_BYPASS_INELIGIBLE;
2736
2737 /* Proceeding with driver mapping. */
281a817f 2738 rmd->total_disks_per_row = rmd->data_disks_per_row +
6c223761 2739 get_unaligned_le16(&raid_map->metadata_disks_per_row);
281a817f
DB
2740 rmd->map_row = ((u32)(rmd->first_row >>
2741 raid_map->parity_rotation_shift)) %
6c223761 2742 get_unaligned_le16(&raid_map->row_cnt);
281a817f 2743 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
583891c9 2744 rmd->first_column;
6c223761 2745
281a817f
DB
2746 return 0;
2747}
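/*
 * Worked example (editorial illustration, values assumed): with
 * data_disks_per_row = 3 and strip_size = 128, blocks_per_row = 384.
 * A request with first_block = 500 and block_cnt = 8 gives last_block =
 * 507, first_row = last_row = 1, first_row_offset = 116,
 * last_row_offset = 123, and first_column = last_column = 0, so the
 * request fits in a single row/column and stays bypass-eligible.
 */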
6c223761 2748
281a817f 2749static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
583891c9 2750 struct raid_map *raid_map)
281a817f 2751{
6c223761 2752#if BITS_PER_LONG == 32
281a817f 2753 u64 tmpdiv;
6c223761 2754#endif
6c223761 2755
667298ce
DB
2756 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2757 return PQI_RAID_BYPASS_INELIGIBLE;
2758
281a817f 2759 /* RAID 50/60 */
583891c9 2760 /* Verify first and last block are in same RAID group. */
281a817f 2761 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
6c223761 2762#if BITS_PER_LONG == 32
281a817f
DB
2763 tmpdiv = rmd->first_block;
2764 rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2765 tmpdiv = rmd->first_group;
2766 do_div(tmpdiv, rmd->blocks_per_row);
2767 rmd->first_group = tmpdiv;
2768 tmpdiv = rmd->last_block;
2769 rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2770 tmpdiv = rmd->last_group;
2771 do_div(tmpdiv, rmd->blocks_per_row);
2772 rmd->last_group = tmpdiv;
6c223761 2773#else
281a817f
DB
2774 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2775 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
6c223761 2776#endif
281a817f
DB
2777 if (rmd->first_group != rmd->last_group)
2778 return PQI_RAID_BYPASS_INELIGIBLE;
6c223761 2779
583891c9 2780 /* Verify request is in a single row of RAID 5/6. */
6c223761 2781#if BITS_PER_LONG == 32
281a817f
DB
2782 tmpdiv = rmd->first_block;
2783 do_div(tmpdiv, rmd->stripesize);
2784 rmd->first_row = tmpdiv;
2785 rmd->r5or6_first_row = tmpdiv;
2786 tmpdiv = rmd->last_block;
2787 do_div(tmpdiv, rmd->stripesize);
2788 rmd->r5or6_last_row = tmpdiv;
6c223761 2789#else
281a817f
DB
2790 rmd->first_row = rmd->r5or6_first_row =
2791 rmd->first_block / rmd->stripesize;
2792 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
6c223761 2793#endif
281a817f
DB
2794 if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2795 return PQI_RAID_BYPASS_INELIGIBLE;
6c223761 2796
583891c9 2797 /* Verify request is in a single column. */
6c223761 2798#if BITS_PER_LONG == 32
281a817f
DB
2799 tmpdiv = rmd->first_block;
2800 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2801 tmpdiv = rmd->first_row_offset;
2802 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2803 rmd->r5or6_first_row_offset = rmd->first_row_offset;
2804 tmpdiv = rmd->last_block;
2805 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2806 tmpdiv = rmd->r5or6_last_row_offset;
2807 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2808 tmpdiv = rmd->r5or6_first_row_offset;
2809 do_div(tmpdiv, rmd->strip_size);
2810 rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2811 tmpdiv = rmd->r5or6_last_row_offset;
2812 do_div(tmpdiv, rmd->strip_size);
2813 rmd->r5or6_last_column = tmpdiv;
6c223761 2814#else
281a817f 2815 rmd->first_row_offset = rmd->r5or6_first_row_offset =
583891c9
KB
2816 (u32)((rmd->first_block % rmd->stripesize) %
2817 rmd->blocks_per_row);
281a817f
DB
2818
2819 rmd->r5or6_last_row_offset =
2820 (u32)((rmd->last_block % rmd->stripesize) %
2821 rmd->blocks_per_row);
2822
2823 rmd->first_column =
583891c9 2824 rmd->r5or6_first_row_offset / rmd->strip_size;
281a817f
DB
2825 rmd->r5or6_first_column = rmd->first_column;
2826 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2827#endif
2828 if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2829 return PQI_RAID_BYPASS_INELIGIBLE;
2830
583891c9 2831 /* Request is eligible. */
281a817f
DB
2832 rmd->map_row =
2833 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2834 get_unaligned_le16(&raid_map->row_cnt);
6c223761 2835
281a817f
DB
2836 rmd->map_index = (rmd->first_group *
2837 (get_unaligned_le16(&raid_map->row_cnt) *
2838 rmd->total_disks_per_row)) +
2839 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
6c223761 2840
6702d2c4
DB
2841 if (rmd->is_write) {
2842 u32 index;
6c223761 2843
6702d2c4
DB
2844 /*
2845 * p_parity_it_nexus and q_parity_it_nexus are pointers to the
2846 * parity entries inside the device's raid_map.
2847 *
2848 * A device's RAID map size is bounded by the number of RAID disks squared.
2849 *
2850 * The device's RAID map size is checked during device
2851 * initialization.
2852 */
2853 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2854 index *= rmd->total_disks_per_row;
2855 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2856
2857 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2858 if (rmd->raid_level == SA_RAID_6) {
2859 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2860 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2861 }
6702d2c4
DB
2862#if BITS_PER_LONG == 32
2863 tmpdiv = rmd->first_block;
2864 do_div(tmpdiv, rmd->blocks_per_row);
2865 rmd->row = tmpdiv;
2866#else
2867 rmd->row = rmd->first_block / rmd->blocks_per_row;
6c223761 2868#endif
6702d2c4
DB
2869 }
2870
281a817f
DB
2871 return 0;
2872}
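/*
 * Worked example (editorial illustration, continuing the values assumed
 * above): with blocks_per_row = 384 and layout_map_count = 2,
 * stripesize = 768. For first_block = 500 and last_block = 507,
 * first_group = (500 % 768) / 384 = 1 and last_group = (507 % 768) / 384
 * = 1, so both ends of the request fall in the same RAID group and the
 * remaining bypass checks continue.
 */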
2873
2874static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2875{
2876 /* Build the new CDB for the physical disk I/O. */
2877 if (rmd->disk_block > 0xffffffff) {
2878 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2879 rmd->cdb[1] = 0;
2880 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2881 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2882 rmd->cdb[14] = 0;
2883 rmd->cdb[15] = 0;
2884 rmd->cdb_length = 16;
2885 } else {
2886 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2887 rmd->cdb[1] = 0;
2888 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2889 rmd->cdb[6] = 0;
2890 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2891 rmd->cdb[9] = 0;
2892 rmd->cdb_length = 10;
2893 }
2894}
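/*
 * Worked example (editorial illustration, values assumed): a read with
 * rmd->disk_block = 0x12345678 and rmd->disk_block_cnt = 16 fits in 32
 * bits, so a 10-byte CDB is built: 28 00 12 34 56 78 00 00 10 00. A
 * disk_block above 0xffffffff would instead produce a 16-byte
 * READ_16/WRITE_16 CDB with a 64-bit LBA and a 32-bit block count.
 */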
2895
7a012c23 2896static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
583891c9 2897 struct pqi_scsi_dev_raid_map_data *rmd)
7a012c23
DB
2898{
2899 u32 index;
2900 u32 group;
2901
2902 group = rmd->map_index / rmd->data_disks_per_row;
2903
2904 index = rmd->map_index - (group * rmd->data_disks_per_row);
2905 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2906 index += rmd->data_disks_per_row;
2907 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2908 if (rmd->layout_map_count > 2) {
2909 index += rmd->data_disks_per_row;
2910 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2911 }
2912
2913 rmd->num_it_nexus_entries = rmd->layout_map_count;
2914}
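/*
 * Worked example (editorial illustration, values assumed): with
 * data_disks_per_row = 4 and map_index = 6, group = 1 and index = 2, so
 * it_nexus[0] = disk_data[2].aio_handle and it_nexus[1] =
 * disk_data[6].aio_handle; for RAID Triple (layout_map_count = 3),
 * it_nexus[2] = disk_data[10].aio_handle is filled in as well.
 */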
2915
281a817f
DB
2916static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2917 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2918 struct pqi_queue_group *queue_group)
2919{
281a817f 2920 int rc;
7a012c23
DB
2921 struct raid_map *raid_map;
2922 u32 group;
2923 u32 next_bypass_group;
281a817f
DB
2924 struct pqi_encryption_info *encryption_info_ptr;
2925 struct pqi_encryption_info encryption_info;
583891c9 2926 struct pqi_scsi_dev_raid_map_data rmd = { 0 };
281a817f
DB
2927
2928 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2929 if (rc)
2930 return PQI_RAID_BYPASS_INELIGIBLE;
2931
2932 rmd.raid_level = device->raid_level;
6c223761 2933
6702d2c4 2934 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
281a817f
DB
2935 return PQI_RAID_BYPASS_INELIGIBLE;
2936
2937 if (unlikely(rmd.block_cnt == 0))
2938 return PQI_RAID_BYPASS_INELIGIBLE;
2939
2940 raid_map = device->raid_map;
6c223761 2941
281a817f
DB
2942 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2943 if (rc)
2944 return PQI_RAID_BYPASS_INELIGIBLE;
6c223761 2945
7a012c23
DB
2946 if (device->raid_level == SA_RAID_1 ||
2947 device->raid_level == SA_RAID_TRIPLE) {
2948 if (rmd.is_write) {
2949 pqi_calc_aio_r1_nexus(raid_map, &rmd);
2950 } else {
2951 group = device->next_bypass_group;
2952 next_bypass_group = group + 1;
2953 if (next_bypass_group >= rmd.layout_map_count)
2954 next_bypass_group = 0;
2955 device->next_bypass_group = next_bypass_group;
2956 rmd.map_index += group * rmd.data_disks_per_row;
2957 }
281a817f 2958 } else if ((device->raid_level == SA_RAID_5 ||
6702d2c4
DB
2959 device->raid_level == SA_RAID_6) &&
2960 (rmd.layout_map_count > 1 || rmd.is_write)) {
281a817f
DB
2961 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
2962 if (rc)
2963 return PQI_RAID_BYPASS_INELIGIBLE;
6c223761
KB
2964 }
2965
281a817f
DB
2966 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
2967 return PQI_RAID_BYPASS_INELIGIBLE;
2968
2969 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
2970 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2971 rmd.first_row * rmd.strip_size +
2972 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
2973 rmd.disk_block_cnt = rmd.block_cnt;
6c223761
KB
2974
2975 /* Handle differing logical/physical block sizes. */
2976 if (raid_map->phys_blk_shift) {
281a817f
DB
2977 rmd.disk_block <<= raid_map->phys_blk_shift;
2978 rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
6c223761
KB
2979 }
2980
281a817f 2981 if (unlikely(rmd.disk_block_cnt > 0xffff))
6c223761
KB
2982 return PQI_RAID_BYPASS_INELIGIBLE;
2983
281a817f 2984 pqi_set_aio_cdb(&rmd);
6c223761 2985
583891c9 2986 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
f6cc2a77
KB
2987 if (rmd.data_length > device->max_transfer_encrypted)
2988 return PQI_RAID_BYPASS_INELIGIBLE;
583891c9 2989 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
6c223761
KB
2990 encryption_info_ptr = &encryption_info;
2991 } else {
2992 encryption_info_ptr = NULL;
2993 }
2994
6702d2c4
DB
2995 if (rmd.is_write) {
2996 switch (device->raid_level) {
7a012c23
DB
2997 case SA_RAID_1:
2998 case SA_RAID_TRIPLE:
2999 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
3000 encryption_info_ptr, device, &rmd);
6702d2c4
DB
3001 case SA_RAID_5:
3002 case SA_RAID_6:
3003 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
583891c9 3004 encryption_info_ptr, device, &rmd);
6702d2c4 3005 }
6702d2c4
DB
3006 }
3007
f6cc2a77
KB
3008 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
3009 rmd.cdb, rmd.cdb_length, queue_group,
3010 encryption_info_ptr, true);
6c223761
KB
3011}
3012
3013#define PQI_STATUS_IDLE 0x0
3014
3015#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
3016#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
3017
3018#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
3019#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
3020#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
3021#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
3022#define PQI_DEVICE_STATE_ERROR 0x4
3023
3024#define PQI_MODE_READY_TIMEOUT_SECS 30
3025#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
3026
3027static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
3028{
3029 struct pqi_device_registers __iomem *pqi_registers;
3030 unsigned long timeout;
3031 u64 signature;
3032 u8 status;
3033
3034 pqi_registers = ctrl_info->pqi_registers;
4fd22c13 3035 timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;
6c223761
KB
3036
3037 while (1) {
3038 signature = readq(&pqi_registers->signature);
3039 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
3040 sizeof(signature)) == 0)
3041 break;
3042 if (time_after(jiffies, timeout)) {
3043 dev_err(&ctrl_info->pci_dev->dev,
3044 "timed out waiting for PQI signature\n");
3045 return -ETIMEDOUT;
3046 }
3047 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3048 }
3049
3050 while (1) {
3051 status = readb(&pqi_registers->function_and_status_code);
3052 if (status == PQI_STATUS_IDLE)
3053 break;
3054 if (time_after(jiffies, timeout)) {
3055 dev_err(&ctrl_info->pci_dev->dev,
3056 "timed out waiting for PQI IDLE\n");
3057 return -ETIMEDOUT;
3058 }
3059 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3060 }
3061
3062 while (1) {
3063 if (readl(&pqi_registers->device_status) ==
3064 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
3065 break;
3066 if (time_after(jiffies, timeout)) {
3067 dev_err(&ctrl_info->pci_dev->dev,
3068 "timed out waiting for PQI all registers ready\n");
3069 return -ETIMEDOUT;
3070 }
3071 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3072 }
3073
3074 return 0;
3075}
3076
3077static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
3078{
3079 struct pqi_scsi_dev *device;
3080
3081 device = io_request->scmd->device->hostdata;
588a63fe 3082 device->raid_bypass_enabled = false;
376fb880 3083 device->aio_enabled = false;
6c223761
KB
3084}
3085
d87d5474 3086static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
6c223761
KB
3087{
3088 struct pqi_ctrl_info *ctrl_info;
e58081a7 3089 struct pqi_scsi_dev *device;
6c223761 3090
03b288cf
KB
3091 device = sdev->hostdata;
3092 if (device->device_offline)
3093 return;
3094
3095 device->device_offline = true;
03b288cf
KB
3096 ctrl_info = shost_to_hba(sdev->host);
3097 pqi_schedule_rescan_worker(ctrl_info);
a9a68101 3098 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
03b288cf
KB
3099 path, ctrl_info->scsi_host->host_no, device->bus,
3100 device->target, device->lun);
6c223761
KB
3101}
3102
3103static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
3104{
3105 u8 scsi_status;
3106 u8 host_byte;
3107 struct scsi_cmnd *scmd;
3108 struct pqi_raid_error_info *error_info;
3109 size_t sense_data_length;
3110 int residual_count;
3111 int xfer_count;
3112 struct scsi_sense_hdr sshdr;
3113
3114 scmd = io_request->scmd;
3115 if (!scmd)
3116 return;
3117
3118 error_info = io_request->error_info;
3119 scsi_status = error_info->status;
3120 host_byte = DID_OK;
3121
f5b63206
KB
3122 switch (error_info->data_out_result) {
3123 case PQI_DATA_IN_OUT_GOOD:
3124 break;
3125 case PQI_DATA_IN_OUT_UNDERFLOW:
6c223761
KB
3126 xfer_count =
3127 get_unaligned_le32(&error_info->data_out_transferred);
3128 residual_count = scsi_bufflen(scmd) - xfer_count;
3129 scsi_set_resid(scmd, residual_count);
3130 if (xfer_count < scmd->underflow)
3131 host_byte = DID_SOFT_ERROR;
f5b63206
KB
3132 break;
3133 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
3134 case PQI_DATA_IN_OUT_ABORTED:
3135 host_byte = DID_ABORT;
3136 break;
3137 case PQI_DATA_IN_OUT_TIMEOUT:
3138 host_byte = DID_TIME_OUT;
3139 break;
3140 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
3141 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
3142 case PQI_DATA_IN_OUT_BUFFER_ERROR:
3143 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
3144 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
3145 case PQI_DATA_IN_OUT_ERROR:
3146 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
3147 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
3148 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
3149 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
3150 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
3151 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
3152 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
3153 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
3154 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
3155 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
3156 default:
3157 host_byte = DID_ERROR;
3158 break;
6c223761
KB
3159 }
3160
3161 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
3162 if (sense_data_length == 0)
3163 sense_data_length =
3164 get_unaligned_le16(&error_info->response_data_length);
3165 if (sense_data_length) {
3166 if (sense_data_length > sizeof(error_info->data))
3167 sense_data_length = sizeof(error_info->data);
3168
3169 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3170 scsi_normalize_sense(error_info->data,
3171 sense_data_length, &sshdr) &&
3172 sshdr.sense_key == HARDWARE_ERROR &&
8ef860ae 3173 sshdr.asc == 0x3e) {
441b7195
EV
3174 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
3175 struct pqi_scsi_dev *device = scmd->device->hostdata;
3176
8ef860ae
EV
3177 switch (sshdr.ascq) {
3178 case 0x1: /* LOGICAL UNIT FAILURE */
3179 if (printk_ratelimit())
3180 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
3181 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3182 pqi_take_device_offline(scmd->device, "RAID");
3183 host_byte = DID_NO_CONNECT;
3184 break;
3185
3186 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
3187 if (printk_ratelimit())
3188 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
3189 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3190 break;
3191 }
6c223761
KB
3192 }
3193
3194 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3195 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3196 memcpy(scmd->sense_buffer, error_info->data,
3197 sense_data_length);
3198 }
3199
3200 scmd->result = scsi_status;
3201 set_host_byte(scmd, host_byte);
3202}
3203
3204static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3205{
3206 u8 scsi_status;
3207 u8 host_byte;
3208 struct scsi_cmnd *scmd;
3209 struct pqi_aio_error_info *error_info;
3210 size_t sense_data_length;
3211 int residual_count;
3212 int xfer_count;
3213 bool device_offline;
3214
3215 scmd = io_request->scmd;
3216 error_info = io_request->error_info;
3217 host_byte = DID_OK;
3218 sense_data_length = 0;
3219 device_offline = false;
3220
3221 switch (error_info->service_response) {
3222 case PQI_AIO_SERV_RESPONSE_COMPLETE:
3223 scsi_status = error_info->status;
3224 break;
3225 case PQI_AIO_SERV_RESPONSE_FAILURE:
3226 switch (error_info->status) {
3227 case PQI_AIO_STATUS_IO_ABORTED:
3228 scsi_status = SAM_STAT_TASK_ABORTED;
3229 break;
3230 case PQI_AIO_STATUS_UNDERRUN:
3231 scsi_status = SAM_STAT_GOOD;
3232 residual_count = get_unaligned_le32(
3233 &error_info->residual_count);
3234 scsi_set_resid(scmd, residual_count);
3235 xfer_count = scsi_bufflen(scmd) - residual_count;
3236 if (xfer_count < scmd->underflow)
3237 host_byte = DID_SOFT_ERROR;
3238 break;
3239 case PQI_AIO_STATUS_OVERRUN:
3240 scsi_status = SAM_STAT_GOOD;
3241 break;
3242 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3243 pqi_aio_path_disabled(io_request);
3244 scsi_status = SAM_STAT_GOOD;
3245 io_request->status = -EAGAIN;
3246 break;
3247 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3248 case PQI_AIO_STATUS_INVALID_DEVICE:
376fb880
KB
3249 if (!io_request->raid_bypass) {
3250 device_offline = true;
3251 pqi_take_device_offline(scmd->device, "AIO");
3252 host_byte = DID_NO_CONNECT;
3253 }
6c223761
KB
3254 scsi_status = SAM_STAT_CHECK_CONDITION;
3255 break;
3256 case PQI_AIO_STATUS_IO_ERROR:
3257 default:
3258 scsi_status = SAM_STAT_CHECK_CONDITION;
3259 break;
3260 }
3261 break;
3262 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3263 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3264 scsi_status = SAM_STAT_GOOD;
3265 break;
3266 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3267 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3268 default:
3269 scsi_status = SAM_STAT_CHECK_CONDITION;
3270 break;
3271 }
3272
3273 if (error_info->data_present) {
3274 sense_data_length =
3275 get_unaligned_le16(&error_info->data_length);
3276 if (sense_data_length) {
3277 if (sense_data_length > sizeof(error_info->data))
3278 sense_data_length = sizeof(error_info->data);
3279 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3280 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3281 memcpy(scmd->sense_buffer, error_info->data,
3282 sense_data_length);
3283 }
3284 }
3285
3286 if (device_offline && sense_data_length == 0)
f2b1e9c6 3287 scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
6c223761
KB
3288
3289 scmd->result = scsi_status;
3290 set_host_byte(scmd, host_byte);
3291}
3292
3293static void pqi_process_io_error(unsigned int iu_type,
3294 struct pqi_io_request *io_request)
3295{
3296 switch (iu_type) {
3297 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3298 pqi_process_raid_io_error(io_request);
3299 break;
3300 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3301 pqi_process_aio_io_error(io_request);
3302 break;
3303 }
3304}
3305
18ff5f08 3306static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
3307 struct pqi_task_management_response *response)
3308{
3309 int rc;
3310
3311 switch (response->response_code) {
b17f0486
KB
3312 case SOP_TMF_COMPLETE:
3313 case SOP_TMF_FUNCTION_SUCCEEDED:
6c223761
KB
3314 rc = 0;
3315 break;
3406384b
MR
3316 case SOP_TMF_REJECTED:
3317 rc = -EAGAIN;
3318 break;
6c223761
KB
3319 default:
3320 rc = -EIO;
3321 break;
3322 }
3323
18ff5f08
KB
3324 if (rc)
3325 dev_err(&ctrl_info->pci_dev->dev,
3326 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3327
6c223761
KB
3328 return rc;
3329}
3330
5d1f03e6
MB
3331static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
3332 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
6c223761 3333{
5d1f03e6 3334 pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
9e68cccc
KB
3335}
3336
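/*
 * Drain one queue group's outbound (response) queue. Each response is
 * validated before use: the producer index must lie within the queue,
 * the request ID must map to an allocated slot in io_request_pool, and
 * that slot must still hold a non-zero refcount. Any violation takes
 * the controller offline via pqi_invalid_response() and returns -1.
 */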
3337static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
6c223761 3338{
9e68cccc 3339 int num_responses;
6c223761
KB
3340 pqi_index_t oq_pi;
3341 pqi_index_t oq_ci;
3342 struct pqi_io_request *io_request;
3343 struct pqi_io_response *response;
3344 u16 request_id;
3345
3346 num_responses = 0;
3347 oq_ci = queue_group->oq_ci_copy;
3348
3349 while (1) {
dac12fbc 3350 oq_pi = readl(queue_group->oq_pi);
9e68cccc 3351 if (oq_pi >= ctrl_info->num_elements_per_oq) {
5d1f03e6 3352 pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
9e68cccc
KB
3353 dev_err(&ctrl_info->pci_dev->dev,
3354 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3355 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3356 return -1;
3357 }
6c223761
KB
3358 if (oq_pi == oq_ci)
3359 break;
3360
3361 num_responses++;
3362 response = queue_group->oq_element_array +
3363 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3364
3365 request_id = get_unaligned_le16(&response->request_id);
9e68cccc 3366 if (request_id >= ctrl_info->max_io_slots) {
5d1f03e6 3367 pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
9e68cccc
KB
3368 dev_err(&ctrl_info->pci_dev->dev,
3369 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
3370 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3371 return -1;
3372 }
6c223761
KB
3373
3374 io_request = &ctrl_info->io_request_pool[request_id];
9e68cccc 3375 if (atomic_read(&io_request->refcount) == 0) {
5d1f03e6 3376 pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
9e68cccc
KB
3377 dev_err(&ctrl_info->pci_dev->dev,
3378 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
3379 request_id, oq_pi, oq_ci);
3380 return -1;
3381 }
6c223761
KB
3382
3383 switch (response->header.iu_type) {
3384 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3385 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2ba55c98
KB
3386 if (io_request->scmd)
3387 io_request->scmd->result = 0;
df561f66 3388 fallthrough;
6c223761
KB
3389 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3390 break;
b212c251
KB
3391 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3392 io_request->status =
3393 get_unaligned_le16(
583891c9 3394 &((struct pqi_vendor_general_response *)response)->status);
b212c251 3395 break;
6c223761 3396 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
18ff5f08
KB
3397 io_request->status = pqi_interpret_task_management_response(ctrl_info,
3398 (void *)response);
6c223761
KB
3399 break;
3400 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3401 pqi_aio_path_disabled(io_request);
3402 io_request->status = -EAGAIN;
3403 break;
3404 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3405 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3406 io_request->error_info = ctrl_info->error_buffer +
3407 (get_unaligned_le16(&response->error_index) *
3408 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
9e68cccc 3409 pqi_process_io_error(response->header.iu_type, io_request);
6c223761
KB
3410 break;
3411 default:
5d1f03e6 3412 pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
6c223761 3413 dev_err(&ctrl_info->pci_dev->dev,
9e68cccc
KB
3414 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
3415 response->header.iu_type, oq_pi, oq_ci);
3416 return -1;
6c223761
KB
3417 }
3418
9e68cccc 3419 io_request->io_complete_callback(io_request, io_request->context);
6c223761
KB
3420
3421 /*
3422 * Note that the I/O request structure CANNOT BE TOUCHED after
3423 * returning from the I/O completion callback!
3424 */
6c223761
KB
3425 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3426 }
3427
3428 if (num_responses) {
3429 queue_group->oq_ci_copy = oq_ci;
3430 writel(oq_ci, queue_group->oq_ci);
3431 }
3432
3433 return num_responses;
3434}
3435
3436static inline unsigned int pqi_num_elements_free(unsigned int pi,
df7a1fcf 3437 unsigned int ci, unsigned int elements_in_queue)
6c223761
KB
3438{
3439 unsigned int num_elements_used;
3440
3441 if (pi >= ci)
3442 num_elements_used = pi - ci;
3443 else
3444 num_elements_used = elements_in_queue - ci + pi;
3445
3446 return elements_in_queue - num_elements_used - 1;
3447}
3448
98f87667 3449static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
3450 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3451{
3452 pqi_index_t iq_pi;
3453 pqi_index_t iq_ci;
3454 unsigned long flags;
3455 void *next_element;
6c223761
KB
3456 struct pqi_queue_group *queue_group;
3457
3458 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3459 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3460
6c223761
KB
3461 while (1) {
3462 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3463
3464 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
dac12fbc 3465 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
6c223761
KB
3466
3467 if (pqi_num_elements_free(iq_pi, iq_ci,
3468 ctrl_info->num_elements_per_iq))
3469 break;
3470
3471 spin_unlock_irqrestore(
3472 &queue_group->submit_lock[RAID_PATH], flags);
3473
98f87667 3474 if (pqi_ctrl_offline(ctrl_info))
6c223761 3475 return;
6c223761
KB
3476 }
3477
3478 next_element = queue_group->iq_element_array[RAID_PATH] +
3479 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3480
3481 memcpy(next_element, iu, iu_length);
3482
3483 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
6c223761
KB
3484 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3485
3486 /*
3487 * This write notifies the controller that an IU is available to be
3488 * processed.
3489 */
3490 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3491
3492 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
6c223761
KB
3493}
3494
3495static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3496 struct pqi_event *event)
3497{
3498 struct pqi_event_acknowledge_request request;
3499
3500 memset(&request, 0, sizeof(request));
3501
3502 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3503 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3504 &request.header.iu_length);
3505 request.event_type = event->event_type;
06b41e0d
KB
3506 put_unaligned_le16(event->event_id, &request.event_id);
3507 put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
6c223761 3508
98f87667 3509 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
6c223761
KB
3510}
3511
4fd22c13
MR
3512#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3513#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3514
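/*
 * Poll the soft reset status register once per second (for up to 30
 * seconds) after an OFA quiesce to learn whether the driver must issue
 * the reset itself, whether firmware aborted the activation, or whether
 * firmware has stopped responding altogether.
 */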
3515static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3516 struct pqi_ctrl_info *ctrl_info)
6c223761 3517{
4fd22c13 3518 u8 status;
583891c9 3519 unsigned long timeout;
6c223761 3520
4fd22c13 3521 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
6c223761 3522
4fd22c13
MR
3523 while (1) {
3524 status = pqi_read_soft_reset_status(ctrl_info);
3525 if (status & PQI_SOFT_RESET_INITIATE)
3526 return RESET_INITIATE_DRIVER;
3527
3528 if (status & PQI_SOFT_RESET_ABORT)
3529 return RESET_ABORT;
3530
4ccc354b
KB
3531 if (!sis_is_firmware_running(ctrl_info))
3532 return RESET_NORESPONSE;
3533
4fd22c13 3534 if (time_after(jiffies, timeout)) {
4ccc354b 3535 dev_warn(&ctrl_info->pci_dev->dev,
4fd22c13
MR
3536 "timed out waiting for soft reset status\n");
3537 return RESET_TIMEDOUT;
3538 }
3539
4fd22c13
MR
3540 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3541 }
3542}
3543
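/*
 * Complete an Online Firmware Activation. Depending on the soft reset
 * status, either reset the controller from the driver, let the
 * firmware-initiated reset proceed, abort and unquiesce, or, if the
 * firmware never responded, free the OFA host buffer and take the
 * controller offline.
 */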
4ccc354b 3544static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
4fd22c13
MR
3545{
3546 int rc;
2790cd4d 3547 unsigned int delay_secs;
4ccc354b
KB
3548 enum pqi_soft_reset_status reset_status;
3549
3550 if (ctrl_info->soft_reset_handshake_supported)
3551 reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3552 else
3553 reset_status = RESET_INITIATE_FIRMWARE;
4fd22c13 3554
2790cd4d 3555 delay_secs = PQI_POST_RESET_DELAY_SECS;
4fd22c13
MR
3556
3557 switch (reset_status) {
4fd22c13 3558 case RESET_TIMEDOUT:
2790cd4d 3559 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
4ccc354b
KB
3560 fallthrough;
3561 case RESET_INITIATE_DRIVER:
4fd22c13 3562 dev_info(&ctrl_info->pci_dev->dev,
4ccc354b 3563 "Online Firmware Activation: resetting controller\n");
4fd22c13 3564 sis_soft_reset(ctrl_info);
df561f66 3565 fallthrough;
4fd22c13 3566 case RESET_INITIATE_FIRMWARE:
4ccc354b
KB
3567 ctrl_info->pqi_mode_enabled = false;
3568 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
2790cd4d 3569 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
4fd22c13 3570 pqi_ofa_free_host_buffer(ctrl_info);
4ccc354b 3571 pqi_ctrl_ofa_done(ctrl_info);
4fd22c13 3572 dev_info(&ctrl_info->pci_dev->dev,
4ccc354b
KB
3573 "Online Firmware Activation: %s\n",
3574 rc == 0 ? "SUCCESS" : "FAILED");
4fd22c13
MR
3575 break;
3576 case RESET_ABORT:
4fd22c13 3577 dev_info(&ctrl_info->pci_dev->dev,
4ccc354b
KB
3578 "Online Firmware Activation ABORTED\n");
3579 if (ctrl_info->soft_reset_handshake_supported)
3580 pqi_clear_soft_reset_status(ctrl_info);
3581 pqi_ofa_free_host_buffer(ctrl_info);
3582 pqi_ctrl_ofa_done(ctrl_info);
3583 pqi_ofa_ctrl_unquiesce(ctrl_info);
4fd22c13
MR
3584 break;
3585 case RESET_NORESPONSE:
4ccc354b
KB
3586 fallthrough;
3587 default:
3588 dev_err(&ctrl_info->pci_dev->dev,
3589 "unexpected Online Firmware Activation reset status: 0x%x\n",
3590 reset_status);
4fd22c13 3591 pqi_ofa_free_host_buffer(ctrl_info);
4ccc354b
KB
3592 pqi_ctrl_ofa_done(ctrl_info);
3593 pqi_ofa_ctrl_unquiesce(ctrl_info);
5d1f03e6 3594 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
4fd22c13
MR
3595 break;
3596 }
3597}
3598
2790cd4d 3599static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
4fd22c13 3600{
2790cd4d 3601 struct pqi_ctrl_info *ctrl_info;
4fd22c13 3602
2790cd4d 3603 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
4fd22c13 3604
2790cd4d
KB
3605 pqi_ctrl_ofa_start(ctrl_info);
3606 pqi_ofa_setup_host_buffer(ctrl_info);
3607 pqi_ofa_host_memory_update(ctrl_info);
3608}
4fd22c13 3609
2790cd4d
KB
3610static void pqi_ofa_quiesce_worker(struct work_struct *work)
3611{
3612 struct pqi_ctrl_info *ctrl_info;
3613 struct pqi_event *event;
4fd22c13 3614
2790cd4d
KB
3615 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3616
3617 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3618
3619 pqi_ofa_ctrl_quiesce(ctrl_info);
3620 pqi_acknowledge_event(ctrl_info, event);
3621 pqi_process_soft_reset(ctrl_info);
3622}
4fd22c13 3623
2790cd4d
KB
3624static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3625 struct pqi_event *event)
3626{
3627 bool ack_event;
3628
3629 ack_event = true;
3630
3631 switch (event->event_id) {
3632 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
4fd22c13 3633 dev_info(&ctrl_info->pci_dev->dev,
2790cd4d
KB
3634 "received Online Firmware Activation memory allocation request\n");
3635 schedule_work(&ctrl_info->ofa_memory_alloc_work);
3636 break;
3637 case PQI_EVENT_OFA_QUIESCE:
4fd22c13 3638 dev_info(&ctrl_info->pci_dev->dev,
2790cd4d
KB
3639 "received Online Firmware Activation quiesce request\n");
3640 schedule_work(&ctrl_info->ofa_quiesce_work);
3641 ack_event = false;
3642 break;
3643 case PQI_EVENT_OFA_CANCELED:
4fd22c13 3644 dev_info(&ctrl_info->pci_dev->dev,
2790cd4d
KB
3645 "received Online Firmware Activation cancel request: reason: %u\n",
3646 ctrl_info->ofa_cancel_reason);
3647 pqi_ofa_free_host_buffer(ctrl_info);
3648 pqi_ctrl_ofa_done(ctrl_info);
3649 break;
3650 default:
3651 dev_err(&ctrl_info->pci_dev->dev,
3652 "received unknown Online Firmware Activation request: event ID: %u\n",
3653 event->event_id);
3654 break;
4fd22c13
MR
3655 }
3656
2790cd4d 3657 return ack_event;
4fd22c13
MR
3658}
3659
6c223761
KB
3660static void pqi_event_worker(struct work_struct *work)
3661{
3662 unsigned int i;
2790cd4d 3663 bool rescan_needed;
6c223761 3664 struct pqi_ctrl_info *ctrl_info;
6a50d6ad 3665 struct pqi_event *event;
2790cd4d 3666 bool ack_event;
6c223761
KB
3667
3668 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3669
7561a7e4 3670 pqi_ctrl_busy(ctrl_info);
ae0c189d 3671 pqi_wait_if_ctrl_blocked(ctrl_info);
5f310425
KB
3672 if (pqi_ctrl_offline(ctrl_info))
3673 goto out;
3674
2790cd4d 3675 rescan_needed = false;
6a50d6ad 3676 event = ctrl_info->events;
6c223761 3677 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
6a50d6ad
KB
3678 if (event->pending) {
3679 event->pending = false;
4fd22c13 3680 if (event->event_type == PQI_EVENT_TYPE_OFA) {
2790cd4d
KB
3681 ack_event = pqi_ofa_process_event(ctrl_info, event);
3682 } else {
3683 ack_event = true;
3684 rescan_needed = true;
4fd22c13 3685 }
2790cd4d
KB
3686 if (ack_event)
3687 pqi_acknowledge_event(ctrl_info, event);
6c223761 3688 }
6a50d6ad 3689 event++;
6c223761
KB
3690 }
3691
2790cd4d
KB
3692 if (rescan_needed)
3693 pqi_schedule_rescan_worker_delayed(ctrl_info);
3694
5f310425 3695out:
7561a7e4 3696 pqi_ctrl_unbusy(ctrl_info);
6c223761
KB
3697}
3698
4fd22c13 3699#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ)
6c223761 3700
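/*
 * The heartbeat timer fires every 10 seconds. The controller is only
 * declared dead when both the driver's interrupt count and the
 * firmware's heartbeat counter have stopped advancing since the
 * previous tick; if either is still moving, the timer is simply
 * re-armed.
 */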
74a0f573 3701static void pqi_heartbeat_timer_handler(struct timer_list *t)
6c223761
KB
3702{
3703 int num_interrupts;
98f87667 3704 u32 heartbeat_count;
583891c9 3705 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
6c223761 3706
98f87667
KB
3707 pqi_check_ctrl_health(ctrl_info);
3708 if (pqi_ctrl_offline(ctrl_info))
061ef06a
KB
3709 return;
3710
6c223761 3711 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
98f87667 3712 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
6c223761
KB
3713
3714 if (num_interrupts == ctrl_info->previous_num_interrupts) {
98f87667
KB
3715 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3716 dev_err(&ctrl_info->pci_dev->dev,
3717 "no heartbeat detected - last heartbeat count: %u\n",
3718 heartbeat_count);
5d1f03e6 3719 pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
6c223761
KB
3720 return;
3721 }
6c223761 3722 } else {
98f87667 3723 ctrl_info->previous_num_interrupts = num_interrupts;
6c223761
KB
3724 }
3725
98f87667 3726 ctrl_info->previous_heartbeat_count = heartbeat_count;
6c223761
KB
3727 mod_timer(&ctrl_info->heartbeat_timer,
3728 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3729}
3730
3731static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3732{
98f87667
KB
3733 if (!ctrl_info->heartbeat_counter)
3734 return;
3735
6c223761
KB
3736 ctrl_info->previous_num_interrupts =
3737 atomic_read(&ctrl_info->num_interrupts);
98f87667
KB
3738 ctrl_info->previous_heartbeat_count =
3739 pqi_read_heartbeat_counter(ctrl_info);
6c223761 3740
6c223761
KB
3741 ctrl_info->heartbeat_timer.expires =
3742 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
061ef06a 3743 add_timer(&ctrl_info->heartbeat_timer);
6c223761
KB
3744}
3745
3746static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3747{
98f87667 3748 del_timer_sync(&ctrl_info->heartbeat_timer);
6c223761
KB
3749}
3750
2790cd4d
KB
3751static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3752 struct pqi_event *event, struct pqi_event_response *response)
4fd22c13 3753{
2790cd4d
KB
3754 switch (event->event_id) {
3755 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3756 ctrl_info->ofa_bytes_requested =
3757 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3758 break;
3759 case PQI_EVENT_OFA_CANCELED:
3760 ctrl_info->ofa_cancel_reason =
3761 get_unaligned_le16(&response->data.ofa_cancelled.reason);
3762 break;
4fd22c13
MR
3763 }
3764}
3765
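/*
 * Drain the dedicated event queue: validate the producer index, latch
 * each supported event into ctrl_info->events[] (capturing OFA payload
 * data where applicable), advance the consumer index, and schedule the
 * event worker if anything was consumed.
 */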
9e68cccc 3766static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
6c223761 3767{
9e68cccc 3768 int num_events;
6c223761
KB
3769 pqi_index_t oq_pi;
3770 pqi_index_t oq_ci;
3771 struct pqi_event_queue *event_queue;
3772 struct pqi_event_response *response;
6a50d6ad 3773 struct pqi_event *event;
6c223761
KB
3774 int event_index;
3775
3776 event_queue = &ctrl_info->event_queue;
3777 num_events = 0;
6c223761
KB
3778 oq_ci = event_queue->oq_ci_copy;
3779
3780 while (1) {
dac12fbc 3781 oq_pi = readl(event_queue->oq_pi);
9e68cccc 3782 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
5d1f03e6 3783 pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
9e68cccc
KB
3784 dev_err(&ctrl_info->pci_dev->dev,
3785 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3786 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3787 return -1;
3788 }
3789
6c223761
KB
3790 if (oq_pi == oq_ci)
3791 break;
3792
3793 num_events++;
9e68cccc 3794 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
6c223761 3795
583891c9 3796 event_index = pqi_event_type_to_event_index(response->event_type);
6c223761 3797
9e68cccc
KB
3798 if (event_index >= 0 && response->request_acknowledge) {
3799 event = &ctrl_info->events[event_index];
3800 event->pending = true;
3801 event->event_type = response->event_type;
06b41e0d
KB
3802 event->event_id = get_unaligned_le16(&response->event_id);
3803 event->additional_event_id =
3804 get_unaligned_le32(&response->additional_event_id);
9e68cccc 3805 if (event->event_type == PQI_EVENT_TYPE_OFA)
2790cd4d 3806 pqi_ofa_capture_event_payload(ctrl_info, event, response);
6c223761
KB
3807 }
3808
3809 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3810 }
3811
3812 if (num_events) {
3813 event_queue->oq_ci_copy = oq_ci;
3814 writel(oq_ci, event_queue->oq_ci);
98f87667 3815 schedule_work(&ctrl_info->event_work);
6c223761
KB
3816 }
3817
3818 return num_events;
3819}
3820
061ef06a
KB
3821#define PQI_LEGACY_INTX_MASK 0x1
3822
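/*
 * Legacy INTx is controlled through a pair of mask registers: writing
 * PQI_LEGACY_INTX_MASK to legacy_intx_mask_clear unmasks the interrupt,
 * writing it to legacy_intx_mask_set masks it. pqi_change_irq_mode()
 * pairs this with the SIS calls that switch the controller between
 * MSI-X and INTx.
 */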
583891c9 3823static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
061ef06a
KB
3824{
3825 u32 intx_mask;
3826 struct pqi_device_registers __iomem *pqi_registers;
3827 volatile void __iomem *register_addr;
3828
3829 pqi_registers = ctrl_info->pqi_registers;
3830
3831 if (enable_intx)
3832 register_addr = &pqi_registers->legacy_intx_mask_clear;
3833 else
3834 register_addr = &pqi_registers->legacy_intx_mask_set;
3835
3836 intx_mask = readl(register_addr);
3837 intx_mask |= PQI_LEGACY_INTX_MASK;
3838 writel(intx_mask, register_addr);
3839}
3840
3841static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3842 enum pqi_irq_mode new_mode)
3843{
3844 switch (ctrl_info->irq_mode) {
3845 case IRQ_MODE_MSIX:
3846 switch (new_mode) {
3847 case IRQ_MODE_MSIX:
3848 break;
3849 case IRQ_MODE_INTX:
3850 pqi_configure_legacy_intx(ctrl_info, true);
061ef06a
KB
3851 sis_enable_intx(ctrl_info);
3852 break;
3853 case IRQ_MODE_NONE:
061ef06a
KB
3854 break;
3855 }
3856 break;
3857 case IRQ_MODE_INTX:
3858 switch (new_mode) {
3859 case IRQ_MODE_MSIX:
3860 pqi_configure_legacy_intx(ctrl_info, false);
061ef06a
KB
3861 sis_enable_msix(ctrl_info);
3862 break;
3863 case IRQ_MODE_INTX:
3864 break;
3865 case IRQ_MODE_NONE:
3866 pqi_configure_legacy_intx(ctrl_info, false);
061ef06a
KB
3867 break;
3868 }
3869 break;
3870 case IRQ_MODE_NONE:
3871 switch (new_mode) {
3872 case IRQ_MODE_MSIX:
3873 sis_enable_msix(ctrl_info);
3874 break;
3875 case IRQ_MODE_INTX:
3876 pqi_configure_legacy_intx(ctrl_info, true);
3877 sis_enable_intx(ctrl_info);
3878 break;
3879 case IRQ_MODE_NONE:
3880 break;
3881 }
3882 break;
3883 }
3884
3885 ctrl_info->irq_mode = new_mode;
3886}
3887
3888#define PQI_LEGACY_INTX_PENDING 0x1
3889
3890static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3891{
3892 bool valid_irq;
3893 u32 intx_status;
3894
3895 switch (ctrl_info->irq_mode) {
3896 case IRQ_MODE_MSIX:
3897 valid_irq = true;
3898 break;
3899 case IRQ_MODE_INTX:
583891c9 3900 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
061ef06a
KB
3901 if (intx_status & PQI_LEGACY_INTX_PENDING)
3902 valid_irq = true;
3903 else
3904 valid_irq = false;
3905 break;
3906 case IRQ_MODE_NONE:
3907 default:
3908 valid_irq = false;
3909 break;
3910 }
3911
3912 return valid_irq;
3913}
3914
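/*
 * Interrupt handler shared by all queue groups. Spurious legacy INTx
 * interrupts are rejected up front; the event queue is serviced only on
 * the vector registered as ctrl_info->event_irq (vector 0, which is
 * shared with the first queue group). Any requests left pending on the
 * RAID and AIO paths are pushed out again before returning.
 */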
6c223761
KB
3915static irqreturn_t pqi_irq_handler(int irq, void *data)
3916{
3917 struct pqi_ctrl_info *ctrl_info;
3918 struct pqi_queue_group *queue_group;
9e68cccc
KB
3919 int num_io_responses_handled;
3920 int num_events_handled;
6c223761
KB
3921
3922 queue_group = data;
3923 ctrl_info = queue_group->ctrl_info;
3924
061ef06a 3925 if (!pqi_is_valid_irq(ctrl_info))
6c223761
KB
3926 return IRQ_NONE;
3927
9e68cccc
KB
3928 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3929 if (num_io_responses_handled < 0)
3930 goto out;
6c223761 3931
9e68cccc
KB
3932 if (irq == ctrl_info->event_irq) {
3933 num_events_handled = pqi_process_event_intr(ctrl_info);
3934 if (num_events_handled < 0)
3935 goto out;
3936 } else {
3937 num_events_handled = 0;
3938 }
6c223761 3939
9e68cccc 3940 if (num_io_responses_handled + num_events_handled > 0)
6c223761
KB
3941 atomic_inc(&ctrl_info->num_interrupts);
3942
3943 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3944 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3945
9e68cccc 3946out:
6c223761
KB
3947 return IRQ_HANDLED;
3948}
3949
3950static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3951{
d91d7820 3952 struct pci_dev *pci_dev = ctrl_info->pci_dev;
6c223761
KB
3953 int i;
3954 int rc;
3955
d91d7820 3956 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
6c223761
KB
3957
3958 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
d91d7820 3959 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
52198226 3960 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
6c223761 3961 if (rc) {
d91d7820 3962 dev_err(&pci_dev->dev,
6c223761 3963 "irq %u init failed with error %d\n",
d91d7820 3964 pci_irq_vector(pci_dev, i), rc);
6c223761
KB
3965 return rc;
3966 }
3967 ctrl_info->num_msix_vectors_initialized++;
3968 }
3969
3970 return 0;
3971}
3972
98bf061b
KB
3973static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3974{
3975 int i;
3976
3977 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3978 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3979 &ctrl_info->queue_groups[i]);
3980
3981 ctrl_info->num_msix_vectors_initialized = 0;
3982}
3983
6c223761
KB
3984static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3985{
98bf061b 3986 int num_vectors_enabled;
6c223761 3987
98bf061b 3988 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
52198226
CH
3989 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3990 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
98bf061b 3991 if (num_vectors_enabled < 0) {
6c223761 3992 dev_err(&ctrl_info->pci_dev->dev,
98bf061b
KB
3993 "MSI-X init failed with error %d\n",
3994 num_vectors_enabled);
3995 return num_vectors_enabled;
6c223761
KB
3996 }
3997
98bf061b 3998 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
061ef06a 3999 ctrl_info->irq_mode = IRQ_MODE_MSIX;
6c223761
KB
4000 return 0;
4001}
4002
98bf061b
KB
4003static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4004{
4005 if (ctrl_info->num_msix_vectors_enabled) {
4006 pci_free_irq_vectors(ctrl_info->pci_dev);
4007 ctrl_info->num_msix_vectors_enabled = 0;
4008 }
4009}
4010
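/*
 * All operational queue memory comes from a single dma_alloc_coherent()
 * allocation. A first pass over a NULL pointer computes the total size
 * of every aligned IQ/OQ element array, the event queue, and the queue
 * index words; a second pass carves the real buffer into those same
 * pieces and records both CPU and bus addresses for each.
 */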
6c223761
KB
4011static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
4012{
4013 unsigned int i;
4014 size_t alloc_length;
4015 size_t element_array_length_per_iq;
4016 size_t element_array_length_per_oq;
4017 void *element_array;
dac12fbc 4018 void __iomem *next_queue_index;
6c223761
KB
4019 void *aligned_pointer;
4020 unsigned int num_inbound_queues;
4021 unsigned int num_outbound_queues;
4022 unsigned int num_queue_indexes;
4023 struct pqi_queue_group *queue_group;
4024
4025 element_array_length_per_iq =
4026 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
4027 ctrl_info->num_elements_per_iq;
4028 element_array_length_per_oq =
4029 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
4030 ctrl_info->num_elements_per_oq;
4031 num_inbound_queues = ctrl_info->num_queue_groups * 2;
4032 num_outbound_queues = ctrl_info->num_queue_groups;
4033 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
4034
4035 aligned_pointer = NULL;
4036
4037 for (i = 0; i < num_inbound_queues; i++) {
4038 aligned_pointer = PTR_ALIGN(aligned_pointer,
4039 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4040 aligned_pointer += element_array_length_per_iq;
4041 }
4042
4043 for (i = 0; i < num_outbound_queues; i++) {
4044 aligned_pointer = PTR_ALIGN(aligned_pointer,
4045 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4046 aligned_pointer += element_array_length_per_oq;
4047 }
4048
4049 aligned_pointer = PTR_ALIGN(aligned_pointer,
4050 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4051 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4052 PQI_EVENT_OQ_ELEMENT_LENGTH;
4053
4054 for (i = 0; i < num_queue_indexes; i++) {
4055 aligned_pointer = PTR_ALIGN(aligned_pointer,
4056 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4057 aligned_pointer += sizeof(pqi_index_t);
4058 }
4059
4060 alloc_length = (size_t)aligned_pointer +
4061 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4062
e1d213bd
KB
4063 alloc_length += PQI_EXTRA_SGL_MEMORY;
4064
6c223761 4065 ctrl_info->queue_memory_base =
750afb08
LC
4066 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4067 &ctrl_info->queue_memory_base_dma_handle,
4068 GFP_KERNEL);
6c223761 4069
d87d5474 4070 if (!ctrl_info->queue_memory_base)
6c223761 4071 return -ENOMEM;
6c223761
KB
4072
4073 ctrl_info->queue_memory_length = alloc_length;
4074
4075 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
4076 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4077
4078 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4079 queue_group = &ctrl_info->queue_groups[i];
4080 queue_group->iq_element_array[RAID_PATH] = element_array;
4081 queue_group->iq_element_array_bus_addr[RAID_PATH] =
4082 ctrl_info->queue_memory_base_dma_handle +
4083 (element_array - ctrl_info->queue_memory_base);
4084 element_array += element_array_length_per_iq;
4085 element_array = PTR_ALIGN(element_array,
4086 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4087 queue_group->iq_element_array[AIO_PATH] = element_array;
4088 queue_group->iq_element_array_bus_addr[AIO_PATH] =
4089 ctrl_info->queue_memory_base_dma_handle +
4090 (element_array - ctrl_info->queue_memory_base);
4091 element_array += element_array_length_per_iq;
4092 element_array = PTR_ALIGN(element_array,
4093 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4094 }
4095
4096 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4097 queue_group = &ctrl_info->queue_groups[i];
4098 queue_group->oq_element_array = element_array;
4099 queue_group->oq_element_array_bus_addr =
4100 ctrl_info->queue_memory_base_dma_handle +
4101 (element_array - ctrl_info->queue_memory_base);
4102 element_array += element_array_length_per_oq;
4103 element_array = PTR_ALIGN(element_array,
4104 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4105 }
4106
4107 ctrl_info->event_queue.oq_element_array = element_array;
4108 ctrl_info->event_queue.oq_element_array_bus_addr =
4109 ctrl_info->queue_memory_base_dma_handle +
4110 (element_array - ctrl_info->queue_memory_base);
4111 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4112 PQI_EVENT_OQ_ELEMENT_LENGTH;
4113
dac12fbc 4114 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
6c223761
KB
4115 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4116
4117 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4118 queue_group = &ctrl_info->queue_groups[i];
4119 queue_group->iq_ci[RAID_PATH] = next_queue_index;
4120 queue_group->iq_ci_bus_addr[RAID_PATH] =
4121 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
4122 (next_queue_index -
4123 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
4124 next_queue_index += sizeof(pqi_index_t);
4125 next_queue_index = PTR_ALIGN(next_queue_index,
4126 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4127 queue_group->iq_ci[AIO_PATH] = next_queue_index;
4128 queue_group->iq_ci_bus_addr[AIO_PATH] =
4129 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
4130 (next_queue_index -
4131 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
4132 next_queue_index += sizeof(pqi_index_t);
4133 next_queue_index = PTR_ALIGN(next_queue_index,
4134 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4135 queue_group->oq_pi = next_queue_index;
4136 queue_group->oq_pi_bus_addr =
4137 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
4138 (next_queue_index -
4139 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
4140 next_queue_index += sizeof(pqi_index_t);
4141 next_queue_index = PTR_ALIGN(next_queue_index,
4142 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4143 }
4144
4145 ctrl_info->event_queue.oq_pi = next_queue_index;
4146 ctrl_info->event_queue.oq_pi_bus_addr =
4147 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
4148 (next_queue_index -
4149 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
4150
4151 return 0;
4152}
4153
4154static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
4155{
4156 unsigned int i;
4157 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4158 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4159
4160 /*
4161 * Initialize the backpointers to the controller structure in
4162 * each operational queue group structure.
4163 */
4164 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4165 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
4166
4167 /*
4168 * Assign IDs to all operational queues. Note that the IDs
4169 * assigned to operational IQs are independent of the IDs
4170 * assigned to operational OQs.
4171 */
4172 ctrl_info->event_queue.oq_id = next_oq_id++;
4173 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4174 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
4175 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
4176 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
4177 }
4178
4179 /*
4180 * Assign MSI-X table entry indexes to all queues. Note that the
4181 * interrupt for the event queue is shared with the first queue group.
4182 */
4183 ctrl_info->event_queue.int_msg_num = 0;
4184 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4185 ctrl_info->queue_groups[i].int_msg_num = i;
4186
4187 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4188 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
4189 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
4190 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
4191 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
4192 }
4193}
4194
4195static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4196{
4197 size_t alloc_length;
4198 struct pqi_admin_queues_aligned *admin_queues_aligned;
4199 struct pqi_admin_queues *admin_queues;
4200
4201 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4202 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4203
4204 ctrl_info->admin_queue_memory_base =
750afb08
LC
4205 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4206 &ctrl_info->admin_queue_memory_base_dma_handle,
4207 GFP_KERNEL);
6c223761
KB
4208
4209 if (!ctrl_info->admin_queue_memory_base)
4210 return -ENOMEM;
4211
4212 ctrl_info->admin_queue_memory_length = alloc_length;
4213
4214 admin_queues = &ctrl_info->admin_queues;
4215 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4216 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4217 admin_queues->iq_element_array =
4218 &admin_queues_aligned->iq_element_array;
4219 admin_queues->oq_element_array =
4220 &admin_queues_aligned->oq_element_array;
583891c9
KB
4221 admin_queues->iq_ci =
4222 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
dac12fbc
KB
4223 admin_queues->oq_pi =
4224 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
6c223761
KB
4225
4226 admin_queues->iq_element_array_bus_addr =
4227 ctrl_info->admin_queue_memory_base_dma_handle +
4228 (admin_queues->iq_element_array -
4229 ctrl_info->admin_queue_memory_base);
4230 admin_queues->oq_element_array_bus_addr =
4231 ctrl_info->admin_queue_memory_base_dma_handle +
4232 (admin_queues->oq_element_array -
4233 ctrl_info->admin_queue_memory_base);
4234 admin_queues->iq_ci_bus_addr =
4235 ctrl_info->admin_queue_memory_base_dma_handle +
583891c9
KB
4236 ((void __iomem *)admin_queues->iq_ci -
4237 (void __iomem *)ctrl_info->admin_queue_memory_base);
6c223761
KB
4238 admin_queues->oq_pi_bus_addr =
4239 ctrl_info->admin_queue_memory_base_dma_handle +
dac12fbc
KB
4240 ((void __iomem *)admin_queues->oq_pi -
4241 (void __iomem *)ctrl_info->admin_queue_memory_base);
6c223761
KB
4242
4243 return 0;
4244}
4245
4fd22c13 4246#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ
6c223761
KB
4247#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
4248
4249static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4250{
4251 struct pqi_device_registers __iomem *pqi_registers;
4252 struct pqi_admin_queues *admin_queues;
4253 unsigned long timeout;
4254 u8 status;
4255 u32 reg;
4256
4257 pqi_registers = ctrl_info->pqi_registers;
4258 admin_queues = &ctrl_info->admin_queues;
4259
4260 writeq((u64)admin_queues->iq_element_array_bus_addr,
4261 &pqi_registers->admin_iq_element_array_addr);
4262 writeq((u64)admin_queues->oq_element_array_bus_addr,
4263 &pqi_registers->admin_oq_element_array_addr);
4264 writeq((u64)admin_queues->iq_ci_bus_addr,
4265 &pqi_registers->admin_iq_ci_addr);
4266 writeq((u64)admin_queues->oq_pi_bus_addr,
4267 &pqi_registers->admin_oq_pi_addr);
4268
4269 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
e655d469 4270 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
6c223761
KB
4271 (admin_queues->int_msg_num << 16);
4272 writel(reg, &pqi_registers->admin_iq_num_elements);
583891c9 4273
6c223761
KB
4274 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4275 &pqi_registers->function_and_status_code);
4276
4277 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4278 while (1) {
987d3560 4279 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
6c223761
KB
4280 status = readb(&pqi_registers->function_and_status_code);
4281 if (status == PQI_STATUS_IDLE)
4282 break;
4283 if (time_after(jiffies, timeout))
4284 return -ETIMEDOUT;
6c223761
KB
4285 }
4286
4287 /*
4288 * The offset registers are not initialized to the correct
4289 * offsets until *after* the create admin queue pair command
4290 * completes successfully.
4291 */
4292 admin_queues->iq_pi = ctrl_info->iomem_base +
4293 PQI_DEVICE_REGISTERS_OFFSET +
4294 readq(&pqi_registers->admin_iq_pi_offset);
4295 admin_queues->oq_ci = ctrl_info->iomem_base +
4296 PQI_DEVICE_REGISTERS_OFFSET +
4297 readq(&pqi_registers->admin_oq_ci_offset);
4298
4299 return 0;
4300}
4301
4302static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4303 struct pqi_general_admin_request *request)
4304{
4305 struct pqi_admin_queues *admin_queues;
4306 void *next_element;
4307 pqi_index_t iq_pi;
4308
4309 admin_queues = &ctrl_info->admin_queues;
4310 iq_pi = admin_queues->iq_pi_copy;
4311
4312 next_element = admin_queues->iq_element_array +
4313 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4314
4315 memcpy(next_element, request, sizeof(*request));
4316
4317 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4318 admin_queues->iq_pi_copy = iq_pi;
4319
4320 /*
4321 * This write notifies the controller that an IU is available to be
4322 * processed.
4323 */
4324 writel(iq_pi, admin_queues->iq_pi);
4325}
4326
13bede67
KB
4327#define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
4328
6c223761
KB
4329static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4330 struct pqi_general_admin_response *response)
4331{
4332 struct pqi_admin_queues *admin_queues;
4333 pqi_index_t oq_pi;
4334 pqi_index_t oq_ci;
4335 unsigned long timeout;
4336
4337 admin_queues = &ctrl_info->admin_queues;
4338 oq_ci = admin_queues->oq_ci_copy;
4339
4fd22c13 4340 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;
6c223761
KB
4341
4342 while (1) {
dac12fbc 4343 oq_pi = readl(admin_queues->oq_pi);
6c223761
KB
4344 if (oq_pi != oq_ci)
4345 break;
4346 if (time_after(jiffies, timeout)) {
4347 dev_err(&ctrl_info->pci_dev->dev,
4348 "timed out waiting for admin response\n");
4349 return -ETIMEDOUT;
4350 }
13bede67
KB
4351 if (!sis_is_firmware_running(ctrl_info))
4352 return -ENXIO;
6c223761
KB
4353 usleep_range(1000, 2000);
4354 }
4355
4356 memcpy(response, admin_queues->oq_element_array +
4357 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4358
4359 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4360 admin_queues->oq_ci_copy = oq_ci;
4361 writel(oq_ci, admin_queues->oq_ci);
4362
4363 return 0;
4364}
4365
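/*
 * Queue and submit I/O on one path of a queue group. Requests that do
 * not fit in the inbound queue stay on the request list for a later
 * call. An IU may span several queue elements; when it would run past
 * the end of the element array, it is copied in two pieces, wrapping
 * back to the start of the array.
 */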
4366static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4367 struct pqi_queue_group *queue_group, enum pqi_io_path path,
4368 struct pqi_io_request *io_request)
4369{
4370 struct pqi_io_request *next;
4371 void *next_element;
4372 pqi_index_t iq_pi;
4373 pqi_index_t iq_ci;
4374 size_t iu_length;
4375 unsigned long flags;
4376 unsigned int num_elements_needed;
4377 unsigned int num_elements_to_end_of_queue;
4378 size_t copy_count;
4379 struct pqi_iu_header *request;
4380
4381 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4382
376fb880
KB
4383 if (io_request) {
4384 io_request->queue_group = queue_group;
6c223761
KB
4385 list_add_tail(&io_request->request_list_entry,
4386 &queue_group->request_list[path]);
376fb880 4387 }
6c223761
KB
4388
4389 iq_pi = queue_group->iq_pi_copy[path];
4390
4391 list_for_each_entry_safe(io_request, next,
4392 &queue_group->request_list[path], request_list_entry) {
4393
4394 request = io_request->iu;
4395
4396 iu_length = get_unaligned_le16(&request->iu_length) +
4397 PQI_REQUEST_HEADER_LENGTH;
4398 num_elements_needed =
4399 DIV_ROUND_UP(iu_length,
4400 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4401
dac12fbc 4402 iq_ci = readl(queue_group->iq_ci[path]);
6c223761
KB
4403
4404 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4405 ctrl_info->num_elements_per_iq))
4406 break;
4407
4408 put_unaligned_le16(queue_group->oq_id,
4409 &request->response_queue_id);
4410
4411 next_element = queue_group->iq_element_array[path] +
4412 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4413
4414 num_elements_to_end_of_queue =
4415 ctrl_info->num_elements_per_iq - iq_pi;
4416
4417 if (num_elements_needed <= num_elements_to_end_of_queue) {
4418 memcpy(next_element, request, iu_length);
4419 } else {
4420 copy_count = num_elements_to_end_of_queue *
4421 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4422 memcpy(next_element, request, copy_count);
4423 memcpy(queue_group->iq_element_array[path],
4424 (u8 *)request + copy_count,
4425 iu_length - copy_count);
4426 }
4427
4428 iq_pi = (iq_pi + num_elements_needed) %
4429 ctrl_info->num_elements_per_iq;
4430
4431 list_del(&io_request->request_list_entry);
4432 }
4433
4434 if (iq_pi != queue_group->iq_pi_copy[path]) {
4435 queue_group->iq_pi_copy[path] = iq_pi;
4436 /*
4437 * This write notifies the controller that one or more IUs are
4438 * available to be processed.
4439 */
4440 writel(iq_pi, queue_group->iq_pi[path]);
4441 }
4442
4443 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4444}
4445
1f37e992
KB
4446#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
4447
4448static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4449 struct completion *wait)
4450{
4451 int rc;
1f37e992
KB
4452
4453 while (1) {
4454 if (wait_for_completion_io_timeout(wait,
4fd22c13 4455 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
1f37e992
KB
4456 rc = 0;
4457 break;
4458 }
4459
4460 pqi_check_ctrl_health(ctrl_info);
4461 if (pqi_ctrl_offline(ctrl_info)) {
4462 rc = -ENXIO;
4463 break;
4464 }
1f37e992
KB
4465 }
4466
4467 return rc;
4468}
4469
6c223761
KB
4470static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4471 void *context)
4472{
4473 struct completion *waiting = context;
4474
4475 complete(waiting);
4476}
4477
694c5d5b
KB
4478static int pqi_process_raid_io_error_synchronous(
4479 struct pqi_raid_error_info *error_info)
26b390ab
KB
4480{
4481 int rc = -EIO;
4482
4483 switch (error_info->data_out_result) {
4484 case PQI_DATA_IN_OUT_GOOD:
4485 if (error_info->status == SAM_STAT_GOOD)
4486 rc = 0;
4487 break;
4488 case PQI_DATA_IN_OUT_UNDERFLOW:
4489 if (error_info->status == SAM_STAT_GOOD ||
4490 error_info->status == SAM_STAT_CHECK_CONDITION)
4491 rc = 0;
4492 break;
4493 case PQI_DATA_IN_OUT_ABORTED:
4494 rc = PQI_CMD_STATUS_ABORTED;
4495 break;
4496 }
4497
4498 return rc;
4499}
4500
ae0c189d
KB
4501static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4502{
4503 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4504}
4505
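/*
 * Send a RAID path request and wait for it to complete. Submissions are
 * serialized by sync_request_sem; blockable requests also wait while
 * the controller is quiesced for config table or OFA updates. RAID
 * error information is either copied back to the caller or folded into
 * the return code.
 */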
6c223761
KB
4506static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4507 struct pqi_iu_header *request, unsigned int flags,
ae0c189d 4508 struct pqi_raid_error_info *error_info)
6c223761 4509{
957c5ab1 4510 int rc = 0;
6c223761 4511 struct pqi_io_request *io_request;
6c223761 4512 size_t iu_length;
957c5ab1 4513 DECLARE_COMPLETION_ONSTACK(wait);
6c223761 4514
6c223761
KB
4515 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4516 if (down_interruptible(&ctrl_info->sync_request_sem))
4517 return -ERESTARTSYS;
4518 } else {
ae0c189d 4519 down(&ctrl_info->sync_request_sem);
6c223761
KB
4520 }
4521
7561a7e4 4522 pqi_ctrl_busy(ctrl_info);
ae0c189d
KB
4523 /*
 4524	 * Wait for other admin queue updates such as:
4525 * config table changes, OFA memory updates, ...
4526 */
4527 if (pqi_is_blockable_request(request))
4528 pqi_wait_if_ctrl_blocked(ctrl_info);
7561a7e4 4529
376fb880
KB
4530 if (pqi_ctrl_offline(ctrl_info)) {
4531 rc = -ENXIO;
4532 goto out;
4533 }
4534
6c223761
KB
4535 io_request = pqi_alloc_io_request(ctrl_info);
4536
4537 put_unaligned_le16(io_request->index,
4538 &(((struct pqi_raid_path_request *)request)->request_id));
4539
4540 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4541 ((struct pqi_raid_path_request *)request)->error_index =
4542 ((struct pqi_raid_path_request *)request)->request_id;
4543
4544 iu_length = get_unaligned_le16(&request->iu_length) +
4545 PQI_REQUEST_HEADER_LENGTH;
4546 memcpy(io_request->iu, request, iu_length);
4547
957c5ab1
KB
4548 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4549 io_request->context = &wait;
4550
583891c9 4551 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
957c5ab1
KB
4552 io_request);
4553
ae0c189d 4554 pqi_wait_for_completion_io(ctrl_info, &wait);
6c223761
KB
4555
4556 if (error_info) {
4557 if (io_request->error_info)
583891c9 4558 memcpy(error_info, io_request->error_info, sizeof(*error_info));
6c223761
KB
4559 else
4560 memset(error_info, 0, sizeof(*error_info));
4561 } else if (rc == 0 && io_request->error_info) {
583891c9 4562 rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
6c223761
KB
4563 }
4564
4565 pqi_free_io_request(io_request);
4566
7561a7e4 4567out:
ae0c189d 4568 pqi_ctrl_unbusy(ctrl_info);
6c223761
KB
4569 up(&ctrl_info->sync_request_sem);
4570
4571 return rc;
4572}
4573
4574static int pqi_validate_admin_response(
4575 struct pqi_general_admin_response *response, u8 expected_function_code)
4576{
4577 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4578 return -EINVAL;
4579
4580 if (get_unaligned_le16(&response->header.iu_length) !=
4581 PQI_GENERAL_ADMIN_IU_LENGTH)
4582 return -EINVAL;
4583
4584 if (response->function_code != expected_function_code)
4585 return -EINVAL;
4586
4587 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4588 return -EINVAL;
4589
4590 return 0;
4591}
4592
4593static int pqi_submit_admin_request_synchronous(
4594 struct pqi_ctrl_info *ctrl_info,
4595 struct pqi_general_admin_request *request,
4596 struct pqi_general_admin_response *response)
4597{
4598 int rc;
4599
4600 pqi_submit_admin_request(ctrl_info, request);
4601
4602 rc = pqi_poll_for_admin_response(ctrl_info, response);
4603
4604 if (rc == 0)
ae0c189d 4605 rc = pqi_validate_admin_response(response, request->function_code);
6c223761
KB
4606
4607 return rc;
4608}
4609
4610static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4611{
4612 int rc;
4613 struct pqi_general_admin_request request;
4614 struct pqi_general_admin_response response;
4615 struct pqi_device_capability *capability;
4616 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4617
4618 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4619 if (!capability)
4620 return -ENOMEM;
4621
4622 memset(&request, 0, sizeof(request));
4623
4624 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4625 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4626 &request.header.iu_length);
4627 request.function_code =
4628 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4629 put_unaligned_le32(sizeof(*capability),
4630 &request.data.report_device_capability.buffer_length);
4631
4632 rc = pqi_map_single(ctrl_info->pci_dev,
4633 &request.data.report_device_capability.sg_descriptor,
4634 capability, sizeof(*capability),
6917a9cc 4635 DMA_FROM_DEVICE);
6c223761
KB
4636 if (rc)
4637 goto out;
4638
583891c9 4639 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
6c223761
KB
4640
4641 pqi_pci_unmap(ctrl_info->pci_dev,
4642 &request.data.report_device_capability.sg_descriptor, 1,
6917a9cc 4643 DMA_FROM_DEVICE);
6c223761
KB
4644
4645 if (rc)
4646 goto out;
4647
4648 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4649 rc = -EIO;
4650 goto out;
4651 }
4652
4653 ctrl_info->max_inbound_queues =
4654 get_unaligned_le16(&capability->max_inbound_queues);
4655 ctrl_info->max_elements_per_iq =
4656 get_unaligned_le16(&capability->max_elements_per_iq);
4657 ctrl_info->max_iq_element_length =
4658 get_unaligned_le16(&capability->max_iq_element_length)
4659 * 16;
4660 ctrl_info->max_outbound_queues =
4661 get_unaligned_le16(&capability->max_outbound_queues);
4662 ctrl_info->max_elements_per_oq =
4663 get_unaligned_le16(&capability->max_elements_per_oq);
4664 ctrl_info->max_oq_element_length =
4665 get_unaligned_le16(&capability->max_oq_element_length)
4666 * 16;
4667
4668 sop_iu_layer_descriptor =
4669 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4670
4671 ctrl_info->max_inbound_iu_length_per_firmware =
4672 get_unaligned_le16(
4673 &sop_iu_layer_descriptor->max_inbound_iu_length);
4674 ctrl_info->inbound_spanning_supported =
4675 sop_iu_layer_descriptor->inbound_spanning_supported;
4676 ctrl_info->outbound_spanning_supported =
4677 sop_iu_layer_descriptor->outbound_spanning_supported;
4678
4679out:
4680 kfree(capability);
4681
4682 return rc;
4683}
4684
4685static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4686{
4687 if (ctrl_info->max_iq_element_length <
4688 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4689 dev_err(&ctrl_info->pci_dev->dev,
4690 "max. inbound queue element length of %d is less than the required length of %d\n",
4691 ctrl_info->max_iq_element_length,
4692 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4693 return -EINVAL;
4694 }
4695
4696 if (ctrl_info->max_oq_element_length <
4697 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4698 dev_err(&ctrl_info->pci_dev->dev,
4699 "max. outbound queue element length of %d is less than the required length of %d\n",
4700 ctrl_info->max_oq_element_length,
4701 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4702 return -EINVAL;
4703 }
4704
4705 if (ctrl_info->max_inbound_iu_length_per_firmware <
4706 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4707 dev_err(&ctrl_info->pci_dev->dev,
4708 "max. inbound IU length of %u is less than the min. required length of %d\n",
4709 ctrl_info->max_inbound_iu_length_per_firmware,
4710 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4711 return -EINVAL;
4712 }
4713
77668f41
KB
4714 if (!ctrl_info->inbound_spanning_supported) {
4715 dev_err(&ctrl_info->pci_dev->dev,
4716 "the controller does not support inbound spanning\n");
4717 return -EINVAL;
4718 }
4719
4720 if (ctrl_info->outbound_spanning_supported) {
4721 dev_err(&ctrl_info->pci_dev->dev,
4722 "the controller supports outbound spanning but this driver does not\n");
4723 return -EINVAL;
4724 }
4725
6c223761
KB
4726 return 0;
4727}
4728
6c223761
KB
4729static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4730{
4731 int rc;
4732 struct pqi_event_queue *event_queue;
4733 struct pqi_general_admin_request request;
4734 struct pqi_general_admin_response response;
4735
4736 event_queue = &ctrl_info->event_queue;
4737
4738 /*
4739 * Create OQ (Outbound Queue - device to host queue) to dedicate
4740 * to events.
4741 */
4742 memset(&request, 0, sizeof(request));
4743 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4744 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4745 &request.header.iu_length);
4746 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4747 put_unaligned_le16(event_queue->oq_id,
4748 &request.data.create_operational_oq.queue_id);
4749 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4750 &request.data.create_operational_oq.element_array_addr);
4751 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4752 &request.data.create_operational_oq.pi_addr);
4753 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4754 &request.data.create_operational_oq.num_elements);
4755 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4756 &request.data.create_operational_oq.element_length);
4757 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4758 put_unaligned_le16(event_queue->int_msg_num,
4759 &request.data.create_operational_oq.int_msg_num);
4760
4761 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4762 &response);
4763 if (rc)
4764 return rc;
4765
4766 event_queue->oq_ci = ctrl_info->iomem_base +
4767 PQI_DEVICE_REGISTERS_OFFSET +
4768 get_unaligned_le64(
4769 &response.data.create_operational_oq.oq_ci_offset);
4770
4771 return 0;
4772}
4773
061ef06a
KB
4774static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4775 unsigned int group_number)
6c223761 4776{
6c223761
KB
4777 int rc;
4778 struct pqi_queue_group *queue_group;
4779 struct pqi_general_admin_request request;
4780 struct pqi_general_admin_response response;
4781
061ef06a 4782 queue_group = &ctrl_info->queue_groups[group_number];
6c223761
KB
4783
4784 /*
4785 * Create IQ (Inbound Queue - host to device queue) for
4786 * RAID path.
4787 */
4788 memset(&request, 0, sizeof(request));
4789 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4790 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4791 &request.header.iu_length);
4792 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4793 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4794 &request.data.create_operational_iq.queue_id);
4795 put_unaligned_le64(
4796 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4797 &request.data.create_operational_iq.element_array_addr);
4798 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4799 &request.data.create_operational_iq.ci_addr);
4800 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4801 &request.data.create_operational_iq.num_elements);
4802 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4803 &request.data.create_operational_iq.element_length);
4804 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4805
4806 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4807 &response);
4808 if (rc) {
4809 dev_err(&ctrl_info->pci_dev->dev,
4810 "error creating inbound RAID queue\n");
4811 return rc;
4812 }
4813
4814 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4815 PQI_DEVICE_REGISTERS_OFFSET +
4816 get_unaligned_le64(
4817 &response.data.create_operational_iq.iq_pi_offset);
4818
4819 /*
4820 * Create IQ (Inbound Queue - host to device queue) for
4821 * Advanced I/O (AIO) path.
4822 */
4823 memset(&request, 0, sizeof(request));
4824 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4825 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4826 &request.header.iu_length);
4827 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4828 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4829 &request.data.create_operational_iq.queue_id);
4830 put_unaligned_le64((u64)queue_group->
4831 iq_element_array_bus_addr[AIO_PATH],
4832 &request.data.create_operational_iq.element_array_addr);
4833 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4834 &request.data.create_operational_iq.ci_addr);
4835 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4836 &request.data.create_operational_iq.num_elements);
4837 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4838 &request.data.create_operational_iq.element_length);
4839 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4840
4841 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4842 &response);
4843 if (rc) {
4844 dev_err(&ctrl_info->pci_dev->dev,
4845 "error creating inbound AIO queue\n");
339faa81 4846 return rc;
6c223761
KB
4847 }
4848
4849 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4850 PQI_DEVICE_REGISTERS_OFFSET +
4851 get_unaligned_le64(
4852 &response.data.create_operational_iq.iq_pi_offset);
4853
4854 /*
4855 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4856 * assumed to be for RAID path I/O unless we change the queue's
4857 * property.
4858 */
4859 memset(&request, 0, sizeof(request));
4860 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4861 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4862 &request.header.iu_length);
4863 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4864 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4865 &request.data.change_operational_iq_properties.queue_id);
4866 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4867 &request.data.change_operational_iq_properties.vendor_specific);
4868
4869 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4870 &response);
4871 if (rc) {
4872 dev_err(&ctrl_info->pci_dev->dev,
4873 "error changing queue property\n");
339faa81 4874 return rc;
6c223761
KB
4875 }
4876
4877 /*
4878 * Create OQ (Outbound Queue - device to host queue).
4879 */
4880 memset(&request, 0, sizeof(request));
4881 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4882 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4883 &request.header.iu_length);
4884 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4885 put_unaligned_le16(queue_group->oq_id,
4886 &request.data.create_operational_oq.queue_id);
4887 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4888 &request.data.create_operational_oq.element_array_addr);
4889 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4890 &request.data.create_operational_oq.pi_addr);
4891 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4892 &request.data.create_operational_oq.num_elements);
4893 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4894 &request.data.create_operational_oq.element_length);
4895 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4896 put_unaligned_le16(queue_group->int_msg_num,
4897 &request.data.create_operational_oq.int_msg_num);
4898
4899 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4900 &response);
4901 if (rc) {
4902 dev_err(&ctrl_info->pci_dev->dev,
4903 "error creating outbound queue\n");
339faa81 4904 return rc;
6c223761
KB
4905 }
4906
4907 queue_group->oq_ci = ctrl_info->iomem_base +
4908 PQI_DEVICE_REGISTERS_OFFSET +
4909 get_unaligned_le64(
4910 &response.data.create_operational_oq.oq_ci_offset);
4911
6c223761 4912 return 0;
6c223761
KB
4913}
4914
4915static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4916{
4917 int rc;
4918 unsigned int i;
4919
4920 rc = pqi_create_event_queue(ctrl_info);
4921 if (rc) {
4922 dev_err(&ctrl_info->pci_dev->dev,
4923 "error creating event queue\n");
4924 return rc;
4925 }
4926
4927 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
061ef06a 4928 rc = pqi_create_queue_group(ctrl_info, i);
6c223761
KB
4929 if (rc) {
4930 dev_err(&ctrl_info->pci_dev->dev,
4931 "error creating queue group number %u/%u\n",
4932 i, ctrl_info->num_queue_groups);
4933 return rc;
4934 }
4935 }
4936
4937 return 0;
4938}
4939
4940#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
5f492a7a 4941 struct_size((struct pqi_event_config *)0, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
6c223761 4942
6a50d6ad
KB
4943static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4944 bool enable_events)
6c223761
KB
4945{
4946 int rc;
4947 unsigned int i;
4948 struct pqi_event_config *event_config;
6a50d6ad 4949 struct pqi_event_descriptor *event_descriptor;
6c223761
KB
4950 struct pqi_general_management_request request;
4951
4952 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4953 GFP_KERNEL);
4954 if (!event_config)
4955 return -ENOMEM;
4956
4957 memset(&request, 0, sizeof(request));
4958
4959 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4960 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4961 data.report_event_configuration.sg_descriptors[1]) -
4962 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4963 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4964 &request.data.report_event_configuration.buffer_length);
4965
4966 rc = pqi_map_single(ctrl_info->pci_dev,
4967 request.data.report_event_configuration.sg_descriptors,
4968 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
6917a9cc 4969 DMA_FROM_DEVICE);
6c223761
KB
4970 if (rc)
4971 goto out;
4972
ae0c189d 4973 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
6c223761
KB
4974
4975 pqi_pci_unmap(ctrl_info->pci_dev,
4976 request.data.report_event_configuration.sg_descriptors, 1,
6917a9cc 4977 DMA_FROM_DEVICE);
6c223761
KB
4978
4979 if (rc)
4980 goto out;
4981
6a50d6ad
KB
4982 for (i = 0; i < event_config->num_event_descriptors; i++) {
4983 event_descriptor = &event_config->descriptors[i];
4984 if (enable_events &&
4985 pqi_is_supported_event(event_descriptor->event_type))
583891c9 4986 put_unaligned_le16(ctrl_info->event_queue.oq_id,
6a50d6ad
KB
4987 &event_descriptor->oq_id);
4988 else
4989 put_unaligned_le16(0, &event_descriptor->oq_id);
4990 }
6c223761
KB
4991
4992 memset(&request, 0, sizeof(request));
4993
4994 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4995 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4996 data.report_event_configuration.sg_descriptors[1]) -
4997 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4998 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4999 &request.data.report_event_configuration.buffer_length);
5000
5001 rc = pqi_map_single(ctrl_info->pci_dev,
5002 request.data.report_event_configuration.sg_descriptors,
5003 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
6917a9cc 5004 DMA_TO_DEVICE);
6c223761
KB
5005 if (rc)
5006 goto out;
5007
ae0c189d 5008 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
6c223761
KB
5009
5010 pqi_pci_unmap(ctrl_info->pci_dev,
5011 request.data.report_event_configuration.sg_descriptors, 1,
6917a9cc 5012 DMA_TO_DEVICE);
6c223761
KB
5013
5014out:
5015 kfree(event_config);
5016
5017 return rc;
5018}
5019
6a50d6ad
KB
5020static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
5021{
5022 return pqi_configure_events(ctrl_info, true);
5023}
5024
6c223761
KB
5025static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
5026{
5027 unsigned int i;
5028 struct device *dev;
5029 size_t sg_chain_buffer_length;
5030 struct pqi_io_request *io_request;
5031
5032 if (!ctrl_info->io_request_pool)
5033 return;
5034
5035 dev = &ctrl_info->pci_dev->dev;
5036 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5037 io_request = ctrl_info->io_request_pool;
5038
5039 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5040 kfree(io_request->iu);
5041 if (!io_request->sg_chain_buffer)
5042 break;
5043 dma_free_coherent(dev, sg_chain_buffer_length,
5044 io_request->sg_chain_buffer,
5045 io_request->sg_chain_buffer_dma_handle);
5046 io_request++;
5047 }
5048
5049 kfree(ctrl_info->io_request_pool);
5050 ctrl_info->io_request_pool = NULL;
5051}
5052
5053static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
5054{
694c5d5b
KB
5055 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
5056 ctrl_info->error_buffer_length,
5057 &ctrl_info->error_buffer_dma_handle,
5058 GFP_KERNEL);
6c223761
KB
5059 if (!ctrl_info->error_buffer)
5060 return -ENOMEM;
5061
5062 return 0;
5063}
5064
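/*
 * Allocate the I/O request pool: one entry per I/O slot, each with a
 * kmalloc'ed IU buffer and a DMA-coherent scatter-gather chain buffer.
 * On any failure the partially built pool is torn down again.
 */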
5065static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
5066{
5067 unsigned int i;
5068 void *sg_chain_buffer;
5069 size_t sg_chain_buffer_length;
5070 dma_addr_t sg_chain_buffer_dma_handle;
5071 struct device *dev;
5072 struct pqi_io_request *io_request;
5073
583891c9
KB
5074 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
5075 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
6c223761
KB
5076
5077 if (!ctrl_info->io_request_pool) {
5078 dev_err(&ctrl_info->pci_dev->dev,
5079 "failed to allocate I/O request pool\n");
5080 goto error;
5081 }
5082
5083 dev = &ctrl_info->pci_dev->dev;
5084 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5085 io_request = ctrl_info->io_request_pool;
5086
5087 for (i = 0; i < ctrl_info->max_io_slots; i++) {
583891c9 5088 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
6c223761
KB
5089
5090 if (!io_request->iu) {
5091 dev_err(&ctrl_info->pci_dev->dev,
5092 "failed to allocate IU buffers\n");
5093 goto error;
5094 }
5095
5096 sg_chain_buffer = dma_alloc_coherent(dev,
5097 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
5098 GFP_KERNEL);
5099
5100 if (!sg_chain_buffer) {
5101 dev_err(&ctrl_info->pci_dev->dev,
5102 "failed to allocate PQI scatter-gather chain buffers\n");
5103 goto error;
5104 }
5105
5106 io_request->index = i;
5107 io_request->sg_chain_buffer = sg_chain_buffer;
583891c9 5108 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
6c223761
KB
5109 io_request++;
5110 }
5111
5112 return 0;
5113
5114error:
5115 pqi_free_all_io_requests(ctrl_info);
5116
5117 return -ENOMEM;
5118}
5119
5120/*
5121 * Calculate required resources that are sized based on max. outstanding
5122 * requests and max. transfer size.
5123 */
5124
5125static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
5126{
5127 u32 max_transfer_size;
5128 u32 max_sg_entries;
5129
5130 ctrl_info->scsi_ml_can_queue =
5131 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
5132 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
5133
5134 ctrl_info->error_buffer_length =
5135 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
5136
d727a776
KB
5137 if (reset_devices)
5138 max_transfer_size = min(ctrl_info->max_transfer_size,
5139 PQI_MAX_TRANSFER_SIZE_KDUMP);
5140 else
5141 max_transfer_size = min(ctrl_info->max_transfer_size,
5142 PQI_MAX_TRANSFER_SIZE);
6c223761
KB
5143
5144 max_sg_entries = max_transfer_size / PAGE_SIZE;
5145
5146 /* +1 to cover when the buffer is not page-aligned. */
5147 max_sg_entries++;
5148
5149 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
5150
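	/*
	 * Recompute the transfer size that the clamped SG entry count can
	 * actually cover: worst case one page per entry, minus the extra
	 * entry reserved above for a non-page-aligned buffer.
	 */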
5151 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
5152
5153 ctrl_info->sg_chain_buffer_length =
e1d213bd
KB
5154 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
5155 PQI_EXTRA_SGL_MEMORY;
6c223761
KB
5156 ctrl_info->sg_tablesize = max_sg_entries;
5157 ctrl_info->max_sectors = max_transfer_size / 512;
5158}
5159
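/*
 * Size the operational queues: one queue group per online CPU, capped by
 * the available MSI-X vectors and the controller's inbound/outbound queue
 * limits (kdump kernels use a single group). Queue depths are derived from
 * the firmware's maximum inbound IU length and per-queue element limits.
 */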
5160static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
5161{
6c223761
KB
5162 int num_queue_groups;
5163 u16 num_elements_per_iq;
5164 u16 num_elements_per_oq;
5165
d727a776
KB
5166 if (reset_devices) {
5167 num_queue_groups = 1;
5168 } else {
5169 int num_cpus;
5170 int max_queue_groups;
5171
5172 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
5173 ctrl_info->max_outbound_queues - 1);
5174 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
6c223761 5175
d727a776
KB
5176 num_cpus = num_online_cpus();
5177 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
5178 num_queue_groups = min(num_queue_groups, max_queue_groups);
5179 }
6c223761
KB
5180
5181 ctrl_info->num_queue_groups = num_queue_groups;
061ef06a 5182 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
6c223761 5183
77668f41
KB
5184 /*
 5185	 * Make sure that the max. inbound IU length is a whole multiple
5186 * of our inbound element length.
5187 */
5188 ctrl_info->max_inbound_iu_length =
5189 (ctrl_info->max_inbound_iu_length_per_firmware /
5190 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
5191 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
6c223761
KB
5192
5193 num_elements_per_iq =
5194 (ctrl_info->max_inbound_iu_length /
5195 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
5196
5197 /* Add one because one element in each queue is unusable. */
5198 num_elements_per_iq++;
5199
5200 num_elements_per_iq = min(num_elements_per_iq,
5201 ctrl_info->max_elements_per_iq);
5202
5203 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5204 num_elements_per_oq = min(num_elements_per_oq,
5205 ctrl_info->max_elements_per_oq);
5206
5207 ctrl_info->num_elements_per_iq = num_elements_per_iq;
5208 ctrl_info->num_elements_per_oq = num_elements_per_oq;
5209
5210 ctrl_info->max_sg_per_iu =
5211 ((ctrl_info->max_inbound_iu_length -
5212 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5213 sizeof(struct pqi_sg_descriptor)) +
5214 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
6702d2c4
DB
5215
5216 ctrl_info->max_sg_per_r56_iu =
5217 ((ctrl_info->max_inbound_iu_length -
5218 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5219 sizeof(struct pqi_sg_descriptor)) +
5220 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
6c223761
KB
5221}
5222
583891c9
KB
5223static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5224 struct scatterlist *sg)
6c223761
KB
5225{
5226 u64 address = (u64)sg_dma_address(sg);
5227 unsigned int length = sg_dma_len(sg);
5228
5229 put_unaligned_le64(address, &sg_descriptor->address);
5230 put_unaligned_le32(length, &sg_descriptor->length);
5231 put_unaligned_le32(0, &sg_descriptor->flags);
5232}
5233
1a22bc4b
DB
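/*
 * Fill the IU's embedded SG descriptors from the mapped scatterlist. When
 * the embedded slots run out, the reserved last slot is turned into a
 * CISS_SG_CHAIN descriptor pointing at the request's preallocated chain
 * buffer and the remaining entries are written there. Returns the number
 * of descriptors placed in the IU itself (the chain descriptor counts as
 * one).
 */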
5234static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5235 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5236 int max_sg_per_iu, bool *chained)
6c223761
KB
5237{
5238 int i;
6c223761 5239 unsigned int num_sg_in_iu;
6c223761 5240
1a22bc4b 5241 *chained = false;
6c223761 5242 i = 0;
1a22bc4b
DB
5243 num_sg_in_iu = 0;
5244 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */
6c223761
KB
5245
5246 while (1) {
5247 pqi_set_sg_descriptor(sg_descriptor, sg);
1a22bc4b 5248 if (!*chained)
6c223761
KB
5249 num_sg_in_iu++;
5250 i++;
5251 if (i == sg_count)
5252 break;
5253 sg_descriptor++;
5254 if (i == max_sg_per_iu) {
1a22bc4b 5255 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
6c223761 5256 &sg_descriptor->address);
1a22bc4b 5257 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
6c223761 5258 &sg_descriptor->length);
1a22bc4b
DB
5259 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5260 *chained = true;
6c223761
KB
5261 num_sg_in_iu++;
5262 sg_descriptor = io_request->sg_chain_buffer;
5263 }
5264 sg = sg_next(sg);
5265 }
5266
5267 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
6c223761 5268
1a22bc4b 5269 return num_sg_in_iu;
6c223761
KB
5270}
5271
6c223761
KB
5272static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5273 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
6c223761
KB
5274 struct pqi_io_request *io_request)
5275{
6c223761
KB
5276 u16 iu_length;
5277 int sg_count;
a60eec02
KB
5278 bool chained;
5279 unsigned int num_sg_in_iu;
6c223761
KB
5280 struct scatterlist *sg;
5281 struct pqi_sg_descriptor *sg_descriptor;
5282
5283 sg_count = scsi_dma_map(scmd);
5284 if (sg_count < 0)
5285 return sg_count;
a60eec02 5286
6c223761 5287 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
a60eec02 5288 PQI_REQUEST_HEADER_LENGTH;
a60eec02 5289
6c223761
KB
5290 if (sg_count == 0)
5291 goto out;
5292
a60eec02
KB
5293 sg = scsi_sglist(scmd);
5294 sg_descriptor = request->sg_descriptors;
a60eec02 5295
1a22bc4b
DB
5296 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5297 ctrl_info->max_sg_per_iu, &chained);
6c223761 5298
a60eec02 5299 request->partial = chained;
6c223761 5300 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
a60eec02
KB
5301
5302out:
6c223761 5303 put_unaligned_le16(iu_length, &request->header.iu_length);
6c223761
KB
5304
5305 return 0;
5306}
5307
7a012c23
DB
5308static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5309 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5310 struct pqi_io_request *io_request)
6c223761 5311{
7a012c23
DB
5312 u16 iu_length;
5313 int sg_count;
5314 bool chained;
5315 unsigned int num_sg_in_iu;
5316 struct scatterlist *sg;
5317 struct pqi_sg_descriptor *sg_descriptor;
5318
5319 sg_count = scsi_dma_map(scmd);
5320 if (sg_count < 0)
5321 return sg_count;
5322
5323 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5324 PQI_REQUEST_HEADER_LENGTH;
5325 num_sg_in_iu = 0;
5326
5327 if (sg_count == 0)
5328 goto out;
5329
5330 sg = scsi_sglist(scmd);
5331 sg_descriptor = request->sg_descriptors;
5332
5333 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5334 ctrl_info->max_sg_per_iu, &chained);
5335
5336 request->partial = chained;
5337 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5338
5339out:
5340 put_unaligned_le16(iu_length, &request->header.iu_length);
5341 request->num_sg_descriptors = num_sg_in_iu;
5342
5343 return 0;
5344}
5345
6702d2c4
DB
5346static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5347 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5348 struct pqi_io_request *io_request)
5349{
5350 u16 iu_length;
5351 int sg_count;
5352 bool chained;
5353 unsigned int num_sg_in_iu;
5354 struct scatterlist *sg;
5355 struct pqi_sg_descriptor *sg_descriptor;
5356
5357 sg_count = scsi_dma_map(scmd);
5358 if (sg_count < 0)
5359 return sg_count;
5360
5361 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5362 PQI_REQUEST_HEADER_LENGTH;
5363 num_sg_in_iu = 0;
5364
5365 if (sg_count != 0) {
5366 sg = scsi_sglist(scmd);
5367 sg_descriptor = request->sg_descriptors;
5368
5369 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5370 ctrl_info->max_sg_per_r56_iu, &chained);
5371
5372 request->partial = chained;
5373 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5374 }
5375
5376 put_unaligned_le16(iu_length, &request->header.iu_length);
5377 request->num_sg_descriptors = num_sg_in_iu;
5378
5379 return 0;
5380}
5381
6c223761
KB
5382static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5383 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5384 struct pqi_io_request *io_request)
5385{
6c223761
KB
5386 u16 iu_length;
5387 int sg_count;
a60eec02
KB
5388 bool chained;
5389 unsigned int num_sg_in_iu;
6c223761
KB
5390 struct scatterlist *sg;
5391 struct pqi_sg_descriptor *sg_descriptor;
5392
5393 sg_count = scsi_dma_map(scmd);
5394 if (sg_count < 0)
5395 return sg_count;
a60eec02
KB
5396
5397 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5398 PQI_REQUEST_HEADER_LENGTH;
5399 num_sg_in_iu = 0;
5400
6c223761
KB
5401 if (sg_count == 0)
5402 goto out;
5403
a60eec02
KB
5404 sg = scsi_sglist(scmd);
5405 sg_descriptor = request->sg_descriptors;
a60eec02 5406
1a22bc4b
DB
5407 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5408 ctrl_info->max_sg_per_iu, &chained);
6c223761 5409
a60eec02 5410 request->partial = chained;
6c223761 5411 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
a60eec02
KB
5412
5413out:
6c223761
KB
5414 put_unaligned_le16(iu_length, &request->header.iu_length);
5415 request->num_sg_descriptors = num_sg_in_iu;
5416
5417 return 0;
5418}
5419
5420static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5421 void *context)
5422{
5423 struct scsi_cmnd *scmd;
5424
5425 scmd = io_request->scmd;
5426 pqi_free_io_request(io_request);
5427 scsi_dma_unmap(scmd);
5428 pqi_scsi_done(scmd);
5429}
5430
376fb880
KB
5431static int pqi_raid_submit_scsi_cmd_with_io_request(
5432 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
6c223761
KB
5433 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5434 struct pqi_queue_group *queue_group)
5435{
5436 int rc;
5437 size_t cdb_length;
6c223761
KB
5438 struct pqi_raid_path_request *request;
5439
6c223761
KB
5440 io_request->io_complete_callback = pqi_raid_io_complete;
5441 io_request->scmd = scmd;
5442
6c223761 5443 request = io_request->iu;
583891c9 5444 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
6c223761
KB
5445
5446 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5447 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5448 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5449 put_unaligned_le16(io_request->index, &request->request_id);
5450 request->error_index = request->request_id;
583891c9 5451 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
6c223761
KB
5452
5453 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5454 memcpy(request->cdb, scmd->cmnd, cdb_length);
5455
5456 switch (cdb_length) {
5457 case 6:
5458 case 10:
5459 case 12:
5460 case 16:
583891c9 5461 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6c223761
KB
5462 break;
5463 case 20:
583891c9 5464 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
6c223761
KB
5465 break;
5466 case 24:
583891c9 5467 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
6c223761
KB
5468 break;
5469 case 28:
583891c9 5470 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
6c223761
KB
5471 break;
5472 case 32:
5473 default:
583891c9 5474 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
6c223761
KB
5475 break;
5476 }
5477
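	/*
	 * Note: the SOP data direction is expressed from the controller's
	 * point of view, so a host write (DMA_TO_DEVICE) maps to
	 * SOP_READ_FLAG and a host read (DMA_FROM_DEVICE) maps to
	 * SOP_WRITE_FLAG.
	 */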
5478 switch (scmd->sc_data_direction) {
5479 case DMA_TO_DEVICE:
5480 request->data_direction = SOP_READ_FLAG;
5481 break;
5482 case DMA_FROM_DEVICE:
5483 request->data_direction = SOP_WRITE_FLAG;
5484 break;
5485 case DMA_NONE:
5486 request->data_direction = SOP_NO_DIRECTION_FLAG;
5487 break;
5488 case DMA_BIDIRECTIONAL:
5489 request->data_direction = SOP_BIDIRECTIONAL;
5490 break;
5491 default:
5492 dev_err(&ctrl_info->pci_dev->dev,
5493 "unknown data direction: %d\n",
5494 scmd->sc_data_direction);
6c223761
KB
5495 break;
5496 }
5497
5498 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5499 if (rc) {
5500 pqi_free_io_request(io_request);
5501 return SCSI_MLQUEUE_HOST_BUSY;
5502 }
5503
5504 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5505
5506 return 0;
5507}
5508
376fb880
KB
5509static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5510 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5511 struct pqi_queue_group *queue_group)
5512{
5513 struct pqi_io_request *io_request;
5514
5515 io_request = pqi_alloc_io_request(ctrl_info);
5516
5517 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5518 device, scmd, queue_group);
5519}
5520
376fb880
KB
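/*
 * Decide whether a failed RAID-bypass (AIO) request should be retried down
 * the normal RAID path: only if it was a bypass request that did not
 * complete with GOOD status or DID_NO_CONNECT, and both the device and the
 * controller are still online.
 */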
5521static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5522{
5523 struct scsi_cmnd *scmd;
03b288cf 5524 struct pqi_scsi_dev *device;
376fb880
KB
5525 struct pqi_ctrl_info *ctrl_info;
5526
5527 if (!io_request->raid_bypass)
5528 return false;
5529
5530 scmd = io_request->scmd;
5531 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5532 return false;
5533 if (host_byte(scmd->result) == DID_NO_CONNECT)
5534 return false;
5535
03b288cf 5536 device = scmd->device->hostdata;
5be9db06 5537 if (pqi_device_offline(device) || pqi_device_in_remove(device))
03b288cf
KB
5538 return false;
5539
376fb880
KB
5540 ctrl_info = shost_to_hba(scmd->device->host);
5541 if (pqi_ctrl_offline(ctrl_info))
5542 return false;
5543
5544 return true;
5545}
5546
6c223761
KB
5547static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5548 void *context)
5549{
5550 struct scsi_cmnd *scmd;
5551
5552 scmd = io_request->scmd;
5553 scsi_dma_unmap(scmd);
5be9db06 5554 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
6c223761 5555 set_host_byte(scmd, DID_IMM_RETRY);
5be9db06 5556 scmd->SCp.this_residual++;
376fb880 5557 }
5be9db06 5558
6c223761
KB
5559 pqi_free_io_request(io_request);
5560 pqi_scsi_done(scmd);
5561}
5562
5563static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5564 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5565 struct pqi_queue_group *queue_group)
5566{
5567 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
376fb880 5568 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
6c223761
KB
5569}
5570
5571static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5572 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5573 unsigned int cdb_length, struct pqi_queue_group *queue_group,
376fb880 5574 struct pqi_encryption_info *encryption_info, bool raid_bypass)
6c223761
KB
5575{
5576 int rc;
5577 struct pqi_io_request *io_request;
5578 struct pqi_aio_path_request *request;
5579
5580 io_request = pqi_alloc_io_request(ctrl_info);
5581 io_request->io_complete_callback = pqi_aio_io_complete;
5582 io_request->scmd = scmd;
376fb880 5583 io_request->raid_bypass = raid_bypass;
6c223761
KB
5584
5585 request = io_request->iu;
583891c9 5586 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
6c223761
KB
5587
5588 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5589 put_unaligned_le32(aio_handle, &request->nexus_id);
5590 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5591 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5592 put_unaligned_le16(io_request->index, &request->request_id);
5593 request->error_index = request->request_id;
5594 if (cdb_length > sizeof(request->cdb))
5595 cdb_length = sizeof(request->cdb);
5596 request->cdb_length = cdb_length;
5597 memcpy(request->cdb, cdb, cdb_length);
5598
5599 switch (scmd->sc_data_direction) {
5600 case DMA_TO_DEVICE:
5601 request->data_direction = SOP_READ_FLAG;
5602 break;
5603 case DMA_FROM_DEVICE:
5604 request->data_direction = SOP_WRITE_FLAG;
5605 break;
5606 case DMA_NONE:
5607 request->data_direction = SOP_NO_DIRECTION_FLAG;
5608 break;
5609 case DMA_BIDIRECTIONAL:
5610 request->data_direction = SOP_BIDIRECTIONAL;
5611 break;
5612 default:
5613 dev_err(&ctrl_info->pci_dev->dev,
5614 "unknown data direction: %d\n",
5615 scmd->sc_data_direction);
6c223761
KB
5616 break;
5617 }
5618
5619 if (encryption_info) {
5620 request->encryption_enable = true;
5621 put_unaligned_le16(encryption_info->data_encryption_key_index,
5622 &request->data_encryption_key_index);
5623 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5624 &request->encrypt_tweak_lower);
5625 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5626 &request->encrypt_tweak_upper);
5627 }
5628
5629 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5630 if (rc) {
5631 pqi_free_io_request(io_request);
5632 return SCSI_MLQUEUE_HOST_BUSY;
5633 }
5634
5635 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5636
5637 return 0;
5638}
5639
7a012c23
DB
5640static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5641 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5642 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5643 struct pqi_scsi_dev_raid_map_data *rmd)
7a012c23
DB
5644{
5645 int rc;
5646 struct pqi_io_request *io_request;
5647 struct pqi_aio_r1_path_request *r1_request;
5648
5649 io_request = pqi_alloc_io_request(ctrl_info);
5650 io_request->io_complete_callback = pqi_aio_io_complete;
5651 io_request->scmd = scmd;
5652 io_request->raid_bypass = true;
5653
5654 r1_request = io_request->iu;
5655 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5656
5657 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
7a012c23
DB
5658 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5659 r1_request->num_drives = rmd->num_it_nexus_entries;
5660 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5661 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5662 if (rmd->num_it_nexus_entries == 3)
5663 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5664
5665 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5666 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5667 put_unaligned_le16(io_request->index, &r1_request->request_id);
5668 r1_request->error_index = r1_request->request_id;
5669 if (rmd->cdb_length > sizeof(r1_request->cdb))
5670 rmd->cdb_length = sizeof(r1_request->cdb);
5671 r1_request->cdb_length = rmd->cdb_length;
5672 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5673
 5674	/* The direction is always write (a host write is a controller read, hence SOP_READ_FLAG). */
5675 r1_request->data_direction = SOP_READ_FLAG;
5676
5677 if (encryption_info) {
5678 r1_request->encryption_enable = true;
5679 put_unaligned_le16(encryption_info->data_encryption_key_index,
5680 &r1_request->data_encryption_key_index);
5681 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5682 &r1_request->encrypt_tweak_lower);
5683 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5684 &r1_request->encrypt_tweak_upper);
5685 }
5686
5687 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5688 if (rc) {
5689 pqi_free_io_request(io_request);
5690 return SCSI_MLQUEUE_HOST_BUSY;
5691 }
5692
5693 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5694
5695 return 0;
5696}
5697
6702d2c4
DB
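/*
 * Build and start a RAID 5/6 AIO write request: the IU addresses the data
 * drive and the P parity drive (plus the Q parity drive and XOR multiplier
 * for RAID 6) directly by their IT nexus handles, and carries the volume's
 * encryption tweak when encryption is enabled.
 */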
5698static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5699 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5700 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5701 struct pqi_scsi_dev_raid_map_data *rmd)
5702{
5703 int rc;
5704 struct pqi_io_request *io_request;
5705 struct pqi_aio_r56_path_request *r56_request;
5706
5707 io_request = pqi_alloc_io_request(ctrl_info);
5708 io_request->io_complete_callback = pqi_aio_io_complete;
5709 io_request->scmd = scmd;
5710 io_request->raid_bypass = true;
5711
5712 r56_request = io_request->iu;
5713 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5714
5715 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5716 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5717 else
5718 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5719
5720 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5721 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5722 put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5723 if (rmd->raid_level == SA_RAID_6) {
5724 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5725 r56_request->xor_multiplier = rmd->xor_mult;
5726 }
5727 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5728 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5729 put_unaligned_le64(rmd->row, &r56_request->row);
5730
5731 put_unaligned_le16(io_request->index, &r56_request->request_id);
5732 r56_request->error_index = r56_request->request_id;
5733
5734 if (rmd->cdb_length > sizeof(r56_request->cdb))
5735 rmd->cdb_length = sizeof(r56_request->cdb);
5736 r56_request->cdb_length = rmd->cdb_length;
5737 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5738
 5739	/* The direction is always write (a host write is a controller read, hence SOP_READ_FLAG). */
5740 r56_request->data_direction = SOP_READ_FLAG;
5741
5742 if (encryption_info) {
5743 r56_request->encryption_enable = true;
5744 put_unaligned_le16(encryption_info->data_encryption_key_index,
5745 &r56_request->data_encryption_key_index);
5746 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5747 &r56_request->encrypt_tweak_lower);
5748 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5749 &r56_request->encrypt_tweak_upper);
5750 }
5751
5752 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5753 if (rc) {
5754 pqi_free_io_request(io_request);
5755 return SCSI_MLQUEUE_HOST_BUSY;
5756 }
5757
5758 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5759
5760 return 0;
5761}
5762
061ef06a
KB
5763static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5764 struct scsi_cmnd *scmd)
5765{
5766 u16 hw_queue;
5767
12db0f93 5768 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
061ef06a
KB
5769 if (hw_queue > ctrl_info->max_hw_queue_index)
5770 hw_queue = 0;
5771
5772 return hw_queue;
5773}
5774
5be9db06
KB
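/*
 * A request is eligible for the RAID-bypass (AIO) path only if it is not a
 * passthrough request and has not already been retried; the driver reuses
 * scmd->SCp.this_residual as the bypass retry counter (see
 * pqi_aio_io_complete()).
 */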
5775static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5776{
12db0f93 5777 if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5be9db06
KB
5778 return false;
5779
5780 return scmd->SCp.this_residual == 0;
5781}
5782
7561a7e4
KB
5783/*
5784 * This function gets called just before we hand the completed SCSI request
5785 * back to the SML.
5786 */
5787
5788void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5789{
5790 struct pqi_scsi_dev *device;
5791
1e46731e
MR
5792 if (!scmd->device) {
5793 set_host_byte(scmd, DID_NO_CONNECT);
5794 return;
5795 }
5796
7561a7e4 5797 device = scmd->device->hostdata;
1e46731e
MR
5798 if (!device) {
5799 set_host_byte(scmd, DID_NO_CONNECT);
5800 return;
5801 }
5802
7561a7e4
KB
5803 atomic_dec(&device->scsi_cmds_outstanding);
5804}
5805
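/*
 * Detect sequential write streams to RAID 5/6 volumes by tracking the
 * expected next LBA of up to NUM_STREAMS_PER_LUN recent write streams per
 * device (LRU replacement). Returns true when the write should be sent
 * down the RAID path instead of being bypassed to AIO.
 */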
c7ffedb3 5806static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
7d81d2b8 5807 struct scsi_cmnd *scmd)
c7ffedb3
DB
5808{
5809 u32 oldest_jiffies;
5810 u8 lru_index;
5811 int i;
5812 int rc;
5813 struct pqi_scsi_dev *device;
5814 struct pqi_stream_data *pqi_stream_data;
5815 struct pqi_scsi_dev_raid_map_data rmd;
5816
5817 if (!ctrl_info->enable_stream_detection)
5818 return false;
5819
5820 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5821 if (rc)
5822 return false;
5823
5824 /* Check writes only. */
5825 if (!rmd.is_write)
5826 return false;
5827
5828 device = scmd->device->hostdata;
5829
5830 /* Check for RAID 5/6 streams. */
5831 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
5832 return false;
5833
5834 /*
 5835	 * If the controller does not support AIO RAID 5/6 writes, the request
 5836	 * must be sent down the non-AIO (RAID) path.
5837 */
5838 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
5839 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
5840 return true;
5841
5842 lru_index = 0;
5843 oldest_jiffies = INT_MAX;
5844 for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
5845 pqi_stream_data = &device->stream_data[i];
5846 /*
 5847		 * Check whether this request is adjacent to, or falls
 5848		 * within, the previous request for this stream.
5849 */
5850 if ((pqi_stream_data->next_lba &&
5851 rmd.first_block >= pqi_stream_data->next_lba) &&
5852 rmd.first_block <= pqi_stream_data->next_lba +
5853 rmd.block_cnt) {
5854 pqi_stream_data->next_lba = rmd.first_block +
5855 rmd.block_cnt;
5856 pqi_stream_data->last_accessed = jiffies;
5857 return true;
5858 }
5859
5860 /* unused entry */
5861 if (pqi_stream_data->last_accessed == 0) {
5862 lru_index = i;
5863 break;
5864 }
5865
5866 /* Find entry with oldest last accessed time. */
5867 if (pqi_stream_data->last_accessed <= oldest_jiffies) {
5868 oldest_jiffies = pqi_stream_data->last_accessed;
5869 lru_index = i;
5870 }
5871 }
5872
5873 /* Set LRU entry. */
5874 pqi_stream_data = &device->stream_data[lru_index];
5875 pqi_stream_data->last_accessed = jiffies;
5876 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
5877
5878 return false;
5879}
5880
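/*
 * Main .queuecommand entry point: pick a hardware queue for the command,
 * try the RAID-bypass (AIO) path for eligible logical-volume requests, and
 * otherwise submit down the RAID path (physical devices use the AIO path
 * whenever it is enabled). The per-device outstanding command count is
 * held for the lifetime of the request.
 */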
5881static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
6c223761
KB
5882{
5883 int rc;
5884 struct pqi_ctrl_info *ctrl_info;
5885 struct pqi_scsi_dev *device;
061ef06a 5886 u16 hw_queue;
6c223761
KB
5887 struct pqi_queue_group *queue_group;
5888 bool raid_bypassed;
5889
5890 device = scmd->device->hostdata;
6c223761 5891
1e46731e
MR
5892 if (!device) {
5893 set_host_byte(scmd, DID_NO_CONNECT);
5894 pqi_scsi_done(scmd);
5895 return 0;
5896 }
5897
7561a7e4
KB
5898 atomic_inc(&device->scsi_cmds_outstanding);
5899
583891c9
KB
5900 ctrl_info = shost_to_hba(shost);
5901
1bdf6e93 5902 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
6c223761
KB
5903 set_host_byte(scmd, DID_NO_CONNECT);
5904 pqi_scsi_done(scmd);
5905 return 0;
5906 }
5907
5be9db06 5908 if (pqi_ctrl_blocked(ctrl_info)) {
7561a7e4
KB
5909 rc = SCSI_MLQUEUE_HOST_BUSY;
5910 goto out;
5911 }
5912
7d81d2b8
KB
5913 /*
5914 * This is necessary because the SML doesn't zero out this field during
5915 * error recovery.
5916 */
5917 scmd->result = 0;
5918
061ef06a
KB
5919 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5920 queue_group = &ctrl_info->queue_groups[hw_queue];
6c223761
KB
5921
5922 if (pqi_is_logical_device(device)) {
5923 raid_bypassed = false;
588a63fe 5924 if (device->raid_bypass_enabled &&
5be9db06
KB
5925 pqi_is_bypass_eligible_request(scmd) &&
5926 !pqi_is_parity_write_stream(ctrl_info, scmd)) {
5927 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
8b664fef 5928 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
376fb880 5929 raid_bypassed = true;
8b664fef
KB
5930 atomic_inc(&device->raid_bypass_cnt);
5931 }
6c223761
KB
5932 }
5933 if (!raid_bypassed)
8b664fef 5934 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6c223761
KB
5935 } else {
5936 if (device->aio_enabled)
8b664fef 5937 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6c223761 5938 else
8b664fef 5939 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6c223761
KB
5940 }
5941
7561a7e4 5942out:
7561a7e4
KB
5943 if (rc)
5944 atomic_dec(&device->scsi_cmds_outstanding);
5945
6c223761
KB
5946 return rc;
5947}
5948
6ce1ddf5 5949static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
7561a7e4 5950{
6ce1ddf5 5951 unsigned int i;
7561a7e4
KB
5952 unsigned int path;
5953 unsigned long flags;
6ce1ddf5
KB
5954 unsigned int queued_io_count;
5955 struct pqi_queue_group *queue_group;
5956 struct pqi_io_request *io_request;
7561a7e4 5957
6ce1ddf5
KB
5958 queued_io_count = 0;
5959
5960 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5961 queue_group = &ctrl_info->queue_groups[i];
5962 for (path = 0; path < 2; path++) {
5963 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
5964 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
5965 queued_io_count++;
5966 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
7561a7e4
KB
5967 }
5968 }
5969
6ce1ddf5 5970 return queued_io_count;
7561a7e4
KB
5971}
5972
6ce1ddf5 5973static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
7561a7e4 5974{
7561a7e4
KB
5975 unsigned int i;
5976 unsigned int path;
6ce1ddf5 5977 unsigned int nonempty_inbound_queue_count;
7561a7e4
KB
5978 struct pqi_queue_group *queue_group;
5979 pqi_index_t iq_pi;
5980 pqi_index_t iq_ci;
5981
6ce1ddf5
KB
5982 nonempty_inbound_queue_count = 0;
5983
7561a7e4
KB
5984 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5985 queue_group = &ctrl_info->queue_groups[i];
7561a7e4
KB
5986 for (path = 0; path < 2; path++) {
5987 iq_pi = queue_group->iq_pi_copy[path];
6ce1ddf5
KB
5988 iq_ci = readl(queue_group->iq_ci[path]);
5989 if (iq_ci != iq_pi)
5990 nonempty_inbound_queue_count++;
5991 }
5992 }
7561a7e4 5993
6ce1ddf5
KB
5994 return nonempty_inbound_queue_count;
5995}
5996
5997#define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10
5998
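/*
 * Wait for all queued I/O to be submitted and for every inbound queue to
 * drain (producer index equal to consumer index), polling controller
 * health and warning every
 * PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS seconds.
 */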
5999static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
6000{
6001 unsigned long start_jiffies;
6002 unsigned long warning_timeout;
6003 unsigned int queued_io_count;
6004 unsigned int nonempty_inbound_queue_count;
6005 bool displayed_warning;
6006
6007 displayed_warning = false;
6008 start_jiffies = jiffies;
6009 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
6010
6011 while (1) {
6012 queued_io_count = pqi_queued_io_count(ctrl_info);
6013 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
6014 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
6015 break;
6016 pqi_check_ctrl_health(ctrl_info);
6017 if (pqi_ctrl_offline(ctrl_info))
6018 return -ENXIO;
6019 if (time_after(jiffies, warning_timeout)) {
6020 dev_warn(&ctrl_info->pci_dev->dev,
6021 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
6022 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
6023 displayed_warning = true;
6024 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
7561a7e4 6025 }
6ce1ddf5 6026 usleep_range(1000, 2000);
7561a7e4
KB
6027 }
6028
6ce1ddf5
KB
6029 if (displayed_warning)
6030 dev_warn(&ctrl_info->pci_dev->dev,
6031 "queued I/O drained after waiting for %u seconds\n",
6032 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
6033
7561a7e4
KB
6034 return 0;
6035}
6036
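/*
 * Complete, with DID_RESET, any request for the given device that is still
 * sitting on a queue group's submit list and has not yet been handed to
 * the controller.
 */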
6037static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
6038 struct pqi_scsi_dev *device)
6039{
6040 unsigned int i;
6041 unsigned int path;
6042 struct pqi_queue_group *queue_group;
6043 unsigned long flags;
6044 struct pqi_io_request *io_request;
6045 struct pqi_io_request *next;
6046 struct scsi_cmnd *scmd;
6047 struct pqi_scsi_dev *scsi_device;
6048
6049 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6050 queue_group = &ctrl_info->queue_groups[i];
6051
6052 for (path = 0; path < 2; path++) {
6053 spin_lock_irqsave(
6054 &queue_group->submit_lock[path], flags);
6055
6056 list_for_each_entry_safe(io_request, next,
6057 &queue_group->request_list[path],
6058 request_list_entry) {
583891c9 6059
7561a7e4
KB
6060 scmd = io_request->scmd;
6061 if (!scmd)
6062 continue;
6063
6064 scsi_device = scmd->device->hostdata;
6065 if (scsi_device != device)
6066 continue;
6067
6068 list_del(&io_request->request_list_entry);
6069 set_host_byte(scmd, DID_RESET);
b622a601
MB
6070 pqi_free_io_request(io_request);
6071 scsi_dma_unmap(scmd);
7561a7e4
KB
6072 pqi_scsi_done(scmd);
6073 }
6074
6075 spin_unlock_irqrestore(
6076 &queue_group->submit_lock[path], flags);
6077 }
6078 }
6079}
6080
18ff5f08 6081#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
4fd22c13 6082
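/*
 * Poll the device's outstanding command count until it reaches zero,
 * warning every PQI_PENDING_IO_WARNING_TIMEOUT_SECS seconds and giving up
 * with -ETIMEDOUT after timeout_msecs (or -ENXIO if the controller goes
 * offline).
 */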
061ef06a 6083static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
18ff5f08 6084 struct pqi_scsi_dev *device, unsigned long timeout_msecs)
061ef06a 6085{
18ff5f08
KB
6086 int cmds_outstanding;
6087 unsigned long start_jiffies;
6088 unsigned long warning_timeout;
6089 unsigned long msecs_waiting;
1e46731e 6090
18ff5f08
KB
6091 start_jiffies = jiffies;
6092 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + start_jiffies;
1e46731e 6093
18ff5f08 6094 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding)) > 0) {
061ef06a
KB
6095 pqi_check_ctrl_health(ctrl_info);
6096 if (pqi_ctrl_offline(ctrl_info))
6097 return -ENXIO;
18ff5f08 6098 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
6ce1ddf5 6099 if (msecs_waiting >= timeout_msecs) {
18ff5f08
KB
6100 dev_err(&ctrl_info->pci_dev->dev,
6101 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
6102 ctrl_info->scsi_host->host_no, device->bus, device->target,
6103 device->lun, msecs_waiting / 1000, cmds_outstanding);
6104 return -ETIMEDOUT;
061ef06a 6105 }
18ff5f08
KB
6106 if (time_after(jiffies, warning_timeout)) {
6107 dev_warn(&ctrl_info->pci_dev->dev,
6108 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
6109 ctrl_info->scsi_host->host_no, device->bus, device->target,
6110 device->lun, msecs_waiting / 1000, cmds_outstanding);
6111 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * PQI_HZ) + jiffies;
4fd22c13 6112 }
061ef06a
KB
6113 usleep_range(1000, 2000);
6114 }
6115
6116 return 0;
6117}
6118
14bb215d
KB
6119static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
6120 void *context)
6c223761 6121{
14bb215d 6122 struct completion *waiting = context;
6c223761 6123
14bb215d
KB
6124 complete(waiting);
6125}
6c223761 6126
c2922f17 6127#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
14bb215d
KB
6128
6129static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
6130 struct pqi_scsi_dev *device, struct completion *wait)
6131{
6132 int rc;
18ff5f08 6133 unsigned int wait_secs;
6ce1ddf5 6134 int cmds_outstanding;
18ff5f08
KB
6135
6136 wait_secs = 0;
14bb215d
KB
6137
6138 while (1) {
6139 if (wait_for_completion_io_timeout(wait,
c2922f17 6140 PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
14bb215d
KB
6141 rc = 0;
6142 break;
6c223761
KB
6143 }
6144
14bb215d
KB
6145 pqi_check_ctrl_health(ctrl_info);
6146 if (pqi_ctrl_offline(ctrl_info)) {
4e8415e3 6147 rc = -ENXIO;
14bb215d
KB
6148 break;
6149 }
18ff5f08
KB
6150
6151 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
6ce1ddf5 6152 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding);
18ff5f08 6153 dev_warn(&ctrl_info->pci_dev->dev,
6ce1ddf5
KB
6154 "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
6155 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun, wait_secs, cmds_outstanding);
6c223761 6156 }
6c223761 6157
14bb215d 6158 return rc;
6c223761
KB
6159}
6160
18ff5f08
KB
6161#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
6162
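/*
 * Build a SOP LUN RESET task-management IU, submit it on the default queue
 * group's RAID path, and wait for the firmware to complete it.
 */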
6163static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
6c223761
KB
6164{
6165 int rc;
6166 struct pqi_io_request *io_request;
6167 DECLARE_COMPLETION_ONSTACK(wait);
6168 struct pqi_task_management_request *request;
6169
6c223761 6170 io_request = pqi_alloc_io_request(ctrl_info);
14bb215d 6171 io_request->io_complete_callback = pqi_lun_reset_complete;
6c223761
KB
6172 io_request->context = &wait;
6173
6174 request = io_request->iu;
6175 memset(request, 0, sizeof(*request));
6176
6177 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6178 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6179 &request->header.iu_length);
6180 put_unaligned_le16(io_request->index, &request->request_id);
6181 memcpy(request->lun_number, device->scsi3addr,
6182 sizeof(request->lun_number));
6183 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
c2922f17 6184 if (ctrl_info->tmf_iu_timeout_supported)
18ff5f08 6185 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
6c223761 6186
583891c9 6187 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6c223761
KB
6188 io_request);
6189
14bb215d
KB
6190 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
6191 if (rc == 0)
6c223761 6192 rc = io_request->status;
6c223761
KB
6193
6194 pqi_free_io_request(io_request);
6c223761
KB
6195
6196 return rc;
6197}
6198
18ff5f08
KB
6199#define PQI_LUN_RESET_RETRIES 3
6200#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000)
6201#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
6202#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
6c223761 6203
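/*
 * Issue the LUN reset up to 1 + PQI_LUN_RESET_RETRIES times, pausing
 * PQI_LUN_RESET_RETRY_INTERVAL_MSECS between attempts, then wait for the
 * device's outstanding commands to drain (a shorter wait is used when the
 * reset itself failed). Returns SUCCESS or FAILED for the SCSI EH.
 */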
18ff5f08 6204static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
6c223761 6205{
18ff5f08
KB
6206 int reset_rc;
6207 int wait_rc;
3406384b 6208 unsigned int retries;
18ff5f08 6209 unsigned long timeout_msecs;
6c223761 6210
3406384b 6211 for (retries = 0;;) {
18ff5f08
KB
6212 reset_rc = pqi_lun_reset(ctrl_info, device);
6213 if (reset_rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
3406384b
MR
6214 break;
6215 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6216 }
429fab70 6217
18ff5f08
KB
6218 timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
6219 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
4fd22c13 6220
18ff5f08
KB
6221 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, timeout_msecs);
6222 if (wait_rc && reset_rc == 0)
6223 reset_rc = wait_rc;
6c223761 6224
18ff5f08 6225 return reset_rc == 0 ? SUCCESS : FAILED;
6c223761
KB
6226}
6227
4fd22c13
MR
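/*
 * Device reset sequence: block new requests and wait for the controller to
 * quiesce, fail I/O still queued for the device, wait for the inbound
 * queues to empty, then run the LUN reset (with retries) before unblocking
 * requests.
 */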
6228static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
6229 struct pqi_scsi_dev *device)
6230{
6231 int rc;
6232
4fd22c13
MR
6233 pqi_ctrl_block_requests(ctrl_info);
6234 pqi_ctrl_wait_until_quiesced(ctrl_info);
6235 pqi_fail_io_queued_for_device(ctrl_info, device);
6236 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
4fd22c13
MR
6237 if (rc)
6238 rc = FAILED;
6239 else
37f33181
KB
6240 rc = pqi_lun_reset_with_retries(ctrl_info, device);
6241 pqi_ctrl_unblock_requests(ctrl_info);
429fab70 6242
4fd22c13
MR
6243 return rc;
6244}
6245
6c223761
KB
6246static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6247{
6248 int rc;
7561a7e4 6249 struct Scsi_Host *shost;
6c223761
KB
6250 struct pqi_ctrl_info *ctrl_info;
6251 struct pqi_scsi_dev *device;
6252
7561a7e4
KB
6253 shost = scmd->device->host;
6254 ctrl_info = shost_to_hba(shost);
6c223761
KB
6255 device = scmd->device->hostdata;
6256
37f33181
KB
6257 mutex_lock(&ctrl_info->lun_reset_mutex);
6258
6c223761 6259 dev_err(&ctrl_info->pci_dev->dev,
f0e473e0
MB
6260 "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
6261 shost->host_no,
6262 device->bus, device->target, device->lun,
6263 scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
6c223761 6264
7561a7e4 6265 pqi_check_ctrl_health(ctrl_info);
37f33181 6266 if (pqi_ctrl_offline(ctrl_info))
7561a7e4 6267 rc = FAILED;
37f33181
KB
6268 else
6269 rc = pqi_device_reset(ctrl_info, device);
429fab70 6270
6c223761
KB
6271 dev_err(&ctrl_info->pci_dev->dev,
6272 "reset of scsi %d:%d:%d:%d: %s\n",
7561a7e4 6273 shost->host_no, device->bus, device->target, device->lun,
6c223761
KB
6274 rc == SUCCESS ? "SUCCESS" : "FAILED");
6275
37f33181
KB
6276 mutex_unlock(&ctrl_info->lun_reset_mutex);
6277
6c223761
KB
6278 return rc;
6279}
6280
6281static int pqi_slave_alloc(struct scsi_device *sdev)
6282{
6283 struct pqi_scsi_dev *device;
6284 unsigned long flags;
6285 struct pqi_ctrl_info *ctrl_info;
6286 struct scsi_target *starget;
6287 struct sas_rphy *rphy;
6288
6289 ctrl_info = shost_to_hba(sdev->host);
6290
6291 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6292
6293 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6294 starget = scsi_target(sdev);
6295 rphy = target_to_rphy(starget);
6296 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6297 if (device) {
d4dc6aea
KB
6298 if (device->target_lun_valid) {
6299 device->ignore_device = true;
6300 } else {
6301 device->target = sdev_id(sdev);
6302 device->lun = sdev->lun;
6303 device->target_lun_valid = true;
6304 }
6c223761
KB
6305 }
6306 } else {
6307 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6308 sdev_id(sdev), sdev->lun);
6309 }
6310
94086f5b 6311 if (device) {
6c223761
KB
6312 sdev->hostdata = device;
6313 device->sdev = sdev;
6314 if (device->queue_depth) {
6315 device->advertised_queue_depth = device->queue_depth;
6316 scsi_change_queue_depth(sdev,
6317 device->advertised_queue_depth);
6318 }
99a12b48 6319 if (pqi_is_logical_device(device)) {
b6e2ef67 6320 pqi_disable_write_same(sdev);
99a12b48 6321 } else {
2b447f81 6322 sdev->allow_restart = 1;
99a12b48
KB
6323 if (device->device_type == SA_DEVICE_TYPE_NVME)
6324 pqi_disable_write_same(sdev);
6325 }
6c223761
KB
6326 }
6327
6328 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6329
6330 return 0;
6331}
6332
52198226
CH
6333static int pqi_map_queues(struct Scsi_Host *shost)
6334{
6335 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6336
79d3fa9e 6337 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
ed76e329 6338 ctrl_info->pci_dev, 0);
52198226
CH
6339}
6340
d4dc6aea
KB
6341static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
6342{
6343 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
6344}
6345
ce143793
KB
6346static int pqi_slave_configure(struct scsi_device *sdev)
6347{
d4dc6aea 6348 int rc = 0;
ce143793
KB
6349 struct pqi_scsi_dev *device;
6350
6351 device = sdev->hostdata;
6352 device->devtype = sdev->type;
6353
d4dc6aea
KB
6354 if (pqi_is_tape_changer_device(device) && device->ignore_device) {
6355 rc = -ENXIO;
6356 device->ignore_device = false;
6357 }
6358
6359 return rc;
ce143793
KB
6360}
6361
8b664fef 6362static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6c223761
KB
6363{
6364 struct pci_dev *pci_dev;
6365 u32 subsystem_vendor;
6366 u32 subsystem_device;
6367 cciss_pci_info_struct pciinfo;
6368
6369 if (!arg)
6370 return -EINVAL;
6371
6372 pci_dev = ctrl_info->pci_dev;
6373
6374 pciinfo.domain = pci_domain_nr(pci_dev->bus);
6375 pciinfo.bus = pci_dev->bus->number;
6376 pciinfo.dev_fn = pci_dev->devfn;
6377 subsystem_vendor = pci_dev->subsystem_vendor;
6378 subsystem_device = pci_dev->subsystem_device;
8b664fef 6379 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6c223761
KB
6380
6381 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
6382 return -EFAULT;
6383
6384 return 0;
6385}
6386
6387static int pqi_getdrivver_ioctl(void __user *arg)
6388{
6389 u32 version;
6390
6391 if (!arg)
6392 return -EINVAL;
6393
6394 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6395 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
6396
6397 if (copy_to_user(arg, &version, sizeof(version)))
6398 return -EFAULT;
6399
6400 return 0;
6401}
6402
6403struct ciss_error_info {
6404 u8 scsi_status;
6405 int command_status;
6406 size_t sense_data_length;
6407};
6408
6409static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6410 struct ciss_error_info *ciss_error_info)
6411{
6412 int ciss_cmd_status;
6413 size_t sense_data_length;
6414
6415 switch (pqi_error_info->data_out_result) {
6416 case PQI_DATA_IN_OUT_GOOD:
6417 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6418 break;
6419 case PQI_DATA_IN_OUT_UNDERFLOW:
6420 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6421 break;
6422 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6423 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6424 break;
6425 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6426 case PQI_DATA_IN_OUT_BUFFER_ERROR:
6427 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6428 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6429 case PQI_DATA_IN_OUT_ERROR:
6430 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6431 break;
6432 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6433 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6434 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6435 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6436 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6437 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6438 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6439 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6440 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6441 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6442 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6443 break;
6444 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6445 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6446 break;
6447 case PQI_DATA_IN_OUT_ABORTED:
6448 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6449 break;
6450 case PQI_DATA_IN_OUT_TIMEOUT:
6451 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6452 break;
6453 default:
6454 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6455 break;
6456 }
6457
6458 sense_data_length =
6459 get_unaligned_le16(&pqi_error_info->sense_data_length);
6460 if (sense_data_length == 0)
6461 sense_data_length =
6462 get_unaligned_le16(&pqi_error_info->response_data_length);
6463 if (sense_data_length)
6464 if (sense_data_length > sizeof(pqi_error_info->data))
6465 sense_data_length = sizeof(pqi_error_info->data);
6466
6467 ciss_error_info->scsi_status = pqi_error_info->status;
6468 ciss_error_info->command_status = ciss_cmd_status;
6469 ciss_error_info->sense_data_length = sense_data_length;
6470}
6471
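/*
 * CCISS_PASSTHRU handler: validate and copy in the IOCTL_Command_struct,
 * build a RAID path request around the caller's CDB, map the optional data
 * buffer bidirectionally, submit the request synchronously, and translate
 * the PQI error information back into CISS form for user space.
 */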
6472static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6473{
6474 int rc;
6475 char *kernel_buffer = NULL;
6476 u16 iu_length;
6477 size_t sense_data_length;
6478 IOCTL_Command_struct iocommand;
6479 struct pqi_raid_path_request request;
6480 struct pqi_raid_error_info pqi_error_info;
6481 struct ciss_error_info ciss_error_info;
6482
6483 if (pqi_ctrl_offline(ctrl_info))
6484 return -ENXIO;
2790cd4d
KB
6485 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6486 return -EBUSY;
6c223761
KB
6487 if (!arg)
6488 return -EINVAL;
6489 if (!capable(CAP_SYS_RAWIO))
6490 return -EPERM;
6491 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6492 return -EFAULT;
6493 if (iocommand.buf_size < 1 &&
6494 iocommand.Request.Type.Direction != XFER_NONE)
6495 return -EINVAL;
6496 if (iocommand.Request.CDBLen > sizeof(request.cdb))
6497 return -EINVAL;
6498 if (iocommand.Request.Type.Type != TYPE_CMD)
6499 return -EINVAL;
6500
6501 switch (iocommand.Request.Type.Direction) {
6502 case XFER_NONE:
6503 case XFER_WRITE:
6504 case XFER_READ:
41555d54 6505 case XFER_READ | XFER_WRITE:
6c223761
KB
6506 break;
6507 default:
6508 return -EINVAL;
6509 }
6510
6511 if (iocommand.buf_size > 0) {
6512 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6513 if (!kernel_buffer)
6514 return -ENOMEM;
6515 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6516 if (copy_from_user(kernel_buffer, iocommand.buf,
6517 iocommand.buf_size)) {
6518 rc = -EFAULT;
6519 goto out;
6520 }
6521 } else {
6522 memset(kernel_buffer, 0, iocommand.buf_size);
6523 }
6524 }
6525
6526 memset(&request, 0, sizeof(request));
6527
6528 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6529 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6530 PQI_REQUEST_HEADER_LENGTH;
6531 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6532 sizeof(request.lun_number));
6533 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6534 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6535
6536 switch (iocommand.Request.Type.Direction) {
6537 case XFER_NONE:
6538 request.data_direction = SOP_NO_DIRECTION_FLAG;
6539 break;
6540 case XFER_WRITE:
6541 request.data_direction = SOP_WRITE_FLAG;
6542 break;
6543 case XFER_READ:
6544 request.data_direction = SOP_READ_FLAG;
6545 break;
41555d54
KB
6546 case XFER_READ | XFER_WRITE:
6547 request.data_direction = SOP_BIDIRECTIONAL;
6548 break;
6c223761
KB
6549 }
6550
6551 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6552
6553 if (iocommand.buf_size > 0) {
6554 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6555
6556 rc = pqi_map_single(ctrl_info->pci_dev,
6557 &request.sg_descriptors[0], kernel_buffer,
6917a9cc 6558 iocommand.buf_size, DMA_BIDIRECTIONAL);
6c223761
KB
6559 if (rc)
6560 goto out;
6561
6562 iu_length += sizeof(request.sg_descriptors[0]);
6563 }
6564
6565 put_unaligned_le16(iu_length, &request.header.iu_length);
6566
21432010 6567 if (ctrl_info->raid_iu_timeout_supported)
6568 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6569
6c223761 6570 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
ae0c189d 6571 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6c223761
KB
6572
6573 if (iocommand.buf_size > 0)
6574 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6917a9cc 6575 DMA_BIDIRECTIONAL);
6c223761
KB
6576
6577 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6578
6579 if (rc == 0) {
6580 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6581 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6582 iocommand.error_info.CommandStatus =
6583 ciss_error_info.command_status;
6584 sense_data_length = ciss_error_info.sense_data_length;
6585 if (sense_data_length) {
6586 if (sense_data_length >
6587 sizeof(iocommand.error_info.SenseInfo))
6588 sense_data_length =
6589 sizeof(iocommand.error_info.SenseInfo);
6590 memcpy(iocommand.error_info.SenseInfo,
6591 pqi_error_info.data, sense_data_length);
6592 iocommand.error_info.SenseLen = sense_data_length;
6593 }
6594 }
6595
6596 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6597 rc = -EFAULT;
6598 goto out;
6599 }
6600
6601 if (rc == 0 && iocommand.buf_size > 0 &&
6602 (iocommand.Request.Type.Direction & XFER_READ)) {
6603 if (copy_to_user(iocommand.buf, kernel_buffer,
6604 iocommand.buf_size)) {
6605 rc = -EFAULT;
6606 }
6607 }
6608
6609out:
6610 kfree(kernel_buffer);
6611
6612 return rc;
6613}
6614
6f4e626f
NC
6615static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6616 void __user *arg)
6c223761
KB
6617{
6618 int rc;
6619 struct pqi_ctrl_info *ctrl_info;
6620
6621 ctrl_info = shost_to_hba(sdev->host);
6622
6623 switch (cmd) {
6624 case CCISS_DEREGDISK:
6625 case CCISS_REGNEWDISK:
6626 case CCISS_REGNEWD:
6627 rc = pqi_scan_scsi_devices(ctrl_info);
6628 break;
6629 case CCISS_GETPCIINFO:
6630 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6631 break;
6632 case CCISS_GETDRIVVER:
6633 rc = pqi_getdrivver_ioctl(arg);
6634 break;
6635 case CCISS_PASSTHRU:
6636 rc = pqi_passthru_ioctl(ctrl_info, arg);
6637 break;
6638 default:
6639 rc = -EINVAL;
6640 break;
6641 }
6642
6643 return rc;
6644}
6645
6d90615f 6646static ssize_t pqi_firmware_version_show(struct device *dev,
6c223761
KB
6647 struct device_attribute *attr, char *buffer)
6648{
6c223761
KB
6649 struct Scsi_Host *shost;
6650 struct pqi_ctrl_info *ctrl_info;
6651
6652 shost = class_to_shost(dev);
6653 ctrl_info = shost_to_hba(shost);
6654
a4256252 6655 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6d90615f
MB
6656}
6657
6658static ssize_t pqi_driver_version_show(struct device *dev,
6659 struct device_attribute *attr, char *buffer)
6660{
a4256252 6661 return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6d90615f 6662}
6c223761 6663
6d90615f
MB
6664static ssize_t pqi_serial_number_show(struct device *dev,
6665 struct device_attribute *attr, char *buffer)
6666{
6667 struct Scsi_Host *shost;
6668 struct pqi_ctrl_info *ctrl_info;
6669
6670 shost = class_to_shost(dev);
6671 ctrl_info = shost_to_hba(shost);
6672
a4256252 6673 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6d90615f
MB
6674}
6675
6676static ssize_t pqi_model_show(struct device *dev,
6677 struct device_attribute *attr, char *buffer)
6678{
6679 struct Scsi_Host *shost;
6680 struct pqi_ctrl_info *ctrl_info;
6681
6682 shost = class_to_shost(dev);
6683 ctrl_info = shost_to_hba(shost);
6684
a4256252 6685 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6d90615f
MB
6686}
6687
6688static ssize_t pqi_vendor_show(struct device *dev,
6689 struct device_attribute *attr, char *buffer)
6690{
6691 struct Scsi_Host *shost;
6692 struct pqi_ctrl_info *ctrl_info;
6693
6694 shost = class_to_shost(dev);
6695 ctrl_info = shost_to_hba(shost);
6696
a4256252 6697 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6c223761
KB
6698}
6699
6700static ssize_t pqi_host_rescan_store(struct device *dev,
6701 struct device_attribute *attr, const char *buffer, size_t count)
6702{
6703 struct Scsi_Host *shost = class_to_shost(dev);
6704
6705 pqi_scan_start(shost);
6706
6707 return count;
6708}
6709
3c50976f
KB
6710static ssize_t pqi_lockup_action_show(struct device *dev,
6711 struct device_attribute *attr, char *buffer)
6712{
6713 int count = 0;
6714 unsigned int i;
6715
6716 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6717 if (pqi_lockup_actions[i].action == pqi_lockup_action)
181aea89 6718 count += scnprintf(buffer + count, PAGE_SIZE - count,
3c50976f
KB
6719 "[%s] ", pqi_lockup_actions[i].name);
6720 else
181aea89 6721 count += scnprintf(buffer + count, PAGE_SIZE - count,
3c50976f
KB
6722 "%s ", pqi_lockup_actions[i].name);
6723 }
6724
181aea89 6725 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
3c50976f
KB
6726
6727 return count;
6728}
6729
6730static ssize_t pqi_lockup_action_store(struct device *dev,
6731 struct device_attribute *attr, const char *buffer, size_t count)
6732{
6733 unsigned int i;
6734 char *action_name;
6735 char action_name_buffer[32];
6736
6737 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6738 action_name = strstrip(action_name_buffer);
6739
6740 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6741 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6742 pqi_lockup_action = pqi_lockup_actions[i].action;
6743 return count;
6744 }
6745 }
6746
6747 return -EINVAL;
6748}
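/*
 * Resulting sysfs interface (host number N and the action names shown
 * are only the usual values from pqi_lockup_actions[]); the show
 * handler above brackets the currently selected action, the store
 * handler accepts any listed name:
 *
 *	# cat /sys/class/scsi_host/hostN/lockup_action
 *	[none] reboot panic
 *	# echo panic > /sys/class/scsi_host/hostN/lockup_action
 */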
6749
5be746d7
DB
6750static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
6751 struct device_attribute *attr, char *buffer)
6752{
6753 struct Scsi_Host *shost = class_to_shost(dev);
6754 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6755
6756 return scnprintf(buffer, 10, "%x\n",
6757 ctrl_info->enable_stream_detection);
6758}
6759
6760static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
6761 struct device_attribute *attr, const char *buffer, size_t count)
6762{
6763 struct Scsi_Host *shost = class_to_shost(dev);
6764 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6765 u8 set_stream_detection = 0;
6766
6767 if (kstrtou8(buffer, 0, &set_stream_detection))
6768 return -EINVAL;
6769
6770 if (set_stream_detection > 0)
6771 set_stream_detection = 1;
6772
6773 ctrl_info->enable_stream_detection = set_stream_detection;
6774
6775 return count;
6776}
6777
6702d2c4
DB
6778static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
6779 struct device_attribute *attr, char *buffer)
6780{
6781 struct Scsi_Host *shost = class_to_shost(dev);
6782 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6783
6784 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
6785}
6786
6787static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
6788 struct device_attribute *attr, const char *buffer, size_t count)
6789{
6790 struct Scsi_Host *shost = class_to_shost(dev);
6791 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6792 u8 set_r5_writes = 0;
6793
6794 if (kstrtou8(buffer, 0, &set_r5_writes))
6795 return -EINVAL;
6796
6797 if (set_r5_writes > 0)
6798 set_r5_writes = 1;
6799
6800 ctrl_info->enable_r5_writes = set_r5_writes;
6801
6802 return count;
6803}
6804
6805static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
6806 struct device_attribute *attr, char *buffer)
6807{
6808 struct Scsi_Host *shost = class_to_shost(dev);
6809 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6810
6811 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
6812}
6813
6814static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
6815 struct device_attribute *attr, const char *buffer, size_t count)
6816{
6817 struct Scsi_Host *shost = class_to_shost(dev);
6818 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6819 u8 set_r6_writes = 0;
6820
6821 if (kstrtou8(buffer, 0, &set_r6_writes))
6822 return -EINVAL;
6823
6824 if (set_r6_writes > 0)
6825 set_r6_writes = 1;
6826
6827 ctrl_info->enable_r6_writes = set_r6_writes;
6828
6829 return count;
6830}
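/*
 * Note on the three toggles above: enable_stream_detection,
 * enable_r5_writes and enable_r6_writes are plain 0/1 flags, and any
 * non-zero value written is normalized to 1.  The same R5/R6 flags are
 * also refreshed from the RAID 5/6 write-bypass firmware feature bits
 * in pqi_ctrl_update_feature_flags() below, so write bypass is only
 * effective when the firmware supports it as well.
 */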
6831
6d90615f
MB
6832static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6833static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6834static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6835static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6836static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
cbe0c7b1 6837static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
583891c9
KB
6838static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
6839 pqi_lockup_action_store);
5be746d7
DB
6840static DEVICE_ATTR(enable_stream_detection, 0644,
6841 pqi_host_enable_stream_detection_show,
6842 pqi_host_enable_stream_detection_store);
6702d2c4
DB
6843static DEVICE_ATTR(enable_r5_writes, 0644,
6844 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
6845static DEVICE_ATTR(enable_r6_writes, 0644,
6846 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
6c223761 6847
64fc9015
BVA
6848static struct attribute *pqi_shost_attrs[] = {
6849 &dev_attr_driver_version.attr,
6850 &dev_attr_firmware_version.attr,
6851 &dev_attr_model.attr,
6852 &dev_attr_serial_number.attr,
6853 &dev_attr_vendor.attr,
6854 &dev_attr_rescan.attr,
6855 &dev_attr_lockup_action.attr,
6856 &dev_attr_enable_stream_detection.attr,
6857 &dev_attr_enable_r5_writes.attr,
6858 &dev_attr_enable_r6_writes.attr,
6c223761
KB
6859 NULL
6860};
6861
64fc9015
BVA
6862ATTRIBUTE_GROUPS(pqi_shost);
6863
cd128244
DC
6864static ssize_t pqi_unique_id_show(struct device *dev,
6865 struct device_attribute *attr, char *buffer)
6866{
6867 struct pqi_ctrl_info *ctrl_info;
6868 struct scsi_device *sdev;
6869 struct pqi_scsi_dev *device;
6870 unsigned long flags;
5b083b30 6871 u8 unique_id[16];
cd128244
DC
6872
6873 sdev = to_scsi_device(dev);
6874 ctrl_info = shost_to_hba(sdev->host);
6875
6876 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6877
6878 device = sdev->hostdata;
6879 if (!device) {
8b664fef 6880 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
cd128244
DC
6881 return -ENODEV;
6882 }
5b083b30 6883
28ca6d87
MM
6884 if (device->is_physical_device)
6885 memcpy(unique_id, device->wwid, sizeof(device->wwid));
6886 else
5b083b30 6887 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
cd128244
DC
6888
6889 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6890
a4256252 6891 return scnprintf(buffer, PAGE_SIZE,
583891c9
KB
6892 "%02X%02X%02X%02X%02X%02X%02X%02X"
6893 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
5b083b30
KB
6894 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
6895 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
6896 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
6897 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
cd128244
DC
6898}
6899
6900static ssize_t pqi_lunid_show(struct device *dev,
6901 struct device_attribute *attr, char *buffer)
6902{
6903 struct pqi_ctrl_info *ctrl_info;
6904 struct scsi_device *sdev;
6905 struct pqi_scsi_dev *device;
6906 unsigned long flags;
6907 u8 lunid[8];
6908
6909 sdev = to_scsi_device(dev);
6910 ctrl_info = shost_to_hba(sdev->host);
6911
6912 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6913
6914 device = sdev->hostdata;
6915 if (!device) {
8b664fef 6916 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
cd128244
DC
6917 return -ENODEV;
6918 }
694c5d5b 6919
cd128244
DC
6920 memcpy(lunid, device->scsi3addr, sizeof(lunid));
6921
6922 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6923
a4256252 6924 return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
cd128244
DC
6925}
6926
694c5d5b
KB
6927#define MAX_PATHS 8
6928
cd128244
DC
6929static ssize_t pqi_path_info_show(struct device *dev,
6930 struct device_attribute *attr, char *buf)
6931{
6932 struct pqi_ctrl_info *ctrl_info;
6933 struct scsi_device *sdev;
6934 struct pqi_scsi_dev *device;
6935 unsigned long flags;
6936 int i;
6937 int output_len = 0;
6938 u8 box;
6939 u8 bay;
694c5d5b 6940 u8 path_map_index;
cd128244 6941 char *active;
694c5d5b 6942 u8 phys_connector[2];
cd128244
DC
6943
6944 sdev = to_scsi_device(dev);
6945 ctrl_info = shost_to_hba(sdev->host);
6946
6947 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6948
6949 device = sdev->hostdata;
6950 if (!device) {
8b664fef 6951 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
cd128244
DC
6952 return -ENODEV;
6953 }
6954
6955 bay = device->bay;
6956 for (i = 0; i < MAX_PATHS; i++) {
694c5d5b 6957 path_map_index = 1 << i;
cd128244
DC
6958 if (i == device->active_path_index)
6959 active = "Active";
6960 else if (device->path_map & path_map_index)
6961 active = "Inactive";
6962 else
6963 continue;
6964
6965 output_len += scnprintf(buf + output_len,
6966 PAGE_SIZE - output_len,
6967 "[%d:%d:%d:%d] %20.20s ",
6968 ctrl_info->scsi_host->host_no,
6969 device->bus, device->target,
6970 device->lun,
6971 scsi_device_type(device->devtype));
6972
6973 if (device->devtype == TYPE_RAID ||
6974 pqi_is_logical_device(device))
6975 goto end_buffer;
6976
6977 memcpy(&phys_connector, &device->phys_connector[i],
6978 sizeof(phys_connector));
6979 if (phys_connector[0] < '0')
6980 phys_connector[0] = '0';
6981 if (phys_connector[1] < '0')
6982 phys_connector[1] = '0';
6983
6984 output_len += scnprintf(buf + output_len,
6985 PAGE_SIZE - output_len,
6986 "PORT: %.2s ", phys_connector);
6987
6988 box = device->box[i];
6989 if (box != 0 && box != 0xFF)
6990 output_len += scnprintf(buf + output_len,
6991 PAGE_SIZE - output_len,
6992 "BOX: %hhu ", box);
6993
6994 if ((device->devtype == TYPE_DISK ||
6995 device->devtype == TYPE_ZBC) &&
6996 pqi_expose_device(device))
6997 output_len += scnprintf(buf + output_len,
6998 PAGE_SIZE - output_len,
6999 "BAY: %hhu ", bay);
7000
7001end_buffer:
7002 output_len += scnprintf(buf + output_len,
7003 PAGE_SIZE - output_len,
7004 "%s\n", active);
7005 }
7006
7007 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
694c5d5b 7008
cd128244
DC
7009 return output_len;
7010}
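/*
 * Example of a path_info line built by the format strings above
 * (values are illustrative):
 *
 *	[2:0:3:0]        Direct-Access PORT: 1C BOX: 1 BAY: 4 Active
 *
 * Logical devices skip the PORT/BOX/BAY columns and only report
 * whether each of the up-to-MAX_PATHS paths is Active or Inactive.
 */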
7011
6c223761
KB
7012static ssize_t pqi_sas_address_show(struct device *dev,
7013 struct device_attribute *attr, char *buffer)
7014{
7015 struct pqi_ctrl_info *ctrl_info;
7016 struct scsi_device *sdev;
7017 struct pqi_scsi_dev *device;
7018 unsigned long flags;
7019 u64 sas_address;
7020
7021 sdev = to_scsi_device(dev);
7022 ctrl_info = shost_to_hba(sdev->host);
7023
7024 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7025
7026 device = sdev->hostdata;
8b664fef
KB
7027 if (!device || !pqi_is_device_with_sas_address(device)) {
7028 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6c223761
KB
7029 return -ENODEV;
7030 }
694c5d5b 7031
6c223761
KB
7032 sas_address = device->sas_address;
7033
7034 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7035
a4256252 7036 return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
6c223761
KB
7037}
7038
7039static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
7040 struct device_attribute *attr, char *buffer)
7041{
7042 struct pqi_ctrl_info *ctrl_info;
7043 struct scsi_device *sdev;
7044 struct pqi_scsi_dev *device;
7045 unsigned long flags;
7046
7047 sdev = to_scsi_device(dev);
7048 ctrl_info = shost_to_hba(sdev->host);
7049
7050 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7051
7052 device = sdev->hostdata;
8b664fef
KB
7053 if (!device) {
7054 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7055 return -ENODEV;
7056 }
7057
588a63fe 7058 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
6c223761
KB
7059 buffer[1] = '\n';
7060 buffer[2] = '\0';
7061
7062 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7063
7064 return 2;
7065}
7066
a9f93392
KB
7067static ssize_t pqi_raid_level_show(struct device *dev,
7068 struct device_attribute *attr, char *buffer)
7069{
7070 struct pqi_ctrl_info *ctrl_info;
7071 struct scsi_device *sdev;
7072 struct pqi_scsi_dev *device;
7073 unsigned long flags;
7074 char *raid_level;
7075
7076 sdev = to_scsi_device(dev);
7077 ctrl_info = shost_to_hba(sdev->host);
7078
7079 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7080
7081 device = sdev->hostdata;
8b664fef
KB
7082 if (!device) {
7083 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7084 return -ENODEV;
7085 }
a9f93392
KB
7086
7087 if (pqi_is_logical_device(device))
7088 raid_level = pqi_raid_level_to_string(device->raid_level);
7089 else
7090 raid_level = "N/A";
7091
7092 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7093
a4256252 7094 return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
a9f93392
KB
7095}
7096
8b664fef
KB
7097static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
7098 struct device_attribute *attr, char *buffer)
7099{
7100 struct pqi_ctrl_info *ctrl_info;
7101 struct scsi_device *sdev;
7102 struct pqi_scsi_dev *device;
7103 unsigned long flags;
7104 int raid_bypass_cnt;
7105
7106 sdev = to_scsi_device(dev);
7107 ctrl_info = shost_to_hba(sdev->host);
7108
7109 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7110
7111 device = sdev->hostdata;
7112 if (!device) {
7113 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7114 return -ENODEV;
7115 }
7116
7117 raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
7118
7119 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7120
a4256252 7121 return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
8b664fef
KB
7122}
7123
cd128244
DC
7124static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
7125static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
7126static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
cbe0c7b1 7127static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
8b664fef 7128static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
a9f93392 7129static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
8b664fef 7130static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
6c223761 7131
64fc9015
BVA
7132static struct attribute *pqi_sdev_attrs[] = {
7133 &dev_attr_lunid.attr,
7134 &dev_attr_unique_id.attr,
7135 &dev_attr_path_info.attr,
7136 &dev_attr_sas_address.attr,
7137 &dev_attr_ssd_smart_path_enabled.attr,
7138 &dev_attr_raid_level.attr,
7139 &dev_attr_raid_bypass_cnt.attr,
6c223761
KB
7140 NULL
7141};
7142
64fc9015
BVA
7143ATTRIBUTE_GROUPS(pqi_sdev);
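/*
 * The per-device attributes above are exported via sdev_groups in the
 * host template below, so they appear in each SCSI device's sysfs
 * directory (e.g. /sys/class/scsi_device/<h:c:t:l>/device/raid_bypass_cnt).
 */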
7144
6c223761
KB
7145static struct scsi_host_template pqi_driver_template = {
7146 .module = THIS_MODULE,
7147 .name = DRIVER_NAME_SHORT,
7148 .proc_name = DRIVER_NAME_SHORT,
7149 .queuecommand = pqi_scsi_queue_command,
7150 .scan_start = pqi_scan_start,
7151 .scan_finished = pqi_scan_finished,
7152 .this_id = -1,
6c223761
KB
7153 .eh_device_reset_handler = pqi_eh_device_reset_handler,
7154 .ioctl = pqi_ioctl,
7155 .slave_alloc = pqi_slave_alloc,
ce143793 7156 .slave_configure = pqi_slave_configure,
52198226 7157 .map_queues = pqi_map_queues,
64fc9015
BVA
7158 .sdev_groups = pqi_sdev_groups,
7159 .shost_groups = pqi_shost_groups,
6c223761
KB
7160};
7161
7162static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7163{
7164 int rc;
7165 struct Scsi_Host *shost;
7166
7167 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7168 if (!shost) {
583891c9 7169 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
6c223761
KB
7170 return -ENOMEM;
7171 }
7172
7173 shost->io_port = 0;
7174 shost->n_io_port = 0;
7175 shost->this_id = -1;
7176 shost->max_channel = PQI_MAX_BUS;
7177 shost->max_cmd_len = MAX_COMMAND_SIZE;
7178 shost->max_lun = ~0;
7179 shost->max_id = ~0;
7180 shost->max_sectors = ctrl_info->max_sectors;
7181 shost->can_queue = ctrl_info->scsi_ml_can_queue;
7182 shost->cmd_per_lun = shost->can_queue;
7183 shost->sg_tablesize = ctrl_info->sg_tablesize;
7184 shost->transportt = pqi_sas_transport_template;
52198226 7185 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
6c223761
KB
7186 shost->unique_id = shost->irq;
7187 shost->nr_hw_queues = ctrl_info->num_queue_groups;
c6d3ee20 7188 shost->host_tagset = 1;
6c223761
KB
7189 shost->hostdata[0] = (unsigned long)ctrl_info;
7190
7191 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7192 if (rc) {
583891c9 7193 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
6c223761
KB
7194 goto free_host;
7195 }
7196
7197 rc = pqi_add_sas_host(shost, ctrl_info);
7198 if (rc) {
583891c9 7199 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
6c223761
KB
7200 goto remove_host;
7201 }
7202
7203 ctrl_info->scsi_host = shost;
7204
7205 return 0;
7206
7207remove_host:
7208 scsi_remove_host(shost);
7209free_host:
7210 scsi_host_put(shost);
7211
7212 return rc;
7213}
7214
7215static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7216{
7217 struct Scsi_Host *shost;
7218
7219 pqi_delete_sas_host(ctrl_info);
7220
7221 shost = ctrl_info->scsi_host;
7222 if (!shost)
7223 return;
7224
7225 scsi_remove_host(shost);
7226 scsi_host_put(shost);
7227}
7228
336b6819
KB
7229static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7230{
7231 int rc = 0;
7232 struct pqi_device_registers __iomem *pqi_registers;
7233 unsigned long timeout;
7234 unsigned int timeout_msecs;
7235 union pqi_reset_register reset_reg;
6c223761 7236
336b6819
KB
7237 pqi_registers = ctrl_info->pqi_registers;
7238 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7239 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7240
7241 while (1) {
7242 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7243 reset_reg.all_bits = readl(&pqi_registers->device_reset);
7244 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7245 break;
7246 pqi_check_ctrl_health(ctrl_info);
7247 if (pqi_ctrl_offline(ctrl_info)) {
7248 rc = -ENXIO;
7249 break;
7250 }
7251 if (time_after(jiffies, timeout)) {
7252 rc = -ETIMEDOUT;
7253 break;
7254 }
7255 }
7256
7257 return rc;
7258}
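/*
 * The loop above polls the PQI device-reset register every
 * PQI_RESET_POLL_INTERVAL_MSECS until the reset action reads back as
 * completed, and gives up either when the controller goes offline or
 * when the controller-advertised max_reset_timeout expires (apparently
 * expressed in 100 ms units, hence the "* 100" conversion above).
 */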
6c223761
KB
7259
7260static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7261{
7262 int rc;
336b6819
KB
7263 union pqi_reset_register reset_reg;
7264
7265 if (ctrl_info->pqi_reset_quiesce_supported) {
7266 rc = sis_pqi_reset_quiesce(ctrl_info);
7267 if (rc) {
7268 dev_err(&ctrl_info->pci_dev->dev,
583891c9 7269 "PQI reset failed during quiesce with error %d\n", rc);
336b6819
KB
7270 return rc;
7271 }
7272 }
6c223761 7273
336b6819
KB
7274 reset_reg.all_bits = 0;
7275 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7276 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
6c223761 7277
336b6819 7278 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
6c223761 7279
336b6819 7280 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
6c223761
KB
7281 if (rc)
7282 dev_err(&ctrl_info->pci_dev->dev,
336b6819 7283 "PQI reset failed with error %d\n", rc);
6c223761
KB
7284
7285 return rc;
7286}
7287
6d90615f
MB
7288static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7289{
7290 int rc;
7291 struct bmic_sense_subsystem_info *sense_info;
7292
7293 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7294 if (!sense_info)
7295 return -ENOMEM;
7296
7297 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7298 if (rc)
7299 goto out;
7300
7301 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7302 sizeof(sense_info->ctrl_serial_number));
7303 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7304
7305out:
7306 kfree(sense_info);
7307
7308 return rc;
7309}
7310
7311static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
6c223761
KB
7312{
7313 int rc;
7314 struct bmic_identify_controller *identify;
7315
7316 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7317 if (!identify)
7318 return -ENOMEM;
7319
7320 rc = pqi_identify_controller(ctrl_info, identify);
7321 if (rc)
7322 goto out;
7323
598bef8d
KB
7324 if (get_unaligned_le32(&identify->extra_controller_flags) &
7325 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7326 memcpy(ctrl_info->firmware_version,
7327 identify->firmware_version_long,
7328 sizeof(identify->firmware_version_long));
7329 } else {
7330 memcpy(ctrl_info->firmware_version,
7331 identify->firmware_version_short,
7332 sizeof(identify->firmware_version_short));
7333 ctrl_info->firmware_version
7334 [sizeof(identify->firmware_version_short)] = '\0';
7335 snprintf(ctrl_info->firmware_version +
7336 strlen(ctrl_info->firmware_version),
7337 sizeof(ctrl_info->firmware_version) -
7338 sizeof(identify->firmware_version_short),
7339 "-%u",
7340 get_unaligned_le16(&identify->firmware_build_number));
7341 }
6c223761 7342
6d90615f
MB
7343 memcpy(ctrl_info->model, identify->product_id,
7344 sizeof(identify->product_id));
7345 ctrl_info->model[sizeof(identify->product_id)] = '\0';
7346
7347 memcpy(ctrl_info->vendor, identify->vendor_id,
7348 sizeof(identify->vendor_id));
7349 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7350
6c223761
KB
7351out:
7352 kfree(identify);
7353
7354 return rc;
7355}
7356
b212c251
KB
7357struct pqi_config_table_section_info {
7358 struct pqi_ctrl_info *ctrl_info;
7359 void *section;
7360 u32 section_offset;
7361 void __iomem *section_iomem_addr;
7362};
7363
7364static inline bool pqi_is_firmware_feature_supported(
7365 struct pqi_config_table_firmware_features *firmware_features,
7366 unsigned int bit_position)
98f87667 7367{
b212c251 7368 unsigned int byte_index;
98f87667 7369
b212c251 7370 byte_index = bit_position / BITS_PER_BYTE;
98f87667 7371
b212c251
KB
7372 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7373 return false;
98f87667 7374
b212c251
KB
7375 return firmware_features->features_supported[byte_index] &
7376 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7377}
7378
7379static inline bool pqi_is_firmware_feature_enabled(
7380 struct pqi_config_table_firmware_features *firmware_features,
7381 void __iomem *firmware_features_iomem_addr,
7382 unsigned int bit_position)
7383{
7384 unsigned int byte_index;
7385 u8 __iomem *features_enabled_iomem_addr;
7386
7387 byte_index = (bit_position / BITS_PER_BYTE) +
7388 (le16_to_cpu(firmware_features->num_elements) * 2);
7389
7390 features_enabled_iomem_addr = firmware_features_iomem_addr +
7391 offsetof(struct pqi_config_table_firmware_features,
7392 features_supported) + byte_index;
7393
7394 return *((__force u8 *)features_enabled_iomem_addr) &
7395 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7396}
7397
7398static inline void pqi_request_firmware_feature(
7399 struct pqi_config_table_firmware_features *firmware_features,
7400 unsigned int bit_position)
7401{
7402 unsigned int byte_index;
7403
7404 byte_index = (bit_position / BITS_PER_BYTE) +
7405 le16_to_cpu(firmware_features->num_elements);
7406
7407 firmware_features->features_supported[byte_index] |=
7408 (1 << (bit_position % BITS_PER_BYTE));
7409}
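/*
 * Byte layout assumed by the three helpers above, relative to
 * features_supported[] with n == num_elements:
 *
 *	[0,   n)	feature bits supported by the firmware
 *	[n,  2n)	feature bits requested by the host
 *	[2n, 3n)	feature bits currently enabled
 *	then two __le16 "maximum known feature" words (the firmware's,
 *	followed by the host's, which is written in
 *	pqi_enable_firmware_features() below)
 *
 * Each feature occupies one bit: byte = bit / 8, mask = 1 << (bit % 8).
 */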
7410
7411static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7412 u16 first_section, u16 last_section)
7413{
7414 struct pqi_vendor_general_request request;
7415
7416 memset(&request, 0, sizeof(request));
7417
7418 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7419 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7420 &request.header.iu_length);
7421 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7422 &request.function_code);
7423 put_unaligned_le16(first_section,
7424 &request.data.config_table_update.first_section);
7425 put_unaligned_le16(last_section,
7426 &request.data.config_table_update.last_section);
7427
ae0c189d 7428 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
b212c251
KB
7429}
7430
7431static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7432 struct pqi_config_table_firmware_features *firmware_features,
7433 void __iomem *firmware_features_iomem_addr)
7434{
7435 void *features_requested;
7436 void __iomem *features_requested_iomem_addr;
f6cc2a77 7437 void __iomem *host_max_known_feature_iomem_addr;
b212c251
KB
7438
7439 features_requested = firmware_features->features_supported +
7440 le16_to_cpu(firmware_features->num_elements);
7441
7442 features_requested_iomem_addr = firmware_features_iomem_addr +
7443 (features_requested - (void *)firmware_features);
7444
7445 memcpy_toio(features_requested_iomem_addr, features_requested,
7446 le16_to_cpu(firmware_features->num_elements));
7447
f6cc2a77
KB
7448 if (pqi_is_firmware_feature_supported(firmware_features,
7449 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7450 host_max_known_feature_iomem_addr =
7451 features_requested_iomem_addr +
7452 (le16_to_cpu(firmware_features->num_elements) * 2) +
7453 sizeof(__le16);
7454 writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
7455 host_max_known_feature_iomem_addr);
7456 }
7457
b212c251
KB
7458 return pqi_config_table_update(ctrl_info,
7459 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7460 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7461}
7462
7463struct pqi_firmware_feature {
7464 char *feature_name;
7465 unsigned int feature_bit;
7466 bool supported;
7467 bool enabled;
7468 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7469 struct pqi_firmware_feature *firmware_feature);
7470};
7471
7472static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7473 struct pqi_firmware_feature *firmware_feature)
7474{
7475 if (!firmware_feature->supported) {
7476 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7477 firmware_feature->feature_name);
7478 return;
7479 }
7480
7481 if (firmware_feature->enabled) {
7482 dev_info(&ctrl_info->pci_dev->dev,
7483 "%s enabled\n", firmware_feature->feature_name);
7484 return;
7485 }
7486
7487 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7488 firmware_feature->feature_name);
7489}
7490
21432010 7491static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7492 struct pqi_firmware_feature *firmware_feature)
7493{
7494 switch (firmware_feature->feature_bit) {
f6cc2a77
KB
7495 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7496 ctrl_info->enable_r1_writes = firmware_feature->enabled;
7497 break;
7498 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7499 ctrl_info->enable_r5_writes = firmware_feature->enabled;
7500 break;
7501 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7502 ctrl_info->enable_r6_writes = firmware_feature->enabled;
7503 break;
21432010 7504 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7505 ctrl_info->soft_reset_handshake_supported =
7506 firmware_feature->enabled &&
7507 pqi_read_soft_reset_status(ctrl_info);
21432010 7508 break;
7509 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
583891c9 7510 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
21432010 7511 break;
c2922f17 7512 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
583891c9 7513 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
c2922f17 7514 break;
7a84a821
KB
7515 case PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN:
7516 ctrl_info->unique_wwid_in_report_phys_lun_supported =
7517 firmware_feature->enabled;
7518 break;
5d1f03e6
MB
7519 case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
7520 ctrl_info->firmware_triage_supported = firmware_feature->enabled;
9ee5d6e9 7521 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
5d1f03e6 7522 break;
28ca6d87
MM
7523 case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
7524 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
7525 break;
21432010 7526 }
7527
7528 pqi_firmware_feature_status(ctrl_info, firmware_feature);
7529}
7530
b212c251
KB
7531static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7532 struct pqi_firmware_feature *firmware_feature)
7533{
7534 if (firmware_feature->feature_status)
7535 firmware_feature->feature_status(ctrl_info, firmware_feature);
7536}
7537
7538static DEFINE_MUTEX(pqi_firmware_features_mutex);
7539
7540static struct pqi_firmware_feature pqi_firmware_features[] = {
7541 {
7542 .feature_name = "Online Firmware Activation",
7543 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7544 .feature_status = pqi_firmware_feature_status,
7545 },
7546 {
7547 .feature_name = "Serial Management Protocol",
7548 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7549 .feature_status = pqi_firmware_feature_status,
7550 },
f6cc2a77
KB
7551 {
7552 .feature_name = "Maximum Known Feature",
7553 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7554 .feature_status = pqi_firmware_feature_status,
7555 },
7556 {
7557 .feature_name = "RAID 0 Read Bypass",
7558 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
7559 .feature_status = pqi_firmware_feature_status,
7560 },
7561 {
7562 .feature_name = "RAID 1 Read Bypass",
7563 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7564 .feature_status = pqi_firmware_feature_status,
7565 },
7566 {
7567 .feature_name = "RAID 5 Read Bypass",
7568 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
7569 .feature_status = pqi_firmware_feature_status,
7570 },
f6cc2a77
KB
7571 {
7572 .feature_name = "RAID 6 Read Bypass",
7573 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7574 .feature_status = pqi_firmware_feature_status,
7575 },
7576 {
7577 .feature_name = "RAID 0 Write Bypass",
7578 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7579 .feature_status = pqi_firmware_feature_status,
7580 },
7581 {
7582 .feature_name = "RAID 1 Write Bypass",
7583 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
7584 .feature_status = pqi_ctrl_update_feature_flags,
7585 },
7586 {
7587 .feature_name = "RAID 5 Write Bypass",
7588 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
7589 .feature_status = pqi_ctrl_update_feature_flags,
7590 },
7591 {
7592 .feature_name = "RAID 6 Write Bypass",
7593 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
7594 .feature_status = pqi_ctrl_update_feature_flags,
7595 },
4fd22c13
MR
7596 {
7597 .feature_name = "New Soft Reset Handshake",
7598 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
21432010 7599 .feature_status = pqi_ctrl_update_feature_flags,
7600 },
7601 {
7602 .feature_name = "RAID IU Timeout",
7603 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
7604 .feature_status = pqi_ctrl_update_feature_flags,
4fd22c13 7605 },
c2922f17
MB
7606 {
7607 .feature_name = "TMF IU Timeout",
7608 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
7609 .feature_status = pqi_ctrl_update_feature_flags,
7610 },
f6cc2a77
KB
7611 {
7612 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
7613 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
7614 .feature_status = pqi_firmware_feature_status,
7615 },
7a84a821
KB
7616 {
7617 .feature_name = "Unique WWID in Report Physical LUN",
7618 .feature_bit = PQI_FIRMWARE_FEATURE_UNIQUE_WWID_IN_REPORT_PHYS_LUN,
7619 .feature_status = pqi_ctrl_update_feature_flags,
7620 },
5d1f03e6
MB
7621 {
7622 .feature_name = "Firmware Triage",
7623 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
7624 .feature_status = pqi_ctrl_update_feature_flags,
7625 },
28ca6d87
MM
7626 {
7627 .feature_name = "RPL Extended Formats 4 and 5",
7628 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
7629 .feature_status = pqi_ctrl_update_feature_flags,
7630 },
b212c251
KB
7631};
7632
7633static void pqi_process_firmware_features(
7634 struct pqi_config_table_section_info *section_info)
7635{
7636 int rc;
7637 struct pqi_ctrl_info *ctrl_info;
7638 struct pqi_config_table_firmware_features *firmware_features;
7639 void __iomem *firmware_features_iomem_addr;
7640 unsigned int i;
7641 unsigned int num_features_supported;
7642
7643 ctrl_info = section_info->ctrl_info;
7644 firmware_features = section_info->section;
7645 firmware_features_iomem_addr = section_info->section_iomem_addr;
7646
7647 for (i = 0, num_features_supported = 0;
7648 i < ARRAY_SIZE(pqi_firmware_features); i++) {
7649 if (pqi_is_firmware_feature_supported(firmware_features,
7650 pqi_firmware_features[i].feature_bit)) {
7651 pqi_firmware_features[i].supported = true;
7652 num_features_supported++;
7653 } else {
7654 pqi_firmware_feature_update(ctrl_info,
7655 &pqi_firmware_features[i]);
7656 }
7657 }
7658
7659 if (num_features_supported == 0)
7660 return;
7661
7662 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7663 if (!pqi_firmware_features[i].supported)
7664 continue;
7665 pqi_request_firmware_feature(firmware_features,
7666 pqi_firmware_features[i].feature_bit);
7667 }
7668
7669 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
7670 firmware_features_iomem_addr);
7671 if (rc) {
7672 dev_err(&ctrl_info->pci_dev->dev,
7673 "failed to enable firmware features in PQI configuration table\n");
7674 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7675 if (!pqi_firmware_features[i].supported)
7676 continue;
7677 pqi_firmware_feature_update(ctrl_info,
7678 &pqi_firmware_features[i]);
7679 }
7680 return;
7681 }
7682
7683 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7684 if (!pqi_firmware_features[i].supported)
7685 continue;
7686 if (pqi_is_firmware_feature_enabled(firmware_features,
7687 firmware_features_iomem_addr,
4fd22c13 7688 pqi_firmware_features[i].feature_bit)) {
583891c9 7689 pqi_firmware_features[i].enabled = true;
4fd22c13 7690 }
b212c251
KB
7691 pqi_firmware_feature_update(ctrl_info,
7692 &pqi_firmware_features[i]);
7693 }
7694}
7695
7696static void pqi_init_firmware_features(void)
7697{
7698 unsigned int i;
7699
7700 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7701 pqi_firmware_features[i].supported = false;
7702 pqi_firmware_features[i].enabled = false;
7703 }
7704}
7705
7706static void pqi_process_firmware_features_section(
7707 struct pqi_config_table_section_info *section_info)
7708{
7709 mutex_lock(&pqi_firmware_features_mutex);
7710 pqi_init_firmware_features();
7711 pqi_process_firmware_features(section_info);
7712 mutex_unlock(&pqi_firmware_features_mutex);
7713}
7714
f6cc2a77
KB
7715/*
7716 * Reset all controller settings that can be initialized during the processing
7717 * of the PQI Configuration Table.
7718 */
7719
4ccc354b
KB
7720static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
7721{
7722 ctrl_info->heartbeat_counter = NULL;
7723 ctrl_info->soft_reset_status = NULL;
7724 ctrl_info->soft_reset_handshake_supported = false;
7725 ctrl_info->enable_r1_writes = false;
7726 ctrl_info->enable_r5_writes = false;
7727 ctrl_info->enable_r6_writes = false;
7728 ctrl_info->raid_iu_timeout_supported = false;
7729 ctrl_info->tmf_iu_timeout_supported = false;
7730 ctrl_info->unique_wwid_in_report_phys_lun_supported = false;
5d1f03e6 7731 ctrl_info->firmware_triage_supported = false;
28ca6d87 7732 ctrl_info->rpl_extended_format_4_5_supported = false;
4ccc354b
KB
7733}
7734
98f87667
KB
7735static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
7736{
7737 u32 table_length;
7738 u32 section_offset;
f6cc2a77 7739 bool firmware_feature_section_present;
98f87667
KB
7740 void __iomem *table_iomem_addr;
7741 struct pqi_config_table *config_table;
7742 struct pqi_config_table_section_header *section;
b212c251 7743 struct pqi_config_table_section_info section_info;
f6cc2a77 7744 struct pqi_config_table_section_info feature_section_info;
98f87667
KB
7745
7746 table_length = ctrl_info->config_table_length;
b212c251
KB
7747 if (table_length == 0)
7748 return 0;
98f87667
KB
7749
7750 config_table = kmalloc(table_length, GFP_KERNEL);
7751 if (!config_table) {
7752 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 7753 "failed to allocate memory for PQI configuration table\n");
98f87667
KB
7754 return -ENOMEM;
7755 }
7756
7757 /*
7758 * Copy the config table contents from I/O memory space into the
7759 * temporary buffer.
7760 */
583891c9 7761 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
98f87667
KB
7762 memcpy_fromio(config_table, table_iomem_addr, table_length);
7763
f6cc2a77 7764 firmware_feature_section_present = false;
b212c251 7765 section_info.ctrl_info = ctrl_info;
583891c9 7766 section_offset = get_unaligned_le32(&config_table->first_section_offset);
98f87667
KB
7767
7768 while (section_offset) {
7769 section = (void *)config_table + section_offset;
7770
b212c251
KB
7771 section_info.section = section;
7772 section_info.section_offset = section_offset;
583891c9 7773 section_info.section_iomem_addr = table_iomem_addr + section_offset;
b212c251 7774
98f87667 7775 switch (get_unaligned_le16(&section->section_id)) {
b212c251 7776 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
f6cc2a77
KB
7777 firmware_feature_section_present = true;
7778 feature_section_info = section_info;
b212c251 7779 break;
98f87667 7780 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
5a259e32
KB
7781 if (pqi_disable_heartbeat)
7782 dev_warn(&ctrl_info->pci_dev->dev,
7783 "heartbeat disabled by module parameter\n");
7784 else
7785 ctrl_info->heartbeat_counter =
7786 table_iomem_addr +
7787 section_offset +
583891c9 7788 offsetof(struct pqi_config_table_heartbeat,
5a259e32 7789 heartbeat_counter);
98f87667 7790 break;
4fd22c13
MR
7791 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
7792 ctrl_info->soft_reset_status =
7793 table_iomem_addr +
7794 section_offset +
7795 offsetof(struct pqi_config_table_soft_reset,
583891c9 7796 soft_reset_status);
4fd22c13 7797 break;
98f87667
KB
7798 }
7799
583891c9 7800 section_offset = get_unaligned_le16(&section->next_section_offset);
98f87667
KB
7801 }
7802
f6cc2a77
KB
7803 /*
7804 * We process the firmware feature section after all other sections
7805 * have been processed so that the feature bit callbacks can take
7806 * into account the settings configured by other sections.
7807 */
7808 if (firmware_feature_section_present)
7809 pqi_process_firmware_features_section(&feature_section_info);
7810
98f87667
KB
7811 kfree(config_table);
7812
7813 return 0;
7814}
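/*
 * Shape of the PQI configuration table walked above: a header whose
 * first_section_offset points at a chain of sections, each carrying a
 * section_id and a next_section_offset (0 terminates the chain).  The
 * table is snapshotted into a kmalloc'ed buffer for parsing, while the
 * heartbeat counter, the soft-reset status and the firmware-features
 * write-back keep pointers into the live I/O-mapped copy.
 */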
7815
162d7753
KB
7816/* Switches the controller from PQI mode back into SIS mode. */
7817
7818static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
7819{
7820 int rc;
7821
061ef06a 7822 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
162d7753
KB
7823 rc = pqi_reset(ctrl_info);
7824 if (rc)
7825 return rc;
4f078e24
KB
7826 rc = sis_reenable_sis_mode(ctrl_info);
7827 if (rc) {
7828 dev_err(&ctrl_info->pci_dev->dev,
7829 "re-enabling SIS mode failed with error %d\n", rc);
7830 return rc;
7831 }
162d7753
KB
7832 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7833
7834 return 0;
7835}
7836
7837/*
7838 * If the controller isn't already in SIS mode, this function forces it into
7839 * SIS mode.
7840 */
7841
7842static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
ff6abb73
KB
7843{
7844 if (!sis_is_firmware_running(ctrl_info))
7845 return -ENXIO;
7846
162d7753
KB
7847 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
7848 return 0;
7849
7850 if (sis_is_kernel_up(ctrl_info)) {
7851 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
7852 return 0;
ff6abb73
KB
7853 }
7854
162d7753 7855 return pqi_revert_to_sis_mode(ctrl_info);
ff6abb73
KB
7856}
7857
6c223761
KB
7858static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
7859{
7860 int rc;
2708a256 7861 u32 product_id;
6c223761 7862
0530736e 7863 if (reset_devices) {
9ee5d6e9
MR
7864 if (pqi_is_fw_triage_supported(ctrl_info)) {
7865 rc = sis_wait_for_fw_triage_completion(ctrl_info);
7866 if (rc)
7867 return rc;
7868 }
0530736e 7869 sis_soft_reset(ctrl_info);
3268b8a8 7870 msleep(PQI_POST_RESET_DELAY_SECS * PQI_HZ);
0530736e
KB
7871 } else {
7872 rc = pqi_force_sis_mode(ctrl_info);
7873 if (rc)
7874 return rc;
7875 }
6c223761
KB
7876
7877 /*
7878 * Wait until the controller is ready to start accepting SIS
7879 * commands.
7880 */
7881 rc = sis_wait_for_ctrl_ready(ctrl_info);
8845fdfa 7882 if (rc)
6c223761 7883 return rc;
6c223761
KB
7884
7885 /*
7886 * Get the controller properties. This allows us to determine
7887 * whether or not it supports PQI mode.
7888 */
7889 rc = sis_get_ctrl_properties(ctrl_info);
7890 if (rc) {
7891 dev_err(&ctrl_info->pci_dev->dev,
7892 "error obtaining controller properties\n");
7893 return rc;
7894 }
7895
7896 rc = sis_get_pqi_capabilities(ctrl_info);
7897 if (rc) {
7898 dev_err(&ctrl_info->pci_dev->dev,
7899 "error obtaining controller capabilities\n");
7900 return rc;
7901 }
7902
2708a256
KB
7903 product_id = sis_get_product_id(ctrl_info);
7904 ctrl_info->product_id = (u8)product_id;
7905 ctrl_info->product_revision = (u8)(product_id >> 8);
7906
d727a776
KB
7907 if (reset_devices) {
7908 if (ctrl_info->max_outstanding_requests >
7909 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
583891c9 7910 ctrl_info->max_outstanding_requests =
d727a776
KB
7911 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
7912 } else {
7913 if (ctrl_info->max_outstanding_requests >
7914 PQI_MAX_OUTSTANDING_REQUESTS)
583891c9 7915 ctrl_info->max_outstanding_requests =
7916 PQI_MAX_OUTSTANDING_REQUESTS;
7917 }
6c223761
KB
7918
7919 pqi_calculate_io_resources(ctrl_info);
7920
7921 rc = pqi_alloc_error_buffer(ctrl_info);
7922 if (rc) {
7923 dev_err(&ctrl_info->pci_dev->dev,
7924 "failed to allocate PQI error buffer\n");
7925 return rc;
7926 }
7927
7928 /*
7929 * If the function we are about to call succeeds, the
7930 * controller will transition from legacy SIS mode
7931 * into PQI mode.
7932 */
7933 rc = sis_init_base_struct_addr(ctrl_info);
7934 if (rc) {
7935 dev_err(&ctrl_info->pci_dev->dev,
7936 "error initializing PQI mode\n");
7937 return rc;
7938 }
7939
7940 /* Wait for the controller to complete the SIS -> PQI transition. */
7941 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
7942 if (rc) {
7943 dev_err(&ctrl_info->pci_dev->dev,
7944 "transition to PQI mode failed\n");
7945 return rc;
7946 }
7947
7948 /* From here on, we are running in PQI mode. */
7949 ctrl_info->pqi_mode_enabled = true;
ff6abb73 7950 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6c223761
KB
7951
7952 rc = pqi_alloc_admin_queues(ctrl_info);
7953 if (rc) {
7954 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 7955 "failed to allocate admin queues\n");
6c223761
KB
7956 return rc;
7957 }
7958
7959 rc = pqi_create_admin_queues(ctrl_info);
7960 if (rc) {
7961 dev_err(&ctrl_info->pci_dev->dev,
7962 "error creating admin queues\n");
7963 return rc;
7964 }
7965
7966 rc = pqi_report_device_capability(ctrl_info);
7967 if (rc) {
7968 dev_err(&ctrl_info->pci_dev->dev,
7969 "obtaining device capability failed\n");
7970 return rc;
7971 }
7972
7973 rc = pqi_validate_device_capability(ctrl_info);
7974 if (rc)
7975 return rc;
7976
7977 pqi_calculate_queue_resources(ctrl_info);
7978
7979 rc = pqi_enable_msix_interrupts(ctrl_info);
7980 if (rc)
7981 return rc;
7982
7983 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
7984 ctrl_info->max_msix_vectors =
7985 ctrl_info->num_msix_vectors_enabled;
7986 pqi_calculate_queue_resources(ctrl_info);
7987 }
7988
7989 rc = pqi_alloc_io_resources(ctrl_info);
7990 if (rc)
7991 return rc;
7992
7993 rc = pqi_alloc_operational_queues(ctrl_info);
d87d5474
KB
7994 if (rc) {
7995 dev_err(&ctrl_info->pci_dev->dev,
7996 "failed to allocate operational queues\n");
6c223761 7997 return rc;
d87d5474 7998 }
6c223761
KB
7999
8000 pqi_init_operational_queues(ctrl_info);
8001
0777a3fb 8002 rc = pqi_create_queues(ctrl_info);
6c223761
KB
8003 if (rc)
8004 return rc;
8005
0777a3fb 8006 rc = pqi_request_irqs(ctrl_info);
6c223761
KB
8007 if (rc)
8008 return rc;
8009
061ef06a
KB
8010 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8011
8012 ctrl_info->controller_online = true;
b212c251
KB
8013
8014 rc = pqi_process_config_table(ctrl_info);
8015 if (rc)
8016 return rc;
8017
061ef06a 8018 pqi_start_heartbeat_timer(ctrl_info);
6c223761 8019
f6cc2a77
KB
8020 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8021 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8022 if (rc) { /* Supported features not returned correctly. */
8023 dev_err(&ctrl_info->pci_dev->dev,
8024 "error obtaining advanced RAID bypass configuration\n");
8025 return rc;
8026 }
8027 ctrl_info->ciss_report_log_flags |=
8028 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8029 }
8030
6a50d6ad 8031 rc = pqi_enable_events(ctrl_info);
6c223761
KB
8032 if (rc) {
8033 dev_err(&ctrl_info->pci_dev->dev,
6a50d6ad 8034 "error enabling events\n");
6c223761
KB
8035 return rc;
8036 }
8037
6c223761
KB
8038 /* Register with the SCSI subsystem. */
8039 rc = pqi_register_scsi(ctrl_info);
8040 if (rc)
8041 return rc;
8042
6d90615f
MB
8043 rc = pqi_get_ctrl_product_details(ctrl_info);
8044 if (rc) {
8045 dev_err(&ctrl_info->pci_dev->dev,
8046 "error obtaining product details\n");
8047 return rc;
8048 }
8049
8050 rc = pqi_get_ctrl_serial_number(ctrl_info);
6c223761
KB
8051 if (rc) {
8052 dev_err(&ctrl_info->pci_dev->dev,
6d90615f 8053 "error obtaining ctrl serial number\n");
6c223761
KB
8054 return rc;
8055 }
8056
171c2865
DC
8057 rc = pqi_set_diag_rescan(ctrl_info);
8058 if (rc) {
8059 dev_err(&ctrl_info->pci_dev->dev,
8060 "error enabling multi-lun rescan\n");
8061 return rc;
8062 }
8063
6c223761
KB
8064 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8065 if (rc) {
8066 dev_err(&ctrl_info->pci_dev->dev,
8067 "error updating host wellness\n");
8068 return rc;
8069 }
8070
8071 pqi_schedule_update_time_worker(ctrl_info);
8072
8073 pqi_scan_scsi_devices(ctrl_info);
8074
8075 return 0;
8076}
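/*
 * Bring-up order implemented above: ensure SIS mode, size the I/O
 * resources from the SIS capabilities, switch the controller into PQI
 * mode, create the admin and operational queues, enable MSI-X and
 * events, then register with the SCSI midlayer and start the first
 * device scan.  pqi_ctrl_init_resume() below replays the same sequence
 * without re-allocating queue memory, for the resume/restart paths.
 */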
8077
061ef06a
KB
8078static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
8079{
8080 unsigned int i;
8081 struct pqi_admin_queues *admin_queues;
8082 struct pqi_event_queue *event_queue;
8083
8084 admin_queues = &ctrl_info->admin_queues;
8085 admin_queues->iq_pi_copy = 0;
8086 admin_queues->oq_ci_copy = 0;
dac12fbc 8087 writel(0, admin_queues->oq_pi);
061ef06a
KB
8088
8089 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
8090 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
8091 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
8092 ctrl_info->queue_groups[i].oq_ci_copy = 0;
8093
dac12fbc
KB
8094 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
8095 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
8096 writel(0, ctrl_info->queue_groups[i].oq_pi);
061ef06a
KB
8097 }
8098
8099 event_queue = &ctrl_info->event_queue;
dac12fbc 8100 writel(0, event_queue->oq_pi);
061ef06a
KB
8101 event_queue->oq_ci_copy = 0;
8102}
8103
8104static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
8105{
8106 int rc;
8107
8108 rc = pqi_force_sis_mode(ctrl_info);
8109 if (rc)
8110 return rc;
8111
8112 /*
8113 * Wait until the controller is ready to start accepting SIS
8114 * commands.
8115 */
8116 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
8117 if (rc)
8118 return rc;
8119
4fd22c13
MR
8120 /*
8121 * Get the controller properties. This allows us to determine
8122 * whether or not it supports PQI mode.
8123 */
8124 rc = sis_get_ctrl_properties(ctrl_info);
8125 if (rc) {
8126 dev_err(&ctrl_info->pci_dev->dev,
8127 "error obtaining controller properties\n");
8128 return rc;
8129 }
8130
8131 rc = sis_get_pqi_capabilities(ctrl_info);
8132 if (rc) {
8133 dev_err(&ctrl_info->pci_dev->dev,
8134 "error obtaining controller capabilities\n");
8135 return rc;
8136 }
8137
061ef06a
KB
8138 /*
8139 * If the function we are about to call succeeds, the
8140 * controller will transition from legacy SIS mode
8141 * into PQI mode.
8142 */
8143 rc = sis_init_base_struct_addr(ctrl_info);
8144 if (rc) {
8145 dev_err(&ctrl_info->pci_dev->dev,
8146 "error initializing PQI mode\n");
8147 return rc;
8148 }
8149
8150 /* Wait for the controller to complete the SIS -> PQI transition. */
8151 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8152 if (rc) {
8153 dev_err(&ctrl_info->pci_dev->dev,
8154 "transition to PQI mode failed\n");
8155 return rc;
8156 }
8157
8158 /* From here on, we are running in PQI mode. */
8159 ctrl_info->pqi_mode_enabled = true;
8160 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8161
8162 pqi_reinit_queues(ctrl_info);
8163
8164 rc = pqi_create_admin_queues(ctrl_info);
8165 if (rc) {
8166 dev_err(&ctrl_info->pci_dev->dev,
8167 "error creating admin queues\n");
8168 return rc;
8169 }
8170
8171 rc = pqi_create_queues(ctrl_info);
8172 if (rc)
8173 return rc;
8174
8175 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8176
8177 ctrl_info->controller_online = true;
061ef06a
KB
8178 pqi_ctrl_unblock_requests(ctrl_info);
8179
4ccc354b
KB
8180 pqi_ctrl_reset_config(ctrl_info);
8181
4fd22c13
MR
8182 rc = pqi_process_config_table(ctrl_info);
8183 if (rc)
8184 return rc;
8185
8186 pqi_start_heartbeat_timer(ctrl_info);
8187
f6cc2a77
KB
8188 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8189 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8190 if (rc) {
8191 dev_err(&ctrl_info->pci_dev->dev,
8192 "error obtaining advanced RAID bypass configuration\n");
8193 return rc;
8194 }
8195 ctrl_info->ciss_report_log_flags |=
8196 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8197 }
8198
061ef06a
KB
8199 rc = pqi_enable_events(ctrl_info);
8200 if (rc) {
8201 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 8202 "error enabling events\n");
061ef06a
KB
8203 return rc;
8204 }
8205
6d90615f 8206 rc = pqi_get_ctrl_product_details(ctrl_info);
4fd22c13
MR
8207 if (rc) {
8208 dev_err(&ctrl_info->pci_dev->dev,
694c5d5b 8209 "error obtaining product details\n");
4fd22c13
MR
8210 return rc;
8211 }
8212
171c2865
DC
8213 rc = pqi_set_diag_rescan(ctrl_info);
8214 if (rc) {
8215 dev_err(&ctrl_info->pci_dev->dev,
8216 "error enabling multi-lun rescan\n");
8217 return rc;
8218 }
8219
061ef06a
KB
8220 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8221 if (rc) {
8222 dev_err(&ctrl_info->pci_dev->dev,
8223 "error updating host wellness\n");
8224 return rc;
8225 }
8226
2790cd4d
KB
8227 if (pqi_ofa_in_progress(ctrl_info))
8228 pqi_ctrl_unblock_scan(ctrl_info);
061ef06a
KB
8229
8230 pqi_scan_scsi_devices(ctrl_info);
8231
8232 return 0;
8233}
8234
583891c9 8235static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
a81ed5f3 8236{
d20df83b
BOS
8237 int rc;
8238
8239 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
a81ed5f3 8240 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
d20df83b
BOS
8241
8242 return pcibios_err_to_errno(rc);
a81ed5f3
KB
8243}
8244
6c223761
KB
8245static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8246{
8247 int rc;
8248 u64 mask;
8249
8250 rc = pci_enable_device(ctrl_info->pci_dev);
8251 if (rc) {
8252 dev_err(&ctrl_info->pci_dev->dev,
8253 "failed to enable PCI device\n");
8254 return rc;
8255 }
8256
8257 if (sizeof(dma_addr_t) > 4)
8258 mask = DMA_BIT_MASK(64);
8259 else
8260 mask = DMA_BIT_MASK(32);
8261
1d94f06e 8262 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
6c223761
KB
8263 if (rc) {
8264 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8265 goto disable_device;
8266 }
8267
8268 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8269 if (rc) {
8270 dev_err(&ctrl_info->pci_dev->dev,
8271 "failed to obtain PCI resources\n");
8272 goto disable_device;
8273 }
8274
4bdc0d67 8275 ctrl_info->iomem_base = ioremap(pci_resource_start(
8276 ctrl_info->pci_dev, 0),
8277 sizeof(struct pqi_ctrl_registers));
8278 if (!ctrl_info->iomem_base) {
8279 dev_err(&ctrl_info->pci_dev->dev,
8280 "failed to map memory for controller registers\n");
8281 rc = -ENOMEM;
8282 goto release_regions;
8283 }
8284
a81ed5f3
KB
8285#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
8286
8287 /* Increase the PCIe completion timeout. */
8288 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8289 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8290 if (rc) {
8291 dev_err(&ctrl_info->pci_dev->dev,
8292 "failed to set PCIe completion timeout\n");
8293 goto release_regions;
8294 }
8295
6c223761
KB
8296 /* Enable bus mastering. */
8297 pci_set_master(ctrl_info->pci_dev);
8298
cbe0c7b1
KB
8299 ctrl_info->registers = ctrl_info->iomem_base;
8300 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8301
6c223761
KB
8302 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8303
8304 return 0;
8305
8306release_regions:
8307 pci_release_regions(ctrl_info->pci_dev);
8308disable_device:
8309 pci_disable_device(ctrl_info->pci_dev);
8310
8311 return rc;
8312}
8313
8314static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
8315{
8316 iounmap(ctrl_info->iomem_base);
8317 pci_release_regions(ctrl_info->pci_dev);
cbe0c7b1
KB
8318 if (pci_is_enabled(ctrl_info->pci_dev))
8319 pci_disable_device(ctrl_info->pci_dev);
6c223761
KB
8320 pci_set_drvdata(ctrl_info->pci_dev, NULL);
8321}
8322
8323static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
8324{
8325 struct pqi_ctrl_info *ctrl_info;
8326
8327 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
8328 GFP_KERNEL, numa_node);
8329 if (!ctrl_info)
8330 return NULL;
8331
8332 mutex_init(&ctrl_info->scan_mutex);
7561a7e4 8333 mutex_init(&ctrl_info->lun_reset_mutex);
4fd22c13 8334 mutex_init(&ctrl_info->ofa_mutex);
6c223761
KB
8335
8336 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8337 spin_lock_init(&ctrl_info->scsi_device_list_lock);
8338
8339 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
8340 atomic_set(&ctrl_info->num_interrupts, 0);
8341
8342 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
8343 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
8344
74a0f573 8345 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
5f310425 8346 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
98f87667 8347
2790cd4d
KB
8348 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
8349 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
8350
6c223761
KB
8351 sema_init(&ctrl_info->sync_request_sem,
8352 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
7561a7e4 8353 init_waitqueue_head(&ctrl_info->block_requests_wait);
6c223761
KB
8354
8355 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
061ef06a 8356 ctrl_info->irq_mode = IRQ_MODE_NONE;
6c223761
KB
8357 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
8358
f6cc2a77
KB
8359 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
8360 ctrl_info->max_transfer_encrypted_sas_sata =
8361 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
8362 ctrl_info->max_transfer_encrypted_nvme =
8363 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
8364 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
8365 ctrl_info->max_write_raid_1_10_2drive = ~0;
8366 ctrl_info->max_write_raid_1_10_3drive = ~0;
8367
6c223761
KB
8368 return ctrl_info;
8369}
8370
8371static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
8372{
8373 kfree(ctrl_info);
8374}
8375
8376static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
8377{
98bf061b
KB
8378 pqi_free_irqs(ctrl_info);
8379 pqi_disable_msix_interrupts(ctrl_info);
6c223761
KB
8380}
8381
8382static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
8383{
8384 pqi_stop_heartbeat_timer(ctrl_info);
8385 pqi_free_interrupts(ctrl_info);
8386 if (ctrl_info->queue_memory_base)
8387 dma_free_coherent(&ctrl_info->pci_dev->dev,
8388 ctrl_info->queue_memory_length,
8389 ctrl_info->queue_memory_base,
8390 ctrl_info->queue_memory_base_dma_handle);
8391 if (ctrl_info->admin_queue_memory_base)
8392 dma_free_coherent(&ctrl_info->pci_dev->dev,
8393 ctrl_info->admin_queue_memory_length,
8394 ctrl_info->admin_queue_memory_base,
8395 ctrl_info->admin_queue_memory_base_dma_handle);
8396 pqi_free_all_io_requests(ctrl_info);
8397 if (ctrl_info->error_buffer)
8398 dma_free_coherent(&ctrl_info->pci_dev->dev,
8399 ctrl_info->error_buffer_length,
8400 ctrl_info->error_buffer,
8401 ctrl_info->error_buffer_dma_handle);
8402 if (ctrl_info->iomem_base)
8403 pqi_cleanup_pci_init(ctrl_info);
8404 pqi_free_ctrl_info(ctrl_info);
8405}
8406
8407static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
8408{
8409 pqi_cancel_rescan_worker(ctrl_info);
8410 pqi_cancel_update_time_worker(ctrl_info);
819225b0 8411 pqi_remove_all_scsi_devices(ctrl_info);
e57a1f9b 8412 pqi_unregister_scsi(ctrl_info);
8413 if (ctrl_info->pqi_mode_enabled)
8414 pqi_revert_to_sis_mode(ctrl_info);
8415 pqi_free_ctrl_resources(ctrl_info);
8416}
8417
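/*
 * Editor's note (added comment, not in the original source): the next two
 * helpers bracket an OFA (Online Firmware Activation) cycle. Quiesce blocks
 * scans, new SCSI requests, device resets and driver-initiated requests,
 * waits for outstanding I/O to drain, and stops the heartbeat timer;
 * unquiesce reverses those steps in the opposite order.
 */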
8418static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
8419{
8420 pqi_ctrl_block_scan(ctrl_info);
8421 pqi_scsi_block_requests(ctrl_info);
8422 pqi_ctrl_block_device_reset(ctrl_info);
8423 pqi_ctrl_block_requests(ctrl_info);
8424 pqi_ctrl_wait_until_quiesced(ctrl_info);
4fd22c13 8425 pqi_stop_heartbeat_timer(ctrl_info);
8426}
8427
8428static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
8429{
4fd22c13 8430 pqi_start_heartbeat_timer(ctrl_info);
8431 pqi_ctrl_unblock_requests(ctrl_info);
8432 pqi_ctrl_unblock_device_reset(ctrl_info);
8433 pqi_scsi_unblock_requests(ctrl_info);
8434 pqi_ctrl_unblock_scan(ctrl_info);
8435}
8436
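/*
 * Editor's note (added comment, not in the original source): the OFA host
 * buffer is carved into sg_count equal-sized DMA-coherent chunks. Each chunk
 * is recorded in ofap->sg_descriptor[], the last descriptor is tagged
 * CISS_SG_LAST, and a failure part-way through frees every chunk allocated
 * so far before returning -ENOMEM.
 */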
2790cd4d 8437static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
4fd22c13 8438{
4fd22c13 8439 int i;
2790cd4d 8440 u32 sg_count;
8441 struct device *dev;
8442 struct pqi_ofa_memory *ofap;
8443 struct pqi_sg_descriptor *mem_descriptor;
8444 dma_addr_t dma_handle;
8445
8446 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8447
8448 sg_count = DIV_ROUND_UP(total_size, chunk_size);
8449 if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
8450 goto out;
8451
2790cd4d 8452 ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
8453 if (!ctrl_info->pqi_ofa_chunk_virt_addr)
8454 goto out;
8455
2790cd4d 8456 dev = &ctrl_info->pci_dev->dev;
4fd22c13 8457
2790cd4d 8458 for (i = 0; i < sg_count; i++) {
4fd22c13 8459 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
2790cd4d 8460 dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
4fd22c13 8461 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
2790cd4d 8462 goto out_free_chunks;
4fd22c13 8463 mem_descriptor = &ofap->sg_descriptor[i];
8464 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
8465 put_unaligned_le32(chunk_size, &mem_descriptor->length);
8466 }
8467
8468 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
8469 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
2790cd4d 8470 put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
8471
8472 return 0;
8473
8474out_free_chunks:
8475 while (--i >= 0) {
8476 mem_descriptor = &ofap->sg_descriptor[i];
8477 dma_free_coherent(dev, chunk_size,
8478 ctrl_info->pqi_ofa_chunk_virt_addr[i],
8479 get_unaligned_le64(&mem_descriptor->address));
8480 }
8481 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8482
8483out:
8484 return -ENOMEM;
8485}
8486
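/*
 * Editor's note (added comment, not in the original source): pick the chunk
 * size for the OFA buffer. Start with one chunk covering the whole
 * (page-aligned) request and keep halving the chunk size until
 * pqi_ofa_alloc_mem() succeeds or the size drops below the minimum that
 * still fits within PQI_OFA_MAX_SG_DESCRIPTORS descriptors.
 */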
8487static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
8488{
8489 u32 total_size;
2790cd4d 8490 u32 chunk_size;
4fd22c13 8491 u32 min_chunk_size;
4fd22c13 8492
8493 if (ctrl_info->ofa_bytes_requested == 0)
8494 return 0;
4fd22c13 8495
8496 total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
8497 min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
8498 min_chunk_size = PAGE_ALIGN(min_chunk_size);
4fd22c13 8499
8500 for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
8501 if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
4fd22c13 8502 return 0;
8503 chunk_size /= 2;
8504 chunk_size = PAGE_ALIGN(chunk_size);
8505 }
8506
8507 return -ENOMEM;
8508}
8509
2790cd4d 8510static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
4fd22c13 8511{
4fd22c13 8512 struct device *dev;
2790cd4d 8513 struct pqi_ofa_memory *ofap;
8514
8515 dev = &ctrl_info->pci_dev->dev;
4fd22c13 8516
8517 ofap = dma_alloc_coherent(dev, sizeof(*ofap),
8518 &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
8519 if (!ofap)
8520 return;
8521
2790cd4d 8522 ctrl_info->pqi_ofa_mem_virt_addr = ofap;
8523
8524 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
8525 dev_err(dev,
8526 "failed to allocate host buffer for Online Firmware Activation\n");
8527 dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
8528 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8529 return;
4fd22c13 8530 }
694c5d5b 8531
8532 put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
8533 memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
8534}
8535
8536static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
8537{
8538 unsigned int i;
8539 struct device *dev;
4fd22c13 8540 struct pqi_ofa_memory *ofap;
8541 struct pqi_sg_descriptor *mem_descriptor;
8542 unsigned int num_memory_descriptors;
8543
8544 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8545 if (!ofap)
8546 return;
8547
8548 dev = &ctrl_info->pci_dev->dev;
8549
8550 if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
8551 goto out;
8552
8553 mem_descriptor = ofap->sg_descriptor;
8554 num_memory_descriptors =
8555 get_unaligned_le16(&ofap->num_memory_descriptors);
4fd22c13 8556
8557 for (i = 0; i < num_memory_descriptors; i++) {
8558 dma_free_coherent(dev,
8559 get_unaligned_le32(&mem_descriptor[i].length),
8560 ctrl_info->pqi_ofa_chunk_virt_addr[i],
8561 get_unaligned_le64(&mem_descriptor[i].address));
8562 }
8563 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8564
8565out:
8566 dma_free_coherent(dev, sizeof(*ofap), ofap,
8567 ctrl_info->pqi_ofa_mem_dma_handle);
8568 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8569}
8570
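/*
 * Editor's note (added comment, not in the original source): report the OFA
 * host buffer to the controller with a vendor-general IU. When a buffer
 * exists, its DMA address and the length of the descriptor header plus SG
 * list are passed; with no buffer the request goes out with zeroed fields,
 * telling firmware that no host memory is available.
 */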
8571static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
8572{
2790cd4d 8573 u32 buffer_length;
4fd22c13 8574 struct pqi_vendor_general_request request;
8575 struct pqi_ofa_memory *ofap;
8576
8577 memset(&request, 0, sizeof(request));
8578
8579 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
8580 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
8581 &request.header.iu_length);
8582 put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
8583 &request.function_code);
8584
8585 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8586
4fd22c13 8587 if (ofap) {
2790cd4d 8588 buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
8589 get_unaligned_le16(&ofap->num_memory_descriptors) *
8590 sizeof(struct pqi_sg_descriptor);
8591
8592 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
8593 &request.data.ofa_memory_allocation.buffer_address);
2790cd4d 8594 put_unaligned_le32(buffer_length,
4fd22c13 8595 &request.data.ofa_memory_allocation.buffer_length);
8596 }
8597
ae0c189d 8598 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
8599}
8600
2790cd4d 8601static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
4fd22c13 8602{
8603 ssleep(delay_secs);
8604
8605 return pqi_ctrl_init_resume(ctrl_info);
8606}
8607
8608static void pqi_perform_lockup_action(void)
8609{
8610 switch (pqi_lockup_action) {
8611 case PANIC:
8612 panic("FATAL: Smart Family Controller lockup detected");
8613 break;
8614 case REBOOT:
8615 emergency_restart();
8616 break;
8617 case NONE:
8618 default:
8619 break;
8620 }
8621}
8622
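/*
 * Editor's note (added comment, not in the original source): once the
 * controller is declared offline, every in-flight request is completed from
 * the host side: SCSI commands on still-online devices get DID_NO_CONNECT,
 * commands whose device is already gone are simply freed, and
 * driver-internal requests are failed with -ENXIO plus the canned
 * hardware-error status below.
 */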
8623static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
8624 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
8625 .status = SAM_STAT_CHECK_CONDITION,
8626};
8627
8628static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
8629{
8630 unsigned int i;
376fb880 8631 struct pqi_io_request *io_request;
376fb880 8632 struct scsi_cmnd *scmd;
4f3cefc3 8633 struct scsi_device *sdev;
376fb880 8634
8635 for (i = 0; i < ctrl_info->max_io_slots; i++) {
8636 io_request = &ctrl_info->io_request_pool[i];
8637 if (atomic_read(&io_request->refcount) == 0)
8638 continue;
376fb880 8639
8640 scmd = io_request->scmd;
8641 if (scmd) {
8642 sdev = scmd->device;
8643 if (!sdev || !scsi_device_online(sdev)) {
8644 pqi_free_io_request(io_request);
8645 continue;
8646 } else {
8647 set_host_byte(scmd, DID_NO_CONNECT);
8648 }
8649 } else {
8650 io_request->status = -ENXIO;
8651 io_request->error_info =
8652 &pqi_ctrl_offline_raid_error_info;
376fb880 8653 }
8654
8655 io_request->io_complete_callback(io_request,
8656 io_request->context);
8657 }
8658}
8659
5f310425 8660static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
376fb880 8661{
8662 pqi_perform_lockup_action();
8663 pqi_stop_heartbeat_timer(ctrl_info);
8664 pqi_free_interrupts(ctrl_info);
8665 pqi_cancel_rescan_worker(ctrl_info);
8666 pqi_cancel_update_time_worker(ctrl_info);
8667 pqi_ctrl_wait_until_quiesced(ctrl_info);
8668 pqi_fail_all_outstanding_requests(ctrl_info);
8669 pqi_ctrl_unblock_requests(ctrl_info);
8670}
8671
8672static void pqi_ctrl_offline_worker(struct work_struct *work)
8673{
8674 struct pqi_ctrl_info *ctrl_info;
8675
8676 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
8677 pqi_take_ctrl_offline_deferred(ctrl_info);
8678}
8679
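/*
 * Editor's note (added comment, not in the original source): taking the
 * controller offline is split in two. This function runs in the error path:
 * it marks the controller offline, blocks new requests, optionally asks SIS
 * firmware to shut down (recording the reason code), and disables the PCI
 * device. The heavier cleanup above is deferred to ctrl_offline_work so it
 * can run in process context.
 */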
8680static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
8681 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
376fb880 8682{
8683 if (!ctrl_info->controller_online)
8684 return;
8685
376fb880 8686 ctrl_info->controller_online = false;
8687 ctrl_info->pqi_mode_enabled = false;
8688 pqi_ctrl_block_requests(ctrl_info);
5a259e32 8689 if (!pqi_disable_ctrl_shutdown)
5d1f03e6 8690 sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
8691 pci_disable_device(ctrl_info->pci_dev);
8692 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
5f310425 8693 schedule_work(&ctrl_info->ctrl_offline_work);
8694}
8695
d91d7820 8696static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
8697 const struct pci_device_id *id)
8698{
8699 char *ctrl_description;
8700
37b36847 8701 if (id->driver_data)
6c223761 8702 ctrl_description = (char *)id->driver_data;
37b36847 8703 else
6aa26b5a 8704 ctrl_description = "Microchip Smart Family Controller";
6c223761 8705
d91d7820 8706 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
8707}
8708
8709static int pqi_pci_probe(struct pci_dev *pci_dev,
8710 const struct pci_device_id *id)
8711{
8712 int rc;
62dc51fb 8713 int node, cp_node;
8714 struct pqi_ctrl_info *ctrl_info;
8715
d91d7820 8716 pqi_print_ctrl_info(pci_dev, id);
8717
8718 if (pqi_disable_device_id_wildcards &&
8719 id->subvendor == PCI_ANY_ID &&
8720 id->subdevice == PCI_ANY_ID) {
d91d7820 8721 dev_warn(&pci_dev->dev,
8722 "controller not probed because device ID wildcards are disabled\n");
8723 return -ENODEV;
8724 }
8725
8726 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
d91d7820 8727 dev_warn(&pci_dev->dev,
8728 "controller device ID matched using wildcards\n");
8729
d91d7820 8730 node = dev_to_node(&pci_dev->dev);
8731 if (node == NUMA_NO_NODE) {
8732 cp_node = cpu_to_node(0);
8733 if (cp_node == NUMA_NO_NODE)
8734 cp_node = 0;
8735 set_dev_node(&pci_dev->dev, cp_node);
8736 }
8737
8738 ctrl_info = pqi_alloc_ctrl_info(node);
8739 if (!ctrl_info) {
d91d7820 8740 dev_err(&pci_dev->dev,
8741 "failed to allocate controller info block\n");
8742 return -ENOMEM;
8743 }
8744
d91d7820 8745 ctrl_info->pci_dev = pci_dev;
8746
8747 rc = pqi_pci_init(ctrl_info);
8748 if (rc)
8749 goto error;
8750
8751 rc = pqi_ctrl_init(ctrl_info);
8752 if (rc)
8753 goto error;
8754
8755 return 0;
8756
8757error:
8758 pqi_remove_ctrl(ctrl_info);
8759
8760 return rc;
8761}
8762
d91d7820 8763static void pqi_pci_remove(struct pci_dev *pci_dev)
8764{
8765 struct pqi_ctrl_info *ctrl_info;
8766
d91d7820 8767 ctrl_info = pci_get_drvdata(pci_dev);
8768 if (!ctrl_info)
8769 return;
8770
8771 pqi_remove_ctrl(ctrl_info);
8772}
8773
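/*
 * Editor's note (added comment, not in the original source): sanity check
 * used at shutdown/suspend after everything has been quiesced. Any I/O slot
 * still holding a reference triggers a WARN_ON stack trace, one variant for
 * SCSI commands from the SCSI midlayer and one for driver-initiated
 * requests.
 */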
8774static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
8775{
8776 unsigned int i;
8777 struct pqi_io_request *io_request;
8778 struct scsi_cmnd *scmd;
8779
8780 for (i = 0; i < ctrl_info->max_io_slots; i++) {
8781 io_request = &ctrl_info->io_request_pool[i];
8782 if (atomic_read(&io_request->refcount) == 0)
8783 continue;
8784 scmd = io_request->scmd;
8785 WARN_ON(scmd != NULL); /* IO command from SML */
8786 		WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
8787 }
8788}
8789
d91d7820 8790static void pqi_shutdown(struct pci_dev *pci_dev)
8791{
8792 int rc;
8793 struct pqi_ctrl_info *ctrl_info;
8794
d91d7820 8795 ctrl_info = pci_get_drvdata(pci_dev);
8796 if (!ctrl_info) {
8797 dev_err(&pci_dev->dev,
8798 "cache could not be flushed\n");
8799 return;
8800 }
8801
0530736e 8802 pqi_wait_until_ofa_finished(ctrl_info);
0530736e 8803
9fa82023 8804 pqi_scsi_block_requests(ctrl_info);
0530736e 8805 pqi_ctrl_block_device_reset(ctrl_info);
8806 pqi_ctrl_block_requests(ctrl_info);
8807 pqi_ctrl_wait_until_quiesced(ctrl_info);
8808
8809 /*
8810 * Write all data in the controller's battery-backed cache to
8811 * storage.
8812 */
58322fe0 8813 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
8814 if (rc)
8815 dev_err(&pci_dev->dev,
8816 "unable to flush controller cache\n");
8817
8818 pqi_crash_if_pending_command(ctrl_info);
8819 pqi_reset(ctrl_info);
8820}
8821
8822static void pqi_process_lockup_action_param(void)
8823{
8824 unsigned int i;
8825
8826 if (!pqi_lockup_action_param)
8827 return;
8828
8829 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
8830 if (strcmp(pqi_lockup_action_param,
8831 pqi_lockup_actions[i].name) == 0) {
8832 pqi_lockup_action = pqi_lockup_actions[i].action;
8833 return;
8834 }
8835 }
8836
8837 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
8838 DRIVER_NAME_SHORT, pqi_lockup_action_param);
8839}
8840
8841static void pqi_process_module_params(void)
8842{
8843 pqi_process_lockup_action_param();
8844}
8845
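/*
 * Editor's note (added comment, not in the original source): legacy PM
 * hooks. Suspend waits for any OFA to finish, quiesces the controller,
 * flushes the write cache with the SUSPEND reason, and stops the heartbeat
 * timer; for anything other than a freeze event it also saves PCI state and
 * drops to the requested power state.
 */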
5c146686 8846static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
8847{
8848 struct pqi_ctrl_info *ctrl_info;
8849
8850 ctrl_info = pci_get_drvdata(pci_dev);
8851
4fd22c13 8852 pqi_wait_until_ofa_finished(ctrl_info);
8853
8854 pqi_ctrl_block_scan(ctrl_info);
8855 pqi_scsi_block_requests(ctrl_info);
8856 pqi_ctrl_block_device_reset(ctrl_info);
8857 pqi_ctrl_block_requests(ctrl_info);
8858 pqi_ctrl_wait_until_quiesced(ctrl_info);
9fa82023 8859 pqi_flush_cache(ctrl_info, SUSPEND);
8860 pqi_stop_heartbeat_timer(ctrl_info);
8861
8862 pqi_crash_if_pending_command(ctrl_info);
8863
8864 if (state.event == PM_EVENT_FREEZE)
8865 return 0;
8866
8867 pci_save_state(pci_dev);
8868 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
8869
8870 ctrl_info->controller_online = false;
8871 ctrl_info->pqi_mode_enabled = false;
8872
8873 return 0;
8874}
8875
5c146686 8876static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
8877{
8878 int rc;
8879 struct pqi_ctrl_info *ctrl_info;
8880
8881 ctrl_info = pci_get_drvdata(pci_dev);
8882
8883 if (pci_dev->current_state != PCI_D0) {
8884 ctrl_info->max_hw_queue_index = 0;
8885 pqi_free_interrupts(ctrl_info);
8886 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
8887 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
8888 IRQF_SHARED, DRIVER_NAME_SHORT,
8889 &ctrl_info->queue_groups[0]);
8890 if (rc) {
8891 dev_err(&ctrl_info->pci_dev->dev,
8892 "irq %u init failed with error %d\n",
8893 pci_dev->irq, rc);
8894 return rc;
8895 }
43e97ef4 8896 pqi_ctrl_unblock_device_reset(ctrl_info);
061ef06a 8897 pqi_ctrl_unblock_requests(ctrl_info);
9fa82023 8898 pqi_scsi_unblock_requests(ctrl_info);
43e97ef4 8899 pqi_ctrl_unblock_scan(ctrl_info);
8900 return 0;
8901 }
8902
8903 pci_set_power_state(pci_dev, PCI_D0);
8904 pci_restore_state(pci_dev);
8905
8906 pqi_ctrl_unblock_device_reset(ctrl_info);
8907 pqi_ctrl_unblock_requests(ctrl_info);
8908 pqi_scsi_unblock_requests(ctrl_info);
8909 pqi_ctrl_unblock_scan(ctrl_info);
8910
8911 return pqi_ctrl_init_resume(ctrl_info);
8912}
8913
8914/* Define the PCI IDs for the controllers that we support. */
8915static const struct pci_device_id pqi_pci_id_table[] = {
8916 {
8917 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8918 0x105b, 0x1211)
8919 },
8920 {
8921 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8922 0x105b, 0x1321)
8923 },
8924 {
8925 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8926 0x152d, 0x8a22)
8927 },
8928 {
8929 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8930 0x152d, 0x8a23)
8931 },
8932 {
8933 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8934 0x152d, 0x8a24)
8935 },
8936 {
8937 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8938 0x152d, 0x8a36)
8939 },
8940 {
8941 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8942 0x152d, 0x8a37)
8943 },
8944 {
8945 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8946 0x193d, 0x8460)
8947 },
8948 {
8949 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8950 0x193d, 0x1104)
8951 },
8952 {
8953 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8954 0x193d, 0x1105)
8955 },
8956 {
8957 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8958 0x193d, 0x1106)
8959 },
8960 {
8961 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8962 0x193d, 0x1107)
8963 },
8964 {
8965 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8966 0x193d, 0x1108)
8967 },
8968 {
8969 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8970 0x193d, 0x1109)
8971 },
8972 {
8973 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8974 0x193d, 0x8460)
8975 },
8976 {
8977 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8978 0x193d, 0x8461)
8979 },
8980 {
8981 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8982 0x193d, 0xc460)
8983 },
8984 {
8985 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8986 0x193d, 0xc461)
8987 },
8988 {
8989 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8990 0x193d, 0xf460)
8991 },
8992 {
8993 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8994 0x193d, 0xf461)
8995 },
8996 {
8997 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
8998 0x1bd4, 0x0045)
8999 },
9000 {
9001 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9002 0x1bd4, 0x0046)
9003 },
9004 {
9005 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9006 0x1bd4, 0x0047)
9007 },
9008 {
9009 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9010 0x1bd4, 0x0048)
9011 },
9012 {
9013 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9014 0x1bd4, 0x004a)
9015 },
9016 {
9017 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9018 0x1bd4, 0x004b)
9019 },
9020 {
9021 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9022 0x1bd4, 0x004c)
9023 },
9024 {
9025 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9026 0x1bd4, 0x004f)
9027 },
9028 {
9029 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9030 0x1bd4, 0x0051)
9031 },
9032 {
9033 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9034 0x1bd4, 0x0052)
9035 },
9036 {
9037 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9038 0x1bd4, 0x0053)
9039 },
9040 {
9041 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9042 0x1bd4, 0x0054)
9043 },
9044 {
9045 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9046 0x19e5, 0xd227)
9047 },
9048 {
9049 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9050 0x19e5, 0xd228)
9051 },
9052 {
9053 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9054 0x19e5, 0xd229)
9055 },
9056 {
9057 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9058 0x19e5, 0xd22a)
9059 },
9060 {
9061 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9062 0x19e5, 0xd22b)
9063 },
9064 {
9065 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9066 0x19e5, 0xd22c)
9067 },
9068 {
9069 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9070 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
9071 },
9072 {
9073 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
55790064 9074 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
9075 },
9076 {
9077 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9078 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
9079 },
9080 {
9081 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9082 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
9083 },
9084 {
9085 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9086 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
9087 },
9088 {
9089 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9090 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
9091 },
9092 {
9093 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9094 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
9095 },
9096 {
9097 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9098 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
9099 },
9100 {
9101 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9102 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
6c223761 9103 },
9104 {
9105 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9106 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
9107 },
9108 {
9109 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9110 PCI_VENDOR_ID_ADAPTEC2, 0x0808)
9111 },
9112 {
9113 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9114 PCI_VENDOR_ID_ADAPTEC2, 0x0809)
9115 },
9116 {
9117 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9118 PCI_VENDOR_ID_ADAPTEC2, 0x080a)
9119 },
9120 {
9121 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9122 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
9123 },
9124 {
9125 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9126 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
9127 },
9128 {
9129 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9130 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
9131 },
9132 {
9133 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9134 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
9135 },
9136 {
9137 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9138 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
9139 },
9140 {
9141 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9142 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
9143 },
9144 {
9145 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9146 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
9147 },
9148 {
9149 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9150 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
9151 },
9152 {
9153 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9154 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
6c223761 9155 },
9156 {
9157 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9158 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
9159 },
9160 {
9161 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9162 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
9163 },
9164 {
9165 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9166 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
9167 },
9168 {
9169 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9170 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
9171 },
9172 {
9173 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9174 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
9175 },
9176 {
9177 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9178 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
6c223761 9179 },
9180 {
9181 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9182 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
9183 },
9184 {
9185 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9186 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
9187 },
9188 {
9189 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9190 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
6c223761 9191 },
9192 {
9193 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9194 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
9195 },
9196 {
9197 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9198 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
9199 },
9200 {
9201 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9202 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
9203 },
9204 {
9205 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9206 PCI_VENDOR_ID_ADAPTEC2, 0x1400)
9207 },
9208 {
9209 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9210 PCI_VENDOR_ID_ADAPTEC2, 0x1402)
9211 },
9212 {
9213 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9214 PCI_VENDOR_ID_ADAPTEC2, 0x1410)
9215 },
9216 {
9217 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9218 PCI_VENDOR_ID_ADAPTEC2, 0x1411)
9219 },
9220 {
9221 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9222 PCI_VENDOR_ID_ADAPTEC2, 0x1412)
9223 },
9224 {
9225 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9226 PCI_VENDOR_ID_ADAPTEC2, 0x1420)
9227 },
9228 {
9229 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9230 PCI_VENDOR_ID_ADAPTEC2, 0x1430)
9231 },
9232 {
9233 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9234 PCI_VENDOR_ID_ADAPTEC2, 0x1440)
9235 },
9236 {
9237 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9238 PCI_VENDOR_ID_ADAPTEC2, 0x1441)
9239 },
9240 {
9241 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9242 PCI_VENDOR_ID_ADAPTEC2, 0x1450)
9243 },
9244 {
9245 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9246 PCI_VENDOR_ID_ADAPTEC2, 0x1452)
9247 },
9248 {
9249 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9250 PCI_VENDOR_ID_ADAPTEC2, 0x1460)
9251 },
9252 {
9253 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9254 PCI_VENDOR_ID_ADAPTEC2, 0x1461)
9255 },
9256 {
9257 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9258 PCI_VENDOR_ID_ADAPTEC2, 0x1462)
9259 },
9260 {
9261 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9262 PCI_VENDOR_ID_ADAPTEC2, 0x1470)
9263 },
9264 {
9265 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9266 PCI_VENDOR_ID_ADAPTEC2, 0x1471)
9267 },
9268 {
9269 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9270 PCI_VENDOR_ID_ADAPTEC2, 0x1472)
9271 },
9272 {
9273 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9274 PCI_VENDOR_ID_ADAPTEC2, 0x1480)
9275 },
9276 {
9277 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9278 PCI_VENDOR_ID_ADAPTEC2, 0x1490)
9279 },
9280 {
9281 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9282 PCI_VENDOR_ID_ADAPTEC2, 0x1491)
9283 },
9284 {
9285 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9286 PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
9287 },
9288 {
9289 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9290 PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
9291 },
9292 {
9293 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9294 PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
9295 },
9296 {
9297 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9298 PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
9299 },
9300 {
9301 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9302 PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
9303 },
9304 {
9305 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9306 PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
9307 },
9308 {
9309 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9310 PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
9311 },
9312 {
9313 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9314 PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
9315 },
9316 {
9317 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9318 PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
9319 },
9320 {
9321 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9322 PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
9323 },
9324 {
9325 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9326 PCI_VENDOR_ID_ADVANTECH, 0x8312)
9327 },
9328 {
9329 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9330 PCI_VENDOR_ID_DELL, 0x1fe0)
9331 },
9332 {
9333 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9334 PCI_VENDOR_ID_HP, 0x0600)
9335 },
9336 {
9337 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9338 PCI_VENDOR_ID_HP, 0x0601)
9339 },
9340 {
9341 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9342 PCI_VENDOR_ID_HP, 0x0602)
9343 },
9344 {
9345 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9346 PCI_VENDOR_ID_HP, 0x0603)
9347 },
9348 {
9349 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
55790064 9350 PCI_VENDOR_ID_HP, 0x0609)
9351 },
9352 {
9353 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9354 PCI_VENDOR_ID_HP, 0x0650)
9355 },
9356 {
9357 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9358 PCI_VENDOR_ID_HP, 0x0651)
9359 },
9360 {
9361 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9362 PCI_VENDOR_ID_HP, 0x0652)
9363 },
9364 {
9365 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9366 PCI_VENDOR_ID_HP, 0x0653)
9367 },
9368 {
9369 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9370 PCI_VENDOR_ID_HP, 0x0654)
9371 },
9372 {
9373 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9374 PCI_VENDOR_ID_HP, 0x0655)
9375 },
9376 {
9377 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9378 PCI_VENDOR_ID_HP, 0x0700)
9379 },
9380 {
9381 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9382 PCI_VENDOR_ID_HP, 0x0701)
9383 },
9384 {
9385 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9386 PCI_VENDOR_ID_HP, 0x1001)
9387 },
9388 {
9389 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9390 PCI_VENDOR_ID_HP, 0x1002)
9391 },
9392 {
9393 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9394 PCI_VENDOR_ID_HP, 0x1100)
9395 },
9396 {
9397 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9398 PCI_VENDOR_ID_HP, 0x1101)
9399 },
9400 {
9401 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9402 0x1590, 0x0294)
9403 },
9404 {
9405 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9406 0x1590, 0x02db)
9407 },
9408 {
9409 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9410 0x1590, 0x02dc)
9411 },
9412 {
9413 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9414 0x1590, 0x032e)
9415 },
9416 {
9417 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9418 0x1d8d, 0x0800)
9419 },
9420 {
9421 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9422 0x1d8d, 0x0908)
9423 },
9424 {
9425 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9426 0x1d8d, 0x0806)
9427 },
9428 {
9429 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9430 0x1d8d, 0x0916)
9431 },
9432 {
9433 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9434 PCI_VENDOR_ID_GIGABYTE, 0x1000)
9435 },
9436 {
9437 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9438 0x1dfc, 0x3161)
9439 },
9440 {
9441 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9442 0x1cf2, 0x5445)
9443 },
9444 {
9445 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9446 0x1cf2, 0x5446)
9447 },
9448 {
9449 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9450 0x1cf2, 0x5447)
9451 },
9452 {
9453 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9454 0x1cf2, 0x0b27)
9455 },
9456 {
9457 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9458 0x1cf2, 0x0b29)
9459 },
9460 {
9461 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9462 0x1cf2, 0x0b45)
9463 },
9464 {
9465 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9466 PCI_ANY_ID, PCI_ANY_ID)
9467 },
9468 { 0 }
9469};
9470
9471MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
9472
9473static struct pci_driver pqi_pci_driver = {
9474 .name = DRIVER_NAME_SHORT,
9475 .id_table = pqi_pci_id_table,
9476 .probe = pqi_pci_probe,
9477 .remove = pqi_pci_remove,
9478 .shutdown = pqi_shutdown,
9479#if defined(CONFIG_PM)
9480 .suspend = pqi_suspend,
9481 .resume = pqi_resume,
9482#endif
9483};
9484
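/*
 * Editor's note (added comment, not in the original source): module init
 * attaches the SAS transport template, applies module parameters (currently
 * just lockup_action), and registers the PCI driver; the transport template
 * is released again if PCI registration fails or at module exit.
 */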
9485static int __init pqi_init(void)
9486{
9487 int rc;
9488
9489 pr_info(DRIVER_NAME "\n");
9490
8b664fef 9491 pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
9492 if (!pqi_sas_transport_template)
9493 return -ENODEV;
9494
9495 pqi_process_module_params();
9496
9497 rc = pci_register_driver(&pqi_pci_driver);
9498 if (rc)
9499 sas_release_transport(pqi_sas_transport_template);
9500
9501 return rc;
9502}
9503
9504static void __exit pqi_cleanup(void)
9505{
9506 pci_unregister_driver(&pqi_pci_driver);
9507 sas_release_transport(pqi_sas_transport_template);
9508}
9509
9510module_init(pqi_init);
9511module_exit(pqi_cleanup);
9512
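/*
 * Editor's note (added comment, not in the original source):
 * verify_structures() is never called; it exists only so the compiler
 * evaluates these BUILD_BUG_ON() assertions, which pin the byte offsets and
 * sizes of the on-wire PQI/SIS structures to the layout the firmware
 * expects.
 */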
9513static void __attribute__((unused)) verify_structures(void)
9514{
9515 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9516 sis_host_to_ctrl_doorbell) != 0x20);
9517 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9518 sis_interrupt_mask) != 0x34);
9519 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9520 sis_ctrl_to_host_doorbell) != 0x9c);
9521 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9522 sis_ctrl_to_host_doorbell_clear) != 0xa0);
9523 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9524 sis_driver_scratch) != 0xb0);
9525 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9526 sis_product_identifier) != 0xb4);
9527 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9528 sis_firmware_status) != 0xbc);
9529 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9530 sis_ctrl_shutdown_reason_code) != 0xcc);
9531 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9532 sis_mailbox) != 0x1000);
9533 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9534 pqi_registers) != 0x4000);
9535
9536 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9537 iu_type) != 0x0);
9538 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9539 iu_length) != 0x2);
9540 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9541 response_queue_id) != 0x4);
9542 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
ae0c189d 9543 driver_flags) != 0x6);
9544 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
9545
9546 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9547 status) != 0x0);
9548 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9549 service_response) != 0x1);
9550 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9551 data_present) != 0x2);
9552 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9553 reserved) != 0x3);
9554 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9555 residual_count) != 0x4);
9556 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9557 data_length) != 0x8);
9558 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9559 reserved1) != 0xa);
9560 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9561 data) != 0xc);
9562 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
9563
9564 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9565 data_in_result) != 0x0);
9566 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9567 data_out_result) != 0x1);
9568 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9569 reserved) != 0x2);
9570 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9571 status) != 0x5);
9572 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9573 status_qualifier) != 0x6);
9574 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9575 sense_data_length) != 0x8);
9576 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9577 response_data_length) != 0xa);
9578 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9579 data_in_transferred) != 0xc);
9580 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9581 data_out_transferred) != 0x10);
9582 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9583 data) != 0x14);
9584 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
9585
9586 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9587 signature) != 0x0);
9588 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9589 function_and_status_code) != 0x8);
9590 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9591 max_admin_iq_elements) != 0x10);
9592 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9593 max_admin_oq_elements) != 0x11);
9594 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9595 admin_iq_element_length) != 0x12);
9596 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9597 admin_oq_element_length) != 0x13);
9598 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9599 max_reset_timeout) != 0x14);
9600 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9601 legacy_intx_status) != 0x18);
9602 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9603 legacy_intx_mask_set) != 0x1c);
9604 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9605 legacy_intx_mask_clear) != 0x20);
9606 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9607 device_status) != 0x40);
9608 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9609 admin_iq_pi_offset) != 0x48);
9610 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9611 admin_oq_ci_offset) != 0x50);
9612 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9613 admin_iq_element_array_addr) != 0x58);
9614 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9615 admin_oq_element_array_addr) != 0x60);
9616 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9617 admin_iq_ci_addr) != 0x68);
9618 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9619 admin_oq_pi_addr) != 0x70);
9620 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9621 admin_iq_num_elements) != 0x78);
9622 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9623 admin_oq_num_elements) != 0x79);
9624 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9625 admin_queue_int_msg_num) != 0x7a);
9626 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9627 device_error) != 0x80);
9628 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9629 error_details) != 0x88);
9630 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9631 device_reset) != 0x90);
9632 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9633 power_action) != 0x94);
9634 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
9635
9636 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9637 header.iu_type) != 0);
9638 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9639 header.iu_length) != 2);
9640 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
ae0c189d 9641 header.driver_flags) != 6);
9642 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9643 request_id) != 8);
9644 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9645 function_code) != 10);
9646 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9647 data.report_device_capability.buffer_length) != 44);
9648 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9649 data.report_device_capability.sg_descriptor) != 48);
9650 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9651 data.create_operational_iq.queue_id) != 12);
9652 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9653 data.create_operational_iq.element_array_addr) != 16);
9654 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9655 data.create_operational_iq.ci_addr) != 24);
9656 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9657 data.create_operational_iq.num_elements) != 32);
9658 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9659 data.create_operational_iq.element_length) != 34);
9660 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9661 data.create_operational_iq.queue_protocol) != 36);
9662 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9663 data.create_operational_oq.queue_id) != 12);
9664 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9665 data.create_operational_oq.element_array_addr) != 16);
9666 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9667 data.create_operational_oq.pi_addr) != 24);
9668 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9669 data.create_operational_oq.num_elements) != 32);
9670 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9671 data.create_operational_oq.element_length) != 34);
9672 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9673 data.create_operational_oq.queue_protocol) != 36);
9674 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9675 data.create_operational_oq.int_msg_num) != 40);
9676 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9677 data.create_operational_oq.coalescing_count) != 42);
9678 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9679 data.create_operational_oq.min_coalescing_time) != 44);
9680 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9681 data.create_operational_oq.max_coalescing_time) != 48);
9682 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
9683 data.delete_operational_queue.queue_id) != 12);
9684 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
c593642c 9685 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
6c223761 9686 data.create_operational_iq) != 64 - 11);
c593642c 9687 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
6c223761 9688 data.create_operational_oq) != 64 - 11);
c593642c 9689 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
9690 data.delete_operational_queue) != 64 - 11);
9691
9692 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9693 header.iu_type) != 0);
9694 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9695 header.iu_length) != 2);
9696 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
ae0c189d 9697 header.driver_flags) != 6);
9698 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9699 request_id) != 8);
9700 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9701 function_code) != 10);
9702 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9703 status) != 11);
9704 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9705 data.create_operational_iq.status_descriptor) != 12);
9706 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9707 data.create_operational_iq.iq_pi_offset) != 16);
9708 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9709 data.create_operational_oq.status_descriptor) != 12);
9710 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
9711 data.create_operational_oq.oq_ci_offset) != 16);
9712 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
9713
9714 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9715 header.iu_type) != 0);
9716 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9717 header.iu_length) != 2);
9718 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9719 header.response_queue_id) != 4);
9720 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
ae0c189d 9721 header.driver_flags) != 6);
9722 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9723 request_id) != 8);
9724 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9725 nexus_id) != 10);
9726 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9727 buffer_length) != 12);
9728 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9729 lun_number) != 16);
9730 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9731 protocol_specific) != 24);
9732 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9733 error_index) != 27);
9734 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9735 cdb) != 32);
21432010 9736 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9737 timeout) != 60);
9738 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
9739 sg_descriptors) != 64);
9740 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
9741 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
9742
9743 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9744 header.iu_type) != 0);
9745 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9746 header.iu_length) != 2);
9747 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9748 header.response_queue_id) != 4);
9749 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
ae0c189d 9750 header.driver_flags) != 6);
9751 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9752 request_id) != 8);
9753 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9754 nexus_id) != 12);
9755 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9756 buffer_length) != 16);
9757 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9758 data_encryption_key_index) != 22);
9759 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9760 encrypt_tweak_lower) != 24);
9761 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9762 encrypt_tweak_upper) != 28);
9763 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9764 cdb) != 32);
9765 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9766 error_index) != 48);
9767 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9768 num_sg_descriptors) != 50);
9769 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9770 cdb_length) != 51);
9771 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9772 lun_number) != 52);
9773 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
9774 sg_descriptors) != 64);
9775 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
9776 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
9777
9778 BUILD_BUG_ON(offsetof(struct pqi_io_response,
9779 header.iu_type) != 0);
9780 BUILD_BUG_ON(offsetof(struct pqi_io_response,
9781 header.iu_length) != 2);
9782 BUILD_BUG_ON(offsetof(struct pqi_io_response,
9783 request_id) != 8);
9784 BUILD_BUG_ON(offsetof(struct pqi_io_response,
9785 error_index) != 10);
9786
9787 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9788 header.iu_type) != 0);
9789 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9790 header.iu_length) != 2);
9791 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9792 header.response_queue_id) != 4);
9793 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9794 request_id) != 8);
9795 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9796 data.report_event_configuration.buffer_length) != 12);
9797 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9798 data.report_event_configuration.sg_descriptors) != 16);
9799 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9800 data.set_event_configuration.global_event_oq_id) != 10);
9801 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9802 data.set_event_configuration.buffer_length) != 12);
9803 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
9804 data.set_event_configuration.sg_descriptors) != 16);
9805
9806 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
9807 max_inbound_iu_length) != 6);
9808 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
9809 max_outbound_iu_length) != 14);
9810 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
9811
9812 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9813 data_length) != 0);
9814 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9815 iq_arbitration_priority_support_bitmask) != 8);
9816 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9817 maximum_aw_a) != 9);
9818 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9819 maximum_aw_b) != 10);
9820 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9821 maximum_aw_c) != 11);
9822 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9823 max_inbound_queues) != 16);
9824 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9825 max_elements_per_iq) != 18);
9826 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9827 max_iq_element_length) != 24);
9828 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9829 min_iq_element_length) != 26);
9830 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9831 max_outbound_queues) != 30);
9832 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9833 max_elements_per_oq) != 32);
9834 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9835 intr_coalescing_time_granularity) != 34);
9836 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9837 max_oq_element_length) != 36);
9838 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9839 min_oq_element_length) != 38);
9840 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
9841 iu_layer_descriptors) != 64);
9842 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
9843
9844 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
9845 event_type) != 0);
9846 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
9847 oq_id) != 2);
9848 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
9849
9850 BUILD_BUG_ON(offsetof(struct pqi_event_config,
9851 num_event_descriptors) != 2);
9852 BUILD_BUG_ON(offsetof(struct pqi_event_config,
9853 descriptors) != 4);
9854
9855 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
9856 ARRAY_SIZE(pqi_supported_event_types));
9857
9858 BUILD_BUG_ON(offsetof(struct pqi_event_response,
9859 header.iu_type) != 0);
9860 BUILD_BUG_ON(offsetof(struct pqi_event_response,
9861 header.iu_length) != 2);
9862 BUILD_BUG_ON(offsetof(struct pqi_event_response,
9863 event_type) != 8);
9864 BUILD_BUG_ON(offsetof(struct pqi_event_response,
9865 event_id) != 10);
9866 BUILD_BUG_ON(offsetof(struct pqi_event_response,
9867 additional_event_id) != 12);
9868 BUILD_BUG_ON(offsetof(struct pqi_event_response,
9869 data) != 16);
9870 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
9871
9872 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
9873 header.iu_type) != 0);
9874 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
9875 header.iu_length) != 2);
9876 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
9877 event_type) != 8);
9878 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
9879 event_id) != 10);
9880 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
9881 additional_event_id) != 12);
9882 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
9883
9884 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9885 header.iu_type) != 0);
9886 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9887 header.iu_length) != 2);
9888 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9889 request_id) != 8);
9890 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9891 nexus_id) != 10);
9892 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9893 timeout) != 14);
9894 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9895 lun_number) != 16);
9896 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9897 protocol_specific) != 24);
9898 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9899 outbound_queue_id_to_manage) != 26);
9900 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9901 request_id_to_manage) != 28);
9902 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
9903 task_management_function) != 30);
9904 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
9905
9906 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
9907 header.iu_type) != 0);
9908 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
9909 header.iu_length) != 2);
9910 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
9911 request_id) != 8);
9912 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
9913 nexus_id) != 10);
9914 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
9915 additional_response_info) != 12);
9916 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
9917 response_code) != 15);
9918 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
9919
9920 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9921 configured_logical_drive_count) != 0);
9922 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9923 configuration_signature) != 1);
9924 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
598bef8d 9925 firmware_version_short) != 5);
9926 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9927 extended_logical_unit_count) != 154);
9928 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9929 firmware_build_number) != 190);
9930 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9931 vendor_id) != 200);
9932 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9933 product_id) != 208);
9934 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9935 extra_controller_flags) != 286);
9936 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9937 controller_mode) != 292);
9938 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9939 spare_part_number) != 293);
9940 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
9941 firmware_version_long) != 325);
6c223761 9942
9943 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9944 phys_bay_in_box) != 115);
9945 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9946 device_type) != 120);
9947 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9948 redundant_path_present_map) != 1736);
9949 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9950 active_path_number) != 1738);
9951 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9952 alternate_paths_phys_connector) != 1739);
9953 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9954 alternate_paths_phys_box_on_port) != 1755);
9955 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
9956 current_queue_depth_limit) != 1796);
9957 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
9958
9959 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
9960 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
9961 page_code) != 0);
9962 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
9963 subpage_code) != 1);
9964 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
9965 buffer_length) != 2);
9966
9967 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
9968 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
9969 page_code) != 0);
9970 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
9971 subpage_code) != 1);
9972 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
9973 page_length) != 2);
9974
9975 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
9976 != 18);
9977 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9978 header) != 0);
9979 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9980 firmware_read_support) != 4);
9981 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9982 driver_read_support) != 5);
9983 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9984 firmware_write_support) != 6);
9985 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9986 driver_write_support) != 7);
9987 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9988 max_transfer_encrypted_sas_sata) != 8);
9989 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9990 max_transfer_encrypted_nvme) != 10);
9991 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9992 max_write_raid_5_6) != 12);
9993 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9994 max_write_raid_1_10_2drive) != 14);
9995 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
9996 max_write_raid_1_10_3drive) != 16);
9997
9998 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
9999 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
10000 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
10001 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10002 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
10003 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10004 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
10005 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
10006 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10007 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
10008 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
10009 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10010
10011 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
10012 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
10013 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
6c223761 10014}