scsi: smartpqi: Fix DMA direction for RAID requests
[linux-2.6-block.git] / drivers / scsi / smartpqi / smartpqi_init.c
CommitLineData
2cc37b15 1// SPDX-License-Identifier: GPL-2.0
6c223761 2/*
889653ec
KB
3 * driver for Microchip PQI-based storage controllers
4 * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries
2f4c4b92 5 * Copyright (c) 2016-2018 Microsemi Corporation
6c223761
KB
6 * Copyright (c) 2016 PMC-Sierra, Inc.
7 *
2f4c4b92 8 * Questions/Comments/Bugfixes to storagedev@microchip.com
6c223761
KB
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/pci.h>
15#include <linux/delay.h>
16#include <linux/interrupt.h>
17#include <linux/sched.h>
18#include <linux/rtc.h>
19#include <linux/bcd.h>
3c50976f 20#include <linux/reboot.h>
6c223761 21#include <linux/cciss_ioctl.h>
52198226 22#include <linux/blk-mq-pci.h>
6c223761
KB
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_device.h>
26#include <scsi/scsi_eh.h>
27#include <scsi/scsi_transport_sas.h>
28#include <asm/unaligned.h>
29#include "smartpqi.h"
30#include "smartpqi_sis.h"
31
32#if !defined(BUILD_TIMESTAMP)
33#define BUILD_TIMESTAMP
34#endif
35
62ed6622 36#define DRIVER_VERSION "2.1.14-035"
d56030f8
DB
37#define DRIVER_MAJOR 2
38#define DRIVER_MINOR 1
62ed6622
DB
39#define DRIVER_RELEASE 14
40#define DRIVER_REVISION 35
6c223761 41
6aa26b5a 42#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
2d154f5f 43 DRIVER_VERSION BUILD_TIMESTAMP ")"
6c223761
KB
44#define DRIVER_NAME_SHORT "smartpqi"
45
e1d213bd
KB
46#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))
47
2790cd4d
KB
48#define PQI_POST_RESET_DELAY_SECS 5
49#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10
50
6aa26b5a
DB
51MODULE_AUTHOR("Microchip");
52MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
6c223761 53 DRIVER_VERSION);
6c223761
KB
54MODULE_VERSION(DRIVER_VERSION);
55MODULE_LICENSE("GPL");
56
c1ea387d
BVA
57struct pqi_cmd_priv {
58 int this_residual;
59};
60
61static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
62{
63 return scsi_cmd_priv(cmd);
64}
65
5e693586 66static void pqi_verify_structures(void);
5d1f03e6
MB
67static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
68 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
5f310425 69static void pqi_ctrl_offline_worker(struct work_struct *work);
6c223761
KB
70static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
71static void pqi_scan_start(struct Scsi_Host *shost);
72static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
73 struct pqi_queue_group *queue_group, enum pqi_io_path path,
74 struct pqi_io_request *io_request);
75static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
76 struct pqi_iu_header *request, unsigned int flags,
ae0c189d 77 struct pqi_raid_error_info *error_info);
6c223761
KB
78static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
79 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
80 unsigned int cdb_length, struct pqi_queue_group *queue_group,
2a47834d 81 struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
7a012c23
DB
82static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
83 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
84 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
85 struct pqi_scsi_dev_raid_map_data *rmd);
6702d2c4
DB
86static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
87 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
88 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
89 struct pqi_scsi_dev_raid_map_data *rmd);
4fd22c13
MR
90static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
91static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
2790cd4d
KB
92static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
93static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
4fd22c13
MR
94static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
95static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
1e46731e 96static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
904f2bfd 97 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
331f7e99 98static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
6c223761
KB
99
100/* for flags argument to pqi_submit_raid_request_synchronous() */
101#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
102
103static struct scsi_transport_template *pqi_sas_transport_template;
104
105static atomic_t pqi_controller_count = ATOMIC_INIT(0);
106
3c50976f
KB
107enum pqi_lockup_action {
108 NONE,
109 REBOOT,
110 PANIC
111};
112
113static enum pqi_lockup_action pqi_lockup_action = NONE;
114
115static struct {
116 enum pqi_lockup_action action;
117 char *name;
118} pqi_lockup_actions[] = {
119 {
120 .action = NONE,
121 .name = "none",
122 },
123 {
124 .action = REBOOT,
125 .name = "reboot",
126 },
127 {
128 .action = PANIC,
129 .name = "panic",
130 },
131};
132
6a50d6ad
KB
133static unsigned int pqi_supported_event_types[] = {
134 PQI_EVENT_TYPE_HOTPLUG,
135 PQI_EVENT_TYPE_HARDWARE,
136 PQI_EVENT_TYPE_PHYSICAL_DEVICE,
137 PQI_EVENT_TYPE_LOGICAL_DEVICE,
4fd22c13 138 PQI_EVENT_TYPE_OFA,
6a50d6ad
KB
139 PQI_EVENT_TYPE_AIO_STATE_CHANGE,
140 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
141};
142
6c223761
KB
143static int pqi_disable_device_id_wildcards;
144module_param_named(disable_device_id_wildcards,
cbe0c7b1 145 pqi_disable_device_id_wildcards, int, 0644);
6c223761
KB
146MODULE_PARM_DESC(disable_device_id_wildcards,
147 "Disable device ID wildcards.");
148
5a259e32
KB
149static int pqi_disable_heartbeat;
150module_param_named(disable_heartbeat,
151 pqi_disable_heartbeat, int, 0644);
152MODULE_PARM_DESC(disable_heartbeat,
153 "Disable heartbeat.");
154
155static int pqi_disable_ctrl_shutdown;
156module_param_named(disable_ctrl_shutdown,
157 pqi_disable_ctrl_shutdown, int, 0644);
158MODULE_PARM_DESC(disable_ctrl_shutdown,
159 "Disable controller shutdown when controller locked up.");
160
3c50976f
KB
161static char *pqi_lockup_action_param;
162module_param_named(lockup_action,
163 pqi_lockup_action_param, charp, 0644);
164MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
165 "\t\tSupported: none, reboot, panic\n"
166 "\t\tDefault: none");
167
5e6a9760
GW
168static int pqi_expose_ld_first;
169module_param_named(expose_ld_first,
170 pqi_expose_ld_first, int, 0644);
583891c9 171MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");
5e6a9760 172
522bc026
DC
173static int pqi_hide_vsep;
174module_param_named(hide_vsep,
175 pqi_hide_vsep, int, 0644);
583891c9 176MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
522bc026 177
6c223761
KB
178static char *raid_levels[] = {
179 "RAID-0",
180 "RAID-4",
181 "RAID-1(1+0)",
182 "RAID-5",
183 "RAID-5+1",
7a012c23
DB
184 "RAID-6",
185 "RAID-1(Triple)",
6c223761
KB
186};
187
188static char *pqi_raid_level_to_string(u8 raid_level)
189{
190 if (raid_level < ARRAY_SIZE(raid_levels))
191 return raid_levels[raid_level];
192
a9f93392 193 return "RAID UNKNOWN";
6c223761
KB
194}
195
196#define SA_RAID_0 0
197#define SA_RAID_4 1
198#define SA_RAID_1 2 /* also used for RAID 10 */
199#define SA_RAID_5 3 /* also used for RAID 50 */
200#define SA_RAID_51 4
201#define SA_RAID_6 5 /* also used for RAID 60 */
7a012c23
DB
202#define SA_RAID_TRIPLE 6 /* also used for RAID 1+0 Triple */
203#define SA_RAID_MAX SA_RAID_TRIPLE
6c223761
KB
204#define SA_RAID_UNKNOWN 0xff
205
206static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
207{
7561a7e4 208 pqi_prep_for_scsi_done(scmd);
0ca19080 209 scsi_done(scmd);
6c223761
KB
210}
211
b6e2ef67 212static inline void pqi_disable_write_same(struct scsi_device *sdev)
6c223761 213{
b6e2ef67 214 sdev->no_write_same = 1;
6c223761
KB
215}
216
6c223761 217static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
6c223761 218{
6c223761 219 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
6c223761
KB
220}
221
222static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
223{
224 return !device->is_physical_device;
225}
226
bd10cf0b
KB
227static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
228{
229 return scsi3addr[2] != 0;
230}
231
694c5d5b
KB
232static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
233{
234 return !ctrl_info->controller_online;
235}
236
6c223761
KB
237static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
238{
239 if (ctrl_info->controller_online)
240 if (!sis_is_firmware_running(ctrl_info))
5d1f03e6 241 pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
6c223761
KB
242}
243
244static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
245{
246 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
247}
248
9ee5d6e9
MR
249#define PQI_DRIVER_SCRATCH_PQI_MODE 0x1
250#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED 0x2
251
583891c9 252static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
ff6abb73 253{
9ee5d6e9 254 return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
ff6abb73
KB
255}
256
257static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
258 enum pqi_ctrl_mode mode)
259{
9ee5d6e9
MR
260 u32 driver_scratch;
261
262 driver_scratch = sis_read_driver_scratch(ctrl_info);
263
264 if (mode == PQI_MODE)
265 driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
266 else
267 driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;
268
269 sis_write_driver_scratch(ctrl_info, driver_scratch);
270}
271
272static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
273{
274 return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
275}
276
277static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
278{
279 u32 driver_scratch;
280
281 driver_scratch = sis_read_driver_scratch(ctrl_info);
282
283 if (is_supported)
284 driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
285 else
286 driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
287
288 sis_write_driver_scratch(ctrl_info, driver_scratch);
ff6abb73
KB
289}
290
9fa82023
KB
291static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
292{
293 ctrl_info->scan_blocked = true;
294 mutex_lock(&ctrl_info->scan_mutex);
295}
296
297static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
298{
299 ctrl_info->scan_blocked = false;
300 mutex_unlock(&ctrl_info->scan_mutex);
301}
302
303static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
304{
305 return ctrl_info->scan_blocked;
306}
307
694c5d5b
KB
308static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
309{
37f33181 310 mutex_lock(&ctrl_info->lun_reset_mutex);
694c5d5b
KB
311}
312
37f33181 313static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
694c5d5b 314{
37f33181 315 mutex_unlock(&ctrl_info->lun_reset_mutex);
694c5d5b
KB
316}
317
9fa82023
KB
318static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
319{
320 struct Scsi_Host *shost;
321 unsigned int num_loops;
322 int msecs_sleep;
323
324 shost = ctrl_info->scsi_host;
325
326 scsi_block_requests(shost);
327
328 num_loops = 0;
329 msecs_sleep = 20;
330 while (scsi_host_busy(shost)) {
331 num_loops++;
332 if (num_loops == 10)
333 msecs_sleep = 500;
334 msleep(msecs_sleep);
335 }
336}
337
338static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
339{
340 scsi_unblock_requests(ctrl_info->scsi_host);
341}
342
343static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
344{
345 atomic_inc(&ctrl_info->num_busy_threads);
694c5d5b
KB
346}
347
9fa82023 348static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
694c5d5b 349{
9fa82023 350 atomic_dec(&ctrl_info->num_busy_threads);
694c5d5b
KB
351}
352
353static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
354{
355 return ctrl_info->block_requests;
356}
357
7561a7e4
KB
358static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
359{
360 ctrl_info->block_requests = true;
7561a7e4
KB
361}
362
363static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
364{
365 ctrl_info->block_requests = false;
366 wake_up_all(&ctrl_info->block_requests_wait);
7561a7e4
KB
367}
368
ae0c189d 369static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
7561a7e4 370{
7561a7e4 371 if (!pqi_ctrl_blocked(ctrl_info))
ae0c189d 372 return;
7561a7e4
KB
373
374 atomic_inc(&ctrl_info->num_blocked_threads);
ae0c189d
KB
375 wait_event(ctrl_info->block_requests_wait,
376 !pqi_ctrl_blocked(ctrl_info));
7561a7e4 377 atomic_dec(&ctrl_info->num_blocked_threads);
7561a7e4
KB
378}
379
18ff5f08
KB
380#define PQI_QUIESCE_WARNING_TIMEOUT_SECS 10
381
7561a7e4
KB
382static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
383{
18ff5f08
KB
384 unsigned long start_jiffies;
385 unsigned long warning_timeout;
386 bool displayed_warning;
387
388 displayed_warning = false;
389 start_jiffies = jiffies;
42dc0426 390 warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
18ff5f08 391
7561a7e4 392 while (atomic_read(&ctrl_info->num_busy_threads) >
18ff5f08
KB
393 atomic_read(&ctrl_info->num_blocked_threads)) {
394 if (time_after(jiffies, warning_timeout)) {
395 dev_warn(&ctrl_info->pci_dev->dev,
396 "waiting %u seconds for driver activity to quiesce\n",
397 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
398 displayed_warning = true;
42dc0426 399 warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
18ff5f08 400 }
7561a7e4 401 usleep_range(1000, 2000);
18ff5f08
KB
402 }
403
404 if (displayed_warning)
405 dev_warn(&ctrl_info->pci_dev->dev,
406 "driver activity quiesced after waiting for %u seconds\n",
407 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
7561a7e4
KB
408}
409
03b288cf
KB
410static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
411{
412 return device->device_offline;
413}
414
2790cd4d 415static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
7561a7e4 416{
2790cd4d 417 mutex_lock(&ctrl_info->ofa_mutex);
7561a7e4 418}
6c223761 419
2790cd4d 420static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
4fd22c13 421{
2790cd4d 422 mutex_unlock(&ctrl_info->ofa_mutex);
4fd22c13
MR
423}
424
2790cd4d 425static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
4fd22c13 426{
2790cd4d
KB
427 mutex_lock(&ctrl_info->ofa_mutex);
428 mutex_unlock(&ctrl_info->ofa_mutex);
4fd22c13
MR
429}
430
2790cd4d 431static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
4fd22c13 432{
2790cd4d 433 return mutex_is_locked(&ctrl_info->ofa_mutex);
4fd22c13
MR
434}
435
1e46731e
MR
436static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
437{
438 device->in_remove = true;
439}
440
1bdf6e93 441static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
1e46731e 442{
1bdf6e93 443 return device->in_remove;
1e46731e
MR
444}
445
2790cd4d 446static inline int pqi_event_type_to_event_index(unsigned int event_type)
0530736e 447{
2790cd4d
KB
448 int index;
449
450 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
451 if (event_type == pqi_supported_event_types[index])
452 return index;
453
454 return -1;
0530736e
KB
455}
456
2790cd4d 457static inline bool pqi_is_supported_event(unsigned int event_type)
0530736e 458{
2790cd4d 459 return pqi_event_type_to_event_index(event_type) != -1;
0530736e
KB
460}
461
583891c9
KB
462static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
463 unsigned long delay)
5f310425
KB
464{
465 if (pqi_ctrl_offline(ctrl_info))
466 return;
467
468 schedule_delayed_work(&ctrl_info->rescan_work, delay);
469}
470
6c223761
KB
471static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
472{
5f310425
KB
473 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
474}
475
42dc0426 476#define PQI_RESCAN_WORK_DELAY (10 * HZ)
5f310425 477
583891c9 478static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
5f310425
KB
479{
480 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
6c223761
KB
481}
482
061ef06a
KB
483static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
484{
485 cancel_delayed_work_sync(&ctrl_info->rescan_work);
486}
487
98f87667
KB
488static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
489{
490 if (!ctrl_info->heartbeat_counter)
491 return 0;
492
493 return readl(ctrl_info->heartbeat_counter);
494}
495
4fd22c13
MR
496static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
497{
4fd22c13
MR
498 return readb(ctrl_info->soft_reset_status);
499}
500
4ccc354b 501static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
4fd22c13
MR
502{
503 u8 status;
504
4fd22c13 505 status = pqi_read_soft_reset_status(ctrl_info);
4ccc354b 506 status &= ~PQI_SOFT_RESET_ABORT;
4fd22c13
MR
507 writeb(status, ctrl_info->soft_reset_status);
508}
509
6c223761
KB
510static int pqi_map_single(struct pci_dev *pci_dev,
511 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
6917a9cc 512 size_t buffer_length, enum dma_data_direction data_direction)
6c223761
KB
513{
514 dma_addr_t bus_address;
515
6917a9cc 516 if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
6c223761
KB
517 return 0;
518
6917a9cc 519 bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
6c223761 520 data_direction);
6917a9cc 521 if (dma_mapping_error(&pci_dev->dev, bus_address))
6c223761
KB
522 return -ENOMEM;
523
524 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
525 put_unaligned_le32(buffer_length, &sg_descriptor->length);
526 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
527
528 return 0;
529}
530
531static void pqi_pci_unmap(struct pci_dev *pci_dev,
532 struct pqi_sg_descriptor *descriptors, int num_descriptors,
6917a9cc 533 enum dma_data_direction data_direction)
6c223761
KB
534{
535 int i;
536
6917a9cc 537 if (data_direction == DMA_NONE)
6c223761
KB
538 return;
539
540 for (i = 0; i < num_descriptors; i++)
6917a9cc 541 dma_unmap_single(&pci_dev->dev,
6c223761
KB
542 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
543 get_unaligned_le32(&descriptors[i].length),
544 data_direction);
545}
546
547static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
548 struct pqi_raid_path_request *request, u8 cmd,
549 u8 *scsi3addr, void *buffer, size_t buffer_length,
6917a9cc 550 u16 vpd_page, enum dma_data_direction *dir)
6c223761
KB
551{
552 u8 *cdb;
171c2865 553 size_t cdb_length = buffer_length;
6c223761
KB
554
555 memset(request, 0, sizeof(*request));
556
557 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
558 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
559 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
560 &request->header.iu_length);
561 put_unaligned_le32(buffer_length, &request->buffer_length);
562 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
563 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
564 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
565
566 cdb = request->cdb;
567
568 switch (cmd) {
be76f906
DB
569 case TEST_UNIT_READY:
570 request->data_direction = SOP_READ_FLAG;
571 cdb[0] = TEST_UNIT_READY;
572 break;
6c223761
KB
573 case INQUIRY:
574 request->data_direction = SOP_READ_FLAG;
575 cdb[0] = INQUIRY;
576 if (vpd_page & VPD_PAGE) {
577 cdb[1] = 0x1;
578 cdb[2] = (u8)vpd_page;
579 }
171c2865 580 cdb[4] = (u8)cdb_length;
6c223761
KB
581 break;
582 case CISS_REPORT_LOG:
583 case CISS_REPORT_PHYS:
584 request->data_direction = SOP_READ_FLAG;
585 cdb[0] = cmd;
28ca6d87
MM
586 if (cmd == CISS_REPORT_PHYS) {
587 if (ctrl_info->rpl_extended_format_4_5_supported)
588 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
589 else
590 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
591 } else {
f6cc2a77 592 cdb[1] = ctrl_info->ciss_report_log_flags;
28ca6d87 593 }
171c2865 594 put_unaligned_be32(cdb_length, &cdb[6]);
6c223761
KB
595 break;
596 case CISS_GET_RAID_MAP:
597 request->data_direction = SOP_READ_FLAG;
598 cdb[0] = CISS_READ;
599 cdb[1] = CISS_GET_RAID_MAP;
171c2865 600 put_unaligned_be32(cdb_length, &cdb[6]);
6c223761 601 break;
58322fe0 602 case SA_FLUSH_CACHE:
ae0c189d 603 request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
6c223761
KB
604 request->data_direction = SOP_WRITE_FLAG;
605 cdb[0] = BMIC_WRITE;
58322fe0 606 cdb[6] = BMIC_FLUSH_CACHE;
171c2865 607 put_unaligned_be16(cdb_length, &cdb[7]);
6c223761 608 break;
171c2865
DC
609 case BMIC_SENSE_DIAG_OPTIONS:
610 cdb_length = 0;
df561f66 611 fallthrough;
6c223761
KB
612 case BMIC_IDENTIFY_CONTROLLER:
613 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6d90615f 614 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
f6cc2a77 615 case BMIC_SENSE_FEATURE:
6c223761
KB
616 request->data_direction = SOP_READ_FLAG;
617 cdb[0] = BMIC_READ;
618 cdb[6] = cmd;
171c2865 619 put_unaligned_be16(cdb_length, &cdb[7]);
6c223761 620 break;
171c2865
DC
621 case BMIC_SET_DIAG_OPTIONS:
622 cdb_length = 0;
df561f66 623 fallthrough;
6c223761
KB
624 case BMIC_WRITE_HOST_WELLNESS:
625 request->data_direction = SOP_WRITE_FLAG;
626 cdb[0] = BMIC_WRITE;
627 cdb[6] = cmd;
171c2865 628 put_unaligned_be16(cdb_length, &cdb[7]);
6c223761 629 break;
3d46a59a
DB
630 case BMIC_CSMI_PASSTHRU:
631 request->data_direction = SOP_BIDIRECTIONAL;
632 cdb[0] = BMIC_WRITE;
633 cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
634 cdb[6] = cmd;
635 put_unaligned_be16(cdb_length, &cdb[7]);
6c223761
KB
636 break;
637 default:
9e68cccc 638 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
6c223761
KB
639 break;
640 }
641
642 switch (request->data_direction) {
643 case SOP_READ_FLAG:
6917a9cc 644 *dir = DMA_FROM_DEVICE;
6c223761
KB
645 break;
646 case SOP_WRITE_FLAG:
6917a9cc 647 *dir = DMA_TO_DEVICE;
6c223761
KB
648 break;
649 case SOP_NO_DIRECTION_FLAG:
6917a9cc 650 *dir = DMA_NONE;
6c223761
KB
651 break;
652 default:
6917a9cc 653 *dir = DMA_BIDIRECTIONAL;
6c223761
KB
654 break;
655 }
656
6c223761 657 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
6917a9cc 658 buffer, buffer_length, *dir);
6c223761
KB
659}
660
376fb880
KB
661static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
662{
663 io_request->scmd = NULL;
664 io_request->status = 0;
665 io_request->error_info = NULL;
666 io_request->raid_bypass = false;
667}
668
6c223761
KB
669static struct pqi_io_request *pqi_alloc_io_request(
670 struct pqi_ctrl_info *ctrl_info)
671{
672 struct pqi_io_request *io_request;
673 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
674
675 while (1) {
676 io_request = &ctrl_info->io_request_pool[i];
677 if (atomic_inc_return(&io_request->refcount) == 1)
678 break;
679 atomic_dec(&io_request->refcount);
680 i = (i + 1) % ctrl_info->max_io_slots;
681 }
682
683 /* benignly racy */
684 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
685
376fb880 686 pqi_reinit_io_request(io_request);
6c223761
KB
687
688 return io_request;
689}
690
691static void pqi_free_io_request(struct pqi_io_request *io_request)
692{
693 atomic_dec(&io_request->refcount);
694}
695
02133b68 696static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
694c5d5b 697 u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
ae0c189d 698 struct pqi_raid_error_info *error_info)
6c223761
KB
699{
700 int rc;
6c223761 701 struct pqi_raid_path_request request;
694c5d5b 702 enum dma_data_direction dir;
6c223761 703
583891c9
KB
704 rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
705 buffer, buffer_length, vpd_page, &dir);
6c223761
KB
706 if (rc)
707 return rc;
708
ae0c189d 709 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);
6c223761 710
6917a9cc 711 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
694c5d5b 712
6c223761
KB
713 return rc;
714}
715
694c5d5b 716/* helper functions for pqi_send_scsi_raid_request */
02133b68
DC
717
718static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
694c5d5b 719 u8 cmd, void *buffer, size_t buffer_length)
6c223761 720{
02133b68 721 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
ae0c189d 722 buffer, buffer_length, 0, NULL);
02133b68 723}
6c223761 724
02133b68 725static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
694c5d5b
KB
726 u8 cmd, void *buffer, size_t buffer_length,
727 struct pqi_raid_error_info *error_info)
02133b68
DC
728{
729 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
ae0c189d 730 buffer, buffer_length, 0, error_info);
02133b68 731}
6c223761 732
02133b68 733static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
694c5d5b 734 struct bmic_identify_controller *buffer)
02133b68
DC
735{
736 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
694c5d5b 737 buffer, sizeof(*buffer));
02133b68
DC
738}
739
6d90615f 740static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
694c5d5b 741 struct bmic_sense_subsystem_info *sense_info)
6d90615f
MB
742{
743 return pqi_send_ctrl_raid_request(ctrl_info,
694c5d5b
KB
744 BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
745 sizeof(*sense_info));
6d90615f
MB
746}
747
02133b68 748static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
6c223761 749 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
02133b68
DC
750{
751 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
ae0c189d 752 buffer, buffer_length, vpd_page, NULL);
6c223761
KB
753}
754
755static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
756 struct pqi_scsi_dev *device,
694c5d5b 757 struct bmic_identify_physical_device *buffer, size_t buffer_length)
6c223761
KB
758{
759 int rc;
6917a9cc 760 enum dma_data_direction dir;
6c223761
KB
761 u16 bmic_device_index;
762 struct pqi_raid_path_request request;
763
764 rc = pqi_build_raid_path_request(ctrl_info, &request,
765 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
6917a9cc 766 buffer_length, 0, &dir);
6c223761
KB
767 if (rc)
768 return rc;
769
770 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
771 request.cdb[2] = (u8)bmic_device_index;
772 request.cdb[9] = (u8)(bmic_device_index >> 8);
773
ae0c189d 774 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
6c223761 775
6917a9cc 776 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
694c5d5b 777
6c223761
KB
778 return rc;
779}
780
f6cc2a77
KB
781static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
782{
783 u32 bytes;
784
785 bytes = get_unaligned_le16(limit);
786 if (bytes == 0)
787 bytes = ~0;
788 else
789 bytes *= 1024;
790
791 return bytes;
792}
793
794#pragma pack(1)
795
796struct bmic_sense_feature_buffer {
797 struct bmic_sense_feature_buffer_header header;
798 struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
799};
800
801#pragma pack()
802
803#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \
804 offsetofend(struct bmic_sense_feature_buffer, \
805 aio_subpage.max_write_raid_1_10_3drive)
806
807#define MINIMUM_AIO_SUBPAGE_LENGTH \
808 (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
809 max_write_raid_1_10_3drive) - \
810 sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
811
812static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
813{
814 int rc;
815 enum dma_data_direction dir;
816 struct pqi_raid_path_request request;
817 struct bmic_sense_feature_buffer *buffer;
818
819 buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
820 if (!buffer)
821 return -ENOMEM;
822
583891c9
KB
823 rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
824 buffer, sizeof(*buffer), 0, &dir);
f6cc2a77
KB
825 if (rc)
826 goto error;
827
828 request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
829 request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
830
ae0c189d 831 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
6c223761 832
6917a9cc 833 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
694c5d5b 834
f6cc2a77
KB
835 if (rc)
836 goto error;
837
838 if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
839 buffer->header.subpage_code !=
840 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
841 get_unaligned_le16(&buffer->header.buffer_length) <
842 MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
843 buffer->aio_subpage.header.page_code !=
844 BMIC_SENSE_FEATURE_IO_PAGE ||
845 buffer->aio_subpage.header.subpage_code !=
846 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
847 get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
848 MINIMUM_AIO_SUBPAGE_LENGTH) {
849 goto error;
850 }
851
852 ctrl_info->max_transfer_encrypted_sas_sata =
853 pqi_aio_limit_to_bytes(
854 &buffer->aio_subpage.max_transfer_encrypted_sas_sata);
855
856 ctrl_info->max_transfer_encrypted_nvme =
857 pqi_aio_limit_to_bytes(
858 &buffer->aio_subpage.max_transfer_encrypted_nvme);
859
860 ctrl_info->max_write_raid_5_6 =
861 pqi_aio_limit_to_bytes(
862 &buffer->aio_subpage.max_write_raid_5_6);
863
864 ctrl_info->max_write_raid_1_10_2drive =
865 pqi_aio_limit_to_bytes(
866 &buffer->aio_subpage.max_write_raid_1_10_2drive);
867
868 ctrl_info->max_write_raid_1_10_3drive =
869 pqi_aio_limit_to_bytes(
870 &buffer->aio_subpage.max_write_raid_1_10_3drive);
871
872error:
873 kfree(buffer);
874
6c223761
KB
875 return rc;
876}
877
58322fe0
KB
878static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
879 enum bmic_flush_cache_shutdown_event shutdown_event)
6c223761
KB
880{
881 int rc;
58322fe0 882 struct bmic_flush_cache *flush_cache;
6c223761 883
58322fe0
KB
884 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
885 if (!flush_cache)
6c223761
KB
886 return -ENOMEM;
887
58322fe0
KB
888 flush_cache->shutdown_event = shutdown_event;
889
02133b68
DC
890 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
891 sizeof(*flush_cache));
6c223761 892
58322fe0 893 kfree(flush_cache);
6c223761
KB
894
895 return rc;
896}
897
3d46a59a
DB
898int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
899 struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
900 struct pqi_raid_error_info *error_info)
901{
902 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
903 buffer, buffer_length, error_info);
904}
171c2865 905
694c5d5b 906#define PQI_FETCH_PTRAID_DATA (1 << 31)
171c2865
DC
907
908static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
6c223761
KB
909{
910 int rc;
171c2865 911 struct bmic_diag_options *diag;
6c223761 912
171c2865
DC
913 diag = kzalloc(sizeof(*diag), GFP_KERNEL);
914 if (!diag)
915 return -ENOMEM;
916
02133b68 917 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
694c5d5b 918 diag, sizeof(*diag));
6c223761 919 if (rc)
171c2865 920 goto out;
6c223761 921
171c2865
DC
922 diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
923
694c5d5b
KB
924 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
925 sizeof(*diag));
926
171c2865
DC
927out:
928 kfree(diag);
6c223761 929
6c223761
KB
930 return rc;
931}
932
02133b68 933static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
934 void *buffer, size_t buffer_length)
935{
02133b68 936 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
694c5d5b 937 buffer, buffer_length);
6c223761
KB
938}
939
940#pragma pack(1)
941
942struct bmic_host_wellness_driver_version {
943 u8 start_tag[4];
944 u8 driver_version_tag[2];
945 __le16 driver_version_length;
946 char driver_version[32];
b2346b50 947 u8 dont_write_tag[2];
6c223761
KB
948 u8 end_tag[2];
949};
950
951#pragma pack()
952
953static int pqi_write_driver_version_to_host_wellness(
954 struct pqi_ctrl_info *ctrl_info)
955{
956 int rc;
957 struct bmic_host_wellness_driver_version *buffer;
958 size_t buffer_length;
959
960 buffer_length = sizeof(*buffer);
961
962 buffer = kmalloc(buffer_length, GFP_KERNEL);
963 if (!buffer)
964 return -ENOMEM;
965
966 buffer->start_tag[0] = '<';
967 buffer->start_tag[1] = 'H';
968 buffer->start_tag[2] = 'W';
969 buffer->start_tag[3] = '>';
970 buffer->driver_version_tag[0] = 'D';
971 buffer->driver_version_tag[1] = 'V';
972 put_unaligned_le16(sizeof(buffer->driver_version),
973 &buffer->driver_version_length);
061ef06a 974 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
6c223761
KB
975 sizeof(buffer->driver_version) - 1);
976 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
b2346b50
MR
977 buffer->dont_write_tag[0] = 'D';
978 buffer->dont_write_tag[1] = 'W';
6c223761
KB
979 buffer->end_tag[0] = 'Z';
980 buffer->end_tag[1] = 'Z';
981
982 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
983
984 kfree(buffer);
985
986 return rc;
987}
988
989#pragma pack(1)
990
991struct bmic_host_wellness_time {
992 u8 start_tag[4];
993 u8 time_tag[2];
994 __le16 time_length;
995 u8 time[8];
996 u8 dont_write_tag[2];
997 u8 end_tag[2];
998};
999
1000#pragma pack()
1001
1002static int pqi_write_current_time_to_host_wellness(
1003 struct pqi_ctrl_info *ctrl_info)
1004{
1005 int rc;
1006 struct bmic_host_wellness_time *buffer;
1007 size_t buffer_length;
1008 time64_t local_time;
1009 unsigned int year;
ed10858e 1010 struct tm tm;
6c223761
KB
1011
1012 buffer_length = sizeof(*buffer);
1013
1014 buffer = kmalloc(buffer_length, GFP_KERNEL);
1015 if (!buffer)
1016 return -ENOMEM;
1017
1018 buffer->start_tag[0] = '<';
1019 buffer->start_tag[1] = 'H';
1020 buffer->start_tag[2] = 'W';
1021 buffer->start_tag[3] = '>';
1022 buffer->time_tag[0] = 'T';
1023 buffer->time_tag[1] = 'D';
1024 put_unaligned_le16(sizeof(buffer->time),
1025 &buffer->time_length);
1026
ed10858e
AB
1027 local_time = ktime_get_real_seconds();
1028 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
6c223761
KB
1029 year = tm.tm_year + 1900;
1030
1031 buffer->time[0] = bin2bcd(tm.tm_hour);
1032 buffer->time[1] = bin2bcd(tm.tm_min);
1033 buffer->time[2] = bin2bcd(tm.tm_sec);
1034 buffer->time[3] = 0;
1035 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
1036 buffer->time[5] = bin2bcd(tm.tm_mday);
1037 buffer->time[6] = bin2bcd(year / 100);
1038 buffer->time[7] = bin2bcd(year % 100);
1039
1040 buffer->dont_write_tag[0] = 'D';
1041 buffer->dont_write_tag[1] = 'W';
1042 buffer->end_tag[0] = 'Z';
1043 buffer->end_tag[1] = 'Z';
1044
1045 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
1046
1047 kfree(buffer);
1048
1049 return rc;
1050}
1051
42dc0426 1052#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
6c223761
KB
1053
1054static void pqi_update_time_worker(struct work_struct *work)
1055{
1056 int rc;
1057 struct pqi_ctrl_info *ctrl_info;
1058
1059 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1060 update_time_work);
1061
6c223761
KB
1062 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
1063 if (rc)
1064 dev_warn(&ctrl_info->pci_dev->dev,
1065 "error updating time on controller\n");
1066
1067 schedule_delayed_work(&ctrl_info->update_time_work,
1068 PQI_UPDATE_TIME_WORK_INTERVAL);
1069}
1070
583891c9 1071static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
6c223761 1072{
4fbebf1a 1073 schedule_delayed_work(&ctrl_info->update_time_work, 0);
061ef06a
KB
1074}
1075
583891c9 1076static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
061ef06a 1077{
061ef06a 1078 cancel_delayed_work_sync(&ctrl_info->update_time_work);
6c223761
KB
1079}
1080
583891c9
KB
1081static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
1082 size_t buffer_length)
6c223761 1083{
583891c9 1084 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
6c223761
KB
1085}
1086
583891c9 1087static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
6c223761
KB
1088{
1089 int rc;
1090 size_t lun_list_length;
1091 size_t lun_data_length;
1092 size_t new_lun_list_length;
1093 void *lun_data = NULL;
1094 struct report_lun_header *report_lun_header;
1095
1096 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
1097 if (!report_lun_header) {
1098 rc = -ENOMEM;
1099 goto out;
1100 }
1101
583891c9 1102 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
6c223761
KB
1103 if (rc)
1104 goto out;
1105
1106 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
1107
1108again:
1109 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
1110
1111 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
1112 if (!lun_data) {
1113 rc = -ENOMEM;
1114 goto out;
1115 }
1116
1117 if (lun_list_length == 0) {
1118 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
1119 goto out;
1120 }
1121
1122 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
1123 if (rc)
1124 goto out;
1125
583891c9
KB
1126 new_lun_list_length =
1127 get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);
6c223761
KB
1128
1129 if (new_lun_list_length > lun_list_length) {
1130 lun_list_length = new_lun_list_length;
1131 kfree(lun_data);
1132 goto again;
1133 }
1134
1135out:
1136 kfree(report_lun_header);
1137
1138 if (rc) {
1139 kfree(lun_data);
1140 lun_data = NULL;
1141 }
1142
1143 *buffer = lun_data;
1144
1145 return rc;
1146}
1147
583891c9 1148static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
6c223761 1149{
28ca6d87
MM
1150 int rc;
1151 unsigned int i;
1152 u8 rpl_response_format;
1153 u32 num_physicals;
1154 size_t rpl_16byte_wwid_list_length;
1155 void *rpl_list;
1156 struct report_lun_header *rpl_header;
1157 struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
1158 struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;
1159
1160 rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
1161 if (rc)
1162 return rc;
1163
1164 if (ctrl_info->rpl_extended_format_4_5_supported) {
1165 rpl_header = rpl_list;
1166 rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
1167 if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
1168 *buffer = rpl_list;
1169 return 0;
1170 } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
1171 dev_err(&ctrl_info->pci_dev->dev,
1172 "RPL returned unsupported data format %u\n",
1173 rpl_response_format);
1174 return -EINVAL;
1175 } else {
1176 dev_warn(&ctrl_info->pci_dev->dev,
1177 "RPL returned extended format 2 instead of 4\n");
1178 }
1179 }
1180
1181 rpl_8byte_wwid_list = rpl_list;
1182 num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);
1183 rpl_16byte_wwid_list_length = sizeof(struct report_lun_header) + (num_physicals * sizeof(struct report_phys_lun_16byte_wwid));
1184
1185 rpl_16byte_wwid_list = kmalloc(rpl_16byte_wwid_list_length, GFP_KERNEL);
1186 if (!rpl_16byte_wwid_list)
1187 return -ENOMEM;
1188
1189 put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
1190 &rpl_16byte_wwid_list->header.list_length);
1191 rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;
1192
1193 for (i = 0; i < num_physicals; i++) {
1194 memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
291c2e00
KB
1195 memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
1196 memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
28ca6d87
MM
1197 rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
1198 rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
1199 rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
1200 rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
1201 rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
1202 }
1203
1204 kfree(rpl_8byte_wwid_list);
1205 *buffer = rpl_16byte_wwid_list;
1206
1207 return 0;
6c223761
KB
1208}
1209
583891c9 1210static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
6c223761
KB
1211{
1212 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
1213}
1214
1215static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
28ca6d87
MM
1216 struct report_phys_lun_16byte_wwid_list **physdev_list,
1217 struct report_log_lun_list **logdev_list)
6c223761
KB
1218{
1219 int rc;
1220 size_t logdev_list_length;
1221 size_t logdev_data_length;
28ca6d87
MM
1222 struct report_log_lun_list *internal_logdev_list;
1223 struct report_log_lun_list *logdev_data;
6c223761
KB
1224 struct report_lun_header report_lun_header;
1225
1226 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
1227 if (rc)
1228 dev_err(&ctrl_info->pci_dev->dev,
1229 "report physical LUNs failed\n");
1230
1231 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
1232 if (rc)
1233 dev_err(&ctrl_info->pci_dev->dev,
1234 "report logical LUNs failed\n");
1235
1236 /*
1237 * Tack the controller itself onto the end of the logical device list.
1238 */
1239
1240 logdev_data = *logdev_list;
1241
1242 if (logdev_data) {
1243 logdev_list_length =
1244 get_unaligned_be32(&logdev_data->header.list_length);
1245 } else {
1246 memset(&report_lun_header, 0, sizeof(report_lun_header));
1247 logdev_data =
28ca6d87 1248 (struct report_log_lun_list *)&report_lun_header;
6c223761
KB
1249 logdev_list_length = 0;
1250 }
1251
1252 logdev_data_length = sizeof(struct report_lun_header) +
1253 logdev_list_length;
1254
1255 internal_logdev_list = kmalloc(logdev_data_length +
28ca6d87 1256 sizeof(struct report_log_lun), GFP_KERNEL);
6c223761
KB
1257 if (!internal_logdev_list) {
1258 kfree(*logdev_list);
1259 *logdev_list = NULL;
1260 return -ENOMEM;
1261 }
1262
1263 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
1264 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
28ca6d87 1265 sizeof(struct report_log_lun));
6c223761 1266 put_unaligned_be32(logdev_list_length +
28ca6d87 1267 sizeof(struct report_log_lun),
6c223761
KB
1268 &internal_logdev_list->header.list_length);
1269
1270 kfree(*logdev_list);
1271 *logdev_list = internal_logdev_list;
1272
1273 return 0;
1274}
1275
1276static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
1277 int bus, int target, int lun)
1278{
1279 device->bus = bus;
1280 device->target = target;
1281 device->lun = lun;
1282}
1283
1284static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
1285{
1286 u8 *scsi3addr;
1287 u32 lunid;
bd10cf0b
KB
1288 int bus;
1289 int target;
1290 int lun;
6c223761
KB
1291
1292 scsi3addr = device->scsi3addr;
1293 lunid = get_unaligned_le32(scsi3addr);
1294
1295 if (pqi_is_hba_lunid(scsi3addr)) {
1296 /* The specified device is the controller. */
1297 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
1298 device->target_lun_valid = true;
1299 return;
1300 }
1301
1302 if (pqi_is_logical_device(device)) {
bd10cf0b
KB
1303 if (device->is_external_raid_device) {
1304 bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
1305 target = (lunid >> 16) & 0x3fff;
1306 lun = lunid & 0xff;
1307 } else {
1308 bus = PQI_RAID_VOLUME_BUS;
1309 target = 0;
1310 lun = lunid & 0x3fff;
1311 }
1312 pqi_set_bus_target_lun(device, bus, target, lun);
6c223761
KB
1313 device->target_lun_valid = true;
1314 return;
1315 }
1316
1317 /*
1318 * Defer target and LUN assignment for non-controller physical devices
1319 * because the SAS transport layer will make these assignments later.
1320 */
1321 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
1322}
1323
1324static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
1325 struct pqi_scsi_dev *device)
1326{
1327 int rc;
1328 u8 raid_level;
1329 u8 *buffer;
1330
1331 raid_level = SA_RAID_UNKNOWN;
1332
1333 buffer = kmalloc(64, GFP_KERNEL);
1334 if (buffer) {
1335 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1336 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
1337 if (rc == 0) {
1338 raid_level = buffer[8];
1339 if (raid_level > SA_RAID_MAX)
1340 raid_level = SA_RAID_UNKNOWN;
1341 }
1342 kfree(buffer);
1343 }
1344
1345 device->raid_level = raid_level;
1346}
1347
1348static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1349 struct pqi_scsi_dev *device, struct raid_map *raid_map)
1350{
1351 char *err_msg;
1352 u32 raid_map_size;
1353 u32 r5or6_blocks_per_row;
6c223761
KB
1354
1355 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1356
1357 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1358 err_msg = "RAID map too small";
1359 goto bad_raid_map;
1360 }
1361
6c223761
KB
1362 if (device->raid_level == SA_RAID_1) {
1363 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1364 err_msg = "invalid RAID-1 map";
1365 goto bad_raid_map;
1366 }
7a012c23 1367 } else if (device->raid_level == SA_RAID_TRIPLE) {
6c223761 1368 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
7a012c23 1369 err_msg = "invalid RAID-1(Triple) map";
6c223761
KB
1370 goto bad_raid_map;
1371 }
1372 } else if ((device->raid_level == SA_RAID_5 ||
1373 device->raid_level == SA_RAID_6) &&
1374 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1375 /* RAID 50/60 */
1376 r5or6_blocks_per_row =
1377 get_unaligned_le16(&raid_map->strip_size) *
1378 get_unaligned_le16(&raid_map->data_disks_per_row);
1379 if (r5or6_blocks_per_row == 0) {
1380 err_msg = "invalid RAID-5 or RAID-6 map";
1381 goto bad_raid_map;
1382 }
1383 }
1384
1385 return 0;
1386
1387bad_raid_map:
d87d5474 1388 dev_warn(&ctrl_info->pci_dev->dev,
38a7338a
KB
1389 "logical device %08x%08x %s\n",
1390 *((u32 *)&device->scsi3addr),
1391 *((u32 *)&device->scsi3addr[4]), err_msg);
6c223761
KB
1392
1393 return -EINVAL;
1394}
1395
1396static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1397 struct pqi_scsi_dev *device)
1398{
1399 int rc;
a91aaae0 1400 u32 raid_map_size;
6c223761
KB
1401 struct raid_map *raid_map;
1402
1403 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1404 if (!raid_map)
1405 return -ENOMEM;
1406
a91aaae0 1407 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
ae0c189d 1408 device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
6c223761
KB
1409 if (rc)
1410 goto error;
1411
a91aaae0 1412 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
6c223761 1413
a91aaae0 1414 if (raid_map_size > sizeof(*raid_map)) {
6c223761 1415
a91aaae0
AK
1416 kfree(raid_map);
1417
1418 raid_map = kmalloc(raid_map_size, GFP_KERNEL);
1419 if (!raid_map)
1420 return -ENOMEM;
1421
1422 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
ae0c189d 1423 device->scsi3addr, raid_map, raid_map_size, 0, NULL);
a91aaae0
AK
1424 if (rc)
1425 goto error;
1426
1427 if (get_unaligned_le32(&raid_map->structure_size)
1428 != raid_map_size) {
1429 dev_warn(&ctrl_info->pci_dev->dev,
583891c9 1430 "requested %u bytes, received %u bytes\n",
a91aaae0
AK
1431 raid_map_size,
1432 get_unaligned_le32(&raid_map->structure_size));
d1f6581a 1433 rc = -EINVAL;
a91aaae0
AK
1434 goto error;
1435 }
1436 }
6c223761
KB
1437
1438 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1439 if (rc)
1440 goto error;
1441
1442 device->raid_map = raid_map;
1443
1444 return 0;
1445
1446error:
1447 kfree(raid_map);
1448
1449 return rc;
1450}
1451
f6cc2a77
KB
1452static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
1453 struct pqi_scsi_dev *device)
1454{
1455 if (!ctrl_info->lv_drive_type_mix_valid) {
1456 device->max_transfer_encrypted = ~0;
1457 return;
1458 }
1459
1460 switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
1461 case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
1462 case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
1463 case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
1464 case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
1465 case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
1466 case LV_DRIVE_TYPE_MIX_SAS_ONLY:
1467 case LV_DRIVE_TYPE_MIX_SATA_ONLY:
1468 device->max_transfer_encrypted =
1469 ctrl_info->max_transfer_encrypted_sas_sata;
1470 break;
1471 case LV_DRIVE_TYPE_MIX_NVME_ONLY:
1472 device->max_transfer_encrypted =
1473 ctrl_info->max_transfer_encrypted_nvme;
1474 break;
1475 case LV_DRIVE_TYPE_MIX_UNKNOWN:
1476 case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
1477 default:
1478 device->max_transfer_encrypted =
1479 min(ctrl_info->max_transfer_encrypted_sas_sata,
1480 ctrl_info->max_transfer_encrypted_nvme);
1481 break;
1482 }
1483}
1484
588a63fe 1485static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
1486 struct pqi_scsi_dev *device)
1487{
1488 int rc;
1489 u8 *buffer;
588a63fe 1490 u8 bypass_status;
6c223761
KB
1491
1492 buffer = kmalloc(64, GFP_KERNEL);
1493 if (!buffer)
1494 return;
1495
1496 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
588a63fe 1497 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
6c223761
KB
1498 if (rc)
1499 goto out;
1500
694c5d5b
KB
1501#define RAID_BYPASS_STATUS 4
1502#define RAID_BYPASS_CONFIGURED 0x1
1503#define RAID_BYPASS_ENABLED 0x2
6c223761 1504
588a63fe
KB
1505 bypass_status = buffer[RAID_BYPASS_STATUS];
1506 device->raid_bypass_configured =
1507 (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1508 if (device->raid_bypass_configured &&
1509 (bypass_status & RAID_BYPASS_ENABLED) &&
f6cc2a77 1510 pqi_get_raid_map(ctrl_info, device) == 0) {
588a63fe 1511 device->raid_bypass_enabled = true;
f6cc2a77
KB
1512 if (get_unaligned_le16(&device->raid_map->flags) &
1513 RAID_MAP_ENCRYPTION_ENABLED)
1514 pqi_set_max_transfer_encrypted(ctrl_info, device);
1515 }
6c223761
KB
1516
1517out:
1518 kfree(buffer);
1519}
1520
1521/*
1522 * Use vendor-specific VPD to determine online/offline status of a volume.
1523 */
1524
1525static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1526 struct pqi_scsi_dev *device)
1527{
1528 int rc;
1529 size_t page_length;
1530 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1531 bool volume_offline = true;
1532 u32 volume_flags;
1533 struct ciss_vpd_logical_volume_status *vpd;
1534
1535 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1536 if (!vpd)
1537 goto no_buffer;
1538
1539 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1540 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1541 if (rc)
1542 goto out;
1543
7ff44499
DC
1544 if (vpd->page_code != CISS_VPD_LV_STATUS)
1545 goto out;
1546
6c223761
KB
1547 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1548 volume_status) + vpd->page_length;
1549 if (page_length < sizeof(*vpd))
1550 goto out;
1551
1552 volume_status = vpd->volume_status;
1553 volume_flags = get_unaligned_be32(&vpd->flags);
1554 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1555
1556out:
1557 kfree(vpd);
1558no_buffer:
1559 device->volume_status = volume_status;
1560 device->volume_offline = volume_offline;
1561}
1562
2a47834d 1563#define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01
ec504b23
MB
1564#define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10
1565
ce143793
KB
1566static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
1567 struct pqi_scsi_dev *device,
1568 struct bmic_identify_physical_device *id_phys)
1569{
1570 int rc;
26b390ab 1571
ce143793
KB
1572 memset(id_phys, 0, sizeof(*id_phys));
1573
1574 rc = pqi_identify_physical_device(ctrl_info, device,
1575 id_phys, sizeof(*id_phys));
1576 if (rc) {
1577 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1578 return rc;
1579 }
1580
1581 scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
1582 scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
1583
1584 memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
1585 memcpy(device->model, &id_phys->model[8], sizeof(device->model));
1586
1587 device->box_index = id_phys->box_index;
1588 device->phys_box_on_bus = id_phys->phys_box_on_bus;
1589 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
1590 device->queue_depth =
1591 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1592 device->active_path_index = id_phys->active_path_number;
1593 device->path_map = id_phys->redundant_path_present_map;
1594 memcpy(&device->box,
1595 &id_phys->alternate_paths_phys_box_on_port,
1596 sizeof(device->box));
1597 memcpy(&device->phys_connector,
1598 &id_phys->alternate_paths_phys_connector,
1599 sizeof(device->phys_connector));
1600 device->bay = id_phys->phys_bay_in_box;
904f2bfd
KM
1601 device->multi_lun_device_lun_count = id_phys->multi_lun_device_lun_count;
1602 if (!device->multi_lun_device_lun_count)
1603 device->multi_lun_device_lun_count = 1;
ec504b23
MB
1604 if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
1605 id_phys->phy_count)
1606 device->phy_id =
1607 id_phys->phy_to_phy_map[device->active_path_index];
1608 else
1609 device->phy_id = 0xFF;
1610
2a47834d
GW
1611 device->ncq_prio_support =
1612 ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
1613 PQI_DEVICE_NCQ_PRIO_SUPPORTED);
1614
ce143793
KB
1615 return 0;
1616}
1617
1618static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
1619 struct pqi_scsi_dev *device)
1620{
1621 int rc;
1622 u8 *buffer;
3d46a59a 1623
6c223761
KB
1624 buffer = kmalloc(64, GFP_KERNEL);
1625 if (!buffer)
1626 return -ENOMEM;
1627
1628 /* Send an inquiry to the device to see what it is. */
ce143793
KB
1629 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1630 if (rc)
1631 goto out;
6c223761
KB
1632
1633 scsi_sanitize_inquiry_string(&buffer[8], 8);
1634 scsi_sanitize_inquiry_string(&buffer[16], 16);
1635
1636 device->devtype = buffer[0] & 0x1f;
cbe0c7b1
KB
1637 memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1638 memcpy(device->model, &buffer[16], sizeof(device->model));
6c223761 1639
ce143793 1640 if (device->devtype == TYPE_DISK) {
bd10cf0b
KB
1641 if (device->is_external_raid_device) {
1642 device->raid_level = SA_RAID_UNKNOWN;
1643 device->volume_status = CISS_LV_OK;
1644 device->volume_offline = false;
1645 } else {
1646 pqi_get_raid_level(ctrl_info, device);
588a63fe 1647 pqi_get_raid_bypass_status(ctrl_info, device);
bd10cf0b
KB
1648 pqi_get_volume_status(ctrl_info, device);
1649 }
6c223761
KB
1650 }
1651
1652out:
1653 kfree(buffer);
1654
1655 return rc;
1656}
1657
be76f906
DB
1658/*
1659 * Prevent adding drive to OS for some corner cases such as a drive
1660 * undergoing a sanitize operation. Some OSes will continue to poll
1661 * the drive until the sanitize completes, which can take hours,
1662 * resulting in long bootup delays. Commands such as TUR, READ_CAP
1663 * are allowed, but READ/WRITE cause check condition. So the OS
1664 * cannot check/read the partition table.
1665 * Note: devices that have completed sanitize must be re-enabled
1666 * using the management utility.
1667 */
1668static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info,
1669 struct pqi_scsi_dev *device)
1670{
1671 u8 scsi_status;
1672 int rc;
1673 enum dma_data_direction dir;
1674 char *buffer;
1675 int buffer_length = 64;
1676 size_t sense_data_length;
1677 struct scsi_sense_hdr sshdr;
1678 struct pqi_raid_path_request request;
1679 struct pqi_raid_error_info error_info;
1680 bool offline = false; /* Assume keep online */
1681
1682 /* Do not check controllers. */
1683 if (pqi_is_hba_lunid(device->scsi3addr))
1684 return false;
1685
1686 /* Do not check LVs. */
1687 if (pqi_is_logical_device(device))
1688 return false;
1689
1690 buffer = kmalloc(buffer_length, GFP_KERNEL);
1691 if (!buffer)
1692 return false; /* Assume not offline */
1693
1694 /* Check for SANITIZE in progress using TUR */
1695 rc = pqi_build_raid_path_request(ctrl_info, &request,
1696 TEST_UNIT_READY, RAID_CTLR_LUNID, buffer,
1697 buffer_length, 0, &dir);
1698 if (rc)
1699 goto out; /* Assume not offline */
1700
1701 memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number));
1702
1703 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info);
1704
1705 if (rc)
1706 goto out; /* Assume not offline */
1707
1708 scsi_status = error_info.status;
1709 sense_data_length = get_unaligned_le16(&error_info.sense_data_length);
1710 if (sense_data_length == 0)
1711 sense_data_length =
1712 get_unaligned_le16(&error_info.response_data_length);
1713 if (sense_data_length) {
1714 if (sense_data_length > sizeof(error_info.data))
1715 sense_data_length = sizeof(error_info.data);
1716
1717 /*
1718 * Check for sanitize in progress: asc:0x04, ascq: 0x1b
1719 */
1720 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
1721 scsi_normalize_sense(error_info.data,
1722 sense_data_length, &sshdr) &&
1723 sshdr.sense_key == NOT_READY &&
1724 sshdr.asc == 0x04 &&
1725 sshdr.ascq == 0x1b) {
1726 device->device_offline = true;
1727 offline = true;
1728 goto out; /* Keep device offline */
1729 }
1730 }
1731
1732out:
1733 kfree(buffer);
1734 return offline;
1735}
1736
ce143793 1737static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
1738 struct pqi_scsi_dev *device,
1739 struct bmic_identify_physical_device *id_phys)
1740{
1741 int rc;
1742
ce143793
KB
1743 if (device->is_expander_smp_device)
1744 return 0;
6c223761 1745
ce143793
KB
1746 if (pqi_is_logical_device(device))
1747 rc = pqi_get_logical_device_info(ctrl_info, device);
1748 else
1749 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
694c5d5b 1750
ce143793 1751 return rc;
6c223761
KB
1752}
1753
1754static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1755 struct pqi_scsi_dev *device)
1756{
1757 char *status;
1758 static const char unknown_state_str[] =
1759 "Volume is in an unknown state (%u)";
1760 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1761
1762 switch (device->volume_status) {
1763 case CISS_LV_OK:
1764 status = "Volume online";
1765 break;
1766 case CISS_LV_FAILED:
1767 status = "Volume failed";
1768 break;
1769 case CISS_LV_NOT_CONFIGURED:
1770 status = "Volume not configured";
1771 break;
1772 case CISS_LV_DEGRADED:
1773 status = "Volume degraded";
1774 break;
1775 case CISS_LV_READY_FOR_RECOVERY:
1776 status = "Volume ready for recovery operation";
1777 break;
1778 case CISS_LV_UNDERGOING_RECOVERY:
1779 status = "Volume undergoing recovery";
1780 break;
1781 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1782 status = "Wrong physical drive was replaced";
1783 break;
1784 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1785 status = "A physical drive not properly connected";
1786 break;
1787 case CISS_LV_HARDWARE_OVERHEATING:
1788 status = "Hardware is overheating";
1789 break;
1790 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1791 status = "Hardware has overheated";
1792 break;
1793 case CISS_LV_UNDERGOING_EXPANSION:
1794 status = "Volume undergoing expansion";
1795 break;
1796 case CISS_LV_NOT_AVAILABLE:
1797 status = "Volume waiting for transforming volume";
1798 break;
1799 case CISS_LV_QUEUED_FOR_EXPANSION:
1800 status = "Volume queued for expansion";
1801 break;
1802 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1803 status = "Volume disabled due to SCSI ID conflict";
1804 break;
1805 case CISS_LV_EJECTED:
1806 status = "Volume has been ejected";
1807 break;
1808 case CISS_LV_UNDERGOING_ERASE:
1809 status = "Volume undergoing background erase";
1810 break;
1811 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1812 status = "Volume ready for predictive spare rebuild";
1813 break;
1814 case CISS_LV_UNDERGOING_RPI:
1815 status = "Volume undergoing rapid parity initialization";
1816 break;
1817 case CISS_LV_PENDING_RPI:
1818 status = "Volume queued for rapid parity initialization";
1819 break;
1820 case CISS_LV_ENCRYPTED_NO_KEY:
1821 status = "Encrypted volume inaccessible - key not present";
1822 break;
1823 case CISS_LV_UNDERGOING_ENCRYPTION:
1824 status = "Volume undergoing encryption process";
1825 break;
1826 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1827 status = "Volume undergoing encryption re-keying process";
1828 break;
1829 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
d87d5474 1830 status = "Volume encrypted but encryption is disabled";
6c223761
KB
1831 break;
1832 case CISS_LV_PENDING_ENCRYPTION:
1833 status = "Volume pending migration to encrypted state";
1834 break;
1835 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1836 status = "Volume pending encryption rekeying";
1837 break;
1838 case CISS_LV_NOT_SUPPORTED:
1839 status = "Volume not supported on this controller";
1840 break;
1841 case CISS_LV_STATUS_UNAVAILABLE:
1842 status = "Volume status not available";
1843 break;
1844 default:
1845 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1846 unknown_state_str, device->volume_status);
1847 status = unknown_state_buffer;
1848 break;
1849 }
1850
1851 dev_info(&ctrl_info->pci_dev->dev,
1852 "scsi %d:%d:%d:%d %s\n",
1853 ctrl_info->scsi_host->host_no,
1854 device->bus, device->target, device->lun, status);
1855}
1856
6c223761
KB
1857static void pqi_rescan_worker(struct work_struct *work)
1858{
1859 struct pqi_ctrl_info *ctrl_info;
1860
1861 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1862 rescan_work);
1863
1864 pqi_scan_scsi_devices(ctrl_info);
1865}
1866
1867static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1868 struct pqi_scsi_dev *device)
1869{
1870 int rc;
1871
1872 if (pqi_is_logical_device(device))
1873 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1874 device->target, device->lun);
1875 else
1876 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1877
1878 return rc;
1879}
1880
18ff5f08 1881#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000)
1e46731e 1882
583891c9 1883static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
6c223761 1884{
1e46731e 1885 int rc;
904f2bfd 1886 int lun;
1e46731e 1887
904f2bfd
KM
1888 for (lun = 0; lun < device->multi_lun_device_lun_count; lun++) {
1889 rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
1890 PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
1891 if (rc)
1892 dev_err(&ctrl_info->pci_dev->dev,
1893 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
1894 ctrl_info->scsi_host->host_no, device->bus,
1895 device->target, lun,
1896 atomic_read(&device->scsi_cmds_outstanding[lun]));
1897 }
1e46731e 1898
6c223761
KB
1899 if (pqi_is_logical_device(device))
1900 scsi_remove_device(device->sdev);
1901 else
1902 pqi_remove_sas_device(device);
819225b0
DB
1903
1904 pqi_device_remove_start(device);
6c223761
KB
1905}
1906
1907/* Assumes the SCSI device list lock is held. */
1908
1909static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1910 int bus, int target, int lun)
1911{
1912 struct pqi_scsi_dev *device;
1913
4d15ad38
KB
1914 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1915 if (device->bus == bus && device->target == target && device->lun == lun)
6c223761
KB
1916 return device;
1917
1918 return NULL;
1919}
1920
583891c9 1921static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
6c223761
KB
1922{
1923 if (dev1->is_physical_device != dev2->is_physical_device)
1924 return false;
1925
1926 if (dev1->is_physical_device)
28ca6d87 1927 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;
6c223761 1928
583891c9 1929 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
6c223761
KB
1930}
1931
1932enum pqi_find_result {
1933 DEVICE_NOT_FOUND,
1934 DEVICE_CHANGED,
1935 DEVICE_SAME,
1936};
1937
1938static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
4d15ad38 1939 struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
6c223761
KB
1940{
1941 struct pqi_scsi_dev *device;
1942
4d15ad38
KB
1943 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1944 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
6c223761
KB
1945 *matching_device = device;
1946 if (pqi_device_equal(device_to_find, device)) {
1947 if (device_to_find->volume_offline)
1948 return DEVICE_CHANGED;
1949 return DEVICE_SAME;
1950 }
1951 return DEVICE_CHANGED;
1952 }
1953 }
1954
1955 return DEVICE_NOT_FOUND;
1956}
1957
3d46a59a
DB
1958static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1959{
1960 if (device->is_expander_smp_device)
1961 return "Enclosure SMP ";
1962
1963 return scsi_device_type(device->devtype);
1964}
1965
6de783f6
KB
1966#define PQI_DEV_INFO_BUFFER_LENGTH 128
1967
6c223761
KB
1968static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1969 char *action, struct pqi_scsi_dev *device)
1970{
6de783f6
KB
1971 ssize_t count;
1972 char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1973
a4256252 1974 count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
6de783f6
KB
1975 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1976
1977 if (device->target_lun_valid)
181aea89 1978 count += scnprintf(buffer + count,
6de783f6
KB
1979 PQI_DEV_INFO_BUFFER_LENGTH - count,
1980 "%d:%d",
1981 device->target,
1982 device->lun);
1983 else
181aea89 1984 count += scnprintf(buffer + count,
6de783f6
KB
1985 PQI_DEV_INFO_BUFFER_LENGTH - count,
1986 "-:-");
1987
1988 if (pqi_is_logical_device(device))
181aea89 1989 count += scnprintf(buffer + count,
6de783f6
KB
1990 PQI_DEV_INFO_BUFFER_LENGTH - count,
1991 " %08x%08x",
1992 *((u32 *)&device->scsi3addr),
1993 *((u32 *)&device->scsi3addr[4]));
1994 else
181aea89 1995 count += scnprintf(buffer + count,
6de783f6 1996 PQI_DEV_INFO_BUFFER_LENGTH - count,
28ca6d87
MM
1997 " %016llx%016llx",
1998 get_unaligned_be64(&device->wwid[0]),
1999 get_unaligned_be64(&device->wwid[8]));
6de783f6 2000
181aea89 2001 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
6de783f6 2002 " %s %.8s %.16s ",
3d46a59a 2003 pqi_device_type(device),
6c223761 2004 device->vendor,
6de783f6
KB
2005 device->model);
2006
2007 if (pqi_is_logical_device(device)) {
2008 if (device->devtype == TYPE_DISK)
181aea89 2009 count += scnprintf(buffer + count,
6de783f6
KB
2010 PQI_DEV_INFO_BUFFER_LENGTH - count,
2011 "SSDSmartPathCap%c En%c %-12s",
588a63fe
KB
2012 device->raid_bypass_configured ? '+' : '-',
2013 device->raid_bypass_enabled ? '+' : '-',
6de783f6
KB
2014 pqi_raid_level_to_string(device->raid_level));
2015 } else {
181aea89 2016 count += scnprintf(buffer + count,
6de783f6
KB
2017 PQI_DEV_INFO_BUFFER_LENGTH - count,
2018 "AIO%c", device->aio_enabled ? '+' : '-');
2019 if (device->devtype == TYPE_DISK ||
2020 device->devtype == TYPE_ZBC)
181aea89 2021 count += scnprintf(buffer + count,
6de783f6
KB
2022 PQI_DEV_INFO_BUFFER_LENGTH - count,
2023 " qd=%-6d", device->queue_depth);
2024 }
2025
2026 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
6c223761
KB
2027}
2028
2029/* Assumes the SCSI device list lock is held. */
2030
27655e9d
MR
2031static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
2032 struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
6c223761 2033{
6c223761
KB
2034 existing_device->device_type = new_device->device_type;
2035 existing_device->bus = new_device->bus;
2036 if (new_device->target_lun_valid) {
2037 existing_device->target = new_device->target;
2038 existing_device->lun = new_device->lun;
2039 existing_device->target_lun_valid = true;
2040 }
2041
27655e9d
MR
2042 if (pqi_is_logical_device(existing_device) &&
2043 ctrl_info->logical_volume_rescan_needed)
244ca45e
MR
2044 existing_device->rescan = true;
2045
6c223761
KB
2046 /* By definition, the scsi3addr and wwid fields are already the same. */
2047
2048 existing_device->is_physical_device = new_device->is_physical_device;
bd10cf0b
KB
2049 existing_device->is_external_raid_device =
2050 new_device->is_external_raid_device;
3d46a59a
DB
2051 existing_device->is_expander_smp_device =
2052 new_device->is_expander_smp_device;
6c223761
KB
2053 existing_device->aio_enabled = new_device->aio_enabled;
2054 memcpy(existing_device->vendor, new_device->vendor,
2055 sizeof(existing_device->vendor));
2056 memcpy(existing_device->model, new_device->model,
2057 sizeof(existing_device->model));
2058 existing_device->sas_address = new_device->sas_address;
2059 existing_device->raid_level = new_device->raid_level;
2060 existing_device->queue_depth = new_device->queue_depth;
2061 existing_device->aio_handle = new_device->aio_handle;
2062 existing_device->volume_status = new_device->volume_status;
2063 existing_device->active_path_index = new_device->active_path_index;
ec504b23 2064 existing_device->phy_id = new_device->phy_id;
6c223761
KB
2065 existing_device->path_map = new_device->path_map;
2066 existing_device->bay = new_device->bay;
2d2ad4bc
GW
2067 existing_device->box_index = new_device->box_index;
2068 existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
583891c9 2069 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
904f2bfd
KM
2070 existing_device->multi_lun_device_lun_count = new_device->multi_lun_device_lun_count;
2071 if (!existing_device->multi_lun_device_lun_count)
2072 existing_device->multi_lun_device_lun_count = 1;
6c223761
KB
2073 memcpy(existing_device->box, new_device->box,
2074 sizeof(existing_device->box));
2075 memcpy(existing_device->phys_connector, new_device->phys_connector,
2076 sizeof(existing_device->phys_connector));
5d8fbce0 2077 memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
6c223761
KB
2078 kfree(existing_device->raid_map);
2079 existing_device->raid_map = new_device->raid_map;
588a63fe
KB
2080 existing_device->raid_bypass_configured =
2081 new_device->raid_bypass_configured;
2082 existing_device->raid_bypass_enabled =
2083 new_device->raid_bypass_enabled;
a9a68101 2084 existing_device->device_offline = false;
6c223761
KB
2085
2086 /* To prevent this from being freed later. */
2087 new_device->raid_map = NULL;
2088}
2089
2090static inline void pqi_free_device(struct pqi_scsi_dev *device)
2091{
2092 if (device) {
2093 kfree(device->raid_map);
2094 kfree(device);
2095 }
2096}
2097
2098/*
2099 * Called when exposing a new device to the OS fails in order to re-adjust
2100 * our internal SCSI device list to match the SCSI ML's view.
2101 */
2102
2103static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
2104 struct pqi_scsi_dev *device)
2105{
2106 unsigned long flags;
2107
2108 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2109 list_del(&device->scsi_device_list_entry);
2110 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2111
2112 /* Allow the device structure to be freed later. */
2113 device->keep_device = false;
2114}
2115
3d46a59a
DB
2116static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
2117{
2118 if (device->is_expander_smp_device)
2119 return device->sas_port != NULL;
2120
2121 return device->sdev != NULL;
2122}
2123
6c223761
KB
2124static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
2125 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
2126{
2127 int rc;
2128 unsigned int i;
2129 unsigned long flags;
2130 enum pqi_find_result find_result;
2131 struct pqi_scsi_dev *device;
2132 struct pqi_scsi_dev *next;
2133 struct pqi_scsi_dev *matching_device;
8a994a04
KB
2134 LIST_HEAD(add_list);
2135 LIST_HEAD(delete_list);
6c223761
KB
2136
2137 /*
2138 * The idea here is to do as little work as possible while holding the
2139 * spinlock. That's why we go to great pains to defer anything other
2140 * than updating the internal device list until after we release the
2141 * spinlock.
2142 */
2143
2144 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2145
2146 /* Assume that all devices in the existing list have gone away. */
4d15ad38 2147 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
6c223761
KB
2148 device->device_gone = true;
2149
2150 for (i = 0; i < num_new_devices; i++) {
2151 device = new_device_list[i];
2152
2153 find_result = pqi_scsi_find_entry(ctrl_info, device,
694c5d5b 2154 &matching_device);
6c223761
KB
2155
2156 switch (find_result) {
2157 case DEVICE_SAME:
2158 /*
2159 * The newly found device is already in the existing
2160 * device list.
2161 */
2162 device->new_device = false;
2163 matching_device->device_gone = false;
27655e9d 2164 pqi_scsi_update_device(ctrl_info, matching_device, device);
6c223761
KB
2165 break;
2166 case DEVICE_NOT_FOUND:
2167 /*
2168 * The newly found device is NOT in the existing device
2169 * list.
2170 */
2171 device->new_device = true;
2172 break;
2173 case DEVICE_CHANGED:
2174 /*
2175 * The original device has gone away and we need to add
2176 * the new device.
2177 */
2178 device->new_device = true;
2179 break;
6c223761
KB
2180 }
2181 }
2182
2183 /* Process all devices that have gone away. */
2184 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2185 scsi_device_list_entry) {
2186 if (device->device_gone) {
819225b0 2187 list_del(&device->scsi_device_list_entry);
6c223761
KB
2188 list_add_tail(&device->delete_list_entry, &delete_list);
2189 }
2190 }
2191
2192 /* Process all new devices. */
2193 for (i = 0; i < num_new_devices; i++) {
2194 device = new_device_list[i];
2195 if (!device->new_device)
2196 continue;
2197 if (device->volume_offline)
2198 continue;
2199 list_add_tail(&device->scsi_device_list_entry,
2200 &ctrl_info->scsi_device_list);
2201 list_add_tail(&device->add_list_entry, &add_list);
2202 /* To prevent this device structure from being freed later. */
2203 device->keep_device = true;
2204 }
2205
6c223761
KB
2206 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2207
2790cd4d
KB
2208 /*
2209 * If OFA is in progress and there are devices that need to be deleted,
2210 * allow any pending reset operations to continue and unblock any SCSI
2211 * requests before removal.
2212 */
2213 if (pqi_ofa_in_progress(ctrl_info)) {
2214 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
2215 if (pqi_is_device_added(device))
2216 pqi_device_remove_start(device);
2217 pqi_ctrl_unblock_device_reset(ctrl_info);
2218 pqi_scsi_unblock_requests(ctrl_info);
2219 }
4fd22c13 2220
6c223761 2221 /* Remove all devices that have gone away. */
4d15ad38 2222 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
6c223761
KB
2223 if (device->volume_offline) {
2224 pqi_dev_info(ctrl_info, "offline", device);
2225 pqi_show_volume_status(ctrl_info, device);
4d15ad38 2226 } else {
819225b0 2227 pqi_dev_info(ctrl_info, "removed", device);
4d15ad38 2228 }
819225b0
DB
2229 if (pqi_is_device_added(device))
2230 pqi_remove_device(ctrl_info, device);
2231 list_del(&device->delete_list_entry);
2232 pqi_free_device(device);
6c223761
KB
2233 }
2234
2235 /*
27655e9d
MR
2236 * Notify the SML of any existing device changes such as;
2237 * queue depth, device size.
6c223761 2238 */
583891c9
KB
2239 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
2240 if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
2241 device->advertised_queue_depth = device->queue_depth;
2242 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
244ca45e
MR
2243 if (device->rescan) {
2244 scsi_rescan_device(&device->sdev->sdev_gendev);
2245 device->rescan = false;
2246 }
6c223761
KB
2247 }
2248 }
2249
2250 /* Expose any new devices. */
2251 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
3d46a59a 2252 if (!pqi_is_device_added(device)) {
6c223761 2253 rc = pqi_add_device(ctrl_info, device);
ce143793
KB
2254 if (rc == 0) {
2255 pqi_dev_info(ctrl_info, "added", device);
2256 } else {
6c223761
KB
2257 dev_warn(&ctrl_info->pci_dev->dev,
2258 "scsi %d:%d:%d:%d addition failed, device not added\n",
2259 ctrl_info->scsi_host->host_no,
2260 device->bus, device->target,
2261 device->lun);
2262 pqi_fixup_botched_add(ctrl_info, device);
6c223761
KB
2263 }
2264 }
6c223761 2265 }
27655e9d
MR
2266
2267 ctrl_info->logical_volume_rescan_needed = false;
2268
6c223761
KB
2269}
2270
ce143793 2271static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
6c223761 2272{
ce143793
KB
2273 /*
2274 * Only support the HBA controller itself as a RAID
2275 * controller. If it's a RAID controller other than
2276 * the HBA itself (an external RAID controller, for
2277 * example), we don't support it.
2278 */
2279 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2280 !pqi_is_hba_lunid(device->scsi3addr))
583891c9 2281 return false;
6c223761 2282
ce143793 2283 return true;
6c223761
KB
2284}
2285
94086f5b 2286static inline bool pqi_skip_device(u8 *scsi3addr)
6c223761 2287{
94086f5b
KB
2288 /* Ignore all masked devices. */
2289 if (MASKED_DEVICE(scsi3addr))
6c223761 2290 return true;
6c223761
KB
2291
2292 return false;
2293}
2294
522bc026
DC
2295static inline void pqi_mask_device(u8 *scsi3addr)
2296{
2297 scsi3addr[3] |= 0xc0;
2298}
2299
94a68c81
MB
2300static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
2301{
2302 if (pqi_is_logical_device(device))
2303 return false;
2304
2305 return (device->path_map & (device->path_map - 1)) != 0;
2306}
2307
cd128244
DC
2308static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2309{
583891c9 2310 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
cd128244
DC
2311}
2312
6c223761
KB
2313static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2314{
2315 int i;
2316 int rc;
8a994a04 2317 LIST_HEAD(new_device_list_head);
28ca6d87
MM
2318 struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
2319 struct report_log_lun_list *logdev_list = NULL;
2320 struct report_phys_lun_16byte_wwid *phys_lun;
2321 struct report_log_lun *log_lun;
6c223761
KB
2322 struct bmic_identify_physical_device *id_phys = NULL;
2323 u32 num_physicals;
2324 u32 num_logicals;
2325 struct pqi_scsi_dev **new_device_list = NULL;
2326 struct pqi_scsi_dev *device;
2327 struct pqi_scsi_dev *next;
2328 unsigned int num_new_devices;
2329 unsigned int num_valid_devices;
2330 bool is_physical_device;
2331 u8 *scsi3addr;
5e6a9760
GW
2332 unsigned int physical_index;
2333 unsigned int logical_index;
6c223761 2334 static char *out_of_memory_msg =
6de783f6 2335 "failed to allocate memory, device discovery stopped";
6c223761 2336
6c223761
KB
2337 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2338 if (rc)
2339 goto out;
2340
2341 if (physdev_list)
2342 num_physicals =
2343 get_unaligned_be32(&physdev_list->header.list_length)
2344 / sizeof(physdev_list->lun_entries[0]);
2345 else
2346 num_physicals = 0;
2347
2348 if (logdev_list)
2349 num_logicals =
2350 get_unaligned_be32(&logdev_list->header.list_length)
2351 / sizeof(logdev_list->lun_entries[0]);
2352 else
2353 num_logicals = 0;
2354
2355 if (num_physicals) {
2356 /*
2357 * We need this buffer for calls to pqi_get_physical_disk_info()
2358 * below. We allocate it here instead of inside
2359 * pqi_get_physical_disk_info() because it's a fairly large
2360 * buffer.
2361 */
2362 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2363 if (!id_phys) {
2364 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2365 out_of_memory_msg);
2366 rc = -ENOMEM;
2367 goto out;
2368 }
522bc026 2369
694c5d5b 2370 if (pqi_hide_vsep) {
522bc026 2371 for (i = num_physicals - 1; i >= 0; i--) {
28ca6d87
MM
2372 phys_lun = &physdev_list->lun_entries[i];
2373 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
2374 pqi_mask_device(phys_lun->lunid);
522bc026
DC
2375 break;
2376 }
2377 }
2378 }
6c223761
KB
2379 }
2380
f6cc2a77
KB
2381 if (num_logicals &&
2382 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2383 ctrl_info->lv_drive_type_mix_valid = true;
2384
6c223761
KB
2385 num_new_devices = num_physicals + num_logicals;
2386
6da2ec56
KC
2387 new_device_list = kmalloc_array(num_new_devices,
2388 sizeof(*new_device_list),
2389 GFP_KERNEL);
6c223761
KB
2390 if (!new_device_list) {
2391 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2392 rc = -ENOMEM;
2393 goto out;
2394 }
2395
2396 for (i = 0; i < num_new_devices; i++) {
2397 device = kzalloc(sizeof(*device), GFP_KERNEL);
2398 if (!device) {
2399 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2400 out_of_memory_msg);
2401 rc = -ENOMEM;
2402 goto out;
2403 }
2404 list_add_tail(&device->new_device_list_entry,
2405 &new_device_list_head);
2406 }
2407
2408 device = NULL;
2409 num_valid_devices = 0;
5e6a9760
GW
2410 physical_index = 0;
2411 logical_index = 0;
6c223761
KB
2412
2413 for (i = 0; i < num_new_devices; i++) {
2414
5e6a9760
GW
2415 if ((!pqi_expose_ld_first && i < num_physicals) ||
2416 (pqi_expose_ld_first && i >= num_logicals)) {
6c223761 2417 is_physical_device = true;
28ca6d87
MM
2418 phys_lun = &physdev_list->lun_entries[physical_index++];
2419 log_lun = NULL;
2420 scsi3addr = phys_lun->lunid;
6c223761
KB
2421 } else {
2422 is_physical_device = false;
28ca6d87
MM
2423 phys_lun = NULL;
2424 log_lun = &logdev_list->lun_entries[logical_index++];
2425 scsi3addr = log_lun->lunid;
6c223761
KB
2426 }
2427
94086f5b 2428 if (is_physical_device && pqi_skip_device(scsi3addr))
6c223761
KB
2429 continue;
2430
2431 if (device)
2432 device = list_next_entry(device, new_device_list_entry);
2433 else
2434 device = list_first_entry(&new_device_list_head,
2435 struct pqi_scsi_dev, new_device_list_entry);
2436
2437 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2438 device->is_physical_device = is_physical_device;
3d46a59a 2439 if (is_physical_device) {
28ca6d87 2440 device->device_type = phys_lun->device_type;
ce143793 2441 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
3d46a59a
DB
2442 device->is_expander_smp_device = true;
2443 } else {
bd10cf0b
KB
2444 device->is_external_raid_device =
2445 pqi_is_external_raid_addr(scsi3addr);
3d46a59a 2446 }
6c223761 2447
ce143793
KB
2448 if (!pqi_is_supported_device(device))
2449 continue;
2450
be76f906
DB
2451 /* Do not present disks that the OS cannot fully probe */
2452 if (pqi_keep_device_offline(ctrl_info, device))
2453 continue;
2454
6c223761 2455 /* Gather information about the device. */
ce143793 2456 rc = pqi_get_device_info(ctrl_info, device, id_phys);
6c223761
KB
2457 if (rc == -ENOMEM) {
2458 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2459 out_of_memory_msg);
2460 goto out;
2461 }
2462 if (rc) {
6de783f6
KB
2463 if (device->is_physical_device)
2464 dev_warn(&ctrl_info->pci_dev->dev,
28ca6d87
MM
2465 "obtaining device info failed, skipping physical device %016llx%016llx\n",
2466 get_unaligned_be64(&phys_lun->wwid[0]),
2467 get_unaligned_be64(&phys_lun->wwid[8]));
6de783f6
KB
2468 else
2469 dev_warn(&ctrl_info->pci_dev->dev,
2470 "obtaining device info failed, skipping logical device %08x%08x\n",
2471 *((u32 *)&device->scsi3addr),
2472 *((u32 *)&device->scsi3addr[4]));
6c223761
KB
2473 rc = 0;
2474 continue;
2475 }
2476
6c223761
KB
2477 pqi_assign_bus_target_lun(device);
2478
6c223761 2479 if (device->is_physical_device) {
00598b05 2480 memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
28ca6d87 2481 if ((phys_lun->device_flags &
694c5d5b 2482 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
28ca6d87 2483 phys_lun->aio_handle) {
583891c9
KB
2484 device->aio_enabled = true;
2485 device->aio_handle =
28ca6d87 2486 phys_lun->aio_handle;
3d46a59a 2487 }
6c223761 2488 } else {
28ca6d87 2489 memcpy(device->volume_id, log_lun->volume_id,
6c223761
KB
2490 sizeof(device->volume_id));
2491 }
2492
291c2e00 2493 device->sas_address = get_unaligned_be64(&device->wwid[0]);
6c223761
KB
2494
2495 new_device_list[num_valid_devices++] = device;
2496 }
2497
2498 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2499
2500out:
2501 list_for_each_entry_safe(device, next, &new_device_list_head,
2502 new_device_list_entry) {
2503 if (device->keep_device)
2504 continue;
2505 list_del(&device->new_device_list_entry);
2506 pqi_free_device(device);
2507 }
2508
2509 kfree(new_device_list);
2510 kfree(physdev_list);
2511 kfree(logdev_list);
2512 kfree(id_phys);
2513
2514 return rc;
2515}
2516
819225b0
DB
2517static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2518{
2519 unsigned long flags;
2520 struct pqi_scsi_dev *device;
2521 struct pqi_scsi_dev *next;
2522
819225b0
DB
2523 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2524 scsi_device_list_entry) {
2525 if (pqi_is_device_added(device))
2526 pqi_remove_device(ctrl_info, device);
c4ff687d 2527 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
819225b0
DB
2528 list_del(&device->scsi_device_list_entry);
2529 pqi_free_device(device);
c4ff687d 2530 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
819225b0 2531 }
819225b0
DB
2532}
2533
6c223761
KB
2534static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2535{
66f1c2b4
KB
2536 int rc;
2537 int mutex_acquired;
6c223761
KB
2538
2539 if (pqi_ctrl_offline(ctrl_info))
2540 return -ENXIO;
2541
66f1c2b4
KB
2542 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2543
2544 if (!mutex_acquired) {
2545 if (pqi_ctrl_scan_blocked(ctrl_info))
2546 return -EBUSY;
5f310425 2547 pqi_schedule_rescan_worker_delayed(ctrl_info);
66f1c2b4 2548 return -EINPROGRESS;
530dd8a7 2549 }
6c223761 2550
66f1c2b4
KB
2551 rc = pqi_update_scsi_devices(ctrl_info);
2552 if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2553 pqi_schedule_rescan_worker_delayed(ctrl_info);
2554
2555 mutex_unlock(&ctrl_info->scan_mutex);
2556
6c223761
KB
2557 return rc;
2558}
2559
2560static void pqi_scan_start(struct Scsi_Host *shost)
2561{
4fd22c13
MR
2562 struct pqi_ctrl_info *ctrl_info;
2563
2564 ctrl_info = shost_to_hba(shost);
4fd22c13
MR
2565
2566 pqi_scan_scsi_devices(ctrl_info);
6c223761
KB
2567}
2568
2569/* Returns TRUE if scan is finished. */
2570
2571static int pqi_scan_finished(struct Scsi_Host *shost,
2572 unsigned long elapsed_time)
2573{
2574 struct pqi_ctrl_info *ctrl_info;
2575
2576 ctrl_info = shost_priv(shost);
2577
2578 return !mutex_is_locked(&ctrl_info->scan_mutex);
2579}
2580
583891c9
KB
2581static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2582 struct raid_map *raid_map, u64 first_block)
6c223761
KB
2583{
2584 u32 volume_blk_size;
2585
2586 /*
2587 * Set the encryption tweak values based on logical block address.
2588 * If the block size is 512, the tweak value is equal to the LBA.
2589 * For other block sizes, tweak value is (LBA * block size) / 512.
2590 */
2591 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2592 if (volume_blk_size != 512)
2593 first_block = (first_block * volume_blk_size) / 512;
2594
2595 encryption_info->data_encryption_key_index =
2596 get_unaligned_le16(&raid_map->data_encryption_key_index);
2597 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2598 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2599}
2600
2601/*
588a63fe 2602 * Attempt to perform RAID bypass mapping for a logical volume I/O.
6c223761
KB
2603 */
2604
6702d2c4
DB
2605static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2606 struct pqi_scsi_dev_raid_map_data *rmd)
281a817f
DB
2607{
2608 bool is_supported = true;
2609
2610 switch (rmd->raid_level) {
2611 case SA_RAID_0:
2612 break;
2613 case SA_RAID_1:
f6cc2a77
KB
2614 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2615 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2616 is_supported = false;
2617 break;
7a012c23 2618 case SA_RAID_TRIPLE:
f6cc2a77
KB
2619 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2620 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
281a817f
DB
2621 is_supported = false;
2622 break;
2623 case SA_RAID_5:
f6cc2a77
KB
2624 if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2625 rmd->data_length > ctrl_info->max_write_raid_5_6))
6702d2c4
DB
2626 is_supported = false;
2627 break;
281a817f 2628 case SA_RAID_6:
f6cc2a77
KB
2629 if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2630 rmd->data_length > ctrl_info->max_write_raid_5_6))
281a817f
DB
2631 is_supported = false;
2632 break;
281a817f
DB
2633 default:
2634 is_supported = false;
f6cc2a77 2635 break;
281a817f
DB
2636 }
2637
2638 return is_supported;
2639}
2640
6c223761
KB
2641#define PQI_RAID_BYPASS_INELIGIBLE 1
2642
281a817f 2643static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
583891c9 2644 struct pqi_scsi_dev_raid_map_data *rmd)
6c223761 2645{
6c223761
KB
2646 /* Check for valid opcode, get LBA and block count. */
2647 switch (scmd->cmnd[0]) {
2648 case WRITE_6:
281a817f 2649 rmd->is_write = true;
df561f66 2650 fallthrough;
6c223761 2651 case READ_6:
281a817f 2652 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
e018ef57 2653 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
281a817f
DB
2654 rmd->block_cnt = (u32)scmd->cmnd[4];
2655 if (rmd->block_cnt == 0)
2656 rmd->block_cnt = 256;
6c223761
KB
2657 break;
2658 case WRITE_10:
281a817f 2659 rmd->is_write = true;
df561f66 2660 fallthrough;
6c223761 2661 case READ_10:
281a817f
DB
2662 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2663 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
6c223761
KB
2664 break;
2665 case WRITE_12:
281a817f 2666 rmd->is_write = true;
df561f66 2667 fallthrough;
6c223761 2668 case READ_12:
281a817f
DB
2669 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2670 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
6c223761
KB
2671 break;
2672 case WRITE_16:
281a817f 2673 rmd->is_write = true;
df561f66 2674 fallthrough;
6c223761 2675 case READ_16:
281a817f
DB
2676 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2677 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
6c223761
KB
2678 break;
2679 default:
2680 /* Process via normal I/O path. */
2681 return PQI_RAID_BYPASS_INELIGIBLE;
2682 }
2683
281a817f 2684 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
6c223761 2685
281a817f
DB
2686 return 0;
2687}
6c223761 2688
281a817f 2689static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
583891c9 2690 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
281a817f
DB
2691{
2692#if BITS_PER_LONG == 32
2693 u64 tmpdiv;
2694#endif
2695
2696 rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
6c223761
KB
2697
2698 /* Check for invalid block or wraparound. */
281a817f
DB
2699 if (rmd->last_block >=
2700 get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2701 rmd->last_block < rmd->first_block)
6c223761
KB
2702 return PQI_RAID_BYPASS_INELIGIBLE;
2703
281a817f 2704 rmd->data_disks_per_row =
583891c9 2705 get_unaligned_le16(&raid_map->data_disks_per_row);
281a817f
DB
2706 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2707 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
6c223761
KB
2708
2709 /* Calculate stripe information for the request. */
281a817f 2710 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
667298ce
DB
2711 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2712 return PQI_RAID_BYPASS_INELIGIBLE;
6c223761 2713#if BITS_PER_LONG == 32
281a817f
DB
2714 tmpdiv = rmd->first_block;
2715 do_div(tmpdiv, rmd->blocks_per_row);
2716 rmd->first_row = tmpdiv;
2717 tmpdiv = rmd->last_block;
2718 do_div(tmpdiv, rmd->blocks_per_row);
2719 rmd->last_row = tmpdiv;
2720 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2721 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2722 tmpdiv = rmd->first_row_offset;
2723 do_div(tmpdiv, rmd->strip_size);
2724 rmd->first_column = tmpdiv;
2725 tmpdiv = rmd->last_row_offset;
2726 do_div(tmpdiv, rmd->strip_size);
2727 rmd->last_column = tmpdiv;
6c223761 2728#else
281a817f
DB
2729 rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2730 rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2731 rmd->first_row_offset = (u32)(rmd->first_block -
583891c9 2732 (rmd->first_row * rmd->blocks_per_row));
281a817f 2733 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
583891c9 2734 rmd->blocks_per_row));
281a817f
DB
2735 rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2736 rmd->last_column = rmd->last_row_offset / rmd->strip_size;
6c223761
KB
2737#endif
2738
2739 /* If this isn't a single row/column then give to the controller. */
281a817f 2740 if (rmd->first_row != rmd->last_row ||
583891c9 2741 rmd->first_column != rmd->last_column)
6c223761
KB
2742 return PQI_RAID_BYPASS_INELIGIBLE;
2743
2744 /* Proceeding with driver mapping. */
281a817f 2745 rmd->total_disks_per_row = rmd->data_disks_per_row +
6c223761 2746 get_unaligned_le16(&raid_map->metadata_disks_per_row);
281a817f
DB
2747 rmd->map_row = ((u32)(rmd->first_row >>
2748 raid_map->parity_rotation_shift)) %
6c223761 2749 get_unaligned_le16(&raid_map->row_cnt);
281a817f 2750 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
583891c9 2751 rmd->first_column;
6c223761 2752
281a817f
DB
2753 return 0;
2754}
6c223761 2755
281a817f 2756static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
583891c9 2757 struct raid_map *raid_map)
281a817f 2758{
6c223761 2759#if BITS_PER_LONG == 32
281a817f 2760 u64 tmpdiv;
6c223761 2761#endif
6c223761 2762
667298ce
DB
2763 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2764 return PQI_RAID_BYPASS_INELIGIBLE;
2765
281a817f 2766 /* RAID 50/60 */
583891c9 2767 /* Verify first and last block are in same RAID group. */
281a817f 2768 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
6c223761 2769#if BITS_PER_LONG == 32
281a817f
DB
2770 tmpdiv = rmd->first_block;
2771 rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2772 tmpdiv = rmd->first_group;
2773 do_div(tmpdiv, rmd->blocks_per_row);
2774 rmd->first_group = tmpdiv;
2775 tmpdiv = rmd->last_block;
2776 rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2777 tmpdiv = rmd->last_group;
2778 do_div(tmpdiv, rmd->blocks_per_row);
2779 rmd->last_group = tmpdiv;
6c223761 2780#else
281a817f
DB
2781 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2782 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
6c223761 2783#endif
281a817f
DB
2784 if (rmd->first_group != rmd->last_group)
2785 return PQI_RAID_BYPASS_INELIGIBLE;
6c223761 2786
583891c9 2787 /* Verify request is in a single row of RAID 5/6. */
6c223761 2788#if BITS_PER_LONG == 32
281a817f
DB
2789 tmpdiv = rmd->first_block;
2790 do_div(tmpdiv, rmd->stripesize);
2791 rmd->first_row = tmpdiv;
2792 rmd->r5or6_first_row = tmpdiv;
2793 tmpdiv = rmd->last_block;
2794 do_div(tmpdiv, rmd->stripesize);
2795 rmd->r5or6_last_row = tmpdiv;
6c223761 2796#else
281a817f
DB
2797 rmd->first_row = rmd->r5or6_first_row =
2798 rmd->first_block / rmd->stripesize;
2799 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
6c223761 2800#endif
281a817f
DB
2801 if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2802 return PQI_RAID_BYPASS_INELIGIBLE;
6c223761 2803
583891c9 2804 /* Verify request is in a single column. */
6c223761 2805#if BITS_PER_LONG == 32
281a817f
DB
2806 tmpdiv = rmd->first_block;
2807 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2808 tmpdiv = rmd->first_row_offset;
2809 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2810 rmd->r5or6_first_row_offset = rmd->first_row_offset;
2811 tmpdiv = rmd->last_block;
2812 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2813 tmpdiv = rmd->r5or6_last_row_offset;
2814 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2815 tmpdiv = rmd->r5or6_first_row_offset;
2816 do_div(tmpdiv, rmd->strip_size);
2817 rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2818 tmpdiv = rmd->r5or6_last_row_offset;
2819 do_div(tmpdiv, rmd->strip_size);
2820 rmd->r5or6_last_column = tmpdiv;
6c223761 2821#else
281a817f 2822 rmd->first_row_offset = rmd->r5or6_first_row_offset =
583891c9
KB
2823 (u32)((rmd->first_block % rmd->stripesize) %
2824 rmd->blocks_per_row);
281a817f
DB
2825
2826 rmd->r5or6_last_row_offset =
2827 (u32)((rmd->last_block % rmd->stripesize) %
2828 rmd->blocks_per_row);
2829
2830 rmd->first_column =
583891c9 2831 rmd->r5or6_first_row_offset / rmd->strip_size;
281a817f
DB
2832 rmd->r5or6_first_column = rmd->first_column;
2833 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2834#endif
2835 if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2836 return PQI_RAID_BYPASS_INELIGIBLE;
2837
583891c9 2838 /* Request is eligible. */
281a817f
DB
2839 rmd->map_row =
2840 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2841 get_unaligned_le16(&raid_map->row_cnt);
6c223761 2842
281a817f
DB
2843 rmd->map_index = (rmd->first_group *
2844 (get_unaligned_le16(&raid_map->row_cnt) *
2845 rmd->total_disks_per_row)) +
2846 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
6c223761 2847
6702d2c4
DB
2848 if (rmd->is_write) {
2849 u32 index;
6c223761 2850
6702d2c4
DB
2851 /*
2852 * p_parity_it_nexus and q_parity_it_nexus are pointers to the
2853 * parity entries inside the device's raid_map.
2854 *
2855 * A device's RAID map is bounded by: number of RAID disks squared.
2856 *
2857 * The devices RAID map size is checked during device
2858 * initialization.
2859 */
2860 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2861 index *= rmd->total_disks_per_row;
2862 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2863
2864 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2865 if (rmd->raid_level == SA_RAID_6) {
2866 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2867 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2868 }
6702d2c4
DB
2869#if BITS_PER_LONG == 32
2870 tmpdiv = rmd->first_block;
2871 do_div(tmpdiv, rmd->blocks_per_row);
2872 rmd->row = tmpdiv;
2873#else
2874 rmd->row = rmd->first_block / rmd->blocks_per_row;
6c223761 2875#endif
6702d2c4
DB
2876 }
2877
281a817f
DB
2878 return 0;
2879}
2880
2881static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2882{
2883 /* Build the new CDB for the physical disk I/O. */
2884 if (rmd->disk_block > 0xffffffff) {
2885 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2886 rmd->cdb[1] = 0;
2887 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2888 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2889 rmd->cdb[14] = 0;
2890 rmd->cdb[15] = 0;
2891 rmd->cdb_length = 16;
2892 } else {
2893 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2894 rmd->cdb[1] = 0;
2895 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2896 rmd->cdb[6] = 0;
2897 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2898 rmd->cdb[9] = 0;
2899 rmd->cdb_length = 10;
2900 }
2901}
2902
7a012c23 2903static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
583891c9 2904 struct pqi_scsi_dev_raid_map_data *rmd)
7a012c23
DB
2905{
2906 u32 index;
2907 u32 group;
2908
2909 group = rmd->map_index / rmd->data_disks_per_row;
2910
2911 index = rmd->map_index - (group * rmd->data_disks_per_row);
2912 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2913 index += rmd->data_disks_per_row;
2914 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2915 if (rmd->layout_map_count > 2) {
2916 index += rmd->data_disks_per_row;
2917 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2918 }
2919
2920 rmd->num_it_nexus_entries = rmd->layout_map_count;
2921}
2922
281a817f
DB
2923static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2924 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2925 struct pqi_queue_group *queue_group)
2926{
281a817f 2927 int rc;
7a012c23
DB
2928 struct raid_map *raid_map;
2929 u32 group;
2930 u32 next_bypass_group;
281a817f
DB
2931 struct pqi_encryption_info *encryption_info_ptr;
2932 struct pqi_encryption_info encryption_info;
583891c9 2933 struct pqi_scsi_dev_raid_map_data rmd = { 0 };
281a817f
DB
2934
2935 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2936 if (rc)
2937 return PQI_RAID_BYPASS_INELIGIBLE;
2938
2939 rmd.raid_level = device->raid_level;
6c223761 2940
6702d2c4 2941 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
281a817f
DB
2942 return PQI_RAID_BYPASS_INELIGIBLE;
2943
2944 if (unlikely(rmd.block_cnt == 0))
2945 return PQI_RAID_BYPASS_INELIGIBLE;
2946
2947 raid_map = device->raid_map;
6c223761 2948
281a817f
DB
2949 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2950 if (rc)
2951 return PQI_RAID_BYPASS_INELIGIBLE;
6c223761 2952
7a012c23
DB
2953 if (device->raid_level == SA_RAID_1 ||
2954 device->raid_level == SA_RAID_TRIPLE) {
2955 if (rmd.is_write) {
2956 pqi_calc_aio_r1_nexus(raid_map, &rmd);
2957 } else {
5d8fbce0 2958 group = device->next_bypass_group[rmd.map_index];
7a012c23
DB
2959 next_bypass_group = group + 1;
2960 if (next_bypass_group >= rmd.layout_map_count)
2961 next_bypass_group = 0;
5d8fbce0 2962 device->next_bypass_group[rmd.map_index] = next_bypass_group;
7a012c23
DB
2963 rmd.map_index += group * rmd.data_disks_per_row;
2964 }
281a817f 2965 } else if ((device->raid_level == SA_RAID_5 ||
6702d2c4
DB
2966 device->raid_level == SA_RAID_6) &&
2967 (rmd.layout_map_count > 1 || rmd.is_write)) {
281a817f
DB
2968 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
2969 if (rc)
2970 return PQI_RAID_BYPASS_INELIGIBLE;
6c223761
KB
2971 }
2972
281a817f
DB
2973 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
2974 return PQI_RAID_BYPASS_INELIGIBLE;
2975
2976 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
2977 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2978 rmd.first_row * rmd.strip_size +
2979 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
2980 rmd.disk_block_cnt = rmd.block_cnt;
6c223761
KB
2981
2982 /* Handle differing logical/physical block sizes. */
2983 if (raid_map->phys_blk_shift) {
281a817f
DB
2984 rmd.disk_block <<= raid_map->phys_blk_shift;
2985 rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
6c223761
KB
2986 }
2987
281a817f 2988 if (unlikely(rmd.disk_block_cnt > 0xffff))
6c223761
KB
2989 return PQI_RAID_BYPASS_INELIGIBLE;
2990
281a817f 2991 pqi_set_aio_cdb(&rmd);
6c223761 2992
583891c9 2993 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
f6cc2a77
KB
2994 if (rmd.data_length > device->max_transfer_encrypted)
2995 return PQI_RAID_BYPASS_INELIGIBLE;
583891c9 2996 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
6c223761
KB
2997 encryption_info_ptr = &encryption_info;
2998 } else {
2999 encryption_info_ptr = NULL;
3000 }
3001
6702d2c4
DB
3002 if (rmd.is_write) {
3003 switch (device->raid_level) {
7a012c23
DB
3004 case SA_RAID_1:
3005 case SA_RAID_TRIPLE:
3006 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
3007 encryption_info_ptr, device, &rmd);
6702d2c4
DB
3008 case SA_RAID_5:
3009 case SA_RAID_6:
3010 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
583891c9 3011 encryption_info_ptr, device, &rmd);
6702d2c4 3012 }
6702d2c4
DB
3013 }
3014
f6cc2a77
KB
3015 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
3016 rmd.cdb, rmd.cdb_length, queue_group,
2a47834d 3017 encryption_info_ptr, true, false);
6c223761
KB
3018}
3019
3020#define PQI_STATUS_IDLE 0x0
3021
3022#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
3023#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
3024
3025#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
3026#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
3027#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
3028#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
3029#define PQI_DEVICE_STATE_ERROR 0x4
3030
3031#define PQI_MODE_READY_TIMEOUT_SECS 30
3032#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
3033
3034static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
3035{
3036 struct pqi_device_registers __iomem *pqi_registers;
3037 unsigned long timeout;
3038 u64 signature;
3039 u8 status;
3040
3041 pqi_registers = ctrl_info->pqi_registers;
42dc0426 3042 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
6c223761
KB
3043
3044 while (1) {
3045 signature = readq(&pqi_registers->signature);
3046 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
3047 sizeof(signature)) == 0)
3048 break;
3049 if (time_after(jiffies, timeout)) {
3050 dev_err(&ctrl_info->pci_dev->dev,
3051 "timed out waiting for PQI signature\n");
3052 return -ETIMEDOUT;
3053 }
3054 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3055 }
3056
3057 while (1) {
3058 status = readb(&pqi_registers->function_and_status_code);
3059 if (status == PQI_STATUS_IDLE)
3060 break;
3061 if (time_after(jiffies, timeout)) {
3062 dev_err(&ctrl_info->pci_dev->dev,
3063 "timed out waiting for PQI IDLE\n");
3064 return -ETIMEDOUT;
3065 }
3066 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3067 }
3068
3069 while (1) {
3070 if (readl(&pqi_registers->device_status) ==
3071 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
3072 break;
3073 if (time_after(jiffies, timeout)) {
3074 dev_err(&ctrl_info->pci_dev->dev,
3075 "timed out waiting for PQI all registers ready\n");
3076 return -ETIMEDOUT;
3077 }
3078 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3079 }
3080
3081 return 0;
3082}
3083
3084static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
3085{
3086 struct pqi_scsi_dev *device;
3087
3088 device = io_request->scmd->device->hostdata;
588a63fe 3089 device->raid_bypass_enabled = false;
376fb880 3090 device->aio_enabled = false;
6c223761
KB
3091}
3092
d87d5474 3093static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
6c223761
KB
3094{
3095 struct pqi_ctrl_info *ctrl_info;
e58081a7 3096 struct pqi_scsi_dev *device;
6c223761 3097
03b288cf
KB
3098 device = sdev->hostdata;
3099 if (device->device_offline)
3100 return;
3101
3102 device->device_offline = true;
03b288cf
KB
3103 ctrl_info = shost_to_hba(sdev->host);
3104 pqi_schedule_rescan_worker(ctrl_info);
a9a68101 3105 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
03b288cf
KB
3106 path, ctrl_info->scsi_host->host_no, device->bus,
3107 device->target, device->lun);
6c223761
KB
3108}
3109
3110static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
3111{
3112 u8 scsi_status;
3113 u8 host_byte;
3114 struct scsi_cmnd *scmd;
3115 struct pqi_raid_error_info *error_info;
3116 size_t sense_data_length;
3117 int residual_count;
3118 int xfer_count;
3119 struct scsi_sense_hdr sshdr;
3120
3121 scmd = io_request->scmd;
3122 if (!scmd)
3123 return;
3124
3125 error_info = io_request->error_info;
3126 scsi_status = error_info->status;
3127 host_byte = DID_OK;
3128
f5b63206
KB
3129 switch (error_info->data_out_result) {
3130 case PQI_DATA_IN_OUT_GOOD:
3131 break;
3132 case PQI_DATA_IN_OUT_UNDERFLOW:
6c223761
KB
3133 xfer_count =
3134 get_unaligned_le32(&error_info->data_out_transferred);
3135 residual_count = scsi_bufflen(scmd) - xfer_count;
3136 scsi_set_resid(scmd, residual_count);
3137 if (xfer_count < scmd->underflow)
3138 host_byte = DID_SOFT_ERROR;
f5b63206
KB
3139 break;
3140 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
3141 case PQI_DATA_IN_OUT_ABORTED:
3142 host_byte = DID_ABORT;
3143 break;
3144 case PQI_DATA_IN_OUT_TIMEOUT:
3145 host_byte = DID_TIME_OUT;
3146 break;
3147 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
3148 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
3149 case PQI_DATA_IN_OUT_BUFFER_ERROR:
3150 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
3151 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
3152 case PQI_DATA_IN_OUT_ERROR:
3153 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
3154 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
3155 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
3156 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
3157 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
3158 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
3159 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
3160 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
3161 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
3162 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
3163 default:
3164 host_byte = DID_ERROR;
3165 break;
6c223761
KB
3166 }
3167
3168 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
3169 if (sense_data_length == 0)
3170 sense_data_length =
3171 get_unaligned_le16(&error_info->response_data_length);
3172 if (sense_data_length) {
3173 if (sense_data_length > sizeof(error_info->data))
3174 sense_data_length = sizeof(error_info->data);
3175
3176 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3177 scsi_normalize_sense(error_info->data,
3178 sense_data_length, &sshdr) &&
3179 sshdr.sense_key == HARDWARE_ERROR &&
8ef860ae 3180 sshdr.asc == 0x3e) {
441b7195
EV
3181 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
3182 struct pqi_scsi_dev *device = scmd->device->hostdata;
3183
8ef860ae
EV
3184 switch (sshdr.ascq) {
3185 case 0x1: /* LOGICAL UNIT FAILURE */
3186 if (printk_ratelimit())
3187 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
3188 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3189 pqi_take_device_offline(scmd->device, "RAID");
3190 host_byte = DID_NO_CONNECT;
3191 break;
3192
3193 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
3194 if (printk_ratelimit())
3195 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
3196 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3197 break;
3198 }
6c223761
KB
3199 }
3200
3201 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3202 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3203 memcpy(scmd->sense_buffer, error_info->data,
3204 sense_data_length);
3205 }
3206
3207 scmd->result = scsi_status;
3208 set_host_byte(scmd, host_byte);
3209}
3210
3211static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3212{
3213 u8 scsi_status;
3214 u8 host_byte;
3215 struct scsi_cmnd *scmd;
3216 struct pqi_aio_error_info *error_info;
3217 size_t sense_data_length;
3218 int residual_count;
3219 int xfer_count;
3220 bool device_offline;
94a68c81 3221 struct pqi_scsi_dev *device;
6c223761
KB
3222
3223 scmd = io_request->scmd;
3224 error_info = io_request->error_info;
3225 host_byte = DID_OK;
3226 sense_data_length = 0;
3227 device_offline = false;
94a68c81 3228 device = scmd->device->hostdata;
6c223761
KB
3229
3230 switch (error_info->service_response) {
3231 case PQI_AIO_SERV_RESPONSE_COMPLETE:
3232 scsi_status = error_info->status;
3233 break;
3234 case PQI_AIO_SERV_RESPONSE_FAILURE:
3235 switch (error_info->status) {
3236 case PQI_AIO_STATUS_IO_ABORTED:
3237 scsi_status = SAM_STAT_TASK_ABORTED;
3238 break;
3239 case PQI_AIO_STATUS_UNDERRUN:
3240 scsi_status = SAM_STAT_GOOD;
3241 residual_count = get_unaligned_le32(
3242 &error_info->residual_count);
3243 scsi_set_resid(scmd, residual_count);
3244 xfer_count = scsi_bufflen(scmd) - residual_count;
3245 if (xfer_count < scmd->underflow)
3246 host_byte = DID_SOFT_ERROR;
3247 break;
3248 case PQI_AIO_STATUS_OVERRUN:
3249 scsi_status = SAM_STAT_GOOD;
3250 break;
3251 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3252 pqi_aio_path_disabled(io_request);
94a68c81
MB
3253 if (pqi_is_multipath_device(device)) {
3254 pqi_device_remove_start(device);
3255 host_byte = DID_NO_CONNECT;
3256 scsi_status = SAM_STAT_CHECK_CONDITION;
3257 } else {
3258 scsi_status = SAM_STAT_GOOD;
3259 io_request->status = -EAGAIN;
3260 }
6c223761
KB
3261 break;
3262 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3263 case PQI_AIO_STATUS_INVALID_DEVICE:
376fb880
KB
3264 if (!io_request->raid_bypass) {
3265 device_offline = true;
3266 pqi_take_device_offline(scmd->device, "AIO");
3267 host_byte = DID_NO_CONNECT;
3268 }
6c223761
KB
3269 scsi_status = SAM_STAT_CHECK_CONDITION;
3270 break;
3271 case PQI_AIO_STATUS_IO_ERROR:
3272 default:
3273 scsi_status = SAM_STAT_CHECK_CONDITION;
3274 break;
3275 }
3276 break;
3277 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3278 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3279 scsi_status = SAM_STAT_GOOD;
3280 break;
3281 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3282 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3283 default:
3284 scsi_status = SAM_STAT_CHECK_CONDITION;
3285 break;
3286 }
3287
3288 if (error_info->data_present) {
3289 sense_data_length =
3290 get_unaligned_le16(&error_info->data_length);
3291 if (sense_data_length) {
3292 if (sense_data_length > sizeof(error_info->data))
3293 sense_data_length = sizeof(error_info->data);
3294 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3295 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3296 memcpy(scmd->sense_buffer, error_info->data,
3297 sense_data_length);
3298 }
3299 }
3300
3301 if (device_offline && sense_data_length == 0)
f2b1e9c6 3302 scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
6c223761
KB
3303
3304 scmd->result = scsi_status;
3305 set_host_byte(scmd, host_byte);
3306}
3307
3308static void pqi_process_io_error(unsigned int iu_type,
3309 struct pqi_io_request *io_request)
3310{
3311 switch (iu_type) {
3312 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3313 pqi_process_raid_io_error(io_request);
3314 break;
3315 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3316 pqi_process_aio_io_error(io_request);
3317 break;
3318 }
3319}
3320
18ff5f08 3321static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
3322 struct pqi_task_management_response *response)
3323{
3324 int rc;
3325
3326 switch (response->response_code) {
b17f0486
KB
3327 case SOP_TMF_COMPLETE:
3328 case SOP_TMF_FUNCTION_SUCCEEDED:
6c223761
KB
3329 rc = 0;
3330 break;
3406384b
MR
3331 case SOP_TMF_REJECTED:
3332 rc = -EAGAIN;
3333 break;
4e7d2602
MM
3334 case SOP_RC_INCORRECT_LOGICAL_UNIT:
3335 rc = -ENODEV;
3336 break;
6c223761
KB
3337 default:
3338 rc = -EIO;
3339 break;
3340 }
3341
18ff5f08
KB
3342 if (rc)
3343 dev_err(&ctrl_info->pci_dev->dev,
3344 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3345
6c223761
KB
3346 return rc;
3347}
3348
5d1f03e6
MB
3349static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
3350 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
6c223761 3351{
5d1f03e6 3352 pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
9e68cccc
KB
3353}
3354
3355static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
6c223761 3356{
9e68cccc 3357 int num_responses;
6c223761
KB
3358 pqi_index_t oq_pi;
3359 pqi_index_t oq_ci;
3360 struct pqi_io_request *io_request;
3361 struct pqi_io_response *response;
3362 u16 request_id;
3363
3364 num_responses = 0;
3365 oq_ci = queue_group->oq_ci_copy;
3366
3367 while (1) {
dac12fbc 3368 oq_pi = readl(queue_group->oq_pi);
9e68cccc 3369 if (oq_pi >= ctrl_info->num_elements_per_oq) {
5d1f03e6 3370 pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
9e68cccc
KB
3371 dev_err(&ctrl_info->pci_dev->dev,
3372 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3373 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3374 return -1;
3375 }
6c223761
KB
3376 if (oq_pi == oq_ci)
3377 break;
3378
3379 num_responses++;
3380 response = queue_group->oq_element_array +
3381 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3382
3383 request_id = get_unaligned_le16(&response->request_id);
9e68cccc 3384 if (request_id >= ctrl_info->max_io_slots) {
5d1f03e6 3385 pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
9e68cccc
KB
3386 dev_err(&ctrl_info->pci_dev->dev,
3387 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
3388 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3389 return -1;
3390 }
6c223761
KB
3391
3392 io_request = &ctrl_info->io_request_pool[request_id];
9e68cccc 3393 if (atomic_read(&io_request->refcount) == 0) {
5d1f03e6 3394 pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
9e68cccc
KB
3395 dev_err(&ctrl_info->pci_dev->dev,
3396 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
3397 request_id, oq_pi, oq_ci);
3398 return -1;
3399 }
6c223761
KB
3400
3401 switch (response->header.iu_type) {
3402 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3403 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2ba55c98
KB
3404 if (io_request->scmd)
3405 io_request->scmd->result = 0;
df561f66 3406 fallthrough;
6c223761
KB
3407 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3408 break;
b212c251
KB
3409 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3410 io_request->status =
3411 get_unaligned_le16(
583891c9 3412 &((struct pqi_vendor_general_response *)response)->status);
b212c251 3413 break;
6c223761 3414 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
18ff5f08
KB
3415 io_request->status = pqi_interpret_task_management_response(ctrl_info,
3416 (void *)response);
6c223761
KB
3417 break;
3418 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3419 pqi_aio_path_disabled(io_request);
3420 io_request->status = -EAGAIN;
3421 break;
3422 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3423 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3424 io_request->error_info = ctrl_info->error_buffer +
3425 (get_unaligned_le16(&response->error_index) *
3426 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
9e68cccc 3427 pqi_process_io_error(response->header.iu_type, io_request);
6c223761
KB
3428 break;
3429 default:
5d1f03e6 3430 pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
6c223761 3431 dev_err(&ctrl_info->pci_dev->dev,
9e68cccc
KB
3432 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
3433 response->header.iu_type, oq_pi, oq_ci);
3434 return -1;
6c223761
KB
3435 }
3436
9e68cccc 3437 io_request->io_complete_callback(io_request, io_request->context);
6c223761
KB
3438
3439 /*
3440 * Note that the I/O request structure CANNOT BE TOUCHED after
3441 * returning from the I/O completion callback!
3442 */
6c223761
KB
3443 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3444 }
3445
3446 if (num_responses) {
3447 queue_group->oq_ci_copy = oq_ci;
3448 writel(oq_ci, queue_group->oq_ci);
3449 }
3450
3451 return num_responses;
3452}
3453
3454static inline unsigned int pqi_num_elements_free(unsigned int pi,
df7a1fcf 3455 unsigned int ci, unsigned int elements_in_queue)
6c223761
KB
3456{
3457 unsigned int num_elements_used;
3458
3459 if (pi >= ci)
3460 num_elements_used = pi - ci;
3461 else
3462 num_elements_used = elements_in_queue - ci + pi;
3463
3464 return elements_in_queue - num_elements_used - 1;
3465}
3466
98f87667 3467static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
3468 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3469{
3470 pqi_index_t iq_pi;
3471 pqi_index_t iq_ci;
3472 unsigned long flags;
3473 void *next_element;
6c223761
KB
3474 struct pqi_queue_group *queue_group;
3475
3476 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3477 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3478
6c223761
KB
3479 while (1) {
3480 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3481
3482 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
dac12fbc 3483 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
6c223761
KB
3484
3485 if (pqi_num_elements_free(iq_pi, iq_ci,
3486 ctrl_info->num_elements_per_iq))
3487 break;
3488
3489 spin_unlock_irqrestore(
3490 &queue_group->submit_lock[RAID_PATH], flags);
3491
98f87667 3492 if (pqi_ctrl_offline(ctrl_info))
6c223761 3493 return;
6c223761
KB
3494 }
3495
3496 next_element = queue_group->iq_element_array[RAID_PATH] +
3497 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3498
3499 memcpy(next_element, iu, iu_length);
3500
3501 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
6c223761
KB
3502 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3503
3504 /*
3505 * This write notifies the controller that an IU is available to be
3506 * processed.
3507 */
3508 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3509
3510 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
6c223761
KB
3511}
3512
3513static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3514 struct pqi_event *event)
3515{
3516 struct pqi_event_acknowledge_request request;
3517
3518 memset(&request, 0, sizeof(request));
3519
3520 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3521 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3522 &request.header.iu_length);
3523 request.event_type = event->event_type;
06b41e0d
KB
3524 put_unaligned_le16(event->event_id, &request.event_id);
3525 put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
6c223761 3526
98f87667 3527 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
6c223761
KB
3528}
3529
4fd22c13
MR
3530#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3531#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3532
3533static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3534 struct pqi_ctrl_info *ctrl_info)
6c223761 3535{
4fd22c13 3536 u8 status;
583891c9 3537 unsigned long timeout;
6c223761 3538
42dc0426 3539 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
6c223761 3540
4fd22c13
MR
3541 while (1) {
3542 status = pqi_read_soft_reset_status(ctrl_info);
3543 if (status & PQI_SOFT_RESET_INITIATE)
3544 return RESET_INITIATE_DRIVER;
3545
3546 if (status & PQI_SOFT_RESET_ABORT)
3547 return RESET_ABORT;
3548
4ccc354b
KB
3549 if (!sis_is_firmware_running(ctrl_info))
3550 return RESET_NORESPONSE;
3551
4fd22c13 3552 if (time_after(jiffies, timeout)) {
4ccc354b 3553 dev_warn(&ctrl_info->pci_dev->dev,
4fd22c13
MR
3554 "timed out waiting for soft reset status\n");
3555 return RESET_TIMEDOUT;
3556 }
3557
4fd22c13
MR
3558 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3559 }
3560}
3561
4ccc354b 3562static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
4fd22c13
MR
3563{
3564 int rc;
2790cd4d 3565 unsigned int delay_secs;
4ccc354b
KB
3566 enum pqi_soft_reset_status reset_status;
3567
3568 if (ctrl_info->soft_reset_handshake_supported)
3569 reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3570 else
3571 reset_status = RESET_INITIATE_FIRMWARE;
4fd22c13 3572
2790cd4d 3573 delay_secs = PQI_POST_RESET_DELAY_SECS;
4fd22c13
MR
3574
3575 switch (reset_status) {
4fd22c13 3576 case RESET_TIMEDOUT:
2790cd4d 3577 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
4ccc354b
KB
3578 fallthrough;
3579 case RESET_INITIATE_DRIVER:
4fd22c13 3580 dev_info(&ctrl_info->pci_dev->dev,
4ccc354b 3581 "Online Firmware Activation: resetting controller\n");
4fd22c13 3582 sis_soft_reset(ctrl_info);
df561f66 3583 fallthrough;
4fd22c13 3584 case RESET_INITIATE_FIRMWARE:
4ccc354b
KB
3585 ctrl_info->pqi_mode_enabled = false;
3586 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
2790cd4d 3587 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
4fd22c13 3588 pqi_ofa_free_host_buffer(ctrl_info);
4ccc354b 3589 pqi_ctrl_ofa_done(ctrl_info);
4fd22c13 3590 dev_info(&ctrl_info->pci_dev->dev,
4ccc354b
KB
3591 "Online Firmware Activation: %s\n",
3592 rc == 0 ? "SUCCESS" : "FAILED");
4fd22c13
MR
3593 break;
3594 case RESET_ABORT:
4fd22c13 3595 dev_info(&ctrl_info->pci_dev->dev,
4ccc354b
KB
3596 "Online Firmware Activation ABORTED\n");
3597 if (ctrl_info->soft_reset_handshake_supported)
3598 pqi_clear_soft_reset_status(ctrl_info);
3599 pqi_ofa_free_host_buffer(ctrl_info);
3600 pqi_ctrl_ofa_done(ctrl_info);
3601 pqi_ofa_ctrl_unquiesce(ctrl_info);
4fd22c13
MR
3602 break;
3603 case RESET_NORESPONSE:
4ccc354b
KB
3604 fallthrough;
3605 default:
3606 dev_err(&ctrl_info->pci_dev->dev,
3607 "unexpected Online Firmware Activation reset status: 0x%x\n",
3608 reset_status);
4fd22c13 3609 pqi_ofa_free_host_buffer(ctrl_info);
4ccc354b
KB
3610 pqi_ctrl_ofa_done(ctrl_info);
3611 pqi_ofa_ctrl_unquiesce(ctrl_info);
5d1f03e6 3612 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
4fd22c13
MR
3613 break;
3614 }
3615}
3616
2790cd4d 3617static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
4fd22c13 3618{
2790cd4d 3619 struct pqi_ctrl_info *ctrl_info;
4fd22c13 3620
2790cd4d 3621 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
4fd22c13 3622
2790cd4d
KB
3623 pqi_ctrl_ofa_start(ctrl_info);
3624 pqi_ofa_setup_host_buffer(ctrl_info);
3625 pqi_ofa_host_memory_update(ctrl_info);
3626}
4fd22c13 3627
2790cd4d
KB
3628static void pqi_ofa_quiesce_worker(struct work_struct *work)
3629{
3630 struct pqi_ctrl_info *ctrl_info;
3631 struct pqi_event *event;
4fd22c13 3632
2790cd4d
KB
3633 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3634
3635 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3636
3637 pqi_ofa_ctrl_quiesce(ctrl_info);
3638 pqi_acknowledge_event(ctrl_info, event);
3639 pqi_process_soft_reset(ctrl_info);
3640}
4fd22c13 3641
2790cd4d
KB
3642static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3643 struct pqi_event *event)
3644{
3645 bool ack_event;
3646
3647 ack_event = true;
3648
3649 switch (event->event_id) {
3650 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
4fd22c13 3651 dev_info(&ctrl_info->pci_dev->dev,
2790cd4d
KB
3652 "received Online Firmware Activation memory allocation request\n");
3653 schedule_work(&ctrl_info->ofa_memory_alloc_work);
3654 break;
3655 case PQI_EVENT_OFA_QUIESCE:
4fd22c13 3656 dev_info(&ctrl_info->pci_dev->dev,
2790cd4d
KB
3657 "received Online Firmware Activation quiesce request\n");
3658 schedule_work(&ctrl_info->ofa_quiesce_work);
3659 ack_event = false;
3660 break;
3661 case PQI_EVENT_OFA_CANCELED:
4fd22c13 3662 dev_info(&ctrl_info->pci_dev->dev,
2790cd4d
KB
3663 "received Online Firmware Activation cancel request: reason: %u\n",
3664 ctrl_info->ofa_cancel_reason);
3665 pqi_ofa_free_host_buffer(ctrl_info);
3666 pqi_ctrl_ofa_done(ctrl_info);
3667 break;
3668 default:
3669 dev_err(&ctrl_info->pci_dev->dev,
3670 "received unknown Online Firmware Activation request: event ID: %u\n",
3671 event->event_id);
3672 break;
4fd22c13
MR
3673 }
3674
2790cd4d 3675 return ack_event;
4fd22c13
MR
3676}
3677
6c223761
KB
3678static void pqi_event_worker(struct work_struct *work)
3679{
3680 unsigned int i;
2790cd4d 3681 bool rescan_needed;
6c223761 3682 struct pqi_ctrl_info *ctrl_info;
6a50d6ad 3683 struct pqi_event *event;
2790cd4d 3684 bool ack_event;
6c223761
KB
3685
3686 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3687
7561a7e4 3688 pqi_ctrl_busy(ctrl_info);
ae0c189d 3689 pqi_wait_if_ctrl_blocked(ctrl_info);
5f310425
KB
3690 if (pqi_ctrl_offline(ctrl_info))
3691 goto out;
3692
2790cd4d 3693 rescan_needed = false;
6a50d6ad 3694 event = ctrl_info->events;
6c223761 3695 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
6a50d6ad
KB
3696 if (event->pending) {
3697 event->pending = false;
4fd22c13 3698 if (event->event_type == PQI_EVENT_TYPE_OFA) {
2790cd4d
KB
3699 ack_event = pqi_ofa_process_event(ctrl_info, event);
3700 } else {
3701 ack_event = true;
3702 rescan_needed = true;
27655e9d
MR
3703 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
3704 ctrl_info->logical_volume_rescan_needed = true;
4fd22c13 3705 }
2790cd4d
KB
3706 if (ack_event)
3707 pqi_acknowledge_event(ctrl_info, event);
6c223761 3708 }
6a50d6ad 3709 event++;
6c223761
KB
3710 }
3711
4e7d2602
MM
3712#define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ)
3713
2790cd4d 3714 if (rescan_needed)
4e7d2602
MM
3715 pqi_schedule_rescan_worker_with_delay(ctrl_info,
3716 PQI_RESCAN_WORK_FOR_EVENT_DELAY);
2790cd4d 3717
5f310425 3718out:
7561a7e4 3719 pqi_ctrl_unbusy(ctrl_info);
6c223761
KB
3720}
3721
42dc0426 3722#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
6c223761 3723
74a0f573 3724static void pqi_heartbeat_timer_handler(struct timer_list *t)
6c223761
KB
3725{
3726 int num_interrupts;
98f87667 3727 u32 heartbeat_count;
583891c9 3728 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
6c223761 3729
98f87667
KB
3730 pqi_check_ctrl_health(ctrl_info);
3731 if (pqi_ctrl_offline(ctrl_info))
061ef06a
KB
3732 return;
3733
6c223761 3734 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
98f87667 3735 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
6c223761
KB
3736
3737 if (num_interrupts == ctrl_info->previous_num_interrupts) {
98f87667
KB
3738 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3739 dev_err(&ctrl_info->pci_dev->dev,
3740 "no heartbeat detected - last heartbeat count: %u\n",
3741 heartbeat_count);
5d1f03e6 3742 pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
6c223761
KB
3743 return;
3744 }
6c223761 3745 } else {
98f87667 3746 ctrl_info->previous_num_interrupts = num_interrupts;
6c223761
KB
3747 }
3748
98f87667 3749 ctrl_info->previous_heartbeat_count = heartbeat_count;
6c223761
KB
3750 mod_timer(&ctrl_info->heartbeat_timer,
3751 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3752}
3753
3754static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3755{
98f87667
KB
3756 if (!ctrl_info->heartbeat_counter)
3757 return;
3758
6c223761
KB
3759 ctrl_info->previous_num_interrupts =
3760 atomic_read(&ctrl_info->num_interrupts);
98f87667
KB
3761 ctrl_info->previous_heartbeat_count =
3762 pqi_read_heartbeat_counter(ctrl_info);
6c223761 3763
6c223761
KB
3764 ctrl_info->heartbeat_timer.expires =
3765 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
061ef06a 3766 add_timer(&ctrl_info->heartbeat_timer);
6c223761
KB
3767}
3768
3769static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3770{
98f87667 3771 del_timer_sync(&ctrl_info->heartbeat_timer);
6c223761
KB
3772}
3773
2790cd4d
KB
3774static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3775 struct pqi_event *event, struct pqi_event_response *response)
4fd22c13 3776{
2790cd4d
KB
3777 switch (event->event_id) {
3778 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3779 ctrl_info->ofa_bytes_requested =
3780 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3781 break;
3782 case PQI_EVENT_OFA_CANCELED:
3783 ctrl_info->ofa_cancel_reason =
3784 get_unaligned_le16(&response->data.ofa_cancelled.reason);
3785 break;
4fd22c13
MR
3786 }
3787}
3788
9e68cccc 3789static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
6c223761 3790{
9e68cccc 3791 int num_events;
6c223761
KB
3792 pqi_index_t oq_pi;
3793 pqi_index_t oq_ci;
3794 struct pqi_event_queue *event_queue;
3795 struct pqi_event_response *response;
6a50d6ad 3796 struct pqi_event *event;
6c223761
KB
3797 int event_index;
3798
3799 event_queue = &ctrl_info->event_queue;
3800 num_events = 0;
6c223761
KB
3801 oq_ci = event_queue->oq_ci_copy;
3802
3803 while (1) {
dac12fbc 3804 oq_pi = readl(event_queue->oq_pi);
9e68cccc 3805 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
5d1f03e6 3806 pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
9e68cccc
KB
3807 dev_err(&ctrl_info->pci_dev->dev,
3808 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3809 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3810 return -1;
3811 }
3812
6c223761
KB
3813 if (oq_pi == oq_ci)
3814 break;
3815
3816 num_events++;
9e68cccc 3817 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
6c223761 3818
583891c9 3819 event_index = pqi_event_type_to_event_index(response->event_type);
6c223761 3820
9e68cccc
KB
3821 if (event_index >= 0 && response->request_acknowledge) {
3822 event = &ctrl_info->events[event_index];
3823 event->pending = true;
3824 event->event_type = response->event_type;
06b41e0d
KB
3825 event->event_id = get_unaligned_le16(&response->event_id);
3826 event->additional_event_id =
3827 get_unaligned_le32(&response->additional_event_id);
9e68cccc 3828 if (event->event_type == PQI_EVENT_TYPE_OFA)
2790cd4d 3829 pqi_ofa_capture_event_payload(ctrl_info, event, response);
6c223761
KB
3830 }
3831
3832 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3833 }
3834
3835 if (num_events) {
3836 event_queue->oq_ci_copy = oq_ci;
3837 writel(oq_ci, event_queue->oq_ci);
98f87667 3838 schedule_work(&ctrl_info->event_work);
6c223761
KB
3839 }
3840
3841 return num_events;
3842}
3843
061ef06a
KB
3844#define PQI_LEGACY_INTX_MASK 0x1
3845
583891c9 3846static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
061ef06a
KB
3847{
3848 u32 intx_mask;
3849 struct pqi_device_registers __iomem *pqi_registers;
3850 volatile void __iomem *register_addr;
3851
3852 pqi_registers = ctrl_info->pqi_registers;
3853
3854 if (enable_intx)
3855 register_addr = &pqi_registers->legacy_intx_mask_clear;
3856 else
3857 register_addr = &pqi_registers->legacy_intx_mask_set;
3858
3859 intx_mask = readl(register_addr);
3860 intx_mask |= PQI_LEGACY_INTX_MASK;
3861 writel(intx_mask, register_addr);
3862}
3863
3864static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3865 enum pqi_irq_mode new_mode)
3866{
3867 switch (ctrl_info->irq_mode) {
3868 case IRQ_MODE_MSIX:
3869 switch (new_mode) {
3870 case IRQ_MODE_MSIX:
3871 break;
3872 case IRQ_MODE_INTX:
3873 pqi_configure_legacy_intx(ctrl_info, true);
061ef06a
KB
3874 sis_enable_intx(ctrl_info);
3875 break;
3876 case IRQ_MODE_NONE:
061ef06a
KB
3877 break;
3878 }
3879 break;
3880 case IRQ_MODE_INTX:
3881 switch (new_mode) {
3882 case IRQ_MODE_MSIX:
3883 pqi_configure_legacy_intx(ctrl_info, false);
061ef06a
KB
3884 sis_enable_msix(ctrl_info);
3885 break;
3886 case IRQ_MODE_INTX:
3887 break;
3888 case IRQ_MODE_NONE:
3889 pqi_configure_legacy_intx(ctrl_info, false);
061ef06a
KB
3890 break;
3891 }
3892 break;
3893 case IRQ_MODE_NONE:
3894 switch (new_mode) {
3895 case IRQ_MODE_MSIX:
3896 sis_enable_msix(ctrl_info);
3897 break;
3898 case IRQ_MODE_INTX:
3899 pqi_configure_legacy_intx(ctrl_info, true);
3900 sis_enable_intx(ctrl_info);
3901 break;
3902 case IRQ_MODE_NONE:
3903 break;
3904 }
3905 break;
3906 }
3907
3908 ctrl_info->irq_mode = new_mode;
3909}
3910
3911#define PQI_LEGACY_INTX_PENDING 0x1
3912
3913static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3914{
3915 bool valid_irq;
3916 u32 intx_status;
3917
3918 switch (ctrl_info->irq_mode) {
3919 case IRQ_MODE_MSIX:
3920 valid_irq = true;
3921 break;
3922 case IRQ_MODE_INTX:
583891c9 3923 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
061ef06a
KB
3924 if (intx_status & PQI_LEGACY_INTX_PENDING)
3925 valid_irq = true;
3926 else
3927 valid_irq = false;
3928 break;
3929 case IRQ_MODE_NONE:
3930 default:
3931 valid_irq = false;
3932 break;
3933 }
3934
3935 return valid_irq;
3936}
3937
6c223761
KB
3938static irqreturn_t pqi_irq_handler(int irq, void *data)
3939{
3940 struct pqi_ctrl_info *ctrl_info;
3941 struct pqi_queue_group *queue_group;
9e68cccc
KB
3942 int num_io_responses_handled;
3943 int num_events_handled;
6c223761
KB
3944
3945 queue_group = data;
3946 ctrl_info = queue_group->ctrl_info;
3947
061ef06a 3948 if (!pqi_is_valid_irq(ctrl_info))
6c223761
KB
3949 return IRQ_NONE;
3950
9e68cccc
KB
3951 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3952 if (num_io_responses_handled < 0)
3953 goto out;
6c223761 3954
9e68cccc
KB
3955 if (irq == ctrl_info->event_irq) {
3956 num_events_handled = pqi_process_event_intr(ctrl_info);
3957 if (num_events_handled < 0)
3958 goto out;
3959 } else {
3960 num_events_handled = 0;
3961 }
6c223761 3962
9e68cccc 3963 if (num_io_responses_handled + num_events_handled > 0)
6c223761
KB
3964 atomic_inc(&ctrl_info->num_interrupts);
3965
3966 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3967 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3968
9e68cccc 3969out:
6c223761
KB
3970 return IRQ_HANDLED;
3971}
3972
3973static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3974{
d91d7820 3975 struct pci_dev *pci_dev = ctrl_info->pci_dev;
6c223761
KB
3976 int i;
3977 int rc;
3978
d91d7820 3979 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
6c223761
KB
3980
3981 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
d91d7820 3982 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
52198226 3983 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
6c223761 3984 if (rc) {
d91d7820 3985 dev_err(&pci_dev->dev,
6c223761 3986 "irq %u init failed with error %d\n",
d91d7820 3987 pci_irq_vector(pci_dev, i), rc);
6c223761
KB
3988 return rc;
3989 }
3990 ctrl_info->num_msix_vectors_initialized++;
3991 }
3992
3993 return 0;
3994}
3995
98bf061b
KB
3996static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3997{
3998 int i;
3999
4000 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
4001 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
4002 &ctrl_info->queue_groups[i]);
4003
4004 ctrl_info->num_msix_vectors_initialized = 0;
4005}
4006
6c223761
KB
4007static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4008{
98bf061b 4009 int num_vectors_enabled;
6c223761 4010
98bf061b 4011 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
52198226
CH
4012 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
4013 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
98bf061b 4014 if (num_vectors_enabled < 0) {
6c223761 4015 dev_err(&ctrl_info->pci_dev->dev,
98bf061b
KB
4016 "MSI-X init failed with error %d\n",
4017 num_vectors_enabled);
4018 return num_vectors_enabled;
6c223761
KB
4019 }
4020
98bf061b 4021 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
061ef06a 4022 ctrl_info->irq_mode = IRQ_MODE_MSIX;
6c223761
KB
4023 return 0;
4024}
4025
98bf061b
KB
4026static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4027{
4028 if (ctrl_info->num_msix_vectors_enabled) {
4029 pci_free_irq_vectors(ctrl_info->pci_dev);
4030 ctrl_info->num_msix_vectors_enabled = 0;
4031 }
4032}
4033
6c223761
KB
4034static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
4035{
4036 unsigned int i;
4037 size_t alloc_length;
4038 size_t element_array_length_per_iq;
4039 size_t element_array_length_per_oq;
4040 void *element_array;
dac12fbc 4041 void __iomem *next_queue_index;
6c223761
KB
4042 void *aligned_pointer;
4043 unsigned int num_inbound_queues;
4044 unsigned int num_outbound_queues;
4045 unsigned int num_queue_indexes;
4046 struct pqi_queue_group *queue_group;
4047
4048 element_array_length_per_iq =
4049 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
4050 ctrl_info->num_elements_per_iq;
4051 element_array_length_per_oq =
4052 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
4053 ctrl_info->num_elements_per_oq;
4054 num_inbound_queues = ctrl_info->num_queue_groups * 2;
4055 num_outbound_queues = ctrl_info->num_queue_groups;
4056 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
4057
4058 aligned_pointer = NULL;
4059
4060 for (i = 0; i < num_inbound_queues; i++) {
4061 aligned_pointer = PTR_ALIGN(aligned_pointer,
4062 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4063 aligned_pointer += element_array_length_per_iq;
4064 }
4065
4066 for (i = 0; i < num_outbound_queues; i++) {
4067 aligned_pointer = PTR_ALIGN(aligned_pointer,
4068 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4069 aligned_pointer += element_array_length_per_oq;
4070 }
4071
4072 aligned_pointer = PTR_ALIGN(aligned_pointer,
4073 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4074 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4075 PQI_EVENT_OQ_ELEMENT_LENGTH;
4076
4077 for (i = 0; i < num_queue_indexes; i++) {
4078 aligned_pointer = PTR_ALIGN(aligned_pointer,
4079 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4080 aligned_pointer += sizeof(pqi_index_t);
4081 }
4082
4083 alloc_length = (size_t)aligned_pointer +
4084 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4085
e1d213bd
KB
4086 alloc_length += PQI_EXTRA_SGL_MEMORY;
4087
6c223761 4088 ctrl_info->queue_memory_base =
750afb08
LC
4089 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4090 &ctrl_info->queue_memory_base_dma_handle,
4091 GFP_KERNEL);
6c223761 4092
d87d5474 4093 if (!ctrl_info->queue_memory_base)
6c223761 4094 return -ENOMEM;
6c223761
KB
4095
4096 ctrl_info->queue_memory_length = alloc_length;
4097
4098 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
4099 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4100
4101 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4102 queue_group = &ctrl_info->queue_groups[i];
4103 queue_group->iq_element_array[RAID_PATH] = element_array;
4104 queue_group->iq_element_array_bus_addr[RAID_PATH] =
4105 ctrl_info->queue_memory_base_dma_handle +
4106 (element_array - ctrl_info->queue_memory_base);
4107 element_array += element_array_length_per_iq;
4108 element_array = PTR_ALIGN(element_array,
4109 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4110 queue_group->iq_element_array[AIO_PATH] = element_array;
4111 queue_group->iq_element_array_bus_addr[AIO_PATH] =
4112 ctrl_info->queue_memory_base_dma_handle +
4113 (element_array - ctrl_info->queue_memory_base);
4114 element_array += element_array_length_per_iq;
4115 element_array = PTR_ALIGN(element_array,
4116 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4117 }
4118
4119 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4120 queue_group = &ctrl_info->queue_groups[i];
4121 queue_group->oq_element_array = element_array;
4122 queue_group->oq_element_array_bus_addr =
4123 ctrl_info->queue_memory_base_dma_handle +
4124 (element_array - ctrl_info->queue_memory_base);
4125 element_array += element_array_length_per_oq;
4126 element_array = PTR_ALIGN(element_array,
4127 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4128 }
4129
4130 ctrl_info->event_queue.oq_element_array = element_array;
4131 ctrl_info->event_queue.oq_element_array_bus_addr =
4132 ctrl_info->queue_memory_base_dma_handle +
4133 (element_array - ctrl_info->queue_memory_base);
4134 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4135 PQI_EVENT_OQ_ELEMENT_LENGTH;
4136
dac12fbc 4137 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
6c223761
KB
4138 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4139
4140 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4141 queue_group = &ctrl_info->queue_groups[i];
4142 queue_group->iq_ci[RAID_PATH] = next_queue_index;
4143 queue_group->iq_ci_bus_addr[RAID_PATH] =
4144 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
4145 (next_queue_index -
4146 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
4147 next_queue_index += sizeof(pqi_index_t);
4148 next_queue_index = PTR_ALIGN(next_queue_index,
4149 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4150 queue_group->iq_ci[AIO_PATH] = next_queue_index;
4151 queue_group->iq_ci_bus_addr[AIO_PATH] =
4152 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
4153 (next_queue_index -
4154 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
4155 next_queue_index += sizeof(pqi_index_t);
4156 next_queue_index = PTR_ALIGN(next_queue_index,
4157 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4158 queue_group->oq_pi = next_queue_index;
4159 queue_group->oq_pi_bus_addr =
4160 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
4161 (next_queue_index -
4162 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
4163 next_queue_index += sizeof(pqi_index_t);
4164 next_queue_index = PTR_ALIGN(next_queue_index,
4165 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4166 }
4167
4168 ctrl_info->event_queue.oq_pi = next_queue_index;
4169 ctrl_info->event_queue.oq_pi_bus_addr =
4170 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
4171 (next_queue_index -
4172 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
4173
4174 return 0;
4175}
4176
4177static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
4178{
4179 unsigned int i;
4180 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4181 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4182
4183 /*
4184 * Initialize the backpointers to the controller structure in
4185 * each operational queue group structure.
4186 */
4187 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4188 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
4189
4190 /*
4191 * Assign IDs to all operational queues. Note that the IDs
4192 * assigned to operational IQs are independent of the IDs
4193 * assigned to operational OQs.
4194 */
4195 ctrl_info->event_queue.oq_id = next_oq_id++;
4196 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4197 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
4198 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
4199 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
4200 }
4201
4202 /*
4203 * Assign MSI-X table entry indexes to all queues. Note that the
4204 * interrupt for the event queue is shared with the first queue group.
4205 */
4206 ctrl_info->event_queue.int_msg_num = 0;
4207 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4208 ctrl_info->queue_groups[i].int_msg_num = i;
4209
4210 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4211 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
4212 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
4213 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
4214 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
4215 }
4216}
4217
4218static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4219{
4220 size_t alloc_length;
4221 struct pqi_admin_queues_aligned *admin_queues_aligned;
4222 struct pqi_admin_queues *admin_queues;
4223
4224 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4225 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4226
4227 ctrl_info->admin_queue_memory_base =
750afb08
LC
4228 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4229 &ctrl_info->admin_queue_memory_base_dma_handle,
4230 GFP_KERNEL);
6c223761
KB
4231
4232 if (!ctrl_info->admin_queue_memory_base)
4233 return -ENOMEM;
4234
4235 ctrl_info->admin_queue_memory_length = alloc_length;
4236
4237 admin_queues = &ctrl_info->admin_queues;
4238 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4239 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4240 admin_queues->iq_element_array =
4241 &admin_queues_aligned->iq_element_array;
4242 admin_queues->oq_element_array =
4243 &admin_queues_aligned->oq_element_array;
583891c9
KB
4244 admin_queues->iq_ci =
4245 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
dac12fbc
KB
4246 admin_queues->oq_pi =
4247 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
6c223761
KB
4248
4249 admin_queues->iq_element_array_bus_addr =
4250 ctrl_info->admin_queue_memory_base_dma_handle +
4251 (admin_queues->iq_element_array -
4252 ctrl_info->admin_queue_memory_base);
4253 admin_queues->oq_element_array_bus_addr =
4254 ctrl_info->admin_queue_memory_base_dma_handle +
4255 (admin_queues->oq_element_array -
4256 ctrl_info->admin_queue_memory_base);
4257 admin_queues->iq_ci_bus_addr =
4258 ctrl_info->admin_queue_memory_base_dma_handle +
583891c9
KB
4259 ((void __iomem *)admin_queues->iq_ci -
4260 (void __iomem *)ctrl_info->admin_queue_memory_base);
6c223761
KB
4261 admin_queues->oq_pi_bus_addr =
4262 ctrl_info->admin_queue_memory_base_dma_handle +
dac12fbc
KB
4263 ((void __iomem *)admin_queues->oq_pi -
4264 (void __iomem *)ctrl_info->admin_queue_memory_base);
6c223761
KB
4265
4266 return 0;
4267}
4268
42dc0426 4269#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
6c223761
KB
4270#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
4271
4272static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4273{
4274 struct pqi_device_registers __iomem *pqi_registers;
4275 struct pqi_admin_queues *admin_queues;
4276 unsigned long timeout;
4277 u8 status;
4278 u32 reg;
4279
4280 pqi_registers = ctrl_info->pqi_registers;
4281 admin_queues = &ctrl_info->admin_queues;
4282
4283 writeq((u64)admin_queues->iq_element_array_bus_addr,
4284 &pqi_registers->admin_iq_element_array_addr);
4285 writeq((u64)admin_queues->oq_element_array_bus_addr,
4286 &pqi_registers->admin_oq_element_array_addr);
4287 writeq((u64)admin_queues->iq_ci_bus_addr,
4288 &pqi_registers->admin_iq_ci_addr);
4289 writeq((u64)admin_queues->oq_pi_bus_addr,
4290 &pqi_registers->admin_oq_pi_addr);
4291
4292 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
e655d469 4293 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
6c223761
KB
4294 (admin_queues->int_msg_num << 16);
4295 writel(reg, &pqi_registers->admin_iq_num_elements);
583891c9 4296
6c223761
KB
4297 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4298 &pqi_registers->function_and_status_code);
4299
4300 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4301 while (1) {
987d3560 4302 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
6c223761
KB
4303 status = readb(&pqi_registers->function_and_status_code);
4304 if (status == PQI_STATUS_IDLE)
4305 break;
4306 if (time_after(jiffies, timeout))
4307 return -ETIMEDOUT;
6c223761
KB
4308 }
4309
4310 /*
4311 * The offset registers are not initialized to the correct
4312 * offsets until *after* the create admin queue pair command
4313 * completes successfully.
4314 */
4315 admin_queues->iq_pi = ctrl_info->iomem_base +
4316 PQI_DEVICE_REGISTERS_OFFSET +
4317 readq(&pqi_registers->admin_iq_pi_offset);
4318 admin_queues->oq_ci = ctrl_info->iomem_base +
4319 PQI_DEVICE_REGISTERS_OFFSET +
4320 readq(&pqi_registers->admin_oq_ci_offset);
4321
4322 return 0;
4323}
4324
4325static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4326 struct pqi_general_admin_request *request)
4327{
4328 struct pqi_admin_queues *admin_queues;
4329 void *next_element;
4330 pqi_index_t iq_pi;
4331
4332 admin_queues = &ctrl_info->admin_queues;
4333 iq_pi = admin_queues->iq_pi_copy;
4334
4335 next_element = admin_queues->iq_element_array +
4336 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4337
4338 memcpy(next_element, request, sizeof(*request));
4339
4340 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4341 admin_queues->iq_pi_copy = iq_pi;
4342
4343 /*
4344 * This write notifies the controller that an IU is available to be
4345 * processed.
4346 */
4347 writel(iq_pi, admin_queues->iq_pi);
4348}
4349
13bede67
KB
4350#define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
4351
6c223761
KB
4352static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4353 struct pqi_general_admin_response *response)
4354{
4355 struct pqi_admin_queues *admin_queues;
4356 pqi_index_t oq_pi;
4357 pqi_index_t oq_ci;
4358 unsigned long timeout;
4359
4360 admin_queues = &ctrl_info->admin_queues;
4361 oq_ci = admin_queues->oq_ci_copy;
4362
42dc0426 4363 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
6c223761
KB
4364
4365 while (1) {
dac12fbc 4366 oq_pi = readl(admin_queues->oq_pi);
6c223761
KB
4367 if (oq_pi != oq_ci)
4368 break;
4369 if (time_after(jiffies, timeout)) {
4370 dev_err(&ctrl_info->pci_dev->dev,
4371 "timed out waiting for admin response\n");
4372 return -ETIMEDOUT;
4373 }
13bede67
KB
4374 if (!sis_is_firmware_running(ctrl_info))
4375 return -ENXIO;
6c223761
KB
4376 usleep_range(1000, 2000);
4377 }
4378
4379 memcpy(response, admin_queues->oq_element_array +
4380 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4381
4382 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4383 admin_queues->oq_ci_copy = oq_ci;
4384 writel(oq_ci, admin_queues->oq_ci);
4385
4386 return 0;
4387}
4388
4389static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4390 struct pqi_queue_group *queue_group, enum pqi_io_path path,
4391 struct pqi_io_request *io_request)
4392{
4393 struct pqi_io_request *next;
4394 void *next_element;
4395 pqi_index_t iq_pi;
4396 pqi_index_t iq_ci;
4397 size_t iu_length;
4398 unsigned long flags;
4399 unsigned int num_elements_needed;
4400 unsigned int num_elements_to_end_of_queue;
4401 size_t copy_count;
4402 struct pqi_iu_header *request;
4403
4404 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4405
376fb880
KB
4406 if (io_request) {
4407 io_request->queue_group = queue_group;
6c223761
KB
4408 list_add_tail(&io_request->request_list_entry,
4409 &queue_group->request_list[path]);
376fb880 4410 }
6c223761
KB
4411
4412 iq_pi = queue_group->iq_pi_copy[path];
4413
4414 list_for_each_entry_safe(io_request, next,
4415 &queue_group->request_list[path], request_list_entry) {
4416
4417 request = io_request->iu;
4418
4419 iu_length = get_unaligned_le16(&request->iu_length) +
4420 PQI_REQUEST_HEADER_LENGTH;
4421 num_elements_needed =
4422 DIV_ROUND_UP(iu_length,
4423 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4424
dac12fbc 4425 iq_ci = readl(queue_group->iq_ci[path]);
6c223761
KB
4426
4427 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4428 ctrl_info->num_elements_per_iq))
4429 break;
4430
4431 put_unaligned_le16(queue_group->oq_id,
4432 &request->response_queue_id);
4433
4434 next_element = queue_group->iq_element_array[path] +
4435 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4436
4437 num_elements_to_end_of_queue =
4438 ctrl_info->num_elements_per_iq - iq_pi;
4439
4440 if (num_elements_needed <= num_elements_to_end_of_queue) {
4441 memcpy(next_element, request, iu_length);
4442 } else {
4443 copy_count = num_elements_to_end_of_queue *
4444 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4445 memcpy(next_element, request, copy_count);
4446 memcpy(queue_group->iq_element_array[path],
4447 (u8 *)request + copy_count,
4448 iu_length - copy_count);
4449 }
4450
4451 iq_pi = (iq_pi + num_elements_needed) %
4452 ctrl_info->num_elements_per_iq;
4453
4454 list_del(&io_request->request_list_entry);
4455 }
4456
4457 if (iq_pi != queue_group->iq_pi_copy[path]) {
4458 queue_group->iq_pi_copy[path] = iq_pi;
4459 /*
4460 * This write notifies the controller that one or more IUs are
4461 * available to be processed.
4462 */
4463 writel(iq_pi, queue_group->iq_pi[path]);
4464 }
4465
4466 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4467}
4468
1f37e992
KB
4469#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
4470
4471static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4472 struct completion *wait)
4473{
4474 int rc;
1f37e992
KB
4475
4476 while (1) {
4477 if (wait_for_completion_io_timeout(wait,
42dc0426 4478 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
1f37e992
KB
4479 rc = 0;
4480 break;
4481 }
4482
4483 pqi_check_ctrl_health(ctrl_info);
4484 if (pqi_ctrl_offline(ctrl_info)) {
4485 rc = -ENXIO;
4486 break;
4487 }
1f37e992
KB
4488 }
4489
4490 return rc;
4491}
4492
6c223761
KB
4493static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4494 void *context)
4495{
4496 struct completion *waiting = context;
4497
4498 complete(waiting);
4499}
4500
694c5d5b
KB
4501static int pqi_process_raid_io_error_synchronous(
4502 struct pqi_raid_error_info *error_info)
26b390ab
KB
4503{
4504 int rc = -EIO;
4505
4506 switch (error_info->data_out_result) {
4507 case PQI_DATA_IN_OUT_GOOD:
4508 if (error_info->status == SAM_STAT_GOOD)
4509 rc = 0;
4510 break;
4511 case PQI_DATA_IN_OUT_UNDERFLOW:
4512 if (error_info->status == SAM_STAT_GOOD ||
4513 error_info->status == SAM_STAT_CHECK_CONDITION)
4514 rc = 0;
4515 break;
4516 case PQI_DATA_IN_OUT_ABORTED:
4517 rc = PQI_CMD_STATUS_ABORTED;
4518 break;
4519 }
4520
4521 return rc;
4522}
4523
ae0c189d
KB
4524static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4525{
4526 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4527}
4528
6c223761
KB
4529static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4530 struct pqi_iu_header *request, unsigned int flags,
ae0c189d 4531 struct pqi_raid_error_info *error_info)
6c223761 4532{
957c5ab1 4533 int rc = 0;
6c223761 4534 struct pqi_io_request *io_request;
6c223761 4535 size_t iu_length;
957c5ab1 4536 DECLARE_COMPLETION_ONSTACK(wait);
6c223761 4537
6c223761
KB
4538 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4539 if (down_interruptible(&ctrl_info->sync_request_sem))
4540 return -ERESTARTSYS;
4541 } else {
ae0c189d 4542 down(&ctrl_info->sync_request_sem);
6c223761
KB
4543 }
4544
7561a7e4 4545 pqi_ctrl_busy(ctrl_info);
ae0c189d
KB
4546 /*
4547 * Wait for other admin queue updates such as;
4548 * config table changes, OFA memory updates, ...
4549 */
4550 if (pqi_is_blockable_request(request))
4551 pqi_wait_if_ctrl_blocked(ctrl_info);
7561a7e4 4552
376fb880
KB
4553 if (pqi_ctrl_offline(ctrl_info)) {
4554 rc = -ENXIO;
4555 goto out;
4556 }
4557
6c223761
KB
4558 io_request = pqi_alloc_io_request(ctrl_info);
4559
4560 put_unaligned_le16(io_request->index,
4561 &(((struct pqi_raid_path_request *)request)->request_id));
4562
4563 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4564 ((struct pqi_raid_path_request *)request)->error_index =
4565 ((struct pqi_raid_path_request *)request)->request_id;
4566
4567 iu_length = get_unaligned_le16(&request->iu_length) +
4568 PQI_REQUEST_HEADER_LENGTH;
4569 memcpy(io_request->iu, request, iu_length);
4570
957c5ab1
KB
4571 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4572 io_request->context = &wait;
4573
583891c9 4574 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
957c5ab1
KB
4575 io_request);
4576
ae0c189d 4577 pqi_wait_for_completion_io(ctrl_info, &wait);
6c223761
KB
4578
4579 if (error_info) {
4580 if (io_request->error_info)
583891c9 4581 memcpy(error_info, io_request->error_info, sizeof(*error_info));
6c223761
KB
4582 else
4583 memset(error_info, 0, sizeof(*error_info));
4584 } else if (rc == 0 && io_request->error_info) {
583891c9 4585 rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
6c223761
KB
4586 }
4587
4588 pqi_free_io_request(io_request);
4589
7561a7e4 4590out:
ae0c189d 4591 pqi_ctrl_unbusy(ctrl_info);
6c223761
KB
4592 up(&ctrl_info->sync_request_sem);
4593
4594 return rc;
4595}
4596
4597static int pqi_validate_admin_response(
4598 struct pqi_general_admin_response *response, u8 expected_function_code)
4599{
4600 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4601 return -EINVAL;
4602
4603 if (get_unaligned_le16(&response->header.iu_length) !=
4604 PQI_GENERAL_ADMIN_IU_LENGTH)
4605 return -EINVAL;
4606
4607 if (response->function_code != expected_function_code)
4608 return -EINVAL;
4609
4610 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4611 return -EINVAL;
4612
4613 return 0;
4614}
4615
4616static int pqi_submit_admin_request_synchronous(
4617 struct pqi_ctrl_info *ctrl_info,
4618 struct pqi_general_admin_request *request,
4619 struct pqi_general_admin_response *response)
4620{
4621 int rc;
4622
4623 pqi_submit_admin_request(ctrl_info, request);
4624
4625 rc = pqi_poll_for_admin_response(ctrl_info, response);
4626
4627 if (rc == 0)
ae0c189d 4628 rc = pqi_validate_admin_response(response, request->function_code);
6c223761
KB
4629
4630 return rc;
4631}
4632
4633static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4634{
4635 int rc;
4636 struct pqi_general_admin_request request;
4637 struct pqi_general_admin_response response;
4638 struct pqi_device_capability *capability;
4639 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4640
4641 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4642 if (!capability)
4643 return -ENOMEM;
4644
4645 memset(&request, 0, sizeof(request));
4646
4647 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4648 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4649 &request.header.iu_length);
4650 request.function_code =
4651 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4652 put_unaligned_le32(sizeof(*capability),
4653 &request.data.report_device_capability.buffer_length);
4654
4655 rc = pqi_map_single(ctrl_info->pci_dev,
4656 &request.data.report_device_capability.sg_descriptor,
4657 capability, sizeof(*capability),
6917a9cc 4658 DMA_FROM_DEVICE);
6c223761
KB
4659 if (rc)
4660 goto out;
4661
583891c9 4662 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
6c223761
KB
4663
4664 pqi_pci_unmap(ctrl_info->pci_dev,
4665 &request.data.report_device_capability.sg_descriptor, 1,
6917a9cc 4666 DMA_FROM_DEVICE);
6c223761
KB
4667
4668 if (rc)
4669 goto out;
4670
4671 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4672 rc = -EIO;
4673 goto out;
4674 }
4675
4676 ctrl_info->max_inbound_queues =
4677 get_unaligned_le16(&capability->max_inbound_queues);
4678 ctrl_info->max_elements_per_iq =
4679 get_unaligned_le16(&capability->max_elements_per_iq);
4680 ctrl_info->max_iq_element_length =
4681 get_unaligned_le16(&capability->max_iq_element_length)
4682 * 16;
4683 ctrl_info->max_outbound_queues =
4684 get_unaligned_le16(&capability->max_outbound_queues);
4685 ctrl_info->max_elements_per_oq =
4686 get_unaligned_le16(&capability->max_elements_per_oq);
4687 ctrl_info->max_oq_element_length =
4688 get_unaligned_le16(&capability->max_oq_element_length)
4689 * 16;
4690
4691 sop_iu_layer_descriptor =
4692 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4693
4694 ctrl_info->max_inbound_iu_length_per_firmware =
4695 get_unaligned_le16(
4696 &sop_iu_layer_descriptor->max_inbound_iu_length);
4697 ctrl_info->inbound_spanning_supported =
4698 sop_iu_layer_descriptor->inbound_spanning_supported;
4699 ctrl_info->outbound_spanning_supported =
4700 sop_iu_layer_descriptor->outbound_spanning_supported;
4701
4702out:
4703 kfree(capability);
4704
4705 return rc;
4706}
4707
4708static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4709{
4710 if (ctrl_info->max_iq_element_length <
4711 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4712 dev_err(&ctrl_info->pci_dev->dev,
4713 "max. inbound queue element length of %d is less than the required length of %d\n",
4714 ctrl_info->max_iq_element_length,
4715 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4716 return -EINVAL;
4717 }
4718
4719 if (ctrl_info->max_oq_element_length <
4720 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4721 dev_err(&ctrl_info->pci_dev->dev,
4722 "max. outbound queue element length of %d is less than the required length of %d\n",
4723 ctrl_info->max_oq_element_length,
4724 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4725 return -EINVAL;
4726 }
4727
4728 if (ctrl_info->max_inbound_iu_length_per_firmware <
4729 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4730 dev_err(&ctrl_info->pci_dev->dev,
4731 "max. inbound IU length of %u is less than the min. required length of %d\n",
4732 ctrl_info->max_inbound_iu_length_per_firmware,
4733 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4734 return -EINVAL;
4735 }
4736
77668f41
KB
4737 if (!ctrl_info->inbound_spanning_supported) {
4738 dev_err(&ctrl_info->pci_dev->dev,
4739 "the controller does not support inbound spanning\n");
4740 return -EINVAL;
4741 }
4742
4743 if (ctrl_info->outbound_spanning_supported) {
4744 dev_err(&ctrl_info->pci_dev->dev,
4745 "the controller supports outbound spanning but this driver does not\n");
4746 return -EINVAL;
4747 }
4748
6c223761
KB
4749 return 0;
4750}
4751
6c223761
KB
4752static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4753{
4754 int rc;
4755 struct pqi_event_queue *event_queue;
4756 struct pqi_general_admin_request request;
4757 struct pqi_general_admin_response response;
4758
4759 event_queue = &ctrl_info->event_queue;
4760
4761 /*
4762 * Create OQ (Outbound Queue - device to host queue) to dedicate
4763 * to events.
4764 */
4765 memset(&request, 0, sizeof(request));
4766 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4767 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4768 &request.header.iu_length);
4769 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4770 put_unaligned_le16(event_queue->oq_id,
4771 &request.data.create_operational_oq.queue_id);
4772 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4773 &request.data.create_operational_oq.element_array_addr);
4774 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4775 &request.data.create_operational_oq.pi_addr);
4776 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4777 &request.data.create_operational_oq.num_elements);
4778 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4779 &request.data.create_operational_oq.element_length);
4780 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4781 put_unaligned_le16(event_queue->int_msg_num,
4782 &request.data.create_operational_oq.int_msg_num);
4783
4784 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4785 &response);
4786 if (rc)
4787 return rc;
4788
4789 event_queue->oq_ci = ctrl_info->iomem_base +
4790 PQI_DEVICE_REGISTERS_OFFSET +
4791 get_unaligned_le64(
4792 &response.data.create_operational_oq.oq_ci_offset);
4793
4794 return 0;
4795}
4796
061ef06a
KB
4797static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4798 unsigned int group_number)
6c223761 4799{
6c223761
KB
4800 int rc;
4801 struct pqi_queue_group *queue_group;
4802 struct pqi_general_admin_request request;
4803 struct pqi_general_admin_response response;
4804
061ef06a 4805 queue_group = &ctrl_info->queue_groups[group_number];
6c223761
KB
4806
4807 /*
4808 * Create IQ (Inbound Queue - host to device queue) for
4809 * RAID path.
4810 */
4811 memset(&request, 0, sizeof(request));
4812 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4813 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4814 &request.header.iu_length);
4815 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4816 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4817 &request.data.create_operational_iq.queue_id);
4818 put_unaligned_le64(
4819 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4820 &request.data.create_operational_iq.element_array_addr);
4821 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4822 &request.data.create_operational_iq.ci_addr);
4823 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4824 &request.data.create_operational_iq.num_elements);
4825 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4826 &request.data.create_operational_iq.element_length);
4827 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4828
4829 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4830 &response);
4831 if (rc) {
4832 dev_err(&ctrl_info->pci_dev->dev,
4833 "error creating inbound RAID queue\n");
4834 return rc;
4835 }
4836
4837 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4838 PQI_DEVICE_REGISTERS_OFFSET +
4839 get_unaligned_le64(
4840 &response.data.create_operational_iq.iq_pi_offset);
4841
4842 /*
4843 * Create IQ (Inbound Queue - host to device queue) for
4844 * Advanced I/O (AIO) path.
4845 */
4846 memset(&request, 0, sizeof(request));
4847 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4848 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4849 &request.header.iu_length);
4850 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4851 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4852 &request.data.create_operational_iq.queue_id);
4853 put_unaligned_le64((u64)queue_group->
4854 iq_element_array_bus_addr[AIO_PATH],
4855 &request.data.create_operational_iq.element_array_addr);
4856 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4857 &request.data.create_operational_iq.ci_addr);
4858 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4859 &request.data.create_operational_iq.num_elements);
4860 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4861 &request.data.create_operational_iq.element_length);
4862 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4863
4864 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4865 &response);
4866 if (rc) {
4867 dev_err(&ctrl_info->pci_dev->dev,
4868 "error creating inbound AIO queue\n");
339faa81 4869 return rc;
6c223761
KB
4870 }
4871
4872 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4873 PQI_DEVICE_REGISTERS_OFFSET +
4874 get_unaligned_le64(
4875 &response.data.create_operational_iq.iq_pi_offset);
4876
4877 /*
4878 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4879 * assumed to be for RAID path I/O unless we change the queue's
4880 * property.
4881 */
4882 memset(&request, 0, sizeof(request));
4883 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4884 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4885 &request.header.iu_length);
4886 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4887 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4888 &request.data.change_operational_iq_properties.queue_id);
4889 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4890 &request.data.change_operational_iq_properties.vendor_specific);
4891
4892 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4893 &response);
4894 if (rc) {
4895 dev_err(&ctrl_info->pci_dev->dev,
4896 "error changing queue property\n");
339faa81 4897 return rc;
6c223761
KB
4898 }
4899
4900 /*
4901 * Create OQ (Outbound Queue - device to host queue).
4902 */
4903 memset(&request, 0, sizeof(request));
4904 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4905 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4906 &request.header.iu_length);
4907 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4908 put_unaligned_le16(queue_group->oq_id,
4909 &request.data.create_operational_oq.queue_id);
4910 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4911 &request.data.create_operational_oq.element_array_addr);
4912 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4913 &request.data.create_operational_oq.pi_addr);
4914 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4915 &request.data.create_operational_oq.num_elements);
4916 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4917 &request.data.create_operational_oq.element_length);
4918 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4919 put_unaligned_le16(queue_group->int_msg_num,
4920 &request.data.create_operational_oq.int_msg_num);
4921
4922 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4923 &response);
4924 if (rc) {
4925 dev_err(&ctrl_info->pci_dev->dev,
4926 "error creating outbound queue\n");
339faa81 4927 return rc;
6c223761
KB
4928 }
4929
4930 queue_group->oq_ci = ctrl_info->iomem_base +
4931 PQI_DEVICE_REGISTERS_OFFSET +
4932 get_unaligned_le64(
4933 &response.data.create_operational_oq.oq_ci_offset);
4934
6c223761 4935 return 0;
6c223761
KB
4936}
4937
4938static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4939{
4940 int rc;
4941 unsigned int i;
4942
4943 rc = pqi_create_event_queue(ctrl_info);
4944 if (rc) {
4945 dev_err(&ctrl_info->pci_dev->dev,
4946 "error creating event queue\n");
4947 return rc;
4948 }
4949
4950 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
061ef06a 4951 rc = pqi_create_queue_group(ctrl_info, i);
6c223761
KB
4952 if (rc) {
4953 dev_err(&ctrl_info->pci_dev->dev,
4954 "error creating queue group number %u/%u\n",
4955 i, ctrl_info->num_queue_groups);
4956 return rc;
4957 }
4958 }
4959
4960 return 0;
4961}
4962
4963#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
5f492a7a 4964 struct_size((struct pqi_event_config *)0, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
6c223761 4965
6a50d6ad
KB
4966static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4967 bool enable_events)
6c223761
KB
4968{
4969 int rc;
4970 unsigned int i;
4971 struct pqi_event_config *event_config;
6a50d6ad 4972 struct pqi_event_descriptor *event_descriptor;
6c223761
KB
4973 struct pqi_general_management_request request;
4974
4975 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4976 GFP_KERNEL);
4977 if (!event_config)
4978 return -ENOMEM;
4979
4980 memset(&request, 0, sizeof(request));
4981
4982 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4983 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4984 data.report_event_configuration.sg_descriptors[1]) -
4985 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4986 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4987 &request.data.report_event_configuration.buffer_length);
4988
4989 rc = pqi_map_single(ctrl_info->pci_dev,
4990 request.data.report_event_configuration.sg_descriptors,
4991 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
6917a9cc 4992 DMA_FROM_DEVICE);
6c223761
KB
4993 if (rc)
4994 goto out;
4995
ae0c189d 4996 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
6c223761
KB
4997
4998 pqi_pci_unmap(ctrl_info->pci_dev,
4999 request.data.report_event_configuration.sg_descriptors, 1,
6917a9cc 5000 DMA_FROM_DEVICE);
6c223761
KB
5001
5002 if (rc)
5003 goto out;
5004
6a50d6ad
KB
5005 for (i = 0; i < event_config->num_event_descriptors; i++) {
5006 event_descriptor = &event_config->descriptors[i];
5007 if (enable_events &&
5008 pqi_is_supported_event(event_descriptor->event_type))
583891c9 5009 put_unaligned_le16(ctrl_info->event_queue.oq_id,
6a50d6ad
KB
5010 &event_descriptor->oq_id);
5011 else
5012 put_unaligned_le16(0, &event_descriptor->oq_id);
5013 }
6c223761
KB
5014
5015 memset(&request, 0, sizeof(request));
5016
5017 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
5018 put_unaligned_le16(offsetof(struct pqi_general_management_request,
5019 data.report_event_configuration.sg_descriptors[1]) -
5020 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5021 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5022 &request.data.report_event_configuration.buffer_length);
5023
5024 rc = pqi_map_single(ctrl_info->pci_dev,
5025 request.data.report_event_configuration.sg_descriptors,
5026 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
6917a9cc 5027 DMA_TO_DEVICE);
6c223761
KB
5028 if (rc)
5029 goto out;
5030
ae0c189d 5031 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
6c223761
KB
5032
5033 pqi_pci_unmap(ctrl_info->pci_dev,
5034 request.data.report_event_configuration.sg_descriptors, 1,
6917a9cc 5035 DMA_TO_DEVICE);
6c223761
KB
5036
5037out:
5038 kfree(event_config);
5039
5040 return rc;
5041}
5042
6a50d6ad
KB
5043static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
5044{
5045 return pqi_configure_events(ctrl_info, true);
5046}
5047
6c223761
KB
5048static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
5049{
5050 unsigned int i;
5051 struct device *dev;
5052 size_t sg_chain_buffer_length;
5053 struct pqi_io_request *io_request;
5054
5055 if (!ctrl_info->io_request_pool)
5056 return;
5057
5058 dev = &ctrl_info->pci_dev->dev;
5059 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5060 io_request = ctrl_info->io_request_pool;
5061
5062 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5063 kfree(io_request->iu);
5064 if (!io_request->sg_chain_buffer)
5065 break;
5066 dma_free_coherent(dev, sg_chain_buffer_length,
5067 io_request->sg_chain_buffer,
5068 io_request->sg_chain_buffer_dma_handle);
5069 io_request++;
5070 }
5071
5072 kfree(ctrl_info->io_request_pool);
5073 ctrl_info->io_request_pool = NULL;
5074}
5075
5076static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
5077{
694c5d5b
KB
5078 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
5079 ctrl_info->error_buffer_length,
5080 &ctrl_info->error_buffer_dma_handle,
5081 GFP_KERNEL);
6c223761
KB
5082 if (!ctrl_info->error_buffer)
5083 return -ENOMEM;
5084
5085 return 0;
5086}
5087
5088static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
5089{
5090 unsigned int i;
5091 void *sg_chain_buffer;
5092 size_t sg_chain_buffer_length;
5093 dma_addr_t sg_chain_buffer_dma_handle;
5094 struct device *dev;
5095 struct pqi_io_request *io_request;
5096
583891c9
KB
5097 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
5098 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
6c223761
KB
5099
5100 if (!ctrl_info->io_request_pool) {
5101 dev_err(&ctrl_info->pci_dev->dev,
5102 "failed to allocate I/O request pool\n");
5103 goto error;
5104 }
5105
5106 dev = &ctrl_info->pci_dev->dev;
5107 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5108 io_request = ctrl_info->io_request_pool;
5109
5110 for (i = 0; i < ctrl_info->max_io_slots; i++) {
583891c9 5111 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
6c223761
KB
5112
5113 if (!io_request->iu) {
5114 dev_err(&ctrl_info->pci_dev->dev,
5115 "failed to allocate IU buffers\n");
5116 goto error;
5117 }
5118
5119 sg_chain_buffer = dma_alloc_coherent(dev,
5120 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
5121 GFP_KERNEL);
5122
5123 if (!sg_chain_buffer) {
5124 dev_err(&ctrl_info->pci_dev->dev,
5125 "failed to allocate PQI scatter-gather chain buffers\n");
5126 goto error;
5127 }
5128
5129 io_request->index = i;
5130 io_request->sg_chain_buffer = sg_chain_buffer;
583891c9 5131 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
6c223761
KB
5132 io_request++;
5133 }
5134
5135 return 0;
5136
5137error:
5138 pqi_free_all_io_requests(ctrl_info);
5139
5140 return -ENOMEM;
5141}
5142
5143/*
5144 * Calculate required resources that are sized based on max. outstanding
5145 * requests and max. transfer size.
5146 */
5147
5148static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
5149{
5150 u32 max_transfer_size;
5151 u32 max_sg_entries;
5152
5153 ctrl_info->scsi_ml_can_queue =
5154 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
5155 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
5156
5157 ctrl_info->error_buffer_length =
5158 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
5159
d727a776
KB
5160 if (reset_devices)
5161 max_transfer_size = min(ctrl_info->max_transfer_size,
5162 PQI_MAX_TRANSFER_SIZE_KDUMP);
5163 else
5164 max_transfer_size = min(ctrl_info->max_transfer_size,
5165 PQI_MAX_TRANSFER_SIZE);
6c223761
KB
5166
5167 max_sg_entries = max_transfer_size / PAGE_SIZE;
5168
5169 /* +1 to cover when the buffer is not page-aligned. */
5170 max_sg_entries++;
5171
5172 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
5173
5174 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
5175
5176 ctrl_info->sg_chain_buffer_length =
e1d213bd
KB
5177 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
5178 PQI_EXTRA_SGL_MEMORY;
6c223761
KB
5179 ctrl_info->sg_tablesize = max_sg_entries;
5180 ctrl_info->max_sectors = max_transfer_size / 512;
5181}
5182
5183static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
5184{
6c223761
KB
5185 int num_queue_groups;
5186 u16 num_elements_per_iq;
5187 u16 num_elements_per_oq;
5188
d727a776
KB
5189 if (reset_devices) {
5190 num_queue_groups = 1;
5191 } else {
5192 int num_cpus;
5193 int max_queue_groups;
5194
5195 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
5196 ctrl_info->max_outbound_queues - 1);
5197 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
6c223761 5198
d727a776
KB
5199 num_cpus = num_online_cpus();
5200 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
5201 num_queue_groups = min(num_queue_groups, max_queue_groups);
5202 }
6c223761
KB
5203
5204 ctrl_info->num_queue_groups = num_queue_groups;
061ef06a 5205 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
6c223761 5206
77668f41
KB
5207 /*
5208 * Make sure that the max. inbound IU length is an even multiple
5209 * of our inbound element length.
5210 */
5211 ctrl_info->max_inbound_iu_length =
5212 (ctrl_info->max_inbound_iu_length_per_firmware /
5213 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
5214 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
6c223761
KB
5215
5216 num_elements_per_iq =
5217 (ctrl_info->max_inbound_iu_length /
5218 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
5219
5220 /* Add one because one element in each queue is unusable. */
5221 num_elements_per_iq++;
5222
5223 num_elements_per_iq = min(num_elements_per_iq,
5224 ctrl_info->max_elements_per_iq);
5225
5226 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5227 num_elements_per_oq = min(num_elements_per_oq,
5228 ctrl_info->max_elements_per_oq);
5229
5230 ctrl_info->num_elements_per_iq = num_elements_per_iq;
5231 ctrl_info->num_elements_per_oq = num_elements_per_oq;
5232
5233 ctrl_info->max_sg_per_iu =
5234 ((ctrl_info->max_inbound_iu_length -
5235 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5236 sizeof(struct pqi_sg_descriptor)) +
5237 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
6702d2c4
DB
5238
5239 ctrl_info->max_sg_per_r56_iu =
5240 ((ctrl_info->max_inbound_iu_length -
5241 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5242 sizeof(struct pqi_sg_descriptor)) +
5243 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
6c223761
KB
5244}
5245
583891c9
KB
5246static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5247 struct scatterlist *sg)
6c223761
KB
5248{
5249 u64 address = (u64)sg_dma_address(sg);
5250 unsigned int length = sg_dma_len(sg);
5251
5252 put_unaligned_le64(address, &sg_descriptor->address);
5253 put_unaligned_le32(length, &sg_descriptor->length);
5254 put_unaligned_le32(0, &sg_descriptor->flags);
5255}
5256
1a22bc4b
DB
5257static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5258 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5259 int max_sg_per_iu, bool *chained)
6c223761
KB
5260{
5261 int i;
6c223761 5262 unsigned int num_sg_in_iu;
6c223761 5263
1a22bc4b 5264 *chained = false;
6c223761 5265 i = 0;
1a22bc4b
DB
5266 num_sg_in_iu = 0;
5267 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */
6c223761
KB
5268
5269 while (1) {
5270 pqi_set_sg_descriptor(sg_descriptor, sg);
1a22bc4b 5271 if (!*chained)
6c223761
KB
5272 num_sg_in_iu++;
5273 i++;
5274 if (i == sg_count)
5275 break;
5276 sg_descriptor++;
5277 if (i == max_sg_per_iu) {
1a22bc4b 5278 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
6c223761 5279 &sg_descriptor->address);
1a22bc4b 5280 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
6c223761 5281 &sg_descriptor->length);
1a22bc4b
DB
5282 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5283 *chained = true;
6c223761
KB
5284 num_sg_in_iu++;
5285 sg_descriptor = io_request->sg_chain_buffer;
5286 }
5287 sg = sg_next(sg);
5288 }
5289
5290 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
6c223761 5291
1a22bc4b 5292 return num_sg_in_iu;
6c223761
KB
5293}
5294
6c223761
KB
5295static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5296 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
6c223761
KB
5297 struct pqi_io_request *io_request)
5298{
6c223761
KB
5299 u16 iu_length;
5300 int sg_count;
a60eec02
KB
5301 bool chained;
5302 unsigned int num_sg_in_iu;
6c223761
KB
5303 struct scatterlist *sg;
5304 struct pqi_sg_descriptor *sg_descriptor;
5305
5306 sg_count = scsi_dma_map(scmd);
5307 if (sg_count < 0)
5308 return sg_count;
a60eec02 5309
6c223761 5310 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
a60eec02 5311 PQI_REQUEST_HEADER_LENGTH;
a60eec02 5312
6c223761
KB
5313 if (sg_count == 0)
5314 goto out;
5315
a60eec02
KB
5316 sg = scsi_sglist(scmd);
5317 sg_descriptor = request->sg_descriptors;
a60eec02 5318
1a22bc4b
DB
5319 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5320 ctrl_info->max_sg_per_iu, &chained);
6c223761 5321
a60eec02 5322 request->partial = chained;
6c223761 5323 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
a60eec02
KB
5324
5325out:
6c223761 5326 put_unaligned_le16(iu_length, &request->header.iu_length);
6c223761
KB
5327
5328 return 0;
5329}
5330
7a012c23
DB
5331static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5332 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5333 struct pqi_io_request *io_request)
6c223761 5334{
7a012c23
DB
5335 u16 iu_length;
5336 int sg_count;
5337 bool chained;
5338 unsigned int num_sg_in_iu;
5339 struct scatterlist *sg;
5340 struct pqi_sg_descriptor *sg_descriptor;
5341
5342 sg_count = scsi_dma_map(scmd);
5343 if (sg_count < 0)
5344 return sg_count;
5345
5346 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5347 PQI_REQUEST_HEADER_LENGTH;
5348 num_sg_in_iu = 0;
5349
5350 if (sg_count == 0)
5351 goto out;
5352
5353 sg = scsi_sglist(scmd);
5354 sg_descriptor = request->sg_descriptors;
5355
5356 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5357 ctrl_info->max_sg_per_iu, &chained);
5358
5359 request->partial = chained;
5360 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5361
5362out:
5363 put_unaligned_le16(iu_length, &request->header.iu_length);
5364 request->num_sg_descriptors = num_sg_in_iu;
5365
5366 return 0;
5367}
5368
6702d2c4
DB
5369static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5370 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5371 struct pqi_io_request *io_request)
5372{
5373 u16 iu_length;
5374 int sg_count;
5375 bool chained;
5376 unsigned int num_sg_in_iu;
5377 struct scatterlist *sg;
5378 struct pqi_sg_descriptor *sg_descriptor;
5379
5380 sg_count = scsi_dma_map(scmd);
5381 if (sg_count < 0)
5382 return sg_count;
5383
5384 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5385 PQI_REQUEST_HEADER_LENGTH;
5386 num_sg_in_iu = 0;
5387
5388 if (sg_count != 0) {
5389 sg = scsi_sglist(scmd);
5390 sg_descriptor = request->sg_descriptors;
5391
5392 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5393 ctrl_info->max_sg_per_r56_iu, &chained);
5394
5395 request->partial = chained;
5396 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5397 }
5398
5399 put_unaligned_le16(iu_length, &request->header.iu_length);
5400 request->num_sg_descriptors = num_sg_in_iu;
5401
5402 return 0;
5403}
5404
6c223761
KB
5405static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5406 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5407 struct pqi_io_request *io_request)
5408{
6c223761
KB
5409 u16 iu_length;
5410 int sg_count;
a60eec02
KB
5411 bool chained;
5412 unsigned int num_sg_in_iu;
6c223761
KB
5413 struct scatterlist *sg;
5414 struct pqi_sg_descriptor *sg_descriptor;
5415
5416 sg_count = scsi_dma_map(scmd);
5417 if (sg_count < 0)
5418 return sg_count;
a60eec02
KB
5419
5420 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5421 PQI_REQUEST_HEADER_LENGTH;
5422 num_sg_in_iu = 0;
5423
6c223761
KB
5424 if (sg_count == 0)
5425 goto out;
5426
a60eec02
KB
5427 sg = scsi_sglist(scmd);
5428 sg_descriptor = request->sg_descriptors;
a60eec02 5429
1a22bc4b
DB
5430 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5431 ctrl_info->max_sg_per_iu, &chained);
6c223761 5432
a60eec02 5433 request->partial = chained;
6c223761 5434 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
a60eec02
KB
5435
5436out:
6c223761
KB
5437 put_unaligned_le16(iu_length, &request->header.iu_length);
5438 request->num_sg_descriptors = num_sg_in_iu;
5439
5440 return 0;
5441}
5442
5443static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5444 void *context)
5445{
5446 struct scsi_cmnd *scmd;
5447
5448 scmd = io_request->scmd;
5449 pqi_free_io_request(io_request);
5450 scsi_dma_unmap(scmd);
5451 pqi_scsi_done(scmd);
5452}
5453
376fb880
KB
5454static int pqi_raid_submit_scsi_cmd_with_io_request(
5455 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
6c223761
KB
5456 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5457 struct pqi_queue_group *queue_group)
5458{
5459 int rc;
5460 size_t cdb_length;
6c223761
KB
5461 struct pqi_raid_path_request *request;
5462
6c223761
KB
5463 io_request->io_complete_callback = pqi_raid_io_complete;
5464 io_request->scmd = scmd;
5465
6c223761 5466 request = io_request->iu;
583891c9 5467 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
6c223761
KB
5468
5469 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5470 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5471 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5472 put_unaligned_le16(io_request->index, &request->request_id);
5473 request->error_index = request->request_id;
583891c9 5474 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
904f2bfd 5475 request->ml_device_lun_number = (u8)scmd->device->lun;
6c223761
KB
5476
5477 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5478 memcpy(request->cdb, scmd->cmnd, cdb_length);
5479
5480 switch (cdb_length) {
5481 case 6:
5482 case 10:
5483 case 12:
5484 case 16:
583891c9 5485 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6c223761
KB
5486 break;
5487 case 20:
583891c9 5488 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
6c223761
KB
5489 break;
5490 case 24:
583891c9 5491 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
6c223761
KB
5492 break;
5493 case 28:
583891c9 5494 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
6c223761
KB
5495 break;
5496 case 32:
5497 default:
583891c9 5498 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
6c223761
KB
5499 break;
5500 }
5501
5502 switch (scmd->sc_data_direction) {
69695aea 5503 case DMA_FROM_DEVICE:
6c223761
KB
5504 request->data_direction = SOP_READ_FLAG;
5505 break;
69695aea 5506 case DMA_TO_DEVICE:
6c223761
KB
5507 request->data_direction = SOP_WRITE_FLAG;
5508 break;
5509 case DMA_NONE:
5510 request->data_direction = SOP_NO_DIRECTION_FLAG;
5511 break;
5512 case DMA_BIDIRECTIONAL:
5513 request->data_direction = SOP_BIDIRECTIONAL;
5514 break;
5515 default:
5516 dev_err(&ctrl_info->pci_dev->dev,
5517 "unknown data direction: %d\n",
5518 scmd->sc_data_direction);
6c223761
KB
5519 break;
5520 }
5521
5522 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5523 if (rc) {
5524 pqi_free_io_request(io_request);
5525 return SCSI_MLQUEUE_HOST_BUSY;
5526 }
5527
5528 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5529
5530 return 0;
5531}
5532
376fb880
KB
5533static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5534 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5535 struct pqi_queue_group *queue_group)
5536{
5537 struct pqi_io_request *io_request;
5538
5539 io_request = pqi_alloc_io_request(ctrl_info);
5540
5541 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5542 device, scmd, queue_group);
5543}
5544
376fb880
KB
5545static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5546{
5547 struct scsi_cmnd *scmd;
03b288cf 5548 struct pqi_scsi_dev *device;
376fb880
KB
5549 struct pqi_ctrl_info *ctrl_info;
5550
5551 if (!io_request->raid_bypass)
5552 return false;
5553
5554 scmd = io_request->scmd;
5555 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5556 return false;
5557 if (host_byte(scmd->result) == DID_NO_CONNECT)
5558 return false;
5559
03b288cf 5560 device = scmd->device->hostdata;
5be9db06 5561 if (pqi_device_offline(device) || pqi_device_in_remove(device))
03b288cf
KB
5562 return false;
5563
376fb880
KB
5564 ctrl_info = shost_to_hba(scmd->device->host);
5565 if (pqi_ctrl_offline(ctrl_info))
5566 return false;
5567
5568 return true;
5569}
5570
6c223761
KB
5571static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5572 void *context)
5573{
5574 struct scsi_cmnd *scmd;
5575
5576 scmd = io_request->scmd;
5577 scsi_dma_unmap(scmd);
5be9db06 5578 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
6c223761 5579 set_host_byte(scmd, DID_IMM_RETRY);
c1ea387d 5580 pqi_cmd_priv(scmd)->this_residual++;
376fb880 5581 }
5be9db06 5582
6c223761
KB
5583 pqi_free_io_request(io_request);
5584 pqi_scsi_done(scmd);
5585}
5586
b4dc06a9 5587static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info,
2a47834d
GW
5588 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
5589{
5590 bool io_high_prio;
5591 int priority_class;
5592
5593 io_high_prio = false;
b4dc06a9 5594
2a47834d
GW
5595 if (device->ncq_prio_enable) {
5596 priority_class =
5597 IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
5598 if (priority_class == IOPRIO_CLASS_RT) {
b4dc06a9 5599 /* Set NCQ priority for read/write commands. */
2a47834d
GW
5600 switch (scmd->cmnd[0]) {
5601 case WRITE_16:
5602 case READ_16:
5603 case WRITE_12:
5604 case READ_12:
5605 case WRITE_10:
5606 case READ_10:
5607 case WRITE_6:
5608 case READ_6:
5609 io_high_prio = true;
5610 break;
2a47834d
GW
5611 }
5612 }
5613 }
5614
5615 return io_high_prio;
5616}
5617
6c223761
KB
5618static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5619 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5620 struct pqi_queue_group *queue_group)
5621{
2a47834d
GW
5622 bool io_high_prio;
5623
b4dc06a9
KB
5624 io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd);
5625
6c223761 5626 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
2a47834d
GW
5627 scmd->cmnd, scmd->cmd_len, queue_group, NULL,
5628 false, io_high_prio);
6c223761
KB
5629}
5630
5631static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5632 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5633 unsigned int cdb_length, struct pqi_queue_group *queue_group,
2a47834d
GW
5634 struct pqi_encryption_info *encryption_info, bool raid_bypass,
5635 bool io_high_prio)
6c223761
KB
5636{
5637 int rc;
5638 struct pqi_io_request *io_request;
5639 struct pqi_aio_path_request *request;
904f2bfd 5640 struct pqi_scsi_dev *device;
6c223761 5641
904f2bfd 5642 device = scmd->device->hostdata;
6c223761
KB
5643 io_request = pqi_alloc_io_request(ctrl_info);
5644 io_request->io_complete_callback = pqi_aio_io_complete;
5645 io_request->scmd = scmd;
376fb880 5646 io_request->raid_bypass = raid_bypass;
6c223761
KB
5647
5648 request = io_request->iu;
9e98e60b 5649 memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
6c223761
KB
5650
5651 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5652 put_unaligned_le32(aio_handle, &request->nexus_id);
5653 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5654 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
2a47834d 5655 request->command_priority = io_high_prio;
6c223761
KB
5656 put_unaligned_le16(io_request->index, &request->request_id);
5657 request->error_index = request->request_id;
904f2bfd
KM
5658 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
5659 put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
6c223761
KB
5660 if (cdb_length > sizeof(request->cdb))
5661 cdb_length = sizeof(request->cdb);
5662 request->cdb_length = cdb_length;
5663 memcpy(request->cdb, cdb, cdb_length);
5664
5665 switch (scmd->sc_data_direction) {
5666 case DMA_TO_DEVICE:
5667 request->data_direction = SOP_READ_FLAG;
5668 break;
5669 case DMA_FROM_DEVICE:
5670 request->data_direction = SOP_WRITE_FLAG;
5671 break;
5672 case DMA_NONE:
5673 request->data_direction = SOP_NO_DIRECTION_FLAG;
5674 break;
5675 case DMA_BIDIRECTIONAL:
5676 request->data_direction = SOP_BIDIRECTIONAL;
5677 break;
5678 default:
5679 dev_err(&ctrl_info->pci_dev->dev,
5680 "unknown data direction: %d\n",
5681 scmd->sc_data_direction);
6c223761
KB
5682 break;
5683 }
5684
5685 if (encryption_info) {
5686 request->encryption_enable = true;
5687 put_unaligned_le16(encryption_info->data_encryption_key_index,
5688 &request->data_encryption_key_index);
5689 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5690 &request->encrypt_tweak_lower);
5691 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5692 &request->encrypt_tweak_upper);
5693 }
5694
5695 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5696 if (rc) {
5697 pqi_free_io_request(io_request);
5698 return SCSI_MLQUEUE_HOST_BUSY;
5699 }
5700
5701 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5702
5703 return 0;
5704}
5705
7a012c23
DB
5706static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5707 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5708 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5709 struct pqi_scsi_dev_raid_map_data *rmd)
7a012c23
DB
5710{
5711 int rc;
5712 struct pqi_io_request *io_request;
5713 struct pqi_aio_r1_path_request *r1_request;
5714
5715 io_request = pqi_alloc_io_request(ctrl_info);
5716 io_request->io_complete_callback = pqi_aio_io_complete;
5717 io_request->scmd = scmd;
5718 io_request->raid_bypass = true;
5719
5720 r1_request = io_request->iu;
5721 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5722
5723 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
7a012c23
DB
5724 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5725 r1_request->num_drives = rmd->num_it_nexus_entries;
5726 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5727 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5728 if (rmd->num_it_nexus_entries == 3)
5729 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5730
5731 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5732 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5733 put_unaligned_le16(io_request->index, &r1_request->request_id);
5734 r1_request->error_index = r1_request->request_id;
5735 if (rmd->cdb_length > sizeof(r1_request->cdb))
5736 rmd->cdb_length = sizeof(r1_request->cdb);
5737 r1_request->cdb_length = rmd->cdb_length;
5738 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5739
5740 /* The direction is always write. */
5741 r1_request->data_direction = SOP_READ_FLAG;
5742
5743 if (encryption_info) {
5744 r1_request->encryption_enable = true;
5745 put_unaligned_le16(encryption_info->data_encryption_key_index,
5746 &r1_request->data_encryption_key_index);
5747 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5748 &r1_request->encrypt_tweak_lower);
5749 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5750 &r1_request->encrypt_tweak_upper);
5751 }
5752
5753 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5754 if (rc) {
5755 pqi_free_io_request(io_request);
5756 return SCSI_MLQUEUE_HOST_BUSY;
5757 }
5758
5759 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5760
5761 return 0;
5762}
5763
6702d2c4
DB
5764static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5765 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5766 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5767 struct pqi_scsi_dev_raid_map_data *rmd)
5768{
5769 int rc;
5770 struct pqi_io_request *io_request;
5771 struct pqi_aio_r56_path_request *r56_request;
5772
5773 io_request = pqi_alloc_io_request(ctrl_info);
5774 io_request->io_complete_callback = pqi_aio_io_complete;
5775 io_request->scmd = scmd;
5776 io_request->raid_bypass = true;
5777
5778 r56_request = io_request->iu;
5779 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5780
5781 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5782 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5783 else
5784 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5785
5786 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5787 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5788 put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5789 if (rmd->raid_level == SA_RAID_6) {
5790 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5791 r56_request->xor_multiplier = rmd->xor_mult;
5792 }
5793 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5794 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5795 put_unaligned_le64(rmd->row, &r56_request->row);
5796
5797 put_unaligned_le16(io_request->index, &r56_request->request_id);
5798 r56_request->error_index = r56_request->request_id;
5799
5800 if (rmd->cdb_length > sizeof(r56_request->cdb))
5801 rmd->cdb_length = sizeof(r56_request->cdb);
5802 r56_request->cdb_length = rmd->cdb_length;
5803 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5804
5805 /* The direction is always write. */
5806 r56_request->data_direction = SOP_READ_FLAG;
5807
5808 if (encryption_info) {
5809 r56_request->encryption_enable = true;
5810 put_unaligned_le16(encryption_info->data_encryption_key_index,
5811 &r56_request->data_encryption_key_index);
5812 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5813 &r56_request->encrypt_tweak_lower);
5814 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5815 &r56_request->encrypt_tweak_upper);
5816 }
5817
5818 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5819 if (rc) {
5820 pqi_free_io_request(io_request);
5821 return SCSI_MLQUEUE_HOST_BUSY;
5822 }
5823
5824 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5825
5826 return 0;
5827}
5828
061ef06a
KB
5829static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5830 struct scsi_cmnd *scmd)
5831{
5832 u16 hw_queue;
5833
12db0f93 5834 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
061ef06a
KB
5835 if (hw_queue > ctrl_info->max_hw_queue_index)
5836 hw_queue = 0;
5837
5838 return hw_queue;
5839}
5840
5be9db06
KB
5841static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5842{
12db0f93 5843 if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5be9db06
KB
5844 return false;
5845
c1ea387d 5846 return pqi_cmd_priv(scmd)->this_residual == 0;
5be9db06
KB
5847}
5848
7561a7e4
KB
5849/*
5850 * This function gets called just before we hand the completed SCSI request
5851 * back to the SML.
5852 */
5853
5854void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5855{
5856 struct pqi_scsi_dev *device;
5857
1e46731e
MR
5858 if (!scmd->device) {
5859 set_host_byte(scmd, DID_NO_CONNECT);
5860 return;
5861 }
5862
7561a7e4 5863 device = scmd->device->hostdata;
1e46731e
MR
5864 if (!device) {
5865 set_host_byte(scmd, DID_NO_CONNECT);
5866 return;
5867 }
5868
904f2bfd 5869 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
7561a7e4
KB
5870}
5871
c7ffedb3 5872static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
7d81d2b8 5873 struct scsi_cmnd *scmd)
c7ffedb3
DB
5874{
5875 u32 oldest_jiffies;
5876 u8 lru_index;
5877 int i;
5878 int rc;
5879 struct pqi_scsi_dev *device;
5880 struct pqi_stream_data *pqi_stream_data;
5881 struct pqi_scsi_dev_raid_map_data rmd;
5882
5883 if (!ctrl_info->enable_stream_detection)
5884 return false;
5885
5886 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5887 if (rc)
5888 return false;
5889
5890 /* Check writes only. */
5891 if (!rmd.is_write)
5892 return false;
5893
5894 device = scmd->device->hostdata;
5895
5896 /* Check for RAID 5/6 streams. */
5897 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
5898 return false;
5899
5900 /*
5901 * If controller does not support AIO RAID{5,6} writes, need to send
5902 * requests down non-AIO path.
5903 */
5904 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
5905 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
5906 return true;
5907
5908 lru_index = 0;
5909 oldest_jiffies = INT_MAX;
5910 for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
5911 pqi_stream_data = &device->stream_data[i];
5912 /*
5913 * Check for adjacent request or request is within
5914 * the previous request.
5915 */
5916 if ((pqi_stream_data->next_lba &&
5917 rmd.first_block >= pqi_stream_data->next_lba) &&
5918 rmd.first_block <= pqi_stream_data->next_lba +
5919 rmd.block_cnt) {
5920 pqi_stream_data->next_lba = rmd.first_block +
5921 rmd.block_cnt;
5922 pqi_stream_data->last_accessed = jiffies;
5923 return true;
5924 }
5925
5926 /* unused entry */
5927 if (pqi_stream_data->last_accessed == 0) {
5928 lru_index = i;
5929 break;
5930 }
5931
5932 /* Find entry with oldest last accessed time. */
5933 if (pqi_stream_data->last_accessed <= oldest_jiffies) {
5934 oldest_jiffies = pqi_stream_data->last_accessed;
5935 lru_index = i;
5936 }
5937 }
5938
5939 /* Set LRU entry. */
5940 pqi_stream_data = &device->stream_data[lru_index];
5941 pqi_stream_data->last_accessed = jiffies;
5942 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
5943
5944 return false;
5945}
5946
5947static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
6c223761
KB
5948{
5949 int rc;
5950 struct pqi_ctrl_info *ctrl_info;
5951 struct pqi_scsi_dev *device;
061ef06a 5952 u16 hw_queue;
6c223761
KB
5953 struct pqi_queue_group *queue_group;
5954 bool raid_bypassed;
5955
5956 device = scmd->device->hostdata;
6c223761 5957
1e46731e
MR
5958 if (!device) {
5959 set_host_byte(scmd, DID_NO_CONNECT);
5960 pqi_scsi_done(scmd);
5961 return 0;
5962 }
5963
904f2bfd 5964 atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]);
7561a7e4 5965
583891c9
KB
5966 ctrl_info = shost_to_hba(shost);
5967
1bdf6e93 5968 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
6c223761
KB
5969 set_host_byte(scmd, DID_NO_CONNECT);
5970 pqi_scsi_done(scmd);
5971 return 0;
5972 }
5973
5be9db06 5974 if (pqi_ctrl_blocked(ctrl_info)) {
7561a7e4
KB
5975 rc = SCSI_MLQUEUE_HOST_BUSY;
5976 goto out;
5977 }
5978
7d81d2b8
KB
5979 /*
5980 * This is necessary because the SML doesn't zero out this field during
5981 * error recovery.
5982 */
5983 scmd->result = 0;
5984
061ef06a
KB
5985 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5986 queue_group = &ctrl_info->queue_groups[hw_queue];
6c223761
KB
5987
5988 if (pqi_is_logical_device(device)) {
5989 raid_bypassed = false;
588a63fe 5990 if (device->raid_bypass_enabled &&
5be9db06
KB
5991 pqi_is_bypass_eligible_request(scmd) &&
5992 !pqi_is_parity_write_stream(ctrl_info, scmd)) {
5993 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
8b664fef 5994 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
376fb880 5995 raid_bypassed = true;
8b664fef
KB
5996 atomic_inc(&device->raid_bypass_cnt);
5997 }
6c223761
KB
5998 }
5999 if (!raid_bypassed)
8b664fef 6000 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6c223761
KB
6001 } else {
6002 if (device->aio_enabled)
8b664fef 6003 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6c223761 6004 else
8b664fef 6005 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6c223761
KB
6006 }
6007
7561a7e4 6008out:
7561a7e4 6009 if (rc)
904f2bfd 6010 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
7561a7e4 6011
6c223761
KB
6012 return rc;
6013}
6014
6ce1ddf5 6015static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
7561a7e4 6016{
6ce1ddf5 6017 unsigned int i;
7561a7e4
KB
6018 unsigned int path;
6019 unsigned long flags;
6ce1ddf5
KB
6020 unsigned int queued_io_count;
6021 struct pqi_queue_group *queue_group;
6022 struct pqi_io_request *io_request;
7561a7e4 6023
6ce1ddf5
KB
6024 queued_io_count = 0;
6025
6026 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6027 queue_group = &ctrl_info->queue_groups[i];
6028 for (path = 0; path < 2; path++) {
6029 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
6030 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
6031 queued_io_count++;
6032 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
7561a7e4
KB
6033 }
6034 }
6035
6ce1ddf5 6036 return queued_io_count;
7561a7e4
KB
6037}
6038
6ce1ddf5 6039static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
7561a7e4 6040{
7561a7e4
KB
6041 unsigned int i;
6042 unsigned int path;
6ce1ddf5 6043 unsigned int nonempty_inbound_queue_count;
7561a7e4
KB
6044 struct pqi_queue_group *queue_group;
6045 pqi_index_t iq_pi;
6046 pqi_index_t iq_ci;
6047
6ce1ddf5
KB
6048 nonempty_inbound_queue_count = 0;
6049
7561a7e4
KB
6050 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6051 queue_group = &ctrl_info->queue_groups[i];
7561a7e4
KB
6052 for (path = 0; path < 2; path++) {
6053 iq_pi = queue_group->iq_pi_copy[path];
6ce1ddf5
KB
6054 iq_ci = readl(queue_group->iq_ci[path]);
6055 if (iq_ci != iq_pi)
6056 nonempty_inbound_queue_count++;
6057 }
6058 }
7561a7e4 6059
6ce1ddf5
KB
6060 return nonempty_inbound_queue_count;
6061}
6062
6063#define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10
6064
6065static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
6066{
6067 unsigned long start_jiffies;
6068 unsigned long warning_timeout;
6069 unsigned int queued_io_count;
6070 unsigned int nonempty_inbound_queue_count;
6071 bool displayed_warning;
6072
6073 displayed_warning = false;
6074 start_jiffies = jiffies;
42dc0426 6075 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6ce1ddf5
KB
6076
6077 while (1) {
6078 queued_io_count = pqi_queued_io_count(ctrl_info);
6079 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
6080 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
6081 break;
6082 pqi_check_ctrl_health(ctrl_info);
6083 if (pqi_ctrl_offline(ctrl_info))
6084 return -ENXIO;
6085 if (time_after(jiffies, warning_timeout)) {
6086 dev_warn(&ctrl_info->pci_dev->dev,
6087 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
6088 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
6089 displayed_warning = true;
42dc0426 6090 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
7561a7e4 6091 }
6ce1ddf5 6092 usleep_range(1000, 2000);
7561a7e4
KB
6093 }
6094
6ce1ddf5
KB
6095 if (displayed_warning)
6096 dev_warn(&ctrl_info->pci_dev->dev,
6097 "queued I/O drained after waiting for %u seconds\n",
6098 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
6099
7561a7e4
KB
6100 return 0;
6101}
6102
6103static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
6104 struct pqi_scsi_dev *device)
6105{
6106 unsigned int i;
6107 unsigned int path;
6108 struct pqi_queue_group *queue_group;
6109 unsigned long flags;
6110 struct pqi_io_request *io_request;
6111 struct pqi_io_request *next;
6112 struct scsi_cmnd *scmd;
6113 struct pqi_scsi_dev *scsi_device;
6114
6115 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6116 queue_group = &ctrl_info->queue_groups[i];
6117
6118 for (path = 0; path < 2; path++) {
6119 spin_lock_irqsave(
6120 &queue_group->submit_lock[path], flags);
6121
6122 list_for_each_entry_safe(io_request, next,
6123 &queue_group->request_list[path],
6124 request_list_entry) {
583891c9 6125
7561a7e4
KB
6126 scmd = io_request->scmd;
6127 if (!scmd)
6128 continue;
6129
6130 scsi_device = scmd->device->hostdata;
6131 if (scsi_device != device)
6132 continue;
6133
6134 list_del(&io_request->request_list_entry);
6135 set_host_byte(scmd, DID_RESET);
b622a601
MB
6136 pqi_free_io_request(io_request);
6137 scsi_dma_unmap(scmd);
7561a7e4
KB
6138 pqi_scsi_done(scmd);
6139 }
6140
6141 spin_unlock_irqrestore(
6142 &queue_group->submit_lock[path], flags);
6143 }
6144 }
6145}
6146
18ff5f08 6147#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
4fd22c13 6148
061ef06a 6149static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
904f2bfd 6150 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
061ef06a 6151{
18ff5f08
KB
6152 int cmds_outstanding;
6153 unsigned long start_jiffies;
6154 unsigned long warning_timeout;
6155 unsigned long msecs_waiting;
1e46731e 6156
18ff5f08 6157 start_jiffies = jiffies;
42dc0426 6158 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
1e46731e 6159
904f2bfd 6160 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
331f7e99
SB
6161 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
6162 pqi_check_ctrl_health(ctrl_info);
6163 if (pqi_ctrl_offline(ctrl_info))
6164 return -ENXIO;
6165 }
18ff5f08 6166 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
6ce1ddf5 6167 if (msecs_waiting >= timeout_msecs) {
18ff5f08
KB
6168 dev_err(&ctrl_info->pci_dev->dev,
6169 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
6170 ctrl_info->scsi_host->host_no, device->bus, device->target,
904f2bfd 6171 lun, msecs_waiting / 1000, cmds_outstanding);
18ff5f08 6172 return -ETIMEDOUT;
061ef06a 6173 }
18ff5f08
KB
6174 if (time_after(jiffies, warning_timeout)) {
6175 dev_warn(&ctrl_info->pci_dev->dev,
6176 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
6177 ctrl_info->scsi_host->host_no, device->bus, device->target,
904f2bfd 6178 lun, msecs_waiting / 1000, cmds_outstanding);
42dc0426 6179 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
4fd22c13 6180 }
061ef06a
KB
6181 usleep_range(1000, 2000);
6182 }
6183
6184 return 0;
6185}
6186
14bb215d
KB
6187static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
6188 void *context)
6c223761 6189{
14bb215d 6190 struct completion *waiting = context;
6c223761 6191
14bb215d
KB
6192 complete(waiting);
6193}
6c223761 6194
c2922f17 6195#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
14bb215d
KB
6196
6197static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
904f2bfd 6198 struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
14bb215d
KB
6199{
6200 int rc;
18ff5f08 6201 unsigned int wait_secs;
6ce1ddf5 6202 int cmds_outstanding;
18ff5f08
KB
6203
6204 wait_secs = 0;
14bb215d
KB
6205
6206 while (1) {
6207 if (wait_for_completion_io_timeout(wait,
42dc0426 6208 PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
14bb215d
KB
6209 rc = 0;
6210 break;
6c223761
KB
6211 }
6212
14bb215d
KB
6213 pqi_check_ctrl_health(ctrl_info);
6214 if (pqi_ctrl_offline(ctrl_info)) {
4e8415e3 6215 rc = -ENXIO;
14bb215d
KB
6216 break;
6217 }
18ff5f08
KB
6218
6219 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
904f2bfd 6220 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
18ff5f08 6221 dev_warn(&ctrl_info->pci_dev->dev,
6ce1ddf5 6222 "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
904f2bfd 6223 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
6c223761 6224 }
6c223761 6225
14bb215d 6226 return rc;
6c223761
KB
6227}
6228
18ff5f08
KB
6229#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
6230
904f2bfd 6231static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6c223761
KB
6232{
6233 int rc;
6234 struct pqi_io_request *io_request;
6235 DECLARE_COMPLETION_ONSTACK(wait);
6236 struct pqi_task_management_request *request;
904f2bfd 6237 struct pqi_scsi_dev *device;
6c223761 6238
904f2bfd 6239 device = scmd->device->hostdata;
6c223761 6240 io_request = pqi_alloc_io_request(ctrl_info);
14bb215d 6241 io_request->io_complete_callback = pqi_lun_reset_complete;
6c223761
KB
6242 io_request->context = &wait;
6243
6244 request = io_request->iu;
6245 memset(request, 0, sizeof(*request));
6246
6247 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6248 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6249 &request->header.iu_length);
6250 put_unaligned_le16(io_request->index, &request->request_id);
6251 memcpy(request->lun_number, device->scsi3addr,
6252 sizeof(request->lun_number));
904f2bfd
KM
6253 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
6254 request->ml_device_lun_number = (u8)scmd->device->lun;
6c223761 6255 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
c2922f17 6256 if (ctrl_info->tmf_iu_timeout_supported)
18ff5f08 6257 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
6c223761 6258
583891c9 6259 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6c223761
KB
6260 io_request);
6261
904f2bfd 6262 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait);
14bb215d 6263 if (rc == 0)
6c223761 6264 rc = io_request->status;
6c223761
KB
6265
6266 pqi_free_io_request(io_request);
6c223761
KB
6267
6268 return rc;
6269}
6270
18ff5f08
KB
6271#define PQI_LUN_RESET_RETRIES 3
6272#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000)
6273#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
6274#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
6c223761 6275
904f2bfd 6276static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6c223761 6277{
18ff5f08
KB
6278 int reset_rc;
6279 int wait_rc;
3406384b 6280 unsigned int retries;
18ff5f08 6281 unsigned long timeout_msecs;
904f2bfd 6282 struct pqi_scsi_dev *device;
6c223761 6283
904f2bfd 6284 device = scmd->device->hostdata;
3406384b 6285 for (retries = 0;;) {
904f2bfd 6286 reset_rc = pqi_lun_reset(ctrl_info, scmd);
4e7d2602 6287 if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES)
3406384b
MR
6288 break;
6289 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6290 }
429fab70 6291
18ff5f08
KB
6292 timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
6293 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
4fd22c13 6294
904f2bfd 6295 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs);
18ff5f08
KB
6296 if (wait_rc && reset_rc == 0)
6297 reset_rc = wait_rc;
6c223761 6298
18ff5f08 6299 return reset_rc == 0 ? SUCCESS : FAILED;
6c223761
KB
6300}
6301
904f2bfd 6302static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
4fd22c13
MR
6303{
6304 int rc;
904f2bfd 6305 struct pqi_scsi_dev *device;
4fd22c13 6306
904f2bfd 6307 device = scmd->device->hostdata;
4fd22c13
MR
6308 pqi_ctrl_block_requests(ctrl_info);
6309 pqi_ctrl_wait_until_quiesced(ctrl_info);
6310 pqi_fail_io_queued_for_device(ctrl_info, device);
6311 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
4fd22c13
MR
6312 if (rc)
6313 rc = FAILED;
6314 else
904f2bfd 6315 rc = pqi_lun_reset_with_retries(ctrl_info, scmd);
37f33181 6316 pqi_ctrl_unblock_requests(ctrl_info);
429fab70 6317
4fd22c13
MR
6318 return rc;
6319}
6320
6c223761
KB
6321static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6322{
6323 int rc;
7561a7e4 6324 struct Scsi_Host *shost;
6c223761
KB
6325 struct pqi_ctrl_info *ctrl_info;
6326 struct pqi_scsi_dev *device;
6327
7561a7e4
KB
6328 shost = scmd->device->host;
6329 ctrl_info = shost_to_hba(shost);
6c223761
KB
6330 device = scmd->device->hostdata;
6331
37f33181
KB
6332 mutex_lock(&ctrl_info->lun_reset_mutex);
6333
6c223761 6334 dev_err(&ctrl_info->pci_dev->dev,
f0e473e0
MB
6335 "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
6336 shost->host_no,
904f2bfd 6337 device->bus, device->target, (u32)scmd->device->lun,
f0e473e0 6338 scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
6c223761 6339
7561a7e4 6340 pqi_check_ctrl_health(ctrl_info);
37f33181 6341 if (pqi_ctrl_offline(ctrl_info))
7561a7e4 6342 rc = FAILED;
37f33181 6343 else
904f2bfd 6344 rc = pqi_device_reset(ctrl_info, scmd);
429fab70 6345
6c223761
KB
6346 dev_err(&ctrl_info->pci_dev->dev,
6347 "reset of scsi %d:%d:%d:%d: %s\n",
904f2bfd 6348 shost->host_no, device->bus, device->target, (u32)scmd->device->lun,
6c223761
KB
6349 rc == SUCCESS ? "SUCCESS" : "FAILED");
6350
37f33181
KB
6351 mutex_unlock(&ctrl_info->lun_reset_mutex);
6352
6c223761
KB
6353 return rc;
6354}
6355
6356static int pqi_slave_alloc(struct scsi_device *sdev)
6357{
6358 struct pqi_scsi_dev *device;
6359 unsigned long flags;
6360 struct pqi_ctrl_info *ctrl_info;
6361 struct scsi_target *starget;
6362 struct sas_rphy *rphy;
6363
6364 ctrl_info = shost_to_hba(sdev->host);
6365
6366 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6367
6368 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6369 starget = scsi_target(sdev);
6370 rphy = target_to_rphy(starget);
6371 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6372 if (device) {
d4dc6aea
KB
6373 if (device->target_lun_valid) {
6374 device->ignore_device = true;
6375 } else {
6376 device->target = sdev_id(sdev);
6377 device->lun = sdev->lun;
6378 device->target_lun_valid = true;
6379 }
6c223761
KB
6380 }
6381 } else {
6382 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6383 sdev_id(sdev), sdev->lun);
6384 }
6385
94086f5b 6386 if (device) {
6c223761
KB
6387 sdev->hostdata = device;
6388 device->sdev = sdev;
6389 if (device->queue_depth) {
6390 device->advertised_queue_depth = device->queue_depth;
6391 scsi_change_queue_depth(sdev,
6392 device->advertised_queue_depth);
6393 }
99a12b48 6394 if (pqi_is_logical_device(device)) {
b6e2ef67 6395 pqi_disable_write_same(sdev);
99a12b48 6396 } else {
2b447f81 6397 sdev->allow_restart = 1;
99a12b48
KB
6398 if (device->device_type == SA_DEVICE_TYPE_NVME)
6399 pqi_disable_write_same(sdev);
6400 }
6c223761
KB
6401 }
6402
6403 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6404
6405 return 0;
6406}
6407
52198226
CH
6408static int pqi_map_queues(struct Scsi_Host *shost)
6409{
6410 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6411
79d3fa9e 6412 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
ed76e329 6413 ctrl_info->pci_dev, 0);
52198226
CH
6414}
6415
d4dc6aea
KB
6416static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
6417{
6418 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
6419}
6420
ce143793
KB
6421static int pqi_slave_configure(struct scsi_device *sdev)
6422{
d4dc6aea 6423 int rc = 0;
ce143793
KB
6424 struct pqi_scsi_dev *device;
6425
6426 device = sdev->hostdata;
6427 device->devtype = sdev->type;
6428
d4dc6aea
KB
6429 if (pqi_is_tape_changer_device(device) && device->ignore_device) {
6430 rc = -ENXIO;
6431 device->ignore_device = false;
6432 }
6433
6434 return rc;
ce143793
KB
6435}
6436
8b664fef 6437static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6c223761
KB
6438{
6439 struct pci_dev *pci_dev;
6440 u32 subsystem_vendor;
6441 u32 subsystem_device;
6442 cciss_pci_info_struct pciinfo;
6443
6444 if (!arg)
6445 return -EINVAL;
6446
6447 pci_dev = ctrl_info->pci_dev;
6448
6449 pciinfo.domain = pci_domain_nr(pci_dev->bus);
6450 pciinfo.bus = pci_dev->bus->number;
6451 pciinfo.dev_fn = pci_dev->devfn;
6452 subsystem_vendor = pci_dev->subsystem_vendor;
6453 subsystem_device = pci_dev->subsystem_device;
8b664fef 6454 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6c223761
KB
6455
6456 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
6457 return -EFAULT;
6458
6459 return 0;
6460}
6461
6462static int pqi_getdrivver_ioctl(void __user *arg)
6463{
6464 u32 version;
6465
6466 if (!arg)
6467 return -EINVAL;
6468
6469 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6470 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
6471
6472 if (copy_to_user(arg, &version, sizeof(version)))
6473 return -EFAULT;
6474
6475 return 0;
6476}
6477
6478struct ciss_error_info {
6479 u8 scsi_status;
6480 int command_status;
6481 size_t sense_data_length;
6482};
6483
6484static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6485 struct ciss_error_info *ciss_error_info)
6486{
6487 int ciss_cmd_status;
6488 size_t sense_data_length;
6489
6490 switch (pqi_error_info->data_out_result) {
6491 case PQI_DATA_IN_OUT_GOOD:
6492 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6493 break;
6494 case PQI_DATA_IN_OUT_UNDERFLOW:
6495 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6496 break;
6497 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6498 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6499 break;
6500 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6501 case PQI_DATA_IN_OUT_BUFFER_ERROR:
6502 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6503 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6504 case PQI_DATA_IN_OUT_ERROR:
6505 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6506 break;
6507 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6508 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6509 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6510 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6511 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6512 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6513 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6514 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6515 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6516 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6517 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6518 break;
6519 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6520 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6521 break;
6522 case PQI_DATA_IN_OUT_ABORTED:
6523 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6524 break;
6525 case PQI_DATA_IN_OUT_TIMEOUT:
6526 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6527 break;
6528 default:
6529 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6530 break;
6531 }
6532
6533 sense_data_length =
6534 get_unaligned_le16(&pqi_error_info->sense_data_length);
6535 if (sense_data_length == 0)
6536 sense_data_length =
6537 get_unaligned_le16(&pqi_error_info->response_data_length);
6538 if (sense_data_length)
6539 if (sense_data_length > sizeof(pqi_error_info->data))
6540 sense_data_length = sizeof(pqi_error_info->data);
6541
6542 ciss_error_info->scsi_status = pqi_error_info->status;
6543 ciss_error_info->command_status = ciss_cmd_status;
6544 ciss_error_info->sense_data_length = sense_data_length;
6545}
6546
6547static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6548{
6549 int rc;
6550 char *kernel_buffer = NULL;
6551 u16 iu_length;
6552 size_t sense_data_length;
6553 IOCTL_Command_struct iocommand;
6554 struct pqi_raid_path_request request;
6555 struct pqi_raid_error_info pqi_error_info;
6556 struct ciss_error_info ciss_error_info;
6557
6558 if (pqi_ctrl_offline(ctrl_info))
6559 return -ENXIO;
2790cd4d
KB
6560 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6561 return -EBUSY;
6c223761
KB
6562 if (!arg)
6563 return -EINVAL;
6564 if (!capable(CAP_SYS_RAWIO))
6565 return -EPERM;
6566 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6567 return -EFAULT;
6568 if (iocommand.buf_size < 1 &&
6569 iocommand.Request.Type.Direction != XFER_NONE)
6570 return -EINVAL;
6571 if (iocommand.Request.CDBLen > sizeof(request.cdb))
6572 return -EINVAL;
6573 if (iocommand.Request.Type.Type != TYPE_CMD)
6574 return -EINVAL;
6575
6576 switch (iocommand.Request.Type.Direction) {
6577 case XFER_NONE:
6578 case XFER_WRITE:
6579 case XFER_READ:
41555d54 6580 case XFER_READ | XFER_WRITE:
6c223761
KB
6581 break;
6582 default:
6583 return -EINVAL;
6584 }
6585
6586 if (iocommand.buf_size > 0) {
6587 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6588 if (!kernel_buffer)
6589 return -ENOMEM;
6590 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6591 if (copy_from_user(kernel_buffer, iocommand.buf,
6592 iocommand.buf_size)) {
6593 rc = -EFAULT;
6594 goto out;
6595 }
6596 } else {
6597 memset(kernel_buffer, 0, iocommand.buf_size);
6598 }
6599 }
6600
6601 memset(&request, 0, sizeof(request));
6602
6603 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6604 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6605 PQI_REQUEST_HEADER_LENGTH;
6606 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6607 sizeof(request.lun_number));
6608 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6609 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6610
6611 switch (iocommand.Request.Type.Direction) {
6612 case XFER_NONE:
6613 request.data_direction = SOP_NO_DIRECTION_FLAG;
6614 break;
6615 case XFER_WRITE:
6616 request.data_direction = SOP_WRITE_FLAG;
6617 break;
6618 case XFER_READ:
6619 request.data_direction = SOP_READ_FLAG;
6620 break;
41555d54
KB
6621 case XFER_READ | XFER_WRITE:
6622 request.data_direction = SOP_BIDIRECTIONAL;
6623 break;
6c223761
KB
6624 }
6625
6626 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6627
6628 if (iocommand.buf_size > 0) {
6629 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6630
6631 rc = pqi_map_single(ctrl_info->pci_dev,
6632 &request.sg_descriptors[0], kernel_buffer,
6917a9cc 6633 iocommand.buf_size, DMA_BIDIRECTIONAL);
6c223761
KB
6634 if (rc)
6635 goto out;
6636
6637 iu_length += sizeof(request.sg_descriptors[0]);
6638 }
6639
6640 put_unaligned_le16(iu_length, &request.header.iu_length);
6641
21432010 6642 if (ctrl_info->raid_iu_timeout_supported)
6643 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6644
6c223761 6645 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
ae0c189d 6646 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6c223761
KB
6647
6648 if (iocommand.buf_size > 0)
6649 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6917a9cc 6650 DMA_BIDIRECTIONAL);
6c223761
KB
6651
6652 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6653
6654 if (rc == 0) {
6655 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6656 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6657 iocommand.error_info.CommandStatus =
6658 ciss_error_info.command_status;
6659 sense_data_length = ciss_error_info.sense_data_length;
6660 if (sense_data_length) {
6661 if (sense_data_length >
6662 sizeof(iocommand.error_info.SenseInfo))
6663 sense_data_length =
6664 sizeof(iocommand.error_info.SenseInfo);
6665 memcpy(iocommand.error_info.SenseInfo,
6666 pqi_error_info.data, sense_data_length);
6667 iocommand.error_info.SenseLen = sense_data_length;
6668 }
6669 }
6670
6671 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6672 rc = -EFAULT;
6673 goto out;
6674 }
6675
6676 if (rc == 0 && iocommand.buf_size > 0 &&
6677 (iocommand.Request.Type.Direction & XFER_READ)) {
6678 if (copy_to_user(iocommand.buf, kernel_buffer,
6679 iocommand.buf_size)) {
6680 rc = -EFAULT;
6681 }
6682 }
6683
6684out:
6685 kfree(kernel_buffer);
6686
6687 return rc;
6688}
6689
6f4e626f
NC
6690static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6691 void __user *arg)
6c223761
KB
6692{
6693 int rc;
6694 struct pqi_ctrl_info *ctrl_info;
6695
6696 ctrl_info = shost_to_hba(sdev->host);
6697
6698 switch (cmd) {
6699 case CCISS_DEREGDISK:
6700 case CCISS_REGNEWDISK:
6701 case CCISS_REGNEWD:
6702 rc = pqi_scan_scsi_devices(ctrl_info);
6703 break;
6704 case CCISS_GETPCIINFO:
6705 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6706 break;
6707 case CCISS_GETDRIVVER:
6708 rc = pqi_getdrivver_ioctl(arg);
6709 break;
6710 case CCISS_PASSTHRU:
6711 rc = pqi_passthru_ioctl(ctrl_info, arg);
6712 break;
6713 default:
6714 rc = -EINVAL;
6715 break;
6716 }
6717
6718 return rc;
6719}
6720
6d90615f 6721static ssize_t pqi_firmware_version_show(struct device *dev,
6c223761
KB
6722 struct device_attribute *attr, char *buffer)
6723{
6c223761
KB
6724 struct Scsi_Host *shost;
6725 struct pqi_ctrl_info *ctrl_info;
6726
6727 shost = class_to_shost(dev);
6728 ctrl_info = shost_to_hba(shost);
6729
a4256252 6730 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6d90615f
MB
6731}
6732
6733static ssize_t pqi_driver_version_show(struct device *dev,
6734 struct device_attribute *attr, char *buffer)
6735{
a4256252 6736 return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6d90615f 6737}
6c223761 6738
6d90615f
MB
6739static ssize_t pqi_serial_number_show(struct device *dev,
6740 struct device_attribute *attr, char *buffer)
6741{
6742 struct Scsi_Host *shost;
6743 struct pqi_ctrl_info *ctrl_info;
6744
6745 shost = class_to_shost(dev);
6746 ctrl_info = shost_to_hba(shost);
6747
a4256252 6748 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6d90615f
MB
6749}
6750
6751static ssize_t pqi_model_show(struct device *dev,
6752 struct device_attribute *attr, char *buffer)
6753{
6754 struct Scsi_Host *shost;
6755 struct pqi_ctrl_info *ctrl_info;
6756
6757 shost = class_to_shost(dev);
6758 ctrl_info = shost_to_hba(shost);
6759
a4256252 6760 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6d90615f
MB
6761}
6762
6763static ssize_t pqi_vendor_show(struct device *dev,
6764 struct device_attribute *attr, char *buffer)
6765{
6766 struct Scsi_Host *shost;
6767 struct pqi_ctrl_info *ctrl_info;
6768
6769 shost = class_to_shost(dev);
6770 ctrl_info = shost_to_hba(shost);
6771
a4256252 6772 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6c223761
KB
6773}
6774
6775static ssize_t pqi_host_rescan_store(struct device *dev,
6776 struct device_attribute *attr, const char *buffer, size_t count)
6777{
6778 struct Scsi_Host *shost = class_to_shost(dev);
6779
6780 pqi_scan_start(shost);
6781
6782 return count;
6783}
6784
3c50976f
KB
6785static ssize_t pqi_lockup_action_show(struct device *dev,
6786 struct device_attribute *attr, char *buffer)
6787{
6788 int count = 0;
6789 unsigned int i;
6790
6791 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6792 if (pqi_lockup_actions[i].action == pqi_lockup_action)
181aea89 6793 count += scnprintf(buffer + count, PAGE_SIZE - count,
3c50976f
KB
6794 "[%s] ", pqi_lockup_actions[i].name);
6795 else
181aea89 6796 count += scnprintf(buffer + count, PAGE_SIZE - count,
3c50976f
KB
6797 "%s ", pqi_lockup_actions[i].name);
6798 }
6799
181aea89 6800 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
3c50976f
KB
6801
6802 return count;
6803}
6804
6805static ssize_t pqi_lockup_action_store(struct device *dev,
6806 struct device_attribute *attr, const char *buffer, size_t count)
6807{
6808 unsigned int i;
6809 char *action_name;
6810 char action_name_buffer[32];
6811
6812 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6813 action_name = strstrip(action_name_buffer);
6814
6815 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6816 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6817 pqi_lockup_action = pqi_lockup_actions[i].action;
6818 return count;
6819 }
6820 }
6821
6822 return -EINVAL;
6823}
6824
5be746d7
DB
6825static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
6826 struct device_attribute *attr, char *buffer)
6827{
6828 struct Scsi_Host *shost = class_to_shost(dev);
6829 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6830
6831 return scnprintf(buffer, 10, "%x\n",
6832 ctrl_info->enable_stream_detection);
6833}
6834
6835static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
6836 struct device_attribute *attr, const char *buffer, size_t count)
6837{
6838 struct Scsi_Host *shost = class_to_shost(dev);
6839 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6840 u8 set_stream_detection = 0;
6841
6842 if (kstrtou8(buffer, 0, &set_stream_detection))
6843 return -EINVAL;
6844
6845 if (set_stream_detection > 0)
6846 set_stream_detection = 1;
6847
6848 ctrl_info->enable_stream_detection = set_stream_detection;
6849
6850 return count;
6851}
6852
6702d2c4
DB
6853static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
6854 struct device_attribute *attr, char *buffer)
6855{
6856 struct Scsi_Host *shost = class_to_shost(dev);
6857 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6858
6859 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
6860}
6861
6862static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
6863 struct device_attribute *attr, const char *buffer, size_t count)
6864{
6865 struct Scsi_Host *shost = class_to_shost(dev);
6866 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6867 u8 set_r5_writes = 0;
6868
6869 if (kstrtou8(buffer, 0, &set_r5_writes))
6870 return -EINVAL;
6871
6872 if (set_r5_writes > 0)
6873 set_r5_writes = 1;
6874
6875 ctrl_info->enable_r5_writes = set_r5_writes;
6876
6877 return count;
6878}
6879
6880static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
6881 struct device_attribute *attr, char *buffer)
6882{
6883 struct Scsi_Host *shost = class_to_shost(dev);
6884 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6885
6886 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
6887}
6888
6889static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
6890 struct device_attribute *attr, const char *buffer, size_t count)
6891{
6892 struct Scsi_Host *shost = class_to_shost(dev);
6893 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6894 u8 set_r6_writes = 0;
6895
6896 if (kstrtou8(buffer, 0, &set_r6_writes))
6897 return -EINVAL;
6898
6899 if (set_r6_writes > 0)
6900 set_r6_writes = 1;
6901
6902 ctrl_info->enable_r6_writes = set_r6_writes;
6903
6904 return count;
6905}
6906
6d90615f
MB
6907static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6908static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6909static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6910static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6911static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
cbe0c7b1 6912static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
583891c9
KB
6913static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
6914 pqi_lockup_action_store);
5be746d7
DB
6915static DEVICE_ATTR(enable_stream_detection, 0644,
6916 pqi_host_enable_stream_detection_show,
6917 pqi_host_enable_stream_detection_store);
6702d2c4
DB
6918static DEVICE_ATTR(enable_r5_writes, 0644,
6919 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
6920static DEVICE_ATTR(enable_r6_writes, 0644,
6921 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
6c223761 6922
64fc9015
BVA
6923static struct attribute *pqi_shost_attrs[] = {
6924 &dev_attr_driver_version.attr,
6925 &dev_attr_firmware_version.attr,
6926 &dev_attr_model.attr,
6927 &dev_attr_serial_number.attr,
6928 &dev_attr_vendor.attr,
6929 &dev_attr_rescan.attr,
6930 &dev_attr_lockup_action.attr,
6931 &dev_attr_enable_stream_detection.attr,
6932 &dev_attr_enable_r5_writes.attr,
6933 &dev_attr_enable_r6_writes.attr,
6c223761
KB
6934 NULL
6935};
6936
64fc9015
BVA
6937ATTRIBUTE_GROUPS(pqi_shost);
6938
cd128244
DC
6939static ssize_t pqi_unique_id_show(struct device *dev,
6940 struct device_attribute *attr, char *buffer)
6941{
6942 struct pqi_ctrl_info *ctrl_info;
6943 struct scsi_device *sdev;
6944 struct pqi_scsi_dev *device;
6945 unsigned long flags;
5b083b30 6946 u8 unique_id[16];
cd128244
DC
6947
6948 sdev = to_scsi_device(dev);
6949 ctrl_info = shost_to_hba(sdev->host);
6950
331f7e99
SB
6951 if (pqi_ctrl_offline(ctrl_info))
6952 return -ENODEV;
6953
cd128244
DC
6954 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6955
6956 device = sdev->hostdata;
6957 if (!device) {
8b664fef 6958 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
cd128244
DC
6959 return -ENODEV;
6960 }
5b083b30 6961
28ca6d87
MM
6962 if (device->is_physical_device)
6963 memcpy(unique_id, device->wwid, sizeof(device->wwid));
6964 else
5b083b30 6965 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
cd128244
DC
6966
6967 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6968
a4256252 6969 return scnprintf(buffer, PAGE_SIZE,
583891c9
KB
6970 "%02X%02X%02X%02X%02X%02X%02X%02X"
6971 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
5b083b30
KB
6972 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
6973 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
6974 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
6975 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
cd128244
DC
6976}
6977
6978static ssize_t pqi_lunid_show(struct device *dev,
6979 struct device_attribute *attr, char *buffer)
6980{
6981 struct pqi_ctrl_info *ctrl_info;
6982 struct scsi_device *sdev;
6983 struct pqi_scsi_dev *device;
6984 unsigned long flags;
6985 u8 lunid[8];
6986
6987 sdev = to_scsi_device(dev);
6988 ctrl_info = shost_to_hba(sdev->host);
6989
331f7e99
SB
6990 if (pqi_ctrl_offline(ctrl_info))
6991 return -ENODEV;
6992
cd128244
DC
6993 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6994
6995 device = sdev->hostdata;
6996 if (!device) {
8b664fef 6997 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
cd128244
DC
6998 return -ENODEV;
6999 }
694c5d5b 7000
cd128244
DC
7001 memcpy(lunid, device->scsi3addr, sizeof(lunid));
7002
7003 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7004
a4256252 7005 return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
cd128244
DC
7006}
7007
694c5d5b
KB
7008#define MAX_PATHS 8
7009
cd128244
DC
7010static ssize_t pqi_path_info_show(struct device *dev,
7011 struct device_attribute *attr, char *buf)
7012{
7013 struct pqi_ctrl_info *ctrl_info;
7014 struct scsi_device *sdev;
7015 struct pqi_scsi_dev *device;
7016 unsigned long flags;
7017 int i;
7018 int output_len = 0;
7019 u8 box;
7020 u8 bay;
694c5d5b 7021 u8 path_map_index;
cd128244 7022 char *active;
694c5d5b 7023 u8 phys_connector[2];
cd128244
DC
7024
7025 sdev = to_scsi_device(dev);
7026 ctrl_info = shost_to_hba(sdev->host);
7027
331f7e99
SB
7028 if (pqi_ctrl_offline(ctrl_info))
7029 return -ENODEV;
7030
cd128244
DC
7031 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7032
7033 device = sdev->hostdata;
7034 if (!device) {
8b664fef 7035 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
cd128244
DC
7036 return -ENODEV;
7037 }
7038
7039 bay = device->bay;
7040 for (i = 0; i < MAX_PATHS; i++) {
694c5d5b 7041 path_map_index = 1 << i;
cd128244
DC
7042 if (i == device->active_path_index)
7043 active = "Active";
7044 else if (device->path_map & path_map_index)
7045 active = "Inactive";
7046 else
7047 continue;
7048
7049 output_len += scnprintf(buf + output_len,
7050 PAGE_SIZE - output_len,
7051 "[%d:%d:%d:%d] %20.20s ",
7052 ctrl_info->scsi_host->host_no,
7053 device->bus, device->target,
7054 device->lun,
7055 scsi_device_type(device->devtype));
7056
7057 if (device->devtype == TYPE_RAID ||
7058 pqi_is_logical_device(device))
7059 goto end_buffer;
7060
7061 memcpy(&phys_connector, &device->phys_connector[i],
7062 sizeof(phys_connector));
7063 if (phys_connector[0] < '0')
7064 phys_connector[0] = '0';
7065 if (phys_connector[1] < '0')
7066 phys_connector[1] = '0';
7067
7068 output_len += scnprintf(buf + output_len,
7069 PAGE_SIZE - output_len,
7070 "PORT: %.2s ", phys_connector);
7071
7072 box = device->box[i];
7073 if (box != 0 && box != 0xFF)
7074 output_len += scnprintf(buf + output_len,
7075 PAGE_SIZE - output_len,
7076 "BOX: %hhu ", box);
7077
7078 if ((device->devtype == TYPE_DISK ||
7079 device->devtype == TYPE_ZBC) &&
7080 pqi_expose_device(device))
7081 output_len += scnprintf(buf + output_len,
7082 PAGE_SIZE - output_len,
7083 "BAY: %hhu ", bay);
7084
7085end_buffer:
7086 output_len += scnprintf(buf + output_len,
7087 PAGE_SIZE - output_len,
7088 "%s\n", active);
7089 }
7090
7091 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
694c5d5b 7092
cd128244
DC
7093 return output_len;
7094}
7095
6c223761
KB
7096static ssize_t pqi_sas_address_show(struct device *dev,
7097 struct device_attribute *attr, char *buffer)
7098{
7099 struct pqi_ctrl_info *ctrl_info;
7100 struct scsi_device *sdev;
7101 struct pqi_scsi_dev *device;
7102 unsigned long flags;
7103 u64 sas_address;
7104
7105 sdev = to_scsi_device(dev);
7106 ctrl_info = shost_to_hba(sdev->host);
7107
331f7e99
SB
7108 if (pqi_ctrl_offline(ctrl_info))
7109 return -ENODEV;
7110
6c223761
KB
7111 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7112
7113 device = sdev->hostdata;
00598b05 7114 if (!device) {
8b664fef 7115 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6c223761
KB
7116 return -ENODEV;
7117 }
694c5d5b 7118
6c223761
KB
7119 sas_address = device->sas_address;
7120
7121 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7122
a4256252 7123 return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
6c223761
KB
7124}
7125
7126static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
7127 struct device_attribute *attr, char *buffer)
7128{
7129 struct pqi_ctrl_info *ctrl_info;
7130 struct scsi_device *sdev;
7131 struct pqi_scsi_dev *device;
7132 unsigned long flags;
7133
7134 sdev = to_scsi_device(dev);
7135 ctrl_info = shost_to_hba(sdev->host);
7136
331f7e99
SB
7137 if (pqi_ctrl_offline(ctrl_info))
7138 return -ENODEV;
7139
6c223761
KB
7140 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7141
7142 device = sdev->hostdata;
8b664fef
KB
7143 if (!device) {
7144 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7145 return -ENODEV;
7146 }
7147
588a63fe 7148 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
6c223761
KB
7149 buffer[1] = '\n';
7150 buffer[2] = '\0';
7151
7152 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7153
7154 return 2;
7155}
7156
a9f93392
KB
7157static ssize_t pqi_raid_level_show(struct device *dev,
7158 struct device_attribute *attr, char *buffer)
7159{
7160 struct pqi_ctrl_info *ctrl_info;
7161 struct scsi_device *sdev;
7162 struct pqi_scsi_dev *device;
7163 unsigned long flags;
7164 char *raid_level;
7165
7166 sdev = to_scsi_device(dev);
7167 ctrl_info = shost_to_hba(sdev->host);
7168
331f7e99
SB
7169 if (pqi_ctrl_offline(ctrl_info))
7170 return -ENODEV;
7171
a9f93392
KB
7172 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7173
7174 device = sdev->hostdata;
8b664fef
KB
7175 if (!device) {
7176 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7177 return -ENODEV;
7178 }
a9f93392
KB
7179
7180 if (pqi_is_logical_device(device))
7181 raid_level = pqi_raid_level_to_string(device->raid_level);
7182 else
7183 raid_level = "N/A";
7184
7185 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7186
a4256252 7187 return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
a9f93392
KB
7188}
7189
8b664fef
KB
7190static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
7191 struct device_attribute *attr, char *buffer)
7192{
7193 struct pqi_ctrl_info *ctrl_info;
7194 struct scsi_device *sdev;
7195 struct pqi_scsi_dev *device;
7196 unsigned long flags;
7197 int raid_bypass_cnt;
7198
7199 sdev = to_scsi_device(dev);
7200 ctrl_info = shost_to_hba(sdev->host);
7201
331f7e99
SB
7202 if (pqi_ctrl_offline(ctrl_info))
7203 return -ENODEV;
7204
8b664fef
KB
7205 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7206
7207 device = sdev->hostdata;
7208 if (!device) {
7209 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7210 return -ENODEV;
7211 }
7212
7213 raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
7214
7215 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7216
a4256252 7217 return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
8b664fef
KB
7218}
7219
2a47834d
GW
7220static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
7221 struct device_attribute *attr, char *buf)
7222{
7223 struct pqi_ctrl_info *ctrl_info;
7224 struct scsi_device *sdev;
7225 struct pqi_scsi_dev *device;
7226 unsigned long flags;
7227 int output_len = 0;
7228
7229 sdev = to_scsi_device(dev);
7230 ctrl_info = shost_to_hba(sdev->host);
7231
331f7e99
SB
7232 if (pqi_ctrl_offline(ctrl_info))
7233 return -ENODEV;
7234
2a47834d
GW
7235 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7236
7237 device = sdev->hostdata;
7238 if (!device) {
7239 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7240 return -ENODEV;
7241 }
7242
7243 output_len = snprintf(buf, PAGE_SIZE, "%d\n",
7244 device->ncq_prio_enable);
7245 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7246
7247 return output_len;
7248}
7249
7250static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
7251 struct device_attribute *attr,
7252 const char *buf, size_t count)
7253{
7254 struct pqi_ctrl_info *ctrl_info;
7255 struct scsi_device *sdev;
7256 struct pqi_scsi_dev *device;
7257 unsigned long flags;
7258 u8 ncq_prio_enable = 0;
7259
7260 if (kstrtou8(buf, 0, &ncq_prio_enable))
7261 return -EINVAL;
7262
7263 sdev = to_scsi_device(dev);
7264 ctrl_info = shost_to_hba(sdev->host);
7265
7266 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7267
7268 device = sdev->hostdata;
7269
7270 if (!device) {
7271 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7272 return -ENODEV;
7273 }
7274
7275 if (!device->ncq_prio_support ||
7276 !device->is_physical_device) {
7277 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7278 return -EINVAL;
7279 }
7280
7281 device->ncq_prio_enable = ncq_prio_enable;
7282
7283 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7284
7285 return strlen(buf);
7286}
7287
cd128244
DC
7288static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
7289static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
7290static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
cbe0c7b1 7291static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
8b664fef 7292static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
a9f93392 7293static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
8b664fef 7294static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
2a47834d
GW
7295static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
7296 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
6c223761 7297
64fc9015
BVA
7298static struct attribute *pqi_sdev_attrs[] = {
7299 &dev_attr_lunid.attr,
7300 &dev_attr_unique_id.attr,
7301 &dev_attr_path_info.attr,
7302 &dev_attr_sas_address.attr,
7303 &dev_attr_ssd_smart_path_enabled.attr,
7304 &dev_attr_raid_level.attr,
7305 &dev_attr_raid_bypass_cnt.attr,
2a47834d 7306 &dev_attr_sas_ncq_prio_enable.attr,
6c223761
KB
7307 NULL
7308};
7309
64fc9015
BVA
7310ATTRIBUTE_GROUPS(pqi_sdev);
7311
6c223761
KB
7312static struct scsi_host_template pqi_driver_template = {
7313 .module = THIS_MODULE,
7314 .name = DRIVER_NAME_SHORT,
7315 .proc_name = DRIVER_NAME_SHORT,
7316 .queuecommand = pqi_scsi_queue_command,
7317 .scan_start = pqi_scan_start,
7318 .scan_finished = pqi_scan_finished,
7319 .this_id = -1,
6c223761
KB
7320 .eh_device_reset_handler = pqi_eh_device_reset_handler,
7321 .ioctl = pqi_ioctl,
7322 .slave_alloc = pqi_slave_alloc,
ce143793 7323 .slave_configure = pqi_slave_configure,
52198226 7324 .map_queues = pqi_map_queues,
64fc9015
BVA
7325 .sdev_groups = pqi_sdev_groups,
7326 .shost_groups = pqi_shost_groups,
c1ea387d 7327 .cmd_size = sizeof(struct pqi_cmd_priv),
6c223761
KB
7328};
7329
7330static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7331{
7332 int rc;
7333 struct Scsi_Host *shost;
7334
7335 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7336 if (!shost) {
583891c9 7337 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
6c223761
KB
7338 return -ENOMEM;
7339 }
7340
7341 shost->io_port = 0;
7342 shost->n_io_port = 0;
7343 shost->this_id = -1;
7344 shost->max_channel = PQI_MAX_BUS;
7345 shost->max_cmd_len = MAX_COMMAND_SIZE;
904f2bfd 7346 shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
6c223761
KB
7347 shost->max_lun = ~0;
7348 shost->max_id = ~0;
7349 shost->max_sectors = ctrl_info->max_sectors;
7350 shost->can_queue = ctrl_info->scsi_ml_can_queue;
7351 shost->cmd_per_lun = shost->can_queue;
7352 shost->sg_tablesize = ctrl_info->sg_tablesize;
7353 shost->transportt = pqi_sas_transport_template;
52198226 7354 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
6c223761
KB
7355 shost->unique_id = shost->irq;
7356 shost->nr_hw_queues = ctrl_info->num_queue_groups;
c6d3ee20 7357 shost->host_tagset = 1;
6c223761
KB
7358 shost->hostdata[0] = (unsigned long)ctrl_info;
7359
7360 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7361 if (rc) {
583891c9 7362 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
6c223761
KB
7363 goto free_host;
7364 }
7365
7366 rc = pqi_add_sas_host(shost, ctrl_info);
7367 if (rc) {
583891c9 7368 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
6c223761
KB
7369 goto remove_host;
7370 }
7371
7372 ctrl_info->scsi_host = shost;
7373
7374 return 0;
7375
7376remove_host:
7377 scsi_remove_host(shost);
7378free_host:
7379 scsi_host_put(shost);
7380
7381 return rc;
7382}
7383
7384static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7385{
7386 struct Scsi_Host *shost;
7387
7388 pqi_delete_sas_host(ctrl_info);
7389
7390 shost = ctrl_info->scsi_host;
7391 if (!shost)
7392 return;
7393
7394 scsi_remove_host(shost);
7395 scsi_host_put(shost);
7396}
7397
336b6819
KB
7398static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7399{
7400 int rc = 0;
7401 struct pqi_device_registers __iomem *pqi_registers;
7402 unsigned long timeout;
7403 unsigned int timeout_msecs;
7404 union pqi_reset_register reset_reg;
6c223761 7405
336b6819
KB
7406 pqi_registers = ctrl_info->pqi_registers;
7407 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7408 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7409
7410 while (1) {
7411 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7412 reset_reg.all_bits = readl(&pqi_registers->device_reset);
7413 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7414 break;
85b41834 7415 if (!sis_is_firmware_running(ctrl_info)) {
336b6819
KB
7416 rc = -ENXIO;
7417 break;
7418 }
7419 if (time_after(jiffies, timeout)) {
7420 rc = -ETIMEDOUT;
7421 break;
7422 }
7423 }
7424
7425 return rc;
7426}
6c223761
KB
7427
7428static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7429{
7430 int rc;
336b6819
KB
7431 union pqi_reset_register reset_reg;
7432
7433 if (ctrl_info->pqi_reset_quiesce_supported) {
7434 rc = sis_pqi_reset_quiesce(ctrl_info);
7435 if (rc) {
7436 dev_err(&ctrl_info->pci_dev->dev,
583891c9 7437 "PQI reset failed during quiesce with error %d\n", rc);
336b6819
KB
7438 return rc;
7439 }
7440 }
6c223761 7441
336b6819
KB
7442 reset_reg.all_bits = 0;
7443 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7444 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
6c223761 7445
336b6819 7446 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
6c223761 7447
336b6819 7448 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
6c223761
KB
7449 if (rc)
7450 dev_err(&ctrl_info->pci_dev->dev,
336b6819 7451 "PQI reset failed with error %d\n", rc);
6c223761
KB
7452
7453 return rc;
7454}
7455
6d90615f
MB
7456static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7457{
7458 int rc;
7459 struct bmic_sense_subsystem_info *sense_info;
7460
7461 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7462 if (!sense_info)
7463 return -ENOMEM;
7464
7465 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7466 if (rc)
7467 goto out;
7468
7469 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7470 sizeof(sense_info->ctrl_serial_number));
7471 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7472
7473out:
7474 kfree(sense_info);
7475
7476 return rc;
7477}
7478
7479static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
6c223761
KB
7480{
7481 int rc;
7482 struct bmic_identify_controller *identify;
7483
7484 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7485 if (!identify)
7486 return -ENOMEM;
7487
7488 rc = pqi_identify_controller(ctrl_info, identify);
7489 if (rc)
7490 goto out;
7491
598bef8d
KB
7492 if (get_unaligned_le32(&identify->extra_controller_flags) &
7493 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7494 memcpy(ctrl_info->firmware_version,
7495 identify->firmware_version_long,
7496 sizeof(identify->firmware_version_long));
7497 } else {
7498 memcpy(ctrl_info->firmware_version,
7499 identify->firmware_version_short,
7500 sizeof(identify->firmware_version_short));
7501 ctrl_info->firmware_version
7502 [sizeof(identify->firmware_version_short)] = '\0';
7503 snprintf(ctrl_info->firmware_version +
7504 strlen(ctrl_info->firmware_version),
7505 sizeof(ctrl_info->firmware_version) -
7506 sizeof(identify->firmware_version_short),
7507 "-%u",
7508 get_unaligned_le16(&identify->firmware_build_number));
7509 }
6c223761 7510
6d90615f
MB
7511 memcpy(ctrl_info->model, identify->product_id,
7512 sizeof(identify->product_id));
7513 ctrl_info->model[sizeof(identify->product_id)] = '\0';
7514
7515 memcpy(ctrl_info->vendor, identify->vendor_id,
7516 sizeof(identify->vendor_id));
7517 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7518
1d393227
GW
7519 dev_info(&ctrl_info->pci_dev->dev,
7520 "Firmware version: %s\n", ctrl_info->firmware_version);
7521
6c223761
KB
7522out:
7523 kfree(identify);
7524
7525 return rc;
7526}
7527
b212c251
KB
7528struct pqi_config_table_section_info {
7529 struct pqi_ctrl_info *ctrl_info;
7530 void *section;
7531 u32 section_offset;
7532 void __iomem *section_iomem_addr;
7533};
7534
7535static inline bool pqi_is_firmware_feature_supported(
7536 struct pqi_config_table_firmware_features *firmware_features,
7537 unsigned int bit_position)
98f87667 7538{
b212c251 7539 unsigned int byte_index;
98f87667 7540
b212c251 7541 byte_index = bit_position / BITS_PER_BYTE;
98f87667 7542
b212c251
KB
7543 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7544 return false;
98f87667 7545
b212c251
KB
7546 return firmware_features->features_supported[byte_index] &
7547 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7548}
7549
7550static inline bool pqi_is_firmware_feature_enabled(
7551 struct pqi_config_table_firmware_features *firmware_features,
7552 void __iomem *firmware_features_iomem_addr,
7553 unsigned int bit_position)
7554{
7555 unsigned int byte_index;
7556 u8 __iomem *features_enabled_iomem_addr;
7557
7558 byte_index = (bit_position / BITS_PER_BYTE) +
7559 (le16_to_cpu(firmware_features->num_elements) * 2);
7560
7561 features_enabled_iomem_addr = firmware_features_iomem_addr +
7562 offsetof(struct pqi_config_table_firmware_features,
7563 features_supported) + byte_index;
7564
7565 return *((__force u8 *)features_enabled_iomem_addr) &
7566 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7567}
7568
7569static inline void pqi_request_firmware_feature(
7570 struct pqi_config_table_firmware_features *firmware_features,
7571 unsigned int bit_position)
7572{
7573 unsigned int byte_index;
7574
7575 byte_index = (bit_position / BITS_PER_BYTE) +
7576 le16_to_cpu(firmware_features->num_elements);
7577
7578 firmware_features->features_supported[byte_index] |=
7579 (1 << (bit_position % BITS_PER_BYTE));
7580}
7581
7582static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7583 u16 first_section, u16 last_section)
7584{
7585 struct pqi_vendor_general_request request;
7586
7587 memset(&request, 0, sizeof(request));
7588
7589 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7590 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7591 &request.header.iu_length);
7592 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7593 &request.function_code);
7594 put_unaligned_le16(first_section,
7595 &request.data.config_table_update.first_section);
7596 put_unaligned_le16(last_section,
7597 &request.data.config_table_update.last_section);
7598
ae0c189d 7599 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
b212c251
KB
7600}
7601
7602static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7603 struct pqi_config_table_firmware_features *firmware_features,
7604 void __iomem *firmware_features_iomem_addr)
7605{
7606 void *features_requested;
7607 void __iomem *features_requested_iomem_addr;
f6cc2a77 7608 void __iomem *host_max_known_feature_iomem_addr;
b212c251
KB
7609
7610 features_requested = firmware_features->features_supported +
7611 le16_to_cpu(firmware_features->num_elements);
7612
7613 features_requested_iomem_addr = firmware_features_iomem_addr +
7614 (features_requested - (void *)firmware_features);
7615
7616 memcpy_toio(features_requested_iomem_addr, features_requested,
7617 le16_to_cpu(firmware_features->num_elements));
7618
f6cc2a77
KB
7619 if (pqi_is_firmware_feature_supported(firmware_features,
7620 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7621 host_max_known_feature_iomem_addr =
7622 features_requested_iomem_addr +
7623 (le16_to_cpu(firmware_features->num_elements) * 2) +
7624 sizeof(__le16);
7625 writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
7626 host_max_known_feature_iomem_addr);
7627 }
7628
b212c251
KB
7629 return pqi_config_table_update(ctrl_info,
7630 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7631 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7632}
7633
7634struct pqi_firmware_feature {
7635 char *feature_name;
7636 unsigned int feature_bit;
7637 bool supported;
7638 bool enabled;
7639 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7640 struct pqi_firmware_feature *firmware_feature);
7641};
7642
7643static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7644 struct pqi_firmware_feature *firmware_feature)
7645{
7646 if (!firmware_feature->supported) {
7647 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7648 firmware_feature->feature_name);
7649 return;
7650 }
7651
7652 if (firmware_feature->enabled) {
7653 dev_info(&ctrl_info->pci_dev->dev,
7654 "%s enabled\n", firmware_feature->feature_name);
7655 return;
7656 }
7657
7658 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7659 firmware_feature->feature_name);
7660}
7661
21432010 7662static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7663 struct pqi_firmware_feature *firmware_feature)
7664{
7665 switch (firmware_feature->feature_bit) {
f6cc2a77
KB
7666 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7667 ctrl_info->enable_r1_writes = firmware_feature->enabled;
7668 break;
7669 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7670 ctrl_info->enable_r5_writes = firmware_feature->enabled;
7671 break;
7672 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7673 ctrl_info->enable_r6_writes = firmware_feature->enabled;
7674 break;
21432010 7675 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7676 ctrl_info->soft_reset_handshake_supported =
4ccc354b
KB
7677 firmware_feature->enabled &&
7678 pqi_read_soft_reset_status(ctrl_info);
21432010 7679 break;
7680 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
583891c9 7681 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
21432010 7682 break;
c2922f17 7683 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
583891c9 7684 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
c2922f17 7685 break;
5d1f03e6
MB
7686 case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
7687 ctrl_info->firmware_triage_supported = firmware_feature->enabled;
9ee5d6e9 7688 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
5d1f03e6 7689 break;
28ca6d87
MM
7690 case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
7691 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
7692 break;
904f2bfd
KM
7693 case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
7694 ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
7695 break;
21432010 7696 }
7697
7698 pqi_firmware_feature_status(ctrl_info, firmware_feature);
7699}
7700
b212c251
KB
7701static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7702 struct pqi_firmware_feature *firmware_feature)
7703{
7704 if (firmware_feature->feature_status)
7705 firmware_feature->feature_status(ctrl_info, firmware_feature);
7706}
7707
7708static DEFINE_MUTEX(pqi_firmware_features_mutex);
7709
7710static struct pqi_firmware_feature pqi_firmware_features[] = {
7711 {
7712 .feature_name = "Online Firmware Activation",
7713 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7714 .feature_status = pqi_firmware_feature_status,
7715 },
7716 {
7717 .feature_name = "Serial Management Protocol",
7718 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7719 .feature_status = pqi_firmware_feature_status,
7720 },
f6cc2a77
KB
7721 {
7722 .feature_name = "Maximum Known Feature",
7723 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7724 .feature_status = pqi_firmware_feature_status,
7725 },
7726 {
7727 .feature_name = "RAID 0 Read Bypass",
7728 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
b212c251
KB
7729 .feature_status = pqi_firmware_feature_status,
7730 },
7731 {
f6cc2a77
KB
7732 .feature_name = "RAID 1 Read Bypass",
7733 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7734 .feature_status = pqi_firmware_feature_status,
7735 },
7736 {
7737 .feature_name = "RAID 5 Read Bypass",
7738 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
b212c251
KB
7739 .feature_status = pqi_firmware_feature_status,
7740 },
f6cc2a77
KB
7741 {
7742 .feature_name = "RAID 6 Read Bypass",
7743 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7744 .feature_status = pqi_firmware_feature_status,
7745 },
7746 {
7747 .feature_name = "RAID 0 Write Bypass",
7748 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7749 .feature_status = pqi_firmware_feature_status,
7750 },
7751 {
7752 .feature_name = "RAID 1 Write Bypass",
7753 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
7754 .feature_status = pqi_ctrl_update_feature_flags,
7755 },
7756 {
7757 .feature_name = "RAID 5 Write Bypass",
7758 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
7759 .feature_status = pqi_ctrl_update_feature_flags,
7760 },
7761 {
7762 .feature_name = "RAID 6 Write Bypass",
7763 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
7764 .feature_status = pqi_ctrl_update_feature_flags,
7765 },
4fd22c13
MR
7766 {
7767 .feature_name = "New Soft Reset Handshake",
7768 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
21432010 7769 .feature_status = pqi_ctrl_update_feature_flags,
7770 },
7771 {
7772 .feature_name = "RAID IU Timeout",
7773 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
7774 .feature_status = pqi_ctrl_update_feature_flags,
4fd22c13 7775 },
c2922f17
MB
7776 {
7777 .feature_name = "TMF IU Timeout",
7778 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
7779 .feature_status = pqi_ctrl_update_feature_flags,
7780 },
f6cc2a77
KB
7781 {
7782 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
7783 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
7784 .feature_status = pqi_firmware_feature_status,
7785 },
5d1f03e6
MB
7786 {
7787 .feature_name = "Firmware Triage",
7788 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
7789 .feature_status = pqi_ctrl_update_feature_flags,
7790 },
28ca6d87
MM
7791 {
7792 .feature_name = "RPL Extended Formats 4 and 5",
7793 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
7794 .feature_status = pqi_ctrl_update_feature_flags,
7795 },
904f2bfd
KM
7796 {
7797 .feature_name = "Multi-LUN Target",
7798 .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
7799 .feature_status = pqi_ctrl_update_feature_flags,
7800 },
b212c251
KB
7801};
7802
7803static void pqi_process_firmware_features(
7804 struct pqi_config_table_section_info *section_info)
7805{
7806 int rc;
7807 struct pqi_ctrl_info *ctrl_info;
7808 struct pqi_config_table_firmware_features *firmware_features;
7809 void __iomem *firmware_features_iomem_addr;
7810 unsigned int i;
7811 unsigned int num_features_supported;
7812
7813 ctrl_info = section_info->ctrl_info;
7814 firmware_features = section_info->section;
7815 firmware_features_iomem_addr = section_info->section_iomem_addr;
7816
7817 for (i = 0, num_features_supported = 0;
7818 i < ARRAY_SIZE(pqi_firmware_features); i++) {
7819 if (pqi_is_firmware_feature_supported(firmware_features,
7820 pqi_firmware_features[i].feature_bit)) {
7821 pqi_firmware_features[i].supported = true;
7822 num_features_supported++;
7823 } else {
7824 pqi_firmware_feature_update(ctrl_info,
7825 &pqi_firmware_features[i]);
7826 }
7827 }
7828
7829 if (num_features_supported == 0)
7830 return;
7831
7832 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7833 if (!pqi_firmware_features[i].supported)
7834 continue;
7835 pqi_request_firmware_feature(firmware_features,
7836 pqi_firmware_features[i].feature_bit);
7837 }
7838
7839 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
7840 firmware_features_iomem_addr);
7841 if (rc) {
7842 dev_err(&ctrl_info->pci_dev->dev,
7843 "failed to enable firmware features in PQI configuration table\n");
7844 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7845 if (!pqi_firmware_features[i].supported)
7846 continue;
7847 pqi_firmware_feature_update(ctrl_info,
7848 &pqi_firmware_features[i]);
7849 }
7850 return;
7851 }
7852
7853 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7854 if (!pqi_firmware_features[i].supported)
7855 continue;
7856 if (pqi_is_firmware_feature_enabled(firmware_features,
7857 firmware_features_iomem_addr,
4fd22c13 7858 pqi_firmware_features[i].feature_bit)) {
583891c9 7859 pqi_firmware_features[i].enabled = true;
4fd22c13 7860 }
b212c251
KB
7861 pqi_firmware_feature_update(ctrl_info,
7862 &pqi_firmware_features[i]);
7863 }
7864}
7865
7866static void pqi_init_firmware_features(void)
7867{
7868 unsigned int i;
7869
7870 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7871 pqi_firmware_features[i].supported = false;
7872 pqi_firmware_features[i].enabled = false;
7873 }
7874}
7875
7876static void pqi_process_firmware_features_section(
7877 struct pqi_config_table_section_info *section_info)
7878{
7879 mutex_lock(&pqi_firmware_features_mutex);
7880 pqi_init_firmware_features();
7881 pqi_process_firmware_features(section_info);
7882 mutex_unlock(&pqi_firmware_features_mutex);
7883}
7884
f6cc2a77
KB
7885/*
7886 * Reset all controller settings that can be initialized during the processing
7887 * of the PQI Configuration Table.
7888 */
7889
4ccc354b
KB
7890static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
7891{
7892 ctrl_info->heartbeat_counter = NULL;
7893 ctrl_info->soft_reset_status = NULL;
7894 ctrl_info->soft_reset_handshake_supported = false;
7895 ctrl_info->enable_r1_writes = false;
7896 ctrl_info->enable_r5_writes = false;
7897 ctrl_info->enable_r6_writes = false;
7898 ctrl_info->raid_iu_timeout_supported = false;
7899 ctrl_info->tmf_iu_timeout_supported = false;
5d1f03e6 7900 ctrl_info->firmware_triage_supported = false;
28ca6d87 7901 ctrl_info->rpl_extended_format_4_5_supported = false;
904f2bfd 7902 ctrl_info->multi_lun_device_supported = false;
4ccc354b
KB
7903}
7904
98f87667
KB
7905static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
7906{
7907 u32 table_length;
7908 u32 section_offset;
f6cc2a77 7909 bool firmware_feature_section_present;
98f87667
KB
7910 void __iomem *table_iomem_addr;
7911 struct pqi_config_table *config_table;
7912 struct pqi_config_table_section_header *section;
b212c251 7913 struct pqi_config_table_section_info section_info;
f6cc2a77 7914 struct pqi_config_table_section_info feature_section_info;
98f87667
KB
7915
7916 table_length = ctrl_info->config_table_length;
b212c251
KB
7917 if (table_length == 0)
7918 return 0;
98f87667
KB
7919
7920 config_table = kmalloc(table_length, GFP_KERNEL);
7921 if (!config_table) {
7922 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 7923 "failed to allocate memory for PQI configuration table\n");
98f87667
KB
7924 return -ENOMEM;
7925 }
7926
7927 /*
7928 * Copy the config table contents from I/O memory space into the
7929 * temporary buffer.
7930 */
583891c9 7931 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
98f87667
KB
7932 memcpy_fromio(config_table, table_iomem_addr, table_length);
7933
f6cc2a77 7934 firmware_feature_section_present = false;
b212c251 7935 section_info.ctrl_info = ctrl_info;
583891c9 7936 section_offset = get_unaligned_le32(&config_table->first_section_offset);
98f87667
KB
7937
7938 while (section_offset) {
7939 section = (void *)config_table + section_offset;
7940
b212c251
KB
7941 section_info.section = section;
7942 section_info.section_offset = section_offset;
583891c9 7943 section_info.section_iomem_addr = table_iomem_addr + section_offset;
b212c251 7944
98f87667 7945 switch (get_unaligned_le16(&section->section_id)) {
b212c251 7946 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
f6cc2a77
KB
7947 firmware_feature_section_present = true;
7948 feature_section_info = section_info;
b212c251 7949 break;
98f87667 7950 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
5a259e32
KB
7951 if (pqi_disable_heartbeat)
7952 dev_warn(&ctrl_info->pci_dev->dev,
7953 "heartbeat disabled by module parameter\n");
7954 else
7955 ctrl_info->heartbeat_counter =
7956 table_iomem_addr +
7957 section_offset +
583891c9 7958 offsetof(struct pqi_config_table_heartbeat,
5a259e32 7959 heartbeat_counter);
98f87667 7960 break;
4fd22c13
MR
7961 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
7962 ctrl_info->soft_reset_status =
7963 table_iomem_addr +
7964 section_offset +
7965 offsetof(struct pqi_config_table_soft_reset,
583891c9 7966 soft_reset_status);
4fd22c13 7967 break;
98f87667
KB
7968 }
7969
583891c9 7970 section_offset = get_unaligned_le16(&section->next_section_offset);
98f87667
KB
7971 }
7972
f6cc2a77
KB
7973 /*
7974 * We process the firmware feature section after all other sections
7975 * have been processed so that the feature bit callbacks can take
7976 * into account the settings configured by other sections.
7977 */
7978 if (firmware_feature_section_present)
7979 pqi_process_firmware_features_section(&feature_section_info);
7980
98f87667
KB
7981 kfree(config_table);
7982
7983 return 0;
7984}
7985
162d7753
KB
7986/* Switches the controller from PQI mode back into SIS mode. */
7987
7988static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
7989{
7990 int rc;
7991
061ef06a 7992 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
162d7753
KB
7993 rc = pqi_reset(ctrl_info);
7994 if (rc)
7995 return rc;
4f078e24
KB
7996 rc = sis_reenable_sis_mode(ctrl_info);
7997 if (rc) {
7998 dev_err(&ctrl_info->pci_dev->dev,
7999 "re-enabling SIS mode failed with error %d\n", rc);
8000 return rc;
8001 }
162d7753
KB
8002 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8003
8004 return 0;
8005}
8006
8007/*
8008 * If the controller isn't already in SIS mode, this function forces it into
8009 * SIS mode.
8010 */
8011
8012static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
ff6abb73
KB
8013{
8014 if (!sis_is_firmware_running(ctrl_info))
8015 return -ENXIO;
8016
162d7753
KB
8017 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
8018 return 0;
8019
8020 if (sis_is_kernel_up(ctrl_info)) {
8021 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8022 return 0;
ff6abb73
KB
8023 }
8024
162d7753 8025 return pqi_revert_to_sis_mode(ctrl_info);
ff6abb73
KB
8026}
8027
3ada501d
MR
8028static void pqi_perform_lockup_action(void)
8029{
8030 switch (pqi_lockup_action) {
8031 case PANIC:
8032 panic("FATAL: Smart Family Controller lockup detected");
8033 break;
8034 case REBOOT:
8035 emergency_restart();
8036 break;
8037 case NONE:
8038 default:
8039 break;
8040 }
8041}
8042
6c223761
KB
8043static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
8044{
8045 int rc;
2708a256 8046 u32 product_id;
6c223761 8047
0530736e 8048 if (reset_devices) {
9ee5d6e9
MR
8049 if (pqi_is_fw_triage_supported(ctrl_info)) {
8050 rc = sis_wait_for_fw_triage_completion(ctrl_info);
8051 if (rc)
8052 return rc;
8053 }
0530736e 8054 sis_soft_reset(ctrl_info);
42dc0426 8055 ssleep(PQI_POST_RESET_DELAY_SECS);
0530736e
KB
8056 } else {
8057 rc = pqi_force_sis_mode(ctrl_info);
8058 if (rc)
8059 return rc;
8060 }
6c223761
KB
8061
8062 /*
8063 * Wait until the controller is ready to start accepting SIS
8064 * commands.
8065 */
8066 rc = sis_wait_for_ctrl_ready(ctrl_info);
3ada501d
MR
8067 if (rc) {
8068 if (reset_devices) {
8069 dev_err(&ctrl_info->pci_dev->dev,
8070 "kdump init failed with error %d\n", rc);
8071 pqi_lockup_action = REBOOT;
8072 pqi_perform_lockup_action();
8073 }
6c223761 8074 return rc;
3ada501d 8075 }
6c223761
KB
8076
8077 /*
8078 * Get the controller properties. This allows us to determine
8079 * whether or not it supports PQI mode.
8080 */
8081 rc = sis_get_ctrl_properties(ctrl_info);
8082 if (rc) {
8083 dev_err(&ctrl_info->pci_dev->dev,
8084 "error obtaining controller properties\n");
8085 return rc;
8086 }
8087
8088 rc = sis_get_pqi_capabilities(ctrl_info);
8089 if (rc) {
8090 dev_err(&ctrl_info->pci_dev->dev,
8091 "error obtaining controller capabilities\n");
8092 return rc;
8093 }
8094
2708a256
KB
8095 product_id = sis_get_product_id(ctrl_info);
8096 ctrl_info->product_id = (u8)product_id;
8097 ctrl_info->product_revision = (u8)(product_id >> 8);
8098
d727a776
KB
8099 if (reset_devices) {
8100 if (ctrl_info->max_outstanding_requests >
8101 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
583891c9 8102 ctrl_info->max_outstanding_requests =
d727a776
KB
8103 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
8104 } else {
8105 if (ctrl_info->max_outstanding_requests >
8106 PQI_MAX_OUTSTANDING_REQUESTS)
583891c9 8107 ctrl_info->max_outstanding_requests =
d727a776
KB
8108 PQI_MAX_OUTSTANDING_REQUESTS;
8109 }
6c223761
KB
8110
8111 pqi_calculate_io_resources(ctrl_info);
8112
8113 rc = pqi_alloc_error_buffer(ctrl_info);
8114 if (rc) {
8115 dev_err(&ctrl_info->pci_dev->dev,
8116 "failed to allocate PQI error buffer\n");
8117 return rc;
8118 }
8119
8120 /*
8121 * If the function we are about to call succeeds, the
8122 * controller will transition from legacy SIS mode
8123 * into PQI mode.
8124 */
8125 rc = sis_init_base_struct_addr(ctrl_info);
8126 if (rc) {
8127 dev_err(&ctrl_info->pci_dev->dev,
8128 "error initializing PQI mode\n");
8129 return rc;
8130 }
8131
8132 /* Wait for the controller to complete the SIS -> PQI transition. */
8133 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8134 if (rc) {
8135 dev_err(&ctrl_info->pci_dev->dev,
8136 "transition to PQI mode failed\n");
8137 return rc;
8138 }
8139
8140 /* From here on, we are running in PQI mode. */
8141 ctrl_info->pqi_mode_enabled = true;
ff6abb73 8142 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6c223761
KB
8143
8144 rc = pqi_alloc_admin_queues(ctrl_info);
8145 if (rc) {
8146 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 8147 "failed to allocate admin queues\n");
6c223761
KB
8148 return rc;
8149 }
8150
8151 rc = pqi_create_admin_queues(ctrl_info);
8152 if (rc) {
8153 dev_err(&ctrl_info->pci_dev->dev,
8154 "error creating admin queues\n");
8155 return rc;
8156 }
8157
8158 rc = pqi_report_device_capability(ctrl_info);
8159 if (rc) {
8160 dev_err(&ctrl_info->pci_dev->dev,
8161 "obtaining device capability failed\n");
8162 return rc;
8163 }
8164
8165 rc = pqi_validate_device_capability(ctrl_info);
8166 if (rc)
8167 return rc;
8168
8169 pqi_calculate_queue_resources(ctrl_info);
8170
8171 rc = pqi_enable_msix_interrupts(ctrl_info);
8172 if (rc)
8173 return rc;
8174
8175 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
8176 ctrl_info->max_msix_vectors =
8177 ctrl_info->num_msix_vectors_enabled;
8178 pqi_calculate_queue_resources(ctrl_info);
8179 }
8180
8181 rc = pqi_alloc_io_resources(ctrl_info);
8182 if (rc)
8183 return rc;
8184
8185 rc = pqi_alloc_operational_queues(ctrl_info);
d87d5474
KB
8186 if (rc) {
8187 dev_err(&ctrl_info->pci_dev->dev,
8188 "failed to allocate operational queues\n");
6c223761 8189 return rc;
d87d5474 8190 }
6c223761
KB
8191
8192 pqi_init_operational_queues(ctrl_info);
8193
0777a3fb 8194 rc = pqi_create_queues(ctrl_info);
6c223761
KB
8195 if (rc)
8196 return rc;
8197
0777a3fb 8198 rc = pqi_request_irqs(ctrl_info);
6c223761
KB
8199 if (rc)
8200 return rc;
8201
061ef06a
KB
8202 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8203
8204 ctrl_info->controller_online = true;
b212c251
KB
8205
8206 rc = pqi_process_config_table(ctrl_info);
8207 if (rc)
8208 return rc;
8209
061ef06a 8210 pqi_start_heartbeat_timer(ctrl_info);
6c223761 8211
f6cc2a77
KB
8212 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8213 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8214 if (rc) { /* Supported features not returned correctly. */
8215 dev_err(&ctrl_info->pci_dev->dev,
8216 "error obtaining advanced RAID bypass configuration\n");
8217 return rc;
8218 }
8219 ctrl_info->ciss_report_log_flags |=
8220 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8221 }
8222
6a50d6ad 8223 rc = pqi_enable_events(ctrl_info);
6c223761
KB
8224 if (rc) {
8225 dev_err(&ctrl_info->pci_dev->dev,
6a50d6ad 8226 "error enabling events\n");
6c223761
KB
8227 return rc;
8228 }
8229
6c223761
KB
8230 /* Register with the SCSI subsystem. */
8231 rc = pqi_register_scsi(ctrl_info);
8232 if (rc)
8233 return rc;
8234
6d90615f
MB
8235 rc = pqi_get_ctrl_product_details(ctrl_info);
8236 if (rc) {
8237 dev_err(&ctrl_info->pci_dev->dev,
8238 "error obtaining product details\n");
8239 return rc;
8240 }
8241
8242 rc = pqi_get_ctrl_serial_number(ctrl_info);
6c223761
KB
8243 if (rc) {
8244 dev_err(&ctrl_info->pci_dev->dev,
6d90615f 8245 "error obtaining ctrl serial number\n");
6c223761
KB
8246 return rc;
8247 }
8248
171c2865
DC
8249 rc = pqi_set_diag_rescan(ctrl_info);
8250 if (rc) {
8251 dev_err(&ctrl_info->pci_dev->dev,
8252 "error enabling multi-lun rescan\n");
8253 return rc;
8254 }
8255
6c223761
KB
8256 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8257 if (rc) {
8258 dev_err(&ctrl_info->pci_dev->dev,
8259 "error updating host wellness\n");
8260 return rc;
8261 }
8262
8263 pqi_schedule_update_time_worker(ctrl_info);
8264
8265 pqi_scan_scsi_devices(ctrl_info);
8266
8267 return 0;
8268}
8269
061ef06a
KB
8270static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
8271{
8272 unsigned int i;
8273 struct pqi_admin_queues *admin_queues;
8274 struct pqi_event_queue *event_queue;
8275
8276 admin_queues = &ctrl_info->admin_queues;
8277 admin_queues->iq_pi_copy = 0;
8278 admin_queues->oq_ci_copy = 0;
dac12fbc 8279 writel(0, admin_queues->oq_pi);
061ef06a
KB
8280
8281 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
8282 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
8283 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
8284 ctrl_info->queue_groups[i].oq_ci_copy = 0;
8285
dac12fbc
KB
8286 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
8287 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
8288 writel(0, ctrl_info->queue_groups[i].oq_pi);
061ef06a
KB
8289 }
8290
8291 event_queue = &ctrl_info->event_queue;
dac12fbc 8292 writel(0, event_queue->oq_pi);
061ef06a
KB
8293 event_queue->oq_ci_copy = 0;
8294}
8295
8296static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
8297{
8298 int rc;
8299
8300 rc = pqi_force_sis_mode(ctrl_info);
8301 if (rc)
8302 return rc;
8303
8304 /*
8305 * Wait until the controller is ready to start accepting SIS
8306 * commands.
8307 */
8308 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
8309 if (rc)
8310 return rc;
8311
4fd22c13
MR
8312 /*
8313 * Get the controller properties. This allows us to determine
8314 * whether or not it supports PQI mode.
8315 */
8316 rc = sis_get_ctrl_properties(ctrl_info);
8317 if (rc) {
8318 dev_err(&ctrl_info->pci_dev->dev,
8319 "error obtaining controller properties\n");
8320 return rc;
8321 }
8322
8323 rc = sis_get_pqi_capabilities(ctrl_info);
8324 if (rc) {
8325 dev_err(&ctrl_info->pci_dev->dev,
8326 "error obtaining controller capabilities\n");
8327 return rc;
8328 }
8329
061ef06a
KB
8330 /*
8331 * If the function we are about to call succeeds, the
8332 * controller will transition from legacy SIS mode
8333 * into PQI mode.
8334 */
8335 rc = sis_init_base_struct_addr(ctrl_info);
8336 if (rc) {
8337 dev_err(&ctrl_info->pci_dev->dev,
8338 "error initializing PQI mode\n");
8339 return rc;
8340 }
8341
8342 /* Wait for the controller to complete the SIS -> PQI transition. */
8343 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8344 if (rc) {
8345 dev_err(&ctrl_info->pci_dev->dev,
8346 "transition to PQI mode failed\n");
8347 return rc;
8348 }
8349
8350 /* From here on, we are running in PQI mode. */
8351 ctrl_info->pqi_mode_enabled = true;
8352 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8353
8354 pqi_reinit_queues(ctrl_info);
8355
8356 rc = pqi_create_admin_queues(ctrl_info);
8357 if (rc) {
8358 dev_err(&ctrl_info->pci_dev->dev,
8359 "error creating admin queues\n");
8360 return rc;
8361 }
8362
8363 rc = pqi_create_queues(ctrl_info);
8364 if (rc)
8365 return rc;
8366
8367 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8368
8369 ctrl_info->controller_online = true;
061ef06a
KB
8370 pqi_ctrl_unblock_requests(ctrl_info);
8371
4ccc354b
KB
8372 pqi_ctrl_reset_config(ctrl_info);
8373
4fd22c13
MR
8374 rc = pqi_process_config_table(ctrl_info);
8375 if (rc)
8376 return rc;
8377
8378 pqi_start_heartbeat_timer(ctrl_info);
8379
f6cc2a77
KB
8380 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8381 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8382 if (rc) {
8383 dev_err(&ctrl_info->pci_dev->dev,
8384 "error obtaining advanced RAID bypass configuration\n");
8385 return rc;
8386 }
8387 ctrl_info->ciss_report_log_flags |=
8388 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8389 }
8390
061ef06a
KB
8391 rc = pqi_enable_events(ctrl_info);
8392 if (rc) {
8393 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 8394 "error enabling events\n");
061ef06a
KB
8395 return rc;
8396 }
8397
6d90615f 8398 rc = pqi_get_ctrl_product_details(ctrl_info);
4fd22c13
MR
8399 if (rc) {
8400 dev_err(&ctrl_info->pci_dev->dev,
694c5d5b 8401 "error obtaining product details\n");
4fd22c13
MR
8402 return rc;
8403 }
8404
171c2865
DC
8405 rc = pqi_set_diag_rescan(ctrl_info);
8406 if (rc) {
8407 dev_err(&ctrl_info->pci_dev->dev,
8408 "error enabling multi-lun rescan\n");
8409 return rc;
8410 }
8411
061ef06a
KB
8412 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8413 if (rc) {
8414 dev_err(&ctrl_info->pci_dev->dev,
8415 "error updating host wellness\n");
8416 return rc;
8417 }
8418
2790cd4d
KB
8419 if (pqi_ofa_in_progress(ctrl_info))
8420 pqi_ctrl_unblock_scan(ctrl_info);
061ef06a
KB
8421
8422 pqi_scan_scsi_devices(ctrl_info);
8423
8424 return 0;
8425}
8426
583891c9 8427static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
a81ed5f3 8428{
d20df83b
BOS
8429 int rc;
8430
8431 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
a81ed5f3 8432 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
d20df83b
BOS
8433
8434 return pcibios_err_to_errno(rc);
a81ed5f3
KB
8435}
8436
6c223761
KB
8437static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8438{
8439 int rc;
8440 u64 mask;
8441
8442 rc = pci_enable_device(ctrl_info->pci_dev);
8443 if (rc) {
8444 dev_err(&ctrl_info->pci_dev->dev,
8445 "failed to enable PCI device\n");
8446 return rc;
8447 }
8448
8449 if (sizeof(dma_addr_t) > 4)
8450 mask = DMA_BIT_MASK(64);
8451 else
8452 mask = DMA_BIT_MASK(32);
8453
1d94f06e 8454 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
6c223761
KB
8455 if (rc) {
8456 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8457 goto disable_device;
8458 }
8459
8460 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8461 if (rc) {
8462 dev_err(&ctrl_info->pci_dev->dev,
8463 "failed to obtain PCI resources\n");
8464 goto disable_device;
8465 }
8466
4bdc0d67 8467 ctrl_info->iomem_base = ioremap(pci_resource_start(
6c223761
KB
8468 ctrl_info->pci_dev, 0),
8469 sizeof(struct pqi_ctrl_registers));
8470 if (!ctrl_info->iomem_base) {
8471 dev_err(&ctrl_info->pci_dev->dev,
8472 "failed to map memory for controller registers\n");
8473 rc = -ENOMEM;
8474 goto release_regions;
8475 }
8476
a81ed5f3
KB
8477#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
8478
8479 /* Increase the PCIe completion timeout. */
8480 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8481 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8482 if (rc) {
8483 dev_err(&ctrl_info->pci_dev->dev,
8484 "failed to set PCIe completion timeout\n");
8485 goto release_regions;
8486 }
8487
6c223761
KB
8488 /* Enable bus mastering. */
8489 pci_set_master(ctrl_info->pci_dev);
8490
cbe0c7b1
KB
8491 ctrl_info->registers = ctrl_info->iomem_base;
8492 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8493
6c223761
KB
8494 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8495
8496 return 0;
8497
8498release_regions:
8499 pci_release_regions(ctrl_info->pci_dev);
8500disable_device:
8501 pci_disable_device(ctrl_info->pci_dev);
8502
8503 return rc;
8504}
8505
8506static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
8507{
8508 iounmap(ctrl_info->iomem_base);
8509 pci_release_regions(ctrl_info->pci_dev);
cbe0c7b1
KB
8510 if (pci_is_enabled(ctrl_info->pci_dev))
8511 pci_disable_device(ctrl_info->pci_dev);
6c223761
KB
8512 pci_set_drvdata(ctrl_info->pci_dev, NULL);
8513}
8514
8515static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
8516{
8517 struct pqi_ctrl_info *ctrl_info;
8518
8519 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
8520 GFP_KERNEL, numa_node);
8521 if (!ctrl_info)
8522 return NULL;
8523
8524 mutex_init(&ctrl_info->scan_mutex);
7561a7e4 8525 mutex_init(&ctrl_info->lun_reset_mutex);
4fd22c13 8526 mutex_init(&ctrl_info->ofa_mutex);
6c223761
KB
8527
8528 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8529 spin_lock_init(&ctrl_info->scsi_device_list_lock);
8530
8531 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
8532 atomic_set(&ctrl_info->num_interrupts, 0);
8533
8534 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
8535 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
8536
74a0f573 8537 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
5f310425 8538 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
98f87667 8539
2790cd4d
KB
8540 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
8541 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
8542
6c223761
KB
8543 sema_init(&ctrl_info->sync_request_sem,
8544 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
7561a7e4 8545 init_waitqueue_head(&ctrl_info->block_requests_wait);
6c223761
KB
8546
8547 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
061ef06a 8548 ctrl_info->irq_mode = IRQ_MODE_NONE;
6c223761
KB
8549 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
8550
f6cc2a77
KB
8551 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
8552 ctrl_info->max_transfer_encrypted_sas_sata =
8553 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
8554 ctrl_info->max_transfer_encrypted_nvme =
8555 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
8556 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
8557 ctrl_info->max_write_raid_1_10_2drive = ~0;
8558 ctrl_info->max_write_raid_1_10_3drive = ~0;
8559
6c223761
KB
8560 return ctrl_info;
8561}
8562
8563static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
8564{
8565 kfree(ctrl_info);
8566}
8567
8568static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
8569{
98bf061b
KB
8570 pqi_free_irqs(ctrl_info);
8571 pqi_disable_msix_interrupts(ctrl_info);
6c223761
KB
8572}
8573
8574static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
8575{
6c223761
KB
8576 pqi_free_interrupts(ctrl_info);
8577 if (ctrl_info->queue_memory_base)
8578 dma_free_coherent(&ctrl_info->pci_dev->dev,
8579 ctrl_info->queue_memory_length,
8580 ctrl_info->queue_memory_base,
8581 ctrl_info->queue_memory_base_dma_handle);
8582 if (ctrl_info->admin_queue_memory_base)
8583 dma_free_coherent(&ctrl_info->pci_dev->dev,
8584 ctrl_info->admin_queue_memory_length,
8585 ctrl_info->admin_queue_memory_base,
8586 ctrl_info->admin_queue_memory_base_dma_handle);
8587 pqi_free_all_io_requests(ctrl_info);
8588 if (ctrl_info->error_buffer)
8589 dma_free_coherent(&ctrl_info->pci_dev->dev,
8590 ctrl_info->error_buffer_length,
8591 ctrl_info->error_buffer,
8592 ctrl_info->error_buffer_dma_handle);
8593 if (ctrl_info->iomem_base)
8594 pqi_cleanup_pci_init(ctrl_info);
8595 pqi_free_ctrl_info(ctrl_info);
8596}
8597
8598static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
8599{
331f7e99
SB
8600 ctrl_info->controller_online = false;
8601 pqi_stop_heartbeat_timer(ctrl_info);
8602 pqi_ctrl_block_requests(ctrl_info);
061ef06a
KB
8603 pqi_cancel_rescan_worker(ctrl_info);
8604 pqi_cancel_update_time_worker(ctrl_info);
331f7e99
SB
8605 if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
8606 pqi_fail_all_outstanding_requests(ctrl_info);
8607 ctrl_info->pqi_mode_enabled = false;
8608 }
819225b0 8609 pqi_remove_all_scsi_devices(ctrl_info);
e57a1f9b 8610 pqi_unregister_scsi(ctrl_info);
162d7753
KB
8611 if (ctrl_info->pqi_mode_enabled)
8612 pqi_revert_to_sis_mode(ctrl_info);
6c223761
KB
8613 pqi_free_ctrl_resources(ctrl_info);
8614}
8615
4fd22c13
MR
8616static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
8617{
2790cd4d
KB
8618 pqi_ctrl_block_scan(ctrl_info);
8619 pqi_scsi_block_requests(ctrl_info);
8620 pqi_ctrl_block_device_reset(ctrl_info);
4fd22c13
MR
8621 pqi_ctrl_block_requests(ctrl_info);
8622 pqi_ctrl_wait_until_quiesced(ctrl_info);
4fd22c13 8623 pqi_stop_heartbeat_timer(ctrl_info);
4fd22c13
MR
8624}
8625
8626static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
8627{
4fd22c13 8628 pqi_start_heartbeat_timer(ctrl_info);
2790cd4d
KB
8629 pqi_ctrl_unblock_requests(ctrl_info);
8630 pqi_ctrl_unblock_device_reset(ctrl_info);
8631 pqi_scsi_unblock_requests(ctrl_info);
8632 pqi_ctrl_unblock_scan(ctrl_info);
4fd22c13
MR
8633}
8634
2790cd4d 8635static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
4fd22c13 8636{
4fd22c13 8637 int i;
2790cd4d 8638 u32 sg_count;
4fd22c13
MR
8639 struct device *dev;
8640 struct pqi_ofa_memory *ofap;
2790cd4d
KB
8641 struct pqi_sg_descriptor *mem_descriptor;
8642 dma_addr_t dma_handle;
4fd22c13
MR
8643
8644 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8645
2790cd4d
KB
8646 sg_count = DIV_ROUND_UP(total_size, chunk_size);
8647 if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
4fd22c13
MR
8648 goto out;
8649
2790cd4d 8650 ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
4fd22c13
MR
8651 if (!ctrl_info->pqi_ofa_chunk_virt_addr)
8652 goto out;
8653
2790cd4d 8654 dev = &ctrl_info->pci_dev->dev;
4fd22c13 8655
2790cd4d 8656 for (i = 0; i < sg_count; i++) {
4fd22c13 8657 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
2790cd4d 8658 dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
4fd22c13 8659 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
2790cd4d 8660 goto out_free_chunks;
4fd22c13 8661 mem_descriptor = &ofap->sg_descriptor[i];
583891c9
KB
8662 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
8663 put_unaligned_le32(chunk_size, &mem_descriptor->length);
4fd22c13
MR
8664 }
8665
4fd22c13
MR
8666 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
8667 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
2790cd4d 8668 put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
4fd22c13
MR
8669
8670 return 0;
8671
8672out_free_chunks:
8673 while (--i >= 0) {
8674 mem_descriptor = &ofap->sg_descriptor[i];
8675 dma_free_coherent(dev, chunk_size,
2790cd4d
KB
8676 ctrl_info->pqi_ofa_chunk_virt_addr[i],
8677 get_unaligned_le64(&mem_descriptor->address));
4fd22c13
MR
8678 }
8679 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8680
8681out:
4fd22c13
MR
8682 return -ENOMEM;
8683}
8684
8685static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
8686{
8687 u32 total_size;
2790cd4d 8688 u32 chunk_size;
4fd22c13 8689 u32 min_chunk_size;
4fd22c13 8690
2790cd4d
KB
8691 if (ctrl_info->ofa_bytes_requested == 0)
8692 return 0;
4fd22c13 8693
2790cd4d
KB
8694 total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
8695 min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
8696 min_chunk_size = PAGE_ALIGN(min_chunk_size);
4fd22c13 8697
2790cd4d
KB
8698 for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
8699 if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
4fd22c13 8700 return 0;
2790cd4d
KB
8701 chunk_size /= 2;
8702 chunk_size = PAGE_ALIGN(chunk_size);
8703 }
4fd22c13
MR
8704
8705 return -ENOMEM;
8706}
8707
2790cd4d 8708static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
4fd22c13 8709{
4fd22c13 8710 struct device *dev;
2790cd4d 8711 struct pqi_ofa_memory *ofap;
4fd22c13
MR
8712
8713 dev = &ctrl_info->pci_dev->dev;
4fd22c13 8714
2790cd4d
KB
8715 ofap = dma_alloc_coherent(dev, sizeof(*ofap),
8716 &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
8717 if (!ofap)
4fd22c13
MR
8718 return;
8719
2790cd4d 8720 ctrl_info->pqi_ofa_mem_virt_addr = ofap;
4fd22c13
MR
8721
8722 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
2790cd4d
KB
8723 dev_err(dev,
8724 "failed to allocate host buffer for Online Firmware Activation\n");
8725 dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
8726 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8727 return;
4fd22c13 8728 }
694c5d5b 8729
2790cd4d
KB
8730 put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
8731 memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
4fd22c13
MR
8732}
8733
8734static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
8735{
2790cd4d
KB
8736 unsigned int i;
8737 struct device *dev;
4fd22c13 8738 struct pqi_ofa_memory *ofap;
2790cd4d
KB
8739 struct pqi_sg_descriptor *mem_descriptor;
8740 unsigned int num_memory_descriptors;
4fd22c13
MR
8741
8742 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
4fd22c13
MR
8743 if (!ofap)
8744 return;
8745
2790cd4d
KB
8746 dev = &ctrl_info->pci_dev->dev;
8747
8748 if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
4fd22c13
MR
8749 goto out;
8750
8751 mem_descriptor = ofap->sg_descriptor;
2790cd4d
KB
8752 num_memory_descriptors =
8753 get_unaligned_le16(&ofap->num_memory_descriptors);
4fd22c13 8754
2790cd4d
KB
8755 for (i = 0; i < num_memory_descriptors; i++) {
8756 dma_free_coherent(dev,
4fd22c13
MR
8757 get_unaligned_le32(&mem_descriptor[i].length),
8758 ctrl_info->pqi_ofa_chunk_virt_addr[i],
8759 get_unaligned_le64(&mem_descriptor[i].address));
8760 }
8761 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8762
8763out:
2790cd4d
KB
8764 dma_free_coherent(dev, sizeof(*ofap), ofap,
8765 ctrl_info->pqi_ofa_mem_dma_handle);
4fd22c13
MR
8766 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8767}
8768
8769static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
8770{
2790cd4d 8771 u32 buffer_length;
4fd22c13 8772 struct pqi_vendor_general_request request;
4fd22c13
MR
8773 struct pqi_ofa_memory *ofap;
8774
8775 memset(&request, 0, sizeof(request));
8776
4fd22c13
MR
8777 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
8778 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
8779 &request.header.iu_length);
8780 put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
8781 &request.function_code);
8782
2790cd4d
KB
8783 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8784
4fd22c13 8785 if (ofap) {
2790cd4d 8786 buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
4fd22c13
MR
8787 get_unaligned_le16(&ofap->num_memory_descriptors) *
8788 sizeof(struct pqi_sg_descriptor);
8789
8790 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
8791 &request.data.ofa_memory_allocation.buffer_address);
2790cd4d 8792 put_unaligned_le32(buffer_length,
4fd22c13 8793 &request.data.ofa_memory_allocation.buffer_length);
4fd22c13
MR
8794 }
8795
ae0c189d 8796 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
4fd22c13
MR
8797}
8798
2790cd4d 8799static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
4fd22c13 8800{
2790cd4d
KB
8801 ssleep(delay_secs);
8802
4fd22c13
MR
8803 return pqi_ctrl_init_resume(ctrl_info);
8804}
8805
5f310425
KB
8806static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
8807 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
8808 .status = SAM_STAT_CHECK_CONDITION,
8809};
8810
8811static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
376fb880
KB
8812{
8813 unsigned int i;
376fb880 8814 struct pqi_io_request *io_request;
376fb880 8815 struct scsi_cmnd *scmd;
4f3cefc3 8816 struct scsi_device *sdev;
376fb880 8817
5f310425
KB
8818 for (i = 0; i < ctrl_info->max_io_slots; i++) {
8819 io_request = &ctrl_info->io_request_pool[i];
8820 if (atomic_read(&io_request->refcount) == 0)
8821 continue;
376fb880 8822
5f310425
KB
8823 scmd = io_request->scmd;
8824 if (scmd) {
4f3cefc3
MR
8825 sdev = scmd->device;
8826 if (!sdev || !scsi_device_online(sdev)) {
8827 pqi_free_io_request(io_request);
8828 continue;
8829 } else {
8830 set_host_byte(scmd, DID_NO_CONNECT);
8831 }
5f310425
KB
8832 } else {
8833 io_request->status = -ENXIO;
8834 io_request->error_info =
8835 &pqi_ctrl_offline_raid_error_info;
376fb880 8836 }
5f310425
KB
8837
8838 io_request->io_complete_callback(io_request,
8839 io_request->context);
376fb880
KB
8840 }
8841}
8842
5f310425 8843static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
376fb880 8844{
5f310425
KB
8845 pqi_perform_lockup_action();
8846 pqi_stop_heartbeat_timer(ctrl_info);
8847 pqi_free_interrupts(ctrl_info);
8848 pqi_cancel_rescan_worker(ctrl_info);
8849 pqi_cancel_update_time_worker(ctrl_info);
8850 pqi_ctrl_wait_until_quiesced(ctrl_info);
8851 pqi_fail_all_outstanding_requests(ctrl_info);
5f310425
KB
8852 pqi_ctrl_unblock_requests(ctrl_info);
8853}
8854
8855static void pqi_ctrl_offline_worker(struct work_struct *work)
8856{
8857 struct pqi_ctrl_info *ctrl_info;
8858
8859 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
8860 pqi_take_ctrl_offline_deferred(ctrl_info);
376fb880
KB
8861}
8862
5d1f03e6
MB
8863static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
8864 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
376fb880 8865{
5f310425
KB
8866 if (!ctrl_info->controller_online)
8867 return;
8868
376fb880 8869 ctrl_info->controller_online = false;
5f310425
KB
8870 ctrl_info->pqi_mode_enabled = false;
8871 pqi_ctrl_block_requests(ctrl_info);
5a259e32 8872 if (!pqi_disable_ctrl_shutdown)
5d1f03e6 8873 sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
376fb880
KB
8874 pci_disable_device(ctrl_info->pci_dev);
8875 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
5f310425 8876 schedule_work(&ctrl_info->ctrl_offline_work);
376fb880
KB
8877}
8878
d91d7820 8879static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
6c223761
KB
8880 const struct pci_device_id *id)
8881{
8882 char *ctrl_description;
8883
37b36847 8884 if (id->driver_data)
6c223761 8885 ctrl_description = (char *)id->driver_data;
37b36847 8886 else
6aa26b5a 8887 ctrl_description = "Microchip Smart Family Controller";
6c223761 8888
d91d7820 8889 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
6c223761
KB
8890}
8891
d91d7820
KB
8892static int pqi_pci_probe(struct pci_dev *pci_dev,
8893 const struct pci_device_id *id)
6c223761
KB
8894{
8895 int rc;
c52efc92 8896 int node;
6c223761
KB
8897 struct pqi_ctrl_info *ctrl_info;
8898
d91d7820 8899 pqi_print_ctrl_info(pci_dev, id);
6c223761
KB
8900
8901 if (pqi_disable_device_id_wildcards &&
8902 id->subvendor == PCI_ANY_ID &&
8903 id->subdevice == PCI_ANY_ID) {
d91d7820 8904 dev_warn(&pci_dev->dev,
6c223761
KB
8905 "controller not probed because device ID wildcards are disabled\n");
8906 return -ENODEV;
8907 }
8908
8909 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
d91d7820 8910 dev_warn(&pci_dev->dev,
6c223761
KB
8911 "controller device ID matched using wildcards\n");
8912
d91d7820 8913 node = dev_to_node(&pci_dev->dev);
62dc51fb 8914 if (node == NUMA_NO_NODE) {
c52efc92
MM
8915 node = cpu_to_node(0);
8916 if (node == NUMA_NO_NODE)
8917 node = 0;
8918 set_dev_node(&pci_dev->dev, node);
62dc51fb 8919 }
6c223761
KB
8920
8921 ctrl_info = pqi_alloc_ctrl_info(node);
8922 if (!ctrl_info) {
d91d7820 8923 dev_err(&pci_dev->dev,
6c223761
KB
8924 "failed to allocate controller info block\n");
8925 return -ENOMEM;
8926 }
8927
d91d7820 8928 ctrl_info->pci_dev = pci_dev;
6c223761
KB
8929
8930 rc = pqi_pci_init(ctrl_info);
8931 if (rc)
8932 goto error;
8933
8934 rc = pqi_ctrl_init(ctrl_info);
8935 if (rc)
8936 goto error;
8937
8938 return 0;
8939
8940error:
8941 pqi_remove_ctrl(ctrl_info);
8942
8943 return rc;
8944}
8945
d91d7820 8946static void pqi_pci_remove(struct pci_dev *pci_dev)
6c223761
KB
8947{
8948 struct pqi_ctrl_info *ctrl_info;
331f7e99 8949 u16 vendor_id;
6c223761 8950
d91d7820 8951 ctrl_info = pci_get_drvdata(pci_dev);
6c223761
KB
8952 if (!ctrl_info)
8953 return;
8954
331f7e99
SB
8955 pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
8956 if (vendor_id == 0xffff)
8957 ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
8958 else
8959 ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
8960
6c223761
KB
8961 pqi_remove_ctrl(ctrl_info);
8962}
8963
0530736e
KB
8964static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
8965{
8966 unsigned int i;
8967 struct pqi_io_request *io_request;
8968 struct scsi_cmnd *scmd;
8969
8970 for (i = 0; i < ctrl_info->max_io_slots; i++) {
8971 io_request = &ctrl_info->io_request_pool[i];
8972 if (atomic_read(&io_request->refcount) == 0)
8973 continue;
8974 scmd = io_request->scmd;
8975 WARN_ON(scmd != NULL); /* IO command from SML */
8976 WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated*/
8977 }
8978}
8979
d91d7820 8980static void pqi_shutdown(struct pci_dev *pci_dev)
6c223761
KB
8981{
8982 int rc;
8983 struct pqi_ctrl_info *ctrl_info;
70ba20be 8984 enum bmic_flush_cache_shutdown_event shutdown_event;
6c223761 8985
d91d7820 8986 ctrl_info = pci_get_drvdata(pci_dev);
0530736e
KB
8987 if (!ctrl_info) {
8988 dev_err(&pci_dev->dev,
8989 "cache could not be flushed\n");
8990 return;
8991 }
8992
0530736e 8993 pqi_wait_until_ofa_finished(ctrl_info);
0530736e 8994
9fa82023 8995 pqi_scsi_block_requests(ctrl_info);
0530736e 8996 pqi_ctrl_block_device_reset(ctrl_info);
9fa82023
KB
8997 pqi_ctrl_block_requests(ctrl_info);
8998 pqi_ctrl_wait_until_quiesced(ctrl_info);
6c223761 8999
70ba20be
SB
9000 if (system_state == SYSTEM_RESTART)
9001 shutdown_event = RESTART;
9002 else
9003 shutdown_event = SHUTDOWN;
9004
6c223761
KB
9005 /*
9006 * Write all data in the controller's battery-backed cache to
9007 * storage.
9008 */
70ba20be 9009 rc = pqi_flush_cache(ctrl_info, shutdown_event);
0530736e
KB
9010 if (rc)
9011 dev_err(&pci_dev->dev,
9012 "unable to flush controller cache\n");
9013
0530736e
KB
9014 pqi_crash_if_pending_command(ctrl_info);
9015 pqi_reset(ctrl_info);
6c223761
KB
9016}
9017
3c50976f
KB
9018static void pqi_process_lockup_action_param(void)
9019{
9020 unsigned int i;
9021
9022 if (!pqi_lockup_action_param)
9023 return;
9024
9025 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
9026 if (strcmp(pqi_lockup_action_param,
9027 pqi_lockup_actions[i].name) == 0) {
9028 pqi_lockup_action = pqi_lockup_actions[i].action;
9029 return;
9030 }
9031 }
9032
9033 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
9034 DRIVER_NAME_SHORT, pqi_lockup_action_param);
9035}
9036
9037static void pqi_process_module_params(void)
9038{
9039 pqi_process_lockup_action_param();
9040}
9041
31b17c3a
DB
9042#if defined(CONFIG_PM)
9043
b73357a1
SB
9044static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
9045{
9046 if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
9047 return RESTART;
c66e078a 9048
b73357a1
SB
9049 return SUSPEND;
9050}
9051
c66e078a 9052static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
061ef06a 9053{
c66e078a 9054 struct pci_dev *pci_dev;
061ef06a
KB
9055 struct pqi_ctrl_info *ctrl_info;
9056
c66e078a 9057 pci_dev = to_pci_dev(dev);
061ef06a
KB
9058 ctrl_info = pci_get_drvdata(pci_dev);
9059
4fd22c13 9060 pqi_wait_until_ofa_finished(ctrl_info);
9fa82023
KB
9061
9062 pqi_ctrl_block_scan(ctrl_info);
9063 pqi_scsi_block_requests(ctrl_info);
9064 pqi_ctrl_block_device_reset(ctrl_info);
061ef06a
KB
9065 pqi_ctrl_block_requests(ctrl_info);
9066 pqi_ctrl_wait_until_quiesced(ctrl_info);
061ef06a 9067
c66e078a
KB
9068 if (suspend) {
9069 enum bmic_flush_cache_shutdown_event shutdown_event;
9fa82023 9070
c66e078a
KB
9071 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9072 pqi_flush_cache(ctrl_info, shutdown_event);
9073 }
061ef06a 9074
c66e078a
KB
9075 pqi_stop_heartbeat_timer(ctrl_info);
9076 pqi_crash_if_pending_command(ctrl_info);
9077 pqi_free_irqs(ctrl_info);
061ef06a
KB
9078
9079 ctrl_info->controller_online = false;
9080 ctrl_info->pqi_mode_enabled = false;
9081
9082 return 0;
9083}
9084
c66e078a
KB
9085static __maybe_unused int pqi_suspend(struct device *dev)
9086{
9087 return pqi_suspend_or_freeze(dev, true);
9088}
9089
9090static int pqi_resume_or_restore(struct device *dev)
061ef06a
KB
9091{
9092 int rc;
c66e078a 9093 struct pci_dev *pci_dev;
061ef06a
KB
9094 struct pqi_ctrl_info *ctrl_info;
9095
c66e078a 9096 pci_dev = to_pci_dev(dev);
061ef06a
KB
9097 ctrl_info = pci_get_drvdata(pci_dev);
9098
c66e078a
KB
9099 rc = pqi_request_irqs(ctrl_info);
9100 if (rc)
9101 return rc;
061ef06a 9102
43e97ef4
KB
9103 pqi_ctrl_unblock_device_reset(ctrl_info);
9104 pqi_ctrl_unblock_requests(ctrl_info);
9105 pqi_scsi_unblock_requests(ctrl_info);
9106 pqi_ctrl_unblock_scan(ctrl_info);
9107
c66e078a
KB
9108 ssleep(PQI_POST_RESET_DELAY_SECS);
9109
061ef06a
KB
9110 return pqi_ctrl_init_resume(ctrl_info);
9111}
9112
c66e078a
KB
9113static int pqi_freeze(struct device *dev)
9114{
9115 return pqi_suspend_or_freeze(dev, false);
9116}
9117
9118static int pqi_thaw(struct device *dev)
9119{
9120 int rc;
9121 struct pci_dev *pci_dev;
9122 struct pqi_ctrl_info *ctrl_info;
9123
9124 pci_dev = to_pci_dev(dev);
9125 ctrl_info = pci_get_drvdata(pci_dev);
9126
9127 rc = pqi_request_irqs(ctrl_info);
9128 if (rc)
9129 return rc;
9130
9131 ctrl_info->controller_online = true;
9132 ctrl_info->pqi_mode_enabled = true;
9133
9134 pqi_ctrl_unblock_device_reset(ctrl_info);
9135 pqi_ctrl_unblock_requests(ctrl_info);
9136 pqi_scsi_unblock_requests(ctrl_info);
9137 pqi_ctrl_unblock_scan(ctrl_info);
9138
9139 return 0;
9140}
9141
9142static int pqi_poweroff(struct device *dev)
9143{
9144 struct pci_dev *pci_dev;
9145 struct pqi_ctrl_info *ctrl_info;
9146 enum bmic_flush_cache_shutdown_event shutdown_event;
9147
9148 pci_dev = to_pci_dev(dev);
9149 ctrl_info = pci_get_drvdata(pci_dev);
9150
9151 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9152 pqi_flush_cache(ctrl_info, shutdown_event);
9153
9154 return 0;
9155}
9156
9157static const struct dev_pm_ops pqi_pm_ops = {
9158 .suspend = pqi_suspend,
9159 .resume = pqi_resume_or_restore,
9160 .freeze = pqi_freeze,
9161 .thaw = pqi_thaw,
9162 .poweroff = pqi_poweroff,
9163 .restore = pqi_resume_or_restore,
9164};
9165
31b17c3a
DB
9166#endif /* CONFIG_PM */
9167
6c223761
KB
9168/* Define the PCI IDs for the controllers that we support. */
9169static const struct pci_device_id pqi_pci_id_table[] = {
b0f9408b
KB
9170 {
9171 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9172 0x105b, 0x1211)
9173 },
9174 {
9175 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9176 0x105b, 0x1321)
9177 },
7eddabff
KB
9178 {
9179 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9180 0x152d, 0x8a22)
9181 },
9182 {
9183 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9184 0x152d, 0x8a23)
9185 },
9186 {
9187 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9188 0x152d, 0x8a24)
9189 },
9190 {
9191 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9192 0x152d, 0x8a36)
9193 },
9194 {
9195 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9196 0x152d, 0x8a37)
9197 },
0595a0b4
AK
9198 {
9199 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9200 0x193d, 0x1104)
9201 },
9202 {
9203 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9204 0x193d, 0x1105)
9205 },
9206 {
9207 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9208 0x193d, 0x1106)
9209 },
9210 {
9211 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9212 0x193d, 0x1107)
9213 },
d3af3f64
MR
9214 {
9215 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9216 0x193d, 0x1108)
9217 },
9218 {
9219 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9220 0x193d, 0x1109)
9221 },
b0f9408b
KB
9222 {
9223 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9224 0x193d, 0x8460)
9225 },
9226 {
9227 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9228 0x193d, 0x8461)
9229 },
84a77fef
MB
9230 {
9231 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9232 0x193d, 0xc460)
9233 },
9234 {
9235 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9236 0x193d, 0xc461)
9237 },
b0f9408b
KB
9238 {
9239 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9240 0x193d, 0xf460)
9241 },
9242 {
9243 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9244 0x193d, 0xf461)
9245 },
9246 {
9247 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9248 0x1bd4, 0x0045)
9249 },
9250 {
9251 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9252 0x1bd4, 0x0046)
9253 },
9254 {
9255 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9256 0x1bd4, 0x0047)
9257 },
9258 {
9259 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9260 0x1bd4, 0x0048)
9261 },
9f8d05fa
KB
9262 {
9263 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9264 0x1bd4, 0x004a)
9265 },
9266 {
9267 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9268 0x1bd4, 0x004b)
9269 },
9270 {
9271 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9272 0x1bd4, 0x004c)
9273 },
63a7956a
GW
9274 {
9275 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9276 0x1bd4, 0x004f)
9277 },
75fbeacc
KB
9278 {
9279 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9280 0x1bd4, 0x0051)
9281 },
9282 {
9283 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9284 0x1bd4, 0x0052)
9285 },
9286 {
9287 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9288 0x1bd4, 0x0053)
9289 },
9290 {
9291 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9292 0x1bd4, 0x0054)
9293 },
c57ee4cc
DB
9294 {
9295 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9296 0x1bd4, 0x006b)
9297 },
9298 {
9299 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9300 0x1bd4, 0x006c)
9301 },
9302 {
9303 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9304 0x1bd4, 0x006d)
9305 },
9306 {
9307 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9308 0x1bd4, 0x006f)
9309 },
9310 {
9311 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9312 0x1bd4, 0x0070)
9313 },
9314 {
9315 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9316 0x1bd4, 0x0071)
9317 },
9318 {
9319 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9320 0x1bd4, 0x0072)
9321 },
c1b10475
AK
9322 {
9323 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9324 0x19e5, 0xd227)
9325 },
9326 {
9327 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9328 0x19e5, 0xd228)
9329 },
9330 {
9331 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9332 0x19e5, 0xd229)
9333 },
9334 {
9335 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9336 0x19e5, 0xd22a)
9337 },
9338 {
9339 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9340 0x19e5, 0xd22b)
9341 },
9342 {
9343 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9344 0x19e5, 0xd22c)
9345 },
6c223761
KB
9346 {
9347 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9348 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
9349 },
9350 {
9351 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
55790064 9352 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
6c223761 9353 },
44e68c4a
MM
9354 {
9355 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9356 PCI_VENDOR_ID_ADAPTEC2, 0x0659)
9357 },
6c223761
KB
9358 {
9359 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9360 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
6c223761
KB
9361 },
9362 {
9363 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9364 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
6c223761
KB
9365 },
9366 {
9367 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9368 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
6c223761
KB
9369 },
9370 {
9371 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9372 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
6c223761
KB
9373 },
9374 {
9375 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9376 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
6c223761
KB
9377 },
9378 {
9379 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9380 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
6c223761
KB
9381 },
9382 {
9383 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9384 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
6c223761 9385 },
55790064
KB
9386 {
9387 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9388 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
9389 },
63a7956a
GW
9390 {
9391 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9392 PCI_VENDOR_ID_ADAPTEC2, 0x0808)
9393 },
9394 {
9395 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9396 PCI_VENDOR_ID_ADAPTEC2, 0x0809)
9397 },
3af06083
MR
9398 {
9399 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9400 PCI_VENDOR_ID_ADAPTEC2, 0x080a)
9401 },
6c223761
KB
9402 {
9403 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9404 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
6c223761
KB
9405 },
9406 {
9407 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9408 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
6c223761
KB
9409 },
9410 {
9411 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9412 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
6c223761
KB
9413 },
9414 {
9415 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9416 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
6c223761
KB
9417 },
9418 {
9419 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9420 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
6c223761
KB
9421 },
9422 {
9423 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9424 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
6c223761
KB
9425 },
9426 {
9427 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9428 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
6c223761
KB
9429 },
9430 {
9431 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9432 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
6c223761
KB
9433 },
9434 {
9435 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9436 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
6c223761 9437 },
55790064
KB
9438 {
9439 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9440 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
9441 },
6c223761
KB
9442 {
9443 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9444 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
6c223761
KB
9445 },
9446 {
9447 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9448 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
6c223761
KB
9449 },
9450 {
9451 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9452 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
6c223761
KB
9453 },
9454 {
9455 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9456 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
6c223761
KB
9457 },
9458 {
9459 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9460 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
6c223761 9461 },
b0f9408b
KB
9462 {
9463 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9464 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
9465 },
6c223761
KB
9466 {
9467 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9468 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
6c223761
KB
9469 },
9470 {
9471 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9472 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
6c223761 9473 },
bd809e8d
KB
9474 {
9475 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9476 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
9477 },
9478 {
9479 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9480 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
9481 },
c57ee4cc
DB
9482 {
9483 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9484 PCI_VENDOR_ID_ADAPTEC2, 0x1304)
9485 },
6c223761
KB
9486 {
9487 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff
KB
9488 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
9489 },
75fbeacc
KB
9490 {
9491 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9492 PCI_VENDOR_ID_ADAPTEC2, 0x1400)
9493 },
9494 {
9495 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9496 PCI_VENDOR_ID_ADAPTEC2, 0x1402)
9497 },
9498 {
9499 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9500 PCI_VENDOR_ID_ADAPTEC2, 0x1410)
9501 },
9502 {
9503 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9504 PCI_VENDOR_ID_ADAPTEC2, 0x1411)
9505 },
9506 {
9507 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9508 PCI_VENDOR_ID_ADAPTEC2, 0x1412)
9509 },
9510 {
9511 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9512 PCI_VENDOR_ID_ADAPTEC2, 0x1420)
9513 },
9514 {
9515 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9516 PCI_VENDOR_ID_ADAPTEC2, 0x1430)
9517 },
9518 {
9519 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9520 PCI_VENDOR_ID_ADAPTEC2, 0x1440)
9521 },
9522 {
9523 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9524 PCI_VENDOR_ID_ADAPTEC2, 0x1441)
9525 },
9526 {
9527 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9528 PCI_VENDOR_ID_ADAPTEC2, 0x1450)
9529 },
9530 {
9531 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9532 PCI_VENDOR_ID_ADAPTEC2, 0x1452)
9533 },
9534 {
9535 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9536 PCI_VENDOR_ID_ADAPTEC2, 0x1460)
9537 },
9538 {
9539 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9540 PCI_VENDOR_ID_ADAPTEC2, 0x1461)
9541 },
9542 {
9543 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9544 PCI_VENDOR_ID_ADAPTEC2, 0x1462)
9545 },
c57ee4cc
DB
9546 {
9547 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9548 PCI_VENDOR_ID_ADAPTEC2, 0x1463)
9549 },
75fbeacc
KB
9550 {
9551 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9552 PCI_VENDOR_ID_ADAPTEC2, 0x1470)
9553 },
9554 {
9555 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9556 PCI_VENDOR_ID_ADAPTEC2, 0x1471)
9557 },
9558 {
9559 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9560 PCI_VENDOR_ID_ADAPTEC2, 0x1472)
9561 },
c57ee4cc
DB
9562 {
9563 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9564 PCI_VENDOR_ID_ADAPTEC2, 0x1473)
9565 },
9566 {
9567 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9568 PCI_VENDOR_ID_ADAPTEC2, 0x1474)
9569 },
75fbeacc
KB
9570 {
9571 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9572 PCI_VENDOR_ID_ADAPTEC2, 0x1480)
9573 },
9574 {
9575 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9576 PCI_VENDOR_ID_ADAPTEC2, 0x1490)
9577 },
9578 {
9579 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9580 PCI_VENDOR_ID_ADAPTEC2, 0x1491)
9581 },
9582 {
9583 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9584 PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
9585 },
9586 {
9587 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9588 PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
9589 },
80982656
MM
9590 {
9591 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9592 PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
9593 },
c57ee4cc
DB
9594 {
9595 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9596 PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
9597 },
9598 {
9599 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9600 PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
9601 },
9602 {
9603 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9604 PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
9605 },
75fbeacc
KB
9606 {
9607 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9608 PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
9609 },
9610 {
9611 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9612 PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
9613 },
9614 {
9615 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9616 PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
9617 },
9618 {
9619 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9620 PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
9621 },
c57ee4cc
DB
9622 {
9623 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9624 PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
9625 },
75fbeacc
KB
9626 {
9627 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9628 PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
9629 },
9630 {
9631 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9632 PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
9633 },
9634 {
9635 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9636 PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
9637 },
9f8d05fa
KB
9638 {
9639 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9640 PCI_VENDOR_ID_ADVANTECH, 0x8312)
9641 },
55790064
KB
9642 {
9643 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9644 PCI_VENDOR_ID_DELL, 0x1fe0)
9645 },
7eddabff
KB
9646 {
9647 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9648 PCI_VENDOR_ID_HP, 0x0600)
9649 },
9650 {
9651 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9652 PCI_VENDOR_ID_HP, 0x0601)
9653 },
9654 {
9655 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9656 PCI_VENDOR_ID_HP, 0x0602)
9657 },
9658 {
9659 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9660 PCI_VENDOR_ID_HP, 0x0603)
9661 },
9662 {
9663 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
55790064 9664 PCI_VENDOR_ID_HP, 0x0609)
7eddabff
KB
9665 },
9666 {
9667 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9668 PCI_VENDOR_ID_HP, 0x0650)
9669 },
9670 {
9671 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9672 PCI_VENDOR_ID_HP, 0x0651)
9673 },
9674 {
9675 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9676 PCI_VENDOR_ID_HP, 0x0652)
9677 },
9678 {
9679 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9680 PCI_VENDOR_ID_HP, 0x0653)
9681 },
9682 {
9683 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9684 PCI_VENDOR_ID_HP, 0x0654)
9685 },
9686 {
9687 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9688 PCI_VENDOR_ID_HP, 0x0655)
9689 },
7eddabff
KB
9690 {
9691 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9692 PCI_VENDOR_ID_HP, 0x0700)
9693 },
9694 {
9695 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9696 PCI_VENDOR_ID_HP, 0x0701)
6c223761
KB
9697 },
9698 {
9699 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9700 PCI_VENDOR_ID_HP, 0x1001)
9701 },
75fbeacc
KB
9702 {
9703 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9704 PCI_VENDOR_ID_HP, 0x1002)
9705 },
6c223761
KB
9706 {
9707 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9708 PCI_VENDOR_ID_HP, 0x1100)
9709 },
9710 {
9711 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9712 PCI_VENDOR_ID_HP, 0x1101)
9713 },
75fbeacc
KB
9714 {
9715 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9716 0x1590, 0x0294)
9717 },
9718 {
9719 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9720 0x1590, 0x02db)
9721 },
9722 {
9723 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9724 0x1590, 0x02dc)
9725 },
9726 {
9727 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9728 0x1590, 0x032e)
9729 },
c57ee4cc
DB
9730 {
9731 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9732 0x1590, 0x036f)
9733 },
9734 {
9735 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9736 0x1590, 0x0381)
9737 },
9738 {
9739 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9740 0x1590, 0x0382)
9741 },
9742 {
9743 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9744 0x1590, 0x0383)
9745 },
8bdb3b9c
GW
9746 {
9747 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9748 0x1d8d, 0x0800)
9749 },
9750 {
9751 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9752 0x1d8d, 0x0908)
9753 },
9754 {
9755 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9756 0x1d8d, 0x0806)
9757 },
9758 {
9759 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9760 0x1d8d, 0x0916)
9761 },
71ecc60d
GW
9762 {
9763 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9764 PCI_VENDOR_ID_GIGABYTE, 0x1000)
9765 },
e326b97c
MM
9766 {
9767 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9768 0x1dfc, 0x3161)
9769 },
c57ee4cc
DB
9770 {
9771 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9772 0x1f0c, 0x3161)
9773 },
09d9968a
B
9774 {
9775 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9776 0x1cf2, 0x5445)
9777 },
9778 {
9779 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9780 0x1cf2, 0x5446)
9781 },
9782 {
9783 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9784 0x1cf2, 0x5447)
9785 },
c57ee4cc
DB
9786 {
9787 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9788 0x1cf2, 0x5449)
9789 },
9790 {
9791 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9792 0x1cf2, 0x544a)
9793 },
9794 {
9795 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9796 0x1cf2, 0x544b)
9797 },
9798 {
9799 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9800 0x1cf2, 0x544d)
9801 },
9802 {
9803 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9804 0x1cf2, 0x544e)
9805 },
9806 {
9807 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9808 0x1cf2, 0x544f)
9809 },
09d9968a
B
9810 {
9811 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9812 0x1cf2, 0x0b27)
9813 },
9814 {
9815 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9816 0x1cf2, 0x0b29)
9817 },
9818 {
9819 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9820 0x1cf2, 0x0b45)
9821 },
dab53784
MB
9822 {
9823 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9824 0x1cc4, 0x0101)
9825 },
9826 {
9827 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9828 0x1cc4, 0x0201)
9829 },
2a9c2ba2
MM
9830 {
9831 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9832 PCI_VENDOR_ID_LENOVO, 0x0220)
9833 },
9834 {
9835 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9836 PCI_VENDOR_ID_LENOVO, 0x0221)
9837 },
9838 {
9839 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9840 PCI_VENDOR_ID_LENOVO, 0x0520)
9841 },
9842 {
9843 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9844 PCI_VENDOR_ID_LENOVO, 0x0522)
9845 },
9846 {
9847 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9848 PCI_VENDOR_ID_LENOVO, 0x0620)
9849 },
9850 {
9851 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9852 PCI_VENDOR_ID_LENOVO, 0x0621)
9853 },
9854 {
9855 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9856 PCI_VENDOR_ID_LENOVO, 0x0622)
9857 },
9858 {
9859 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9860 PCI_VENDOR_ID_LENOVO, 0x0623)
9861 },
6c223761
KB
9862 {
9863 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9864 PCI_ANY_ID, PCI_ANY_ID)
9865 },
9866 { 0 }
9867};
9868
9869MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
9870
9871static struct pci_driver pqi_pci_driver = {
9872 .name = DRIVER_NAME_SHORT,
9873 .id_table = pqi_pci_id_table,
9874 .probe = pqi_pci_probe,
9875 .remove = pqi_pci_remove,
9876 .shutdown = pqi_shutdown,
061ef06a 9877#if defined(CONFIG_PM)
c66e078a
KB
9878 .driver = {
9879 .pm = &pqi_pm_ops
9880 },
061ef06a 9881#endif
6c223761
KB
9882};
9883
9884static int __init pqi_init(void)
9885{
9886 int rc;
9887
9888 pr_info(DRIVER_NAME "\n");
5e693586
MM
9889 pqi_verify_structures();
9890 sis_verify_structures();
6c223761 9891
8b664fef 9892 pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
6c223761
KB
9893 if (!pqi_sas_transport_template)
9894 return -ENODEV;
9895
3c50976f
KB
9896 pqi_process_module_params();
9897
6c223761
KB
9898 rc = pci_register_driver(&pqi_pci_driver);
9899 if (rc)
9900 sas_release_transport(pqi_sas_transport_template);
9901
9902 return rc;
9903}
9904
9905static void __exit pqi_cleanup(void)
9906{
9907 pci_unregister_driver(&pqi_pci_driver);
9908 sas_release_transport(pqi_sas_transport_template);
9909}
9910
9911module_init(pqi_init);
9912module_exit(pqi_cleanup);
9913
5e693586 9914static void pqi_verify_structures(void)
6c223761
KB
9915{
9916 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9917 sis_host_to_ctrl_doorbell) != 0x20);
9918 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9919 sis_interrupt_mask) != 0x34);
9920 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9921 sis_ctrl_to_host_doorbell) != 0x9c);
9922 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9923 sis_ctrl_to_host_doorbell_clear) != 0xa0);
ff6abb73
KB
9924 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9925 sis_driver_scratch) != 0xb0);
2708a256
KB
9926 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9927 sis_product_identifier) != 0xb4);
6c223761
KB
9928 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9929 sis_firmware_status) != 0xbc);
5d1f03e6
MB
9930 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9931 sis_ctrl_shutdown_reason_code) != 0xcc);
6c223761
KB
9932 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9933 sis_mailbox) != 0x1000);
9934 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
9935 pqi_registers) != 0x4000);
9936
9937 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9938 iu_type) != 0x0);
9939 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9940 iu_length) != 0x2);
9941 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
9942 response_queue_id) != 0x4);
9943 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
ae0c189d 9944 driver_flags) != 0x6);
6c223761
KB
9945 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
9946
9947 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9948 status) != 0x0);
9949 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9950 service_response) != 0x1);
9951 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9952 data_present) != 0x2);
9953 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9954 reserved) != 0x3);
9955 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9956 residual_count) != 0x4);
9957 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9958 data_length) != 0x8);
9959 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9960 reserved1) != 0xa);
9961 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
9962 data) != 0xc);
9963 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
9964
9965 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9966 data_in_result) != 0x0);
9967 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9968 data_out_result) != 0x1);
9969 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9970 reserved) != 0x2);
9971 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9972 status) != 0x5);
9973 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9974 status_qualifier) != 0x6);
9975 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9976 sense_data_length) != 0x8);
9977 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9978 response_data_length) != 0xa);
9979 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9980 data_in_transferred) != 0xc);
9981 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9982 data_out_transferred) != 0x10);
9983 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
9984 data) != 0x14);
9985 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
9986
9987 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9988 signature) != 0x0);
9989 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9990 function_and_status_code) != 0x8);
9991 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9992 max_admin_iq_elements) != 0x10);
9993 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9994 max_admin_oq_elements) != 0x11);
9995 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9996 admin_iq_element_length) != 0x12);
9997 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
9998 admin_oq_element_length) != 0x13);
9999 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10000 max_reset_timeout) != 0x14);
10001 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10002 legacy_intx_status) != 0x18);
10003 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10004 legacy_intx_mask_set) != 0x1c);
10005 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10006 legacy_intx_mask_clear) != 0x20);
10007 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10008 device_status) != 0x40);
10009 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10010 admin_iq_pi_offset) != 0x48);
10011 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10012 admin_oq_ci_offset) != 0x50);
10013 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10014 admin_iq_element_array_addr) != 0x58);
10015 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10016 admin_oq_element_array_addr) != 0x60);
10017 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10018 admin_iq_ci_addr) != 0x68);
10019 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10020 admin_oq_pi_addr) != 0x70);
10021 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10022 admin_iq_num_elements) != 0x78);
10023 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10024 admin_oq_num_elements) != 0x79);
10025 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10026 admin_queue_int_msg_num) != 0x7a);
10027 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10028 device_error) != 0x80);
10029 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10030 error_details) != 0x88);
10031 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10032 device_reset) != 0x90);
10033 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10034 power_action) != 0x94);
10035 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
10036
10037 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10038 header.iu_type) != 0);
10039 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10040 header.iu_length) != 2);
10041 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
ae0c189d 10042 header.driver_flags) != 6);
6c223761
KB
10043 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10044 request_id) != 8);
10045 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10046 function_code) != 10);
10047 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10048 data.report_device_capability.buffer_length) != 44);
10049 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10050 data.report_device_capability.sg_descriptor) != 48);
10051 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10052 data.create_operational_iq.queue_id) != 12);
10053 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10054 data.create_operational_iq.element_array_addr) != 16);
10055 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10056 data.create_operational_iq.ci_addr) != 24);
10057 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10058 data.create_operational_iq.num_elements) != 32);
10059 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10060 data.create_operational_iq.element_length) != 34);
10061 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10062 data.create_operational_iq.queue_protocol) != 36);
10063 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10064 data.create_operational_oq.queue_id) != 12);
10065 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10066 data.create_operational_oq.element_array_addr) != 16);
10067 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10068 data.create_operational_oq.pi_addr) != 24);
10069 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10070 data.create_operational_oq.num_elements) != 32);
10071 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10072 data.create_operational_oq.element_length) != 34);
10073 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10074 data.create_operational_oq.queue_protocol) != 36);
10075 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10076 data.create_operational_oq.int_msg_num) != 40);
10077 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10078 data.create_operational_oq.coalescing_count) != 42);
10079 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10080 data.create_operational_oq.min_coalescing_time) != 44);
10081 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10082 data.create_operational_oq.max_coalescing_time) != 48);
10083 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10084 data.delete_operational_queue.queue_id) != 12);
10085 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
c593642c 10086 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
6c223761 10087 data.create_operational_iq) != 64 - 11);
c593642c 10088 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
6c223761 10089 data.create_operational_oq) != 64 - 11);
c593642c 10090 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
6c223761
KB
10091 data.delete_operational_queue) != 64 - 11);
10092
10093 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10094 header.iu_type) != 0);
10095 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10096 header.iu_length) != 2);
10097 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
ae0c189d 10098 header.driver_flags) != 6);
6c223761
KB
10099 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10100 request_id) != 8);
10101 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10102 function_code) != 10);
10103 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10104 status) != 11);
10105 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10106 data.create_operational_iq.status_descriptor) != 12);
10107 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10108 data.create_operational_iq.iq_pi_offset) != 16);
10109 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10110 data.create_operational_oq.status_descriptor) != 12);
10111 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10112 data.create_operational_oq.oq_ci_offset) != 16);
10113 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
10114
10115 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10116 header.iu_type) != 0);
10117 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10118 header.iu_length) != 2);
10119 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10120 header.response_queue_id) != 4);
10121 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
ae0c189d 10122 header.driver_flags) != 6);
6c223761
KB
10123 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10124 request_id) != 8);
10125 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10126 nexus_id) != 10);
10127 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10128 buffer_length) != 12);
10129 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10130 lun_number) != 16);
10131 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10132 protocol_specific) != 24);
10133 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10134 error_index) != 27);
10135 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10136 cdb) != 32);
21432010 10137 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10138 timeout) != 60);
6c223761
KB
10139 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10140 sg_descriptors) != 64);
10141 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
10142 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10143
10144 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10145 header.iu_type) != 0);
10146 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10147 header.iu_length) != 2);
10148 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10149 header.response_queue_id) != 4);
10150 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
ae0c189d 10151 header.driver_flags) != 6);
6c223761
KB
10152 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10153 request_id) != 8);
10154 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10155 nexus_id) != 12);
10156 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10157 buffer_length) != 16);
10158 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10159 data_encryption_key_index) != 22);
10160 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10161 encrypt_tweak_lower) != 24);
10162 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10163 encrypt_tweak_upper) != 28);
10164 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10165 cdb) != 32);
10166 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10167 error_index) != 48);
10168 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10169 num_sg_descriptors) != 50);
10170 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10171 cdb_length) != 51);
10172 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10173 lun_number) != 52);
10174 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10175 sg_descriptors) != 64);
10176 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
10177 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10178
10179 BUILD_BUG_ON(offsetof(struct pqi_io_response,
10180 header.iu_type) != 0);
10181 BUILD_BUG_ON(offsetof(struct pqi_io_response,
10182 header.iu_length) != 2);
10183 BUILD_BUG_ON(offsetof(struct pqi_io_response,
10184 request_id) != 8);
10185 BUILD_BUG_ON(offsetof(struct pqi_io_response,
10186 error_index) != 10);
10187
10188 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10189 header.iu_type) != 0);
10190 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10191 header.iu_length) != 2);
10192 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10193 header.response_queue_id) != 4);
10194 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10195 request_id) != 8);
10196 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10197 data.report_event_configuration.buffer_length) != 12);
10198 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10199 data.report_event_configuration.sg_descriptors) != 16);
10200 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10201 data.set_event_configuration.global_event_oq_id) != 10);
10202 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10203 data.set_event_configuration.buffer_length) != 12);
10204 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10205 data.set_event_configuration.sg_descriptors) != 16);
10206
10207 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10208 max_inbound_iu_length) != 6);
10209 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10210 max_outbound_iu_length) != 14);
10211 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
10212
10213 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10214 data_length) != 0);
10215 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10216 iq_arbitration_priority_support_bitmask) != 8);
10217 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10218 maximum_aw_a) != 9);
10219 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10220 maximum_aw_b) != 10);
10221 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10222 maximum_aw_c) != 11);
10223 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10224 max_inbound_queues) != 16);
10225 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10226 max_elements_per_iq) != 18);
10227 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10228 max_iq_element_length) != 24);
10229 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10230 min_iq_element_length) != 26);
10231 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10232 max_outbound_queues) != 30);
10233 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10234 max_elements_per_oq) != 32);
10235 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10236 intr_coalescing_time_granularity) != 34);
10237 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10238 max_oq_element_length) != 36);
10239 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10240 min_oq_element_length) != 38);
10241 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10242 iu_layer_descriptors) != 64);
10243 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
10244
10245 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10246 event_type) != 0);
10247 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10248 oq_id) != 2);
10249 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
10250
10251 BUILD_BUG_ON(offsetof(struct pqi_event_config,
10252 num_event_descriptors) != 2);
10253 BUILD_BUG_ON(offsetof(struct pqi_event_config,
10254 descriptors) != 4);
10255
061ef06a
KB
10256 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
10257 ARRAY_SIZE(pqi_supported_event_types));
10258
6c223761
KB
10259 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10260 header.iu_type) != 0);
10261 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10262 header.iu_length) != 2);
10263 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10264 event_type) != 8);
10265 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10266 event_id) != 10);
10267 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10268 additional_event_id) != 12);
10269 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10270 data) != 16);
10271 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
10272
10273 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10274 header.iu_type) != 0);
10275 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10276 header.iu_length) != 2);
10277 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10278 event_type) != 8);
10279 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10280 event_id) != 10);
10281 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10282 additional_event_id) != 12);
10283 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
10284
10285 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10286 header.iu_type) != 0);
10287 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10288 header.iu_length) != 2);
10289 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10290 request_id) != 8);
10291 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10292 nexus_id) != 10);
10293 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
c2922f17
MB
10294 timeout) != 14);
10295 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6c223761
KB
10296 lun_number) != 16);
10297 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10298 protocol_specific) != 24);
10299 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10300 outbound_queue_id_to_manage) != 26);
10301 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10302 request_id_to_manage) != 28);
10303 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10304 task_management_function) != 30);
10305 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
10306
10307 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10308 header.iu_type) != 0);
10309 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10310 header.iu_length) != 2);
10311 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10312 request_id) != 8);
10313 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10314 nexus_id) != 10);
10315 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10316 additional_response_info) != 12);
10317 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10318 response_code) != 15);
10319 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
10320
10321 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10322 configured_logical_drive_count) != 0);
10323 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10324 configuration_signature) != 1);
10325 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
598bef8d 10326 firmware_version_short) != 5);
6c223761
KB
10327 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10328 extended_logical_unit_count) != 154);
10329 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10330 firmware_build_number) != 190);
598bef8d
KB
10331 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10332 vendor_id) != 200);
10333 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10334 product_id) != 208);
10335 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10336 extra_controller_flags) != 286);
6c223761
KB
10337 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10338 controller_mode) != 292);
598bef8d
KB
10339 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10340 spare_part_number) != 293);
10341 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10342 firmware_version_long) != 325);
6c223761 10343
1be42f46
KB
10344 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10345 phys_bay_in_box) != 115);
10346 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10347 device_type) != 120);
10348 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10349 redundant_path_present_map) != 1736);
10350 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10351 active_path_number) != 1738);
10352 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10353 alternate_paths_phys_connector) != 1739);
10354 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10355 alternate_paths_phys_box_on_port) != 1755);
10356 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10357 current_queue_depth_limit) != 1796);
10358 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
10359
f6cc2a77
KB
10360 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
10361 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10362 page_code) != 0);
10363 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10364 subpage_code) != 1);
10365 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10366 buffer_length) != 2);
10367
10368 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
10369 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10370 page_code) != 0);
10371 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10372 subpage_code) != 1);
10373 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10374 page_length) != 2);
10375
10376 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
10377 != 18);
10378 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10379 header) != 0);
10380 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10381 firmware_read_support) != 4);
10382 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10383 driver_read_support) != 5);
10384 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10385 firmware_write_support) != 6);
10386 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10387 driver_write_support) != 7);
10388 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10389 max_transfer_encrypted_sas_sata) != 8);
10390 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10391 max_transfer_encrypted_nvme) != 10);
10392 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10393 max_write_raid_5_6) != 12);
10394 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10395 max_write_raid_1_10_2drive) != 14);
10396 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10397 max_write_raid_1_10_3drive) != 16);
10398
6c223761
KB
10399 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
10400 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
10401 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
10402 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10403 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
10404 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10405 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
10406 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
10407 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10408 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
10409 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
10410 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10411
10412 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
d727a776
KB
10413 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
10414 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
6c223761 10415}