scsi: smartpqi: Change sysfs raid_level attribute to N/A for controllers
[linux-2.6-block.git] / drivers / scsi / smartpqi / smartpqi_init.c
CommitLineData
2cc37b15 1// SPDX-License-Identifier: GPL-2.0
6c223761 2/*
889653ec 3 * driver for Microchip PQI-based storage controllers
e4b73b3f 4 * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
2f4c4b92 5 * Copyright (c) 2016-2018 Microsemi Corporation
6c223761
KB
6 * Copyright (c) 2016 PMC-Sierra, Inc.
7 *
2f4c4b92 8 * Questions/Comments/Bugfixes to storagedev@microchip.com
6c223761
KB
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/pci.h>
15#include <linux/delay.h>
16#include <linux/interrupt.h>
17#include <linux/sched.h>
18#include <linux/rtc.h>
19#include <linux/bcd.h>
3c50976f 20#include <linux/reboot.h>
6c223761 21#include <linux/cciss_ioctl.h>
52198226 22#include <linux/blk-mq-pci.h>
6c223761
KB
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_device.h>
26#include <scsi/scsi_eh.h>
27#include <scsi/scsi_transport_sas.h>
28#include <asm/unaligned.h>
29#include "smartpqi.h"
30#include "smartpqi_sis.h"
31
32#if !defined(BUILD_TIMESTAMP)
33#define BUILD_TIMESTAMP
34#endif
35
f54f85df 36#define DRIVER_VERSION "2.1.18-045"
d56030f8
DB
37#define DRIVER_MAJOR 2
38#define DRIVER_MINOR 1
f54f85df
DB
39#define DRIVER_RELEASE 18
40#define DRIVER_REVISION 45
6c223761 41
6aa26b5a 42#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
2d154f5f 43 DRIVER_VERSION BUILD_TIMESTAMP ")"
6c223761
KB
44#define DRIVER_NAME_SHORT "smartpqi"
45
e1d213bd
KB
46#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))
47
2790cd4d
KB
48#define PQI_POST_RESET_DELAY_SECS 5
49#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10
50
6aa26b5a
DB
51MODULE_AUTHOR("Microchip");
52MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
6c223761 53 DRIVER_VERSION);
6c223761
KB
54MODULE_VERSION(DRIVER_VERSION);
55MODULE_LICENSE("GPL");
56
c1ea387d
BVA
57struct pqi_cmd_priv {
58 int this_residual;
59};
60
61static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
62{
63 return scsi_cmd_priv(cmd);
64}
65
5e693586 66static void pqi_verify_structures(void);
5d1f03e6
MB
67static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
68 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
5f310425 69static void pqi_ctrl_offline_worker(struct work_struct *work);
6c223761
KB
70static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
71static void pqi_scan_start(struct Scsi_Host *shost);
72static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
73 struct pqi_queue_group *queue_group, enum pqi_io_path path,
74 struct pqi_io_request *io_request);
75static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
76 struct pqi_iu_header *request, unsigned int flags,
ae0c189d 77 struct pqi_raid_error_info *error_info);
6c223761
KB
78static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
79 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
80 unsigned int cdb_length, struct pqi_queue_group *queue_group,
2a47834d 81 struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
7a012c23
DB
82static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
83 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
84 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
85 struct pqi_scsi_dev_raid_map_data *rmd);
6702d2c4
DB
86static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
87 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
88 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
89 struct pqi_scsi_dev_raid_map_data *rmd);
4fd22c13
MR
90static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
91static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
2790cd4d
KB
92static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
93static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
4fd22c13
MR
94static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
95static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
1e46731e 96static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
904f2bfd 97 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
331f7e99 98static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
6c223761
KB
99
100/* for flags argument to pqi_submit_raid_request_synchronous() */
101#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
102
103static struct scsi_transport_template *pqi_sas_transport_template;
104
105static atomic_t pqi_controller_count = ATOMIC_INIT(0);
106
3c50976f
KB
107enum pqi_lockup_action {
108 NONE,
109 REBOOT,
110 PANIC
111};
112
113static enum pqi_lockup_action pqi_lockup_action = NONE;
114
115static struct {
116 enum pqi_lockup_action action;
117 char *name;
118} pqi_lockup_actions[] = {
119 {
120 .action = NONE,
121 .name = "none",
122 },
123 {
124 .action = REBOOT,
125 .name = "reboot",
126 },
127 {
128 .action = PANIC,
129 .name = "panic",
130 },
131};
132
6a50d6ad
KB
133static unsigned int pqi_supported_event_types[] = {
134 PQI_EVENT_TYPE_HOTPLUG,
135 PQI_EVENT_TYPE_HARDWARE,
136 PQI_EVENT_TYPE_PHYSICAL_DEVICE,
137 PQI_EVENT_TYPE_LOGICAL_DEVICE,
4fd22c13 138 PQI_EVENT_TYPE_OFA,
6a50d6ad
KB
139 PQI_EVENT_TYPE_AIO_STATE_CHANGE,
140 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
141};
142
6c223761
KB
143static int pqi_disable_device_id_wildcards;
144module_param_named(disable_device_id_wildcards,
cbe0c7b1 145 pqi_disable_device_id_wildcards, int, 0644);
6c223761
KB
146MODULE_PARM_DESC(disable_device_id_wildcards,
147 "Disable device ID wildcards.");
148
5a259e32
KB
149static int pqi_disable_heartbeat;
150module_param_named(disable_heartbeat,
151 pqi_disable_heartbeat, int, 0644);
152MODULE_PARM_DESC(disable_heartbeat,
153 "Disable heartbeat.");
154
155static int pqi_disable_ctrl_shutdown;
156module_param_named(disable_ctrl_shutdown,
157 pqi_disable_ctrl_shutdown, int, 0644);
158MODULE_PARM_DESC(disable_ctrl_shutdown,
159 "Disable controller shutdown when controller locked up.");
160
3c50976f
KB
161static char *pqi_lockup_action_param;
162module_param_named(lockup_action,
163 pqi_lockup_action_param, charp, 0644);
164MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
165 "\t\tSupported: none, reboot, panic\n"
166 "\t\tDefault: none");
167
5e6a9760
GW
168static int pqi_expose_ld_first;
169module_param_named(expose_ld_first,
170 pqi_expose_ld_first, int, 0644);
583891c9 171MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");
5e6a9760 172
522bc026
DC
173static int pqi_hide_vsep;
174module_param_named(hide_vsep,
175 pqi_hide_vsep, int, 0644);
583891c9 176MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
522bc026 177
cf15c3e7
MM
178static int pqi_disable_managed_interrupts;
179module_param_named(disable_managed_interrupts,
180 pqi_disable_managed_interrupts, int, 0644);
181MODULE_PARM_DESC(disable_managed_interrupts,
182 "Disable the kernel automatically assigning SMP affinity to IRQs.");
183
6d567dfe
KB
184static unsigned int pqi_ctrl_ready_timeout_secs;
185module_param_named(ctrl_ready_timeout,
186 pqi_ctrl_ready_timeout_secs, uint, 0644);
187MODULE_PARM_DESC(ctrl_ready_timeout,
188 "Timeout in seconds for driver to wait for controller ready.");
189
6c223761
KB
190static char *raid_levels[] = {
191 "RAID-0",
192 "RAID-4",
193 "RAID-1(1+0)",
194 "RAID-5",
195 "RAID-5+1",
7a012c23
DB
196 "RAID-6",
197 "RAID-1(Triple)",
6c223761
KB
198};
199
200static char *pqi_raid_level_to_string(u8 raid_level)
201{
202 if (raid_level < ARRAY_SIZE(raid_levels))
203 return raid_levels[raid_level];
204
a9f93392 205 return "RAID UNKNOWN";
6c223761
KB
206}
207
208#define SA_RAID_0 0
209#define SA_RAID_4 1
210#define SA_RAID_1 2 /* also used for RAID 10 */
211#define SA_RAID_5 3 /* also used for RAID 50 */
212#define SA_RAID_51 4
213#define SA_RAID_6 5 /* also used for RAID 60 */
7a012c23
DB
214#define SA_RAID_TRIPLE 6 /* also used for RAID 1+0 Triple */
215#define SA_RAID_MAX SA_RAID_TRIPLE
6c223761
KB
216#define SA_RAID_UNKNOWN 0xff
217
218static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
219{
7561a7e4 220 pqi_prep_for_scsi_done(scmd);
0ca19080 221 scsi_done(scmd);
6c223761
KB
222}
223
b6e2ef67 224static inline void pqi_disable_write_same(struct scsi_device *sdev)
6c223761 225{
b6e2ef67 226 sdev->no_write_same = 1;
6c223761
KB
227}
228
6c223761 229static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
6c223761 230{
6c223761 231 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
6c223761
KB
232}
233
234static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
235{
236 return !device->is_physical_device;
237}
238
bd10cf0b
KB
239static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
240{
241 return scsi3addr[2] != 0;
242}
243
694c5d5b
KB
244static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
245{
246 return !ctrl_info->controller_online;
247}
248
6c223761
KB
249static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
250{
251 if (ctrl_info->controller_online)
252 if (!sis_is_firmware_running(ctrl_info))
5d1f03e6 253 pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
6c223761
KB
254}
255
256static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
257{
258 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
259}
260
9ee5d6e9
MR
261#define PQI_DRIVER_SCRATCH_PQI_MODE 0x1
262#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED 0x2
263
583891c9 264static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
ff6abb73 265{
9ee5d6e9 266 return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
ff6abb73
KB
267}
268
269static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
270 enum pqi_ctrl_mode mode)
271{
9ee5d6e9
MR
272 u32 driver_scratch;
273
274 driver_scratch = sis_read_driver_scratch(ctrl_info);
275
276 if (mode == PQI_MODE)
277 driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
278 else
279 driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;
280
281 sis_write_driver_scratch(ctrl_info, driver_scratch);
282}
283
284static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
285{
286 return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
287}
288
289static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
290{
291 u32 driver_scratch;
292
293 driver_scratch = sis_read_driver_scratch(ctrl_info);
294
295 if (is_supported)
296 driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
297 else
298 driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
299
300 sis_write_driver_scratch(ctrl_info, driver_scratch);
ff6abb73
KB
301}
302
9fa82023
KB
303static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
304{
305 ctrl_info->scan_blocked = true;
306 mutex_lock(&ctrl_info->scan_mutex);
307}
308
309static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
310{
311 ctrl_info->scan_blocked = false;
312 mutex_unlock(&ctrl_info->scan_mutex);
313}
314
315static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
316{
317 return ctrl_info->scan_blocked;
318}
319
694c5d5b
KB
320static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
321{
37f33181 322 mutex_lock(&ctrl_info->lun_reset_mutex);
694c5d5b
KB
323}
324
37f33181 325static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
694c5d5b 326{
37f33181 327 mutex_unlock(&ctrl_info->lun_reset_mutex);
694c5d5b
KB
328}
329
9fa82023
KB
330static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
331{
332 struct Scsi_Host *shost;
333 unsigned int num_loops;
334 int msecs_sleep;
335
336 shost = ctrl_info->scsi_host;
337
338 scsi_block_requests(shost);
339
340 num_loops = 0;
341 msecs_sleep = 20;
342 while (scsi_host_busy(shost)) {
343 num_loops++;
344 if (num_loops == 10)
345 msecs_sleep = 500;
346 msleep(msecs_sleep);
347 }
348}
349
350static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
351{
352 scsi_unblock_requests(ctrl_info->scsi_host);
353}
354
355static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
356{
357 atomic_inc(&ctrl_info->num_busy_threads);
694c5d5b
KB
358}
359
9fa82023 360static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
694c5d5b 361{
9fa82023 362 atomic_dec(&ctrl_info->num_busy_threads);
694c5d5b
KB
363}
364
365static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
366{
367 return ctrl_info->block_requests;
368}
369
7561a7e4
KB
370static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
371{
372 ctrl_info->block_requests = true;
7561a7e4
KB
373}
374
375static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
376{
377 ctrl_info->block_requests = false;
378 wake_up_all(&ctrl_info->block_requests_wait);
7561a7e4
KB
379}
380
ae0c189d 381static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
7561a7e4 382{
7561a7e4 383 if (!pqi_ctrl_blocked(ctrl_info))
ae0c189d 384 return;
7561a7e4
KB
385
386 atomic_inc(&ctrl_info->num_blocked_threads);
ae0c189d
KB
387 wait_event(ctrl_info->block_requests_wait,
388 !pqi_ctrl_blocked(ctrl_info));
7561a7e4 389 atomic_dec(&ctrl_info->num_blocked_threads);
7561a7e4
KB
390}
391
18ff5f08
KB
392#define PQI_QUIESCE_WARNING_TIMEOUT_SECS 10
393
7561a7e4
KB
394static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
395{
18ff5f08
KB
396 unsigned long start_jiffies;
397 unsigned long warning_timeout;
398 bool displayed_warning;
399
400 displayed_warning = false;
401 start_jiffies = jiffies;
42dc0426 402 warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
18ff5f08 403
7561a7e4 404 while (atomic_read(&ctrl_info->num_busy_threads) >
18ff5f08
KB
405 atomic_read(&ctrl_info->num_blocked_threads)) {
406 if (time_after(jiffies, warning_timeout)) {
407 dev_warn(&ctrl_info->pci_dev->dev,
408 "waiting %u seconds for driver activity to quiesce\n",
409 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
410 displayed_warning = true;
42dc0426 411 warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
18ff5f08 412 }
7561a7e4 413 usleep_range(1000, 2000);
18ff5f08
KB
414 }
415
416 if (displayed_warning)
417 dev_warn(&ctrl_info->pci_dev->dev,
418 "driver activity quiesced after waiting for %u seconds\n",
419 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
7561a7e4
KB
420}
421
03b288cf
KB
422static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
423{
424 return device->device_offline;
425}
426
2790cd4d 427static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
7561a7e4 428{
2790cd4d 429 mutex_lock(&ctrl_info->ofa_mutex);
7561a7e4 430}
6c223761 431
2790cd4d 432static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
4fd22c13 433{
2790cd4d 434 mutex_unlock(&ctrl_info->ofa_mutex);
4fd22c13
MR
435}
436
2790cd4d 437static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
4fd22c13 438{
2790cd4d
KB
439 mutex_lock(&ctrl_info->ofa_mutex);
440 mutex_unlock(&ctrl_info->ofa_mutex);
4fd22c13
MR
441}
442
2790cd4d 443static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
4fd22c13 444{
2790cd4d 445 return mutex_is_locked(&ctrl_info->ofa_mutex);
4fd22c13
MR
446}
447
1e46731e
MR
448static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
449{
450 device->in_remove = true;
451}
452
1bdf6e93 453static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
1e46731e 454{
1bdf6e93 455 return device->in_remove;
1e46731e
MR
456}
457
2790cd4d 458static inline int pqi_event_type_to_event_index(unsigned int event_type)
0530736e 459{
2790cd4d
KB
460 int index;
461
462 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
463 if (event_type == pqi_supported_event_types[index])
464 return index;
465
466 return -1;
0530736e
KB
467}
468
2790cd4d 469static inline bool pqi_is_supported_event(unsigned int event_type)
0530736e 470{
2790cd4d 471 return pqi_event_type_to_event_index(event_type) != -1;
0530736e
KB
472}
473
583891c9
KB
474static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
475 unsigned long delay)
5f310425
KB
476{
477 if (pqi_ctrl_offline(ctrl_info))
478 return;
479
480 schedule_delayed_work(&ctrl_info->rescan_work, delay);
481}
482
6c223761
KB
483static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
484{
5f310425
KB
485 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
486}
487
42dc0426 488#define PQI_RESCAN_WORK_DELAY (10 * HZ)
5f310425 489
583891c9 490static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
5f310425
KB
491{
492 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
6c223761
KB
493}
494
061ef06a
KB
495static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
496{
497 cancel_delayed_work_sync(&ctrl_info->rescan_work);
498}
499
98f87667
KB
500static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
501{
502 if (!ctrl_info->heartbeat_counter)
503 return 0;
504
505 return readl(ctrl_info->heartbeat_counter);
506}
507
4fd22c13
MR
508static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
509{
4fd22c13
MR
510 return readb(ctrl_info->soft_reset_status);
511}
512
4ccc354b 513static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
4fd22c13
MR
514{
515 u8 status;
516
4fd22c13 517 status = pqi_read_soft_reset_status(ctrl_info);
4ccc354b 518 status &= ~PQI_SOFT_RESET_ABORT;
4fd22c13
MR
519 writeb(status, ctrl_info->soft_reset_status);
520}
521
6c223761
KB
522static int pqi_map_single(struct pci_dev *pci_dev,
523 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
6917a9cc 524 size_t buffer_length, enum dma_data_direction data_direction)
6c223761
KB
525{
526 dma_addr_t bus_address;
527
6917a9cc 528 if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
6c223761
KB
529 return 0;
530
6917a9cc 531 bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
6c223761 532 data_direction);
6917a9cc 533 if (dma_mapping_error(&pci_dev->dev, bus_address))
6c223761
KB
534 return -ENOMEM;
535
536 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
537 put_unaligned_le32(buffer_length, &sg_descriptor->length);
538 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
539
540 return 0;
541}
542
543static void pqi_pci_unmap(struct pci_dev *pci_dev,
544 struct pqi_sg_descriptor *descriptors, int num_descriptors,
6917a9cc 545 enum dma_data_direction data_direction)
6c223761
KB
546{
547 int i;
548
6917a9cc 549 if (data_direction == DMA_NONE)
6c223761
KB
550 return;
551
552 for (i = 0; i < num_descriptors; i++)
6917a9cc 553 dma_unmap_single(&pci_dev->dev,
6c223761
KB
554 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
555 get_unaligned_le32(&descriptors[i].length),
556 data_direction);
557}
558
559static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
560 struct pqi_raid_path_request *request, u8 cmd,
561 u8 *scsi3addr, void *buffer, size_t buffer_length,
6917a9cc 562 u16 vpd_page, enum dma_data_direction *dir)
6c223761
KB
563{
564 u8 *cdb;
171c2865 565 size_t cdb_length = buffer_length;
6c223761
KB
566
567 memset(request, 0, sizeof(*request));
568
569 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
570 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
571 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
572 &request->header.iu_length);
573 put_unaligned_le32(buffer_length, &request->buffer_length);
574 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
575 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
576 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
577
578 cdb = request->cdb;
579
580 switch (cmd) {
be76f906
DB
581 case TEST_UNIT_READY:
582 request->data_direction = SOP_READ_FLAG;
583 cdb[0] = TEST_UNIT_READY;
584 break;
6c223761
KB
585 case INQUIRY:
586 request->data_direction = SOP_READ_FLAG;
587 cdb[0] = INQUIRY;
588 if (vpd_page & VPD_PAGE) {
589 cdb[1] = 0x1;
590 cdb[2] = (u8)vpd_page;
591 }
171c2865 592 cdb[4] = (u8)cdb_length;
6c223761
KB
593 break;
594 case CISS_REPORT_LOG:
595 case CISS_REPORT_PHYS:
596 request->data_direction = SOP_READ_FLAG;
597 cdb[0] = cmd;
28ca6d87
MM
598 if (cmd == CISS_REPORT_PHYS) {
599 if (ctrl_info->rpl_extended_format_4_5_supported)
600 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
601 else
602 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
603 } else {
f6cc2a77 604 cdb[1] = ctrl_info->ciss_report_log_flags;
28ca6d87 605 }
171c2865 606 put_unaligned_be32(cdb_length, &cdb[6]);
6c223761
KB
607 break;
608 case CISS_GET_RAID_MAP:
609 request->data_direction = SOP_READ_FLAG;
610 cdb[0] = CISS_READ;
611 cdb[1] = CISS_GET_RAID_MAP;
171c2865 612 put_unaligned_be32(cdb_length, &cdb[6]);
6c223761 613 break;
58322fe0 614 case SA_FLUSH_CACHE:
ae0c189d 615 request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
6c223761
KB
616 request->data_direction = SOP_WRITE_FLAG;
617 cdb[0] = BMIC_WRITE;
58322fe0 618 cdb[6] = BMIC_FLUSH_CACHE;
171c2865 619 put_unaligned_be16(cdb_length, &cdb[7]);
6c223761 620 break;
171c2865
DC
621 case BMIC_SENSE_DIAG_OPTIONS:
622 cdb_length = 0;
df561f66 623 fallthrough;
6c223761
KB
624 case BMIC_IDENTIFY_CONTROLLER:
625 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6d90615f 626 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
f6cc2a77 627 case BMIC_SENSE_FEATURE:
6c223761
KB
628 request->data_direction = SOP_READ_FLAG;
629 cdb[0] = BMIC_READ;
630 cdb[6] = cmd;
171c2865 631 put_unaligned_be16(cdb_length, &cdb[7]);
6c223761 632 break;
171c2865
DC
633 case BMIC_SET_DIAG_OPTIONS:
634 cdb_length = 0;
df561f66 635 fallthrough;
6c223761
KB
636 case BMIC_WRITE_HOST_WELLNESS:
637 request->data_direction = SOP_WRITE_FLAG;
638 cdb[0] = BMIC_WRITE;
639 cdb[6] = cmd;
171c2865 640 put_unaligned_be16(cdb_length, &cdb[7]);
6c223761 641 break;
3d46a59a
DB
642 case BMIC_CSMI_PASSTHRU:
643 request->data_direction = SOP_BIDIRECTIONAL;
644 cdb[0] = BMIC_WRITE;
645 cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
646 cdb[6] = cmd;
647 put_unaligned_be16(cdb_length, &cdb[7]);
6c223761
KB
648 break;
649 default:
9e68cccc 650 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
6c223761
KB
651 break;
652 }
653
654 switch (request->data_direction) {
655 case SOP_READ_FLAG:
6917a9cc 656 *dir = DMA_FROM_DEVICE;
6c223761
KB
657 break;
658 case SOP_WRITE_FLAG:
6917a9cc 659 *dir = DMA_TO_DEVICE;
6c223761
KB
660 break;
661 case SOP_NO_DIRECTION_FLAG:
6917a9cc 662 *dir = DMA_NONE;
6c223761
KB
663 break;
664 default:
6917a9cc 665 *dir = DMA_BIDIRECTIONAL;
6c223761
KB
666 break;
667 }
668
6c223761 669 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
6917a9cc 670 buffer, buffer_length, *dir);
6c223761
KB
671}
672
376fb880
KB
673static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
674{
675 io_request->scmd = NULL;
676 io_request->status = 0;
677 io_request->error_info = NULL;
678 io_request->raid_bypass = false;
679}
680
b27ac2fa 681static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6c223761
KB
682{
683 struct pqi_io_request *io_request;
b27ac2fa 684 u16 i;
6c223761 685
b27ac2fa
DB
686 if (scmd) { /* SML I/O request */
687 u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
688
689 i = blk_mq_unique_tag_to_tag(blk_tag);
6c223761 690 io_request = &ctrl_info->io_request_pool[i];
b27ac2fa
DB
691 if (atomic_inc_return(&io_request->refcount) > 1) {
692 atomic_dec(&io_request->refcount);
693 return NULL;
694 }
695 } else { /* IOCTL or driver internal request */
696 /*
697 * benignly racy - may have to wait for an open slot.
698 * command slot range is scsi_ml_can_queue -
699 * [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)]
700 */
701 i = 0;
702 while (1) {
703 io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i];
704 if (atomic_inc_return(&io_request->refcount) == 1)
705 break;
706 atomic_dec(&io_request->refcount);
707 i = (i + 1) % PQI_RESERVED_IO_SLOTS;
708 }
6c223761
KB
709 }
710
376fb880 711 pqi_reinit_io_request(io_request);
6c223761
KB
712
713 return io_request;
714}
715
716static void pqi_free_io_request(struct pqi_io_request *io_request)
717{
718 atomic_dec(&io_request->refcount);
719}
720
02133b68 721static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
694c5d5b 722 u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
ae0c189d 723 struct pqi_raid_error_info *error_info)
6c223761
KB
724{
725 int rc;
6c223761 726 struct pqi_raid_path_request request;
694c5d5b 727 enum dma_data_direction dir;
6c223761 728
583891c9
KB
729 rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
730 buffer, buffer_length, vpd_page, &dir);
6c223761
KB
731 if (rc)
732 return rc;
733
ae0c189d 734 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);
6c223761 735
6917a9cc 736 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
694c5d5b 737
6c223761
KB
738 return rc;
739}
740
694c5d5b 741/* helper functions for pqi_send_scsi_raid_request */
02133b68
DC
742
743static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
694c5d5b 744 u8 cmd, void *buffer, size_t buffer_length)
6c223761 745{
02133b68 746 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
ae0c189d 747 buffer, buffer_length, 0, NULL);
02133b68 748}
6c223761 749
02133b68 750static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
694c5d5b
KB
751 u8 cmd, void *buffer, size_t buffer_length,
752 struct pqi_raid_error_info *error_info)
02133b68
DC
753{
754 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
ae0c189d 755 buffer, buffer_length, 0, error_info);
02133b68 756}
6c223761 757
02133b68 758static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
694c5d5b 759 struct bmic_identify_controller *buffer)
02133b68
DC
760{
761 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
694c5d5b 762 buffer, sizeof(*buffer));
02133b68
DC
763}
764
6d90615f 765static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
694c5d5b 766 struct bmic_sense_subsystem_info *sense_info)
6d90615f
MB
767{
768 return pqi_send_ctrl_raid_request(ctrl_info,
694c5d5b
KB
769 BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
770 sizeof(*sense_info));
6d90615f
MB
771}
772
02133b68 773static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
6c223761 774 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
02133b68
DC
775{
776 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
ae0c189d 777 buffer, buffer_length, vpd_page, NULL);
6c223761
KB
778}
779
780static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
781 struct pqi_scsi_dev *device,
694c5d5b 782 struct bmic_identify_physical_device *buffer, size_t buffer_length)
6c223761
KB
783{
784 int rc;
6917a9cc 785 enum dma_data_direction dir;
6c223761
KB
786 u16 bmic_device_index;
787 struct pqi_raid_path_request request;
788
789 rc = pqi_build_raid_path_request(ctrl_info, &request,
790 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
6917a9cc 791 buffer_length, 0, &dir);
6c223761
KB
792 if (rc)
793 return rc;
794
795 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
796 request.cdb[2] = (u8)bmic_device_index;
797 request.cdb[9] = (u8)(bmic_device_index >> 8);
798
ae0c189d 799 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
6c223761 800
6917a9cc 801 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
694c5d5b 802
6c223761
KB
803 return rc;
804}
805
f6cc2a77
KB
806static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
807{
808 u32 bytes;
809
810 bytes = get_unaligned_le16(limit);
811 if (bytes == 0)
812 bytes = ~0;
813 else
814 bytes *= 1024;
815
816 return bytes;
817}
818
819#pragma pack(1)
820
821struct bmic_sense_feature_buffer {
822 struct bmic_sense_feature_buffer_header header;
823 struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
824};
825
826#pragma pack()
827
828#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \
829 offsetofend(struct bmic_sense_feature_buffer, \
830 aio_subpage.max_write_raid_1_10_3drive)
831
832#define MINIMUM_AIO_SUBPAGE_LENGTH \
833 (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
834 max_write_raid_1_10_3drive) - \
835 sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
836
837static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
838{
839 int rc;
840 enum dma_data_direction dir;
841 struct pqi_raid_path_request request;
842 struct bmic_sense_feature_buffer *buffer;
843
844 buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
845 if (!buffer)
846 return -ENOMEM;
847
583891c9
KB
848 rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
849 buffer, sizeof(*buffer), 0, &dir);
f6cc2a77
KB
850 if (rc)
851 goto error;
852
853 request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
854 request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
855
ae0c189d 856 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
6c223761 857
6917a9cc 858 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
694c5d5b 859
f6cc2a77
KB
860 if (rc)
861 goto error;
862
863 if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
864 buffer->header.subpage_code !=
865 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
866 get_unaligned_le16(&buffer->header.buffer_length) <
867 MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
868 buffer->aio_subpage.header.page_code !=
869 BMIC_SENSE_FEATURE_IO_PAGE ||
870 buffer->aio_subpage.header.subpage_code !=
871 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
872 get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
873 MINIMUM_AIO_SUBPAGE_LENGTH) {
874 goto error;
875 }
876
877 ctrl_info->max_transfer_encrypted_sas_sata =
878 pqi_aio_limit_to_bytes(
879 &buffer->aio_subpage.max_transfer_encrypted_sas_sata);
880
881 ctrl_info->max_transfer_encrypted_nvme =
882 pqi_aio_limit_to_bytes(
883 &buffer->aio_subpage.max_transfer_encrypted_nvme);
884
885 ctrl_info->max_write_raid_5_6 =
886 pqi_aio_limit_to_bytes(
887 &buffer->aio_subpage.max_write_raid_5_6);
888
889 ctrl_info->max_write_raid_1_10_2drive =
890 pqi_aio_limit_to_bytes(
891 &buffer->aio_subpage.max_write_raid_1_10_2drive);
892
893 ctrl_info->max_write_raid_1_10_3drive =
894 pqi_aio_limit_to_bytes(
895 &buffer->aio_subpage.max_write_raid_1_10_3drive);
896
897error:
898 kfree(buffer);
899
6c223761
KB
900 return rc;
901}
902
58322fe0
KB
903static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
904 enum bmic_flush_cache_shutdown_event shutdown_event)
6c223761
KB
905{
906 int rc;
58322fe0 907 struct bmic_flush_cache *flush_cache;
6c223761 908
58322fe0
KB
909 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
910 if (!flush_cache)
6c223761
KB
911 return -ENOMEM;
912
58322fe0
KB
913 flush_cache->shutdown_event = shutdown_event;
914
02133b68
DC
915 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
916 sizeof(*flush_cache));
6c223761 917
58322fe0 918 kfree(flush_cache);
6c223761
KB
919
920 return rc;
921}
922
3d46a59a
DB
923int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
924 struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
925 struct pqi_raid_error_info *error_info)
926{
927 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
928 buffer, buffer_length, error_info);
929}
171c2865 930
694c5d5b 931#define PQI_FETCH_PTRAID_DATA (1 << 31)
171c2865
DC
932
933static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
6c223761
KB
934{
935 int rc;
171c2865 936 struct bmic_diag_options *diag;
6c223761 937
171c2865
DC
938 diag = kzalloc(sizeof(*diag), GFP_KERNEL);
939 if (!diag)
940 return -ENOMEM;
941
02133b68 942 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
694c5d5b 943 diag, sizeof(*diag));
6c223761 944 if (rc)
171c2865 945 goto out;
6c223761 946
171c2865
DC
947 diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
948
694c5d5b
KB
949 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
950 sizeof(*diag));
951
171c2865
DC
952out:
953 kfree(diag);
6c223761 954
6c223761
KB
955 return rc;
956}
957
02133b68 958static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
959 void *buffer, size_t buffer_length)
960{
02133b68 961 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
694c5d5b 962 buffer, buffer_length);
6c223761
KB
963}
964
965#pragma pack(1)
966
967struct bmic_host_wellness_driver_version {
968 u8 start_tag[4];
969 u8 driver_version_tag[2];
970 __le16 driver_version_length;
971 char driver_version[32];
b2346b50 972 u8 dont_write_tag[2];
6c223761
KB
973 u8 end_tag[2];
974};
975
976#pragma pack()
977
978static int pqi_write_driver_version_to_host_wellness(
979 struct pqi_ctrl_info *ctrl_info)
980{
981 int rc;
982 struct bmic_host_wellness_driver_version *buffer;
983 size_t buffer_length;
984
985 buffer_length = sizeof(*buffer);
986
987 buffer = kmalloc(buffer_length, GFP_KERNEL);
988 if (!buffer)
989 return -ENOMEM;
990
991 buffer->start_tag[0] = '<';
992 buffer->start_tag[1] = 'H';
993 buffer->start_tag[2] = 'W';
994 buffer->start_tag[3] = '>';
995 buffer->driver_version_tag[0] = 'D';
996 buffer->driver_version_tag[1] = 'V';
997 put_unaligned_le16(sizeof(buffer->driver_version),
998 &buffer->driver_version_length);
061ef06a 999 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
6c223761
KB
1000 sizeof(buffer->driver_version) - 1);
1001 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
b2346b50
MR
1002 buffer->dont_write_tag[0] = 'D';
1003 buffer->dont_write_tag[1] = 'W';
6c223761
KB
1004 buffer->end_tag[0] = 'Z';
1005 buffer->end_tag[1] = 'Z';
1006
1007 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
1008
1009 kfree(buffer);
1010
1011 return rc;
1012}
1013
1014#pragma pack(1)
1015
1016struct bmic_host_wellness_time {
1017 u8 start_tag[4];
1018 u8 time_tag[2];
1019 __le16 time_length;
1020 u8 time[8];
1021 u8 dont_write_tag[2];
1022 u8 end_tag[2];
1023};
1024
1025#pragma pack()
1026
1027static int pqi_write_current_time_to_host_wellness(
1028 struct pqi_ctrl_info *ctrl_info)
1029{
1030 int rc;
1031 struct bmic_host_wellness_time *buffer;
1032 size_t buffer_length;
1033 time64_t local_time;
1034 unsigned int year;
ed10858e 1035 struct tm tm;
6c223761
KB
1036
1037 buffer_length = sizeof(*buffer);
1038
1039 buffer = kmalloc(buffer_length, GFP_KERNEL);
1040 if (!buffer)
1041 return -ENOMEM;
1042
1043 buffer->start_tag[0] = '<';
1044 buffer->start_tag[1] = 'H';
1045 buffer->start_tag[2] = 'W';
1046 buffer->start_tag[3] = '>';
1047 buffer->time_tag[0] = 'T';
1048 buffer->time_tag[1] = 'D';
1049 put_unaligned_le16(sizeof(buffer->time),
1050 &buffer->time_length);
1051
ed10858e
AB
1052 local_time = ktime_get_real_seconds();
1053 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
6c223761
KB
1054 year = tm.tm_year + 1900;
1055
1056 buffer->time[0] = bin2bcd(tm.tm_hour);
1057 buffer->time[1] = bin2bcd(tm.tm_min);
1058 buffer->time[2] = bin2bcd(tm.tm_sec);
1059 buffer->time[3] = 0;
1060 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
1061 buffer->time[5] = bin2bcd(tm.tm_mday);
1062 buffer->time[6] = bin2bcd(year / 100);
1063 buffer->time[7] = bin2bcd(year % 100);
1064
1065 buffer->dont_write_tag[0] = 'D';
1066 buffer->dont_write_tag[1] = 'W';
1067 buffer->end_tag[0] = 'Z';
1068 buffer->end_tag[1] = 'Z';
1069
1070 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
1071
1072 kfree(buffer);
1073
1074 return rc;
1075}
1076
42dc0426 1077#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
6c223761
KB
1078
1079static void pqi_update_time_worker(struct work_struct *work)
1080{
1081 int rc;
1082 struct pqi_ctrl_info *ctrl_info;
1083
1084 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1085 update_time_work);
1086
6c223761
KB
1087 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
1088 if (rc)
1089 dev_warn(&ctrl_info->pci_dev->dev,
1090 "error updating time on controller\n");
1091
1092 schedule_delayed_work(&ctrl_info->update_time_work,
1093 PQI_UPDATE_TIME_WORK_INTERVAL);
1094}
1095
583891c9 1096static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
6c223761 1097{
4fbebf1a 1098 schedule_delayed_work(&ctrl_info->update_time_work, 0);
061ef06a
KB
1099}
1100
583891c9 1101static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
061ef06a 1102{
061ef06a 1103 cancel_delayed_work_sync(&ctrl_info->update_time_work);
6c223761
KB
1104}
1105
583891c9
KB
1106static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
1107 size_t buffer_length)
6c223761 1108{
583891c9 1109 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
6c223761
KB
1110}
1111
583891c9 1112static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
6c223761
KB
1113{
1114 int rc;
1115 size_t lun_list_length;
1116 size_t lun_data_length;
1117 size_t new_lun_list_length;
1118 void *lun_data = NULL;
1119 struct report_lun_header *report_lun_header;
1120
1121 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
1122 if (!report_lun_header) {
1123 rc = -ENOMEM;
1124 goto out;
1125 }
1126
583891c9 1127 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
6c223761
KB
1128 if (rc)
1129 goto out;
1130
1131 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
1132
1133again:
1134 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
1135
1136 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
1137 if (!lun_data) {
1138 rc = -ENOMEM;
1139 goto out;
1140 }
1141
1142 if (lun_list_length == 0) {
1143 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
1144 goto out;
1145 }
1146
1147 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
1148 if (rc)
1149 goto out;
1150
583891c9
KB
1151 new_lun_list_length =
1152 get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);
6c223761
KB
1153
1154 if (new_lun_list_length > lun_list_length) {
1155 lun_list_length = new_lun_list_length;
1156 kfree(lun_data);
1157 goto again;
1158 }
1159
1160out:
1161 kfree(report_lun_header);
1162
1163 if (rc) {
1164 kfree(lun_data);
1165 lun_data = NULL;
1166 }
1167
1168 *buffer = lun_data;
1169
1170 return rc;
1171}
1172
583891c9 1173static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
6c223761 1174{
28ca6d87
MM
1175 int rc;
1176 unsigned int i;
1177 u8 rpl_response_format;
1178 u32 num_physicals;
1179 size_t rpl_16byte_wwid_list_length;
1180 void *rpl_list;
1181 struct report_lun_header *rpl_header;
1182 struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
1183 struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;
1184
1185 rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
1186 if (rc)
1187 return rc;
1188
1189 if (ctrl_info->rpl_extended_format_4_5_supported) {
1190 rpl_header = rpl_list;
1191 rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
1192 if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
1193 *buffer = rpl_list;
1194 return 0;
1195 } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
1196 dev_err(&ctrl_info->pci_dev->dev,
1197 "RPL returned unsupported data format %u\n",
1198 rpl_response_format);
1199 return -EINVAL;
1200 } else {
1201 dev_warn(&ctrl_info->pci_dev->dev,
1202 "RPL returned extended format 2 instead of 4\n");
1203 }
1204 }
1205
1206 rpl_8byte_wwid_list = rpl_list;
1207 num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);
1208 rpl_16byte_wwid_list_length = sizeof(struct report_lun_header) + (num_physicals * sizeof(struct report_phys_lun_16byte_wwid));
1209
1210 rpl_16byte_wwid_list = kmalloc(rpl_16byte_wwid_list_length, GFP_KERNEL);
1211 if (!rpl_16byte_wwid_list)
1212 return -ENOMEM;
1213
1214 put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
1215 &rpl_16byte_wwid_list->header.list_length);
1216 rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;
1217
1218 for (i = 0; i < num_physicals; i++) {
1219 memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
291c2e00
KB
1220 memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
1221 memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
28ca6d87
MM
1222 rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
1223 rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
1224 rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
1225 rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
1226 rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
1227 }
1228
1229 kfree(rpl_8byte_wwid_list);
1230 *buffer = rpl_16byte_wwid_list;
1231
1232 return 0;
6c223761
KB
1233}
1234
583891c9 1235static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
6c223761
KB
1236{
1237 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
1238}
1239
1240static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
28ca6d87
MM
1241 struct report_phys_lun_16byte_wwid_list **physdev_list,
1242 struct report_log_lun_list **logdev_list)
6c223761
KB
1243{
1244 int rc;
1245 size_t logdev_list_length;
1246 size_t logdev_data_length;
28ca6d87
MM
1247 struct report_log_lun_list *internal_logdev_list;
1248 struct report_log_lun_list *logdev_data;
6c223761
KB
1249 struct report_lun_header report_lun_header;
1250
1251 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
1252 if (rc)
1253 dev_err(&ctrl_info->pci_dev->dev,
1254 "report physical LUNs failed\n");
1255
1256 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
1257 if (rc)
1258 dev_err(&ctrl_info->pci_dev->dev,
1259 "report logical LUNs failed\n");
1260
1261 /*
1262 * Tack the controller itself onto the end of the logical device list.
1263 */
1264
1265 logdev_data = *logdev_list;
1266
1267 if (logdev_data) {
1268 logdev_list_length =
1269 get_unaligned_be32(&logdev_data->header.list_length);
1270 } else {
1271 memset(&report_lun_header, 0, sizeof(report_lun_header));
1272 logdev_data =
28ca6d87 1273 (struct report_log_lun_list *)&report_lun_header;
6c223761
KB
1274 logdev_list_length = 0;
1275 }
1276
1277 logdev_data_length = sizeof(struct report_lun_header) +
1278 logdev_list_length;
1279
1280 internal_logdev_list = kmalloc(logdev_data_length +
28ca6d87 1281 sizeof(struct report_log_lun), GFP_KERNEL);
6c223761
KB
1282 if (!internal_logdev_list) {
1283 kfree(*logdev_list);
1284 *logdev_list = NULL;
1285 return -ENOMEM;
1286 }
1287
1288 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
1289 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
28ca6d87 1290 sizeof(struct report_log_lun));
6c223761 1291 put_unaligned_be32(logdev_list_length +
28ca6d87 1292 sizeof(struct report_log_lun),
6c223761
KB
1293 &internal_logdev_list->header.list_length);
1294
1295 kfree(*logdev_list);
1296 *logdev_list = internal_logdev_list;
1297
1298 return 0;
1299}
1300
1301static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
1302 int bus, int target, int lun)
1303{
1304 device->bus = bus;
1305 device->target = target;
1306 device->lun = lun;
1307}
1308
1309static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
1310{
1311 u8 *scsi3addr;
1312 u32 lunid;
bd10cf0b
KB
1313 int bus;
1314 int target;
1315 int lun;
6c223761
KB
1316
1317 scsi3addr = device->scsi3addr;
1318 lunid = get_unaligned_le32(scsi3addr);
1319
1320 if (pqi_is_hba_lunid(scsi3addr)) {
1321 /* The specified device is the controller. */
1322 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
1323 device->target_lun_valid = true;
1324 return;
1325 }
1326
1327 if (pqi_is_logical_device(device)) {
bd10cf0b
KB
1328 if (device->is_external_raid_device) {
1329 bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
1330 target = (lunid >> 16) & 0x3fff;
1331 lun = lunid & 0xff;
1332 } else {
1333 bus = PQI_RAID_VOLUME_BUS;
1334 target = 0;
1335 lun = lunid & 0x3fff;
1336 }
1337 pqi_set_bus_target_lun(device, bus, target, lun);
6c223761
KB
1338 device->target_lun_valid = true;
1339 return;
1340 }
1341
1342 /*
1343 * Defer target and LUN assignment for non-controller physical devices
1344 * because the SAS transport layer will make these assignments later.
1345 */
1346 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
1347}
1348
1349static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
1350 struct pqi_scsi_dev *device)
1351{
1352 int rc;
1353 u8 raid_level;
1354 u8 *buffer;
1355
1356 raid_level = SA_RAID_UNKNOWN;
1357
1358 buffer = kmalloc(64, GFP_KERNEL);
1359 if (buffer) {
1360 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1361 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
1362 if (rc == 0) {
1363 raid_level = buffer[8];
1364 if (raid_level > SA_RAID_MAX)
1365 raid_level = SA_RAID_UNKNOWN;
1366 }
1367 kfree(buffer);
1368 }
1369
1370 device->raid_level = raid_level;
1371}
1372
1373static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1374 struct pqi_scsi_dev *device, struct raid_map *raid_map)
1375{
1376 char *err_msg;
1377 u32 raid_map_size;
1378 u32 r5or6_blocks_per_row;
6c223761
KB
1379
1380 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1381
1382 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1383 err_msg = "RAID map too small";
1384 goto bad_raid_map;
1385 }
1386
6c223761
KB
1387 if (device->raid_level == SA_RAID_1) {
1388 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1389 err_msg = "invalid RAID-1 map";
1390 goto bad_raid_map;
1391 }
7a012c23 1392 } else if (device->raid_level == SA_RAID_TRIPLE) {
6c223761 1393 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
7a012c23 1394 err_msg = "invalid RAID-1(Triple) map";
6c223761
KB
1395 goto bad_raid_map;
1396 }
1397 } else if ((device->raid_level == SA_RAID_5 ||
1398 device->raid_level == SA_RAID_6) &&
1399 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1400 /* RAID 50/60 */
1401 r5or6_blocks_per_row =
1402 get_unaligned_le16(&raid_map->strip_size) *
1403 get_unaligned_le16(&raid_map->data_disks_per_row);
1404 if (r5or6_blocks_per_row == 0) {
1405 err_msg = "invalid RAID-5 or RAID-6 map";
1406 goto bad_raid_map;
1407 }
1408 }
1409
1410 return 0;
1411
1412bad_raid_map:
d87d5474 1413 dev_warn(&ctrl_info->pci_dev->dev,
38a7338a
KB
1414 "logical device %08x%08x %s\n",
1415 *((u32 *)&device->scsi3addr),
1416 *((u32 *)&device->scsi3addr[4]), err_msg);
6c223761
KB
1417
1418 return -EINVAL;
1419}
1420
1421static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1422 struct pqi_scsi_dev *device)
1423{
1424 int rc;
a91aaae0 1425 u32 raid_map_size;
6c223761
KB
1426 struct raid_map *raid_map;
1427
1428 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1429 if (!raid_map)
1430 return -ENOMEM;
1431
a91aaae0 1432 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
ae0c189d 1433 device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
6c223761
KB
1434 if (rc)
1435 goto error;
1436
a91aaae0 1437 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
6c223761 1438
a91aaae0 1439 if (raid_map_size > sizeof(*raid_map)) {
6c223761 1440
a91aaae0
AK
1441 kfree(raid_map);
1442
1443 raid_map = kmalloc(raid_map_size, GFP_KERNEL);
1444 if (!raid_map)
1445 return -ENOMEM;
1446
1447 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
ae0c189d 1448 device->scsi3addr, raid_map, raid_map_size, 0, NULL);
a91aaae0
AK
1449 if (rc)
1450 goto error;
1451
1452 if (get_unaligned_le32(&raid_map->structure_size)
1453 != raid_map_size) {
1454 dev_warn(&ctrl_info->pci_dev->dev,
583891c9 1455 "requested %u bytes, received %u bytes\n",
a91aaae0
AK
1456 raid_map_size,
1457 get_unaligned_le32(&raid_map->structure_size));
d1f6581a 1458 rc = -EINVAL;
a91aaae0
AK
1459 goto error;
1460 }
1461 }
6c223761
KB
1462
1463 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1464 if (rc)
1465 goto error;
1466
1467 device->raid_map = raid_map;
1468
1469 return 0;
1470
1471error:
1472 kfree(raid_map);
1473
1474 return rc;
1475}
1476
f6cc2a77
KB
1477static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
1478 struct pqi_scsi_dev *device)
1479{
1480 if (!ctrl_info->lv_drive_type_mix_valid) {
1481 device->max_transfer_encrypted = ~0;
1482 return;
1483 }
1484
1485 switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
1486 case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
1487 case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
1488 case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
1489 case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
1490 case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
1491 case LV_DRIVE_TYPE_MIX_SAS_ONLY:
1492 case LV_DRIVE_TYPE_MIX_SATA_ONLY:
1493 device->max_transfer_encrypted =
1494 ctrl_info->max_transfer_encrypted_sas_sata;
1495 break;
1496 case LV_DRIVE_TYPE_MIX_NVME_ONLY:
1497 device->max_transfer_encrypted =
1498 ctrl_info->max_transfer_encrypted_nvme;
1499 break;
1500 case LV_DRIVE_TYPE_MIX_UNKNOWN:
1501 case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
1502 default:
1503 device->max_transfer_encrypted =
1504 min(ctrl_info->max_transfer_encrypted_sas_sata,
1505 ctrl_info->max_transfer_encrypted_nvme);
1506 break;
1507 }
1508}
1509
588a63fe 1510static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
1511 struct pqi_scsi_dev *device)
1512{
1513 int rc;
1514 u8 *buffer;
588a63fe 1515 u8 bypass_status;
6c223761
KB
1516
1517 buffer = kmalloc(64, GFP_KERNEL);
1518 if (!buffer)
1519 return;
1520
1521 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
588a63fe 1522 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
6c223761
KB
1523 if (rc)
1524 goto out;
1525
694c5d5b
KB
1526#define RAID_BYPASS_STATUS 4
1527#define RAID_BYPASS_CONFIGURED 0x1
1528#define RAID_BYPASS_ENABLED 0x2
6c223761 1529
588a63fe
KB
1530 bypass_status = buffer[RAID_BYPASS_STATUS];
1531 device->raid_bypass_configured =
1532 (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1533 if (device->raid_bypass_configured &&
1534 (bypass_status & RAID_BYPASS_ENABLED) &&
f6cc2a77 1535 pqi_get_raid_map(ctrl_info, device) == 0) {
588a63fe 1536 device->raid_bypass_enabled = true;
f6cc2a77
KB
1537 if (get_unaligned_le16(&device->raid_map->flags) &
1538 RAID_MAP_ENCRYPTION_ENABLED)
1539 pqi_set_max_transfer_encrypted(ctrl_info, device);
1540 }
6c223761
KB
1541
1542out:
1543 kfree(buffer);
1544}
1545
1546/*
1547 * Use vendor-specific VPD to determine online/offline status of a volume.
1548 */
1549
1550static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1551 struct pqi_scsi_dev *device)
1552{
1553 int rc;
1554 size_t page_length;
1555 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1556 bool volume_offline = true;
1557 u32 volume_flags;
1558 struct ciss_vpd_logical_volume_status *vpd;
1559
1560 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1561 if (!vpd)
1562 goto no_buffer;
1563
1564 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1565 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1566 if (rc)
1567 goto out;
1568
7ff44499
DC
1569 if (vpd->page_code != CISS_VPD_LV_STATUS)
1570 goto out;
1571
6c223761
KB
1572 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1573 volume_status) + vpd->page_length;
1574 if (page_length < sizeof(*vpd))
1575 goto out;
1576
1577 volume_status = vpd->volume_status;
1578 volume_flags = get_unaligned_be32(&vpd->flags);
1579 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1580
1581out:
1582 kfree(vpd);
1583no_buffer:
1584 device->volume_status = volume_status;
1585 device->volume_offline = volume_offline;
1586}
1587
2a47834d 1588#define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01
ec504b23
MB
1589#define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10
1590
ce143793
KB
1591static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
1592 struct pqi_scsi_dev *device,
1593 struct bmic_identify_physical_device *id_phys)
1594{
1595 int rc;
26b390ab 1596
ce143793
KB
1597 memset(id_phys, 0, sizeof(*id_phys));
1598
1599 rc = pqi_identify_physical_device(ctrl_info, device,
1600 id_phys, sizeof(*id_phys));
1601 if (rc) {
1602 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1603 return rc;
1604 }
1605
1606 scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
1607 scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
1608
1609 memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
1610 memcpy(device->model, &id_phys->model[8], sizeof(device->model));
1611
1612 device->box_index = id_phys->box_index;
1613 device->phys_box_on_bus = id_phys->phys_box_on_bus;
1614 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
1615 device->queue_depth =
1616 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1617 device->active_path_index = id_phys->active_path_number;
1618 device->path_map = id_phys->redundant_path_present_map;
1619 memcpy(&device->box,
1620 &id_phys->alternate_paths_phys_box_on_port,
1621 sizeof(device->box));
1622 memcpy(&device->phys_connector,
1623 &id_phys->alternate_paths_phys_connector,
1624 sizeof(device->phys_connector));
1625 device->bay = id_phys->phys_bay_in_box;
904f2bfd
KM
1626 device->multi_lun_device_lun_count = id_phys->multi_lun_device_lun_count;
1627 if (!device->multi_lun_device_lun_count)
1628 device->multi_lun_device_lun_count = 1;
ec504b23
MB
1629 if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
1630 id_phys->phy_count)
1631 device->phy_id =
1632 id_phys->phy_to_phy_map[device->active_path_index];
1633 else
1634 device->phy_id = 0xFF;
1635
2a47834d
GW
1636 device->ncq_prio_support =
1637 ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
1638 PQI_DEVICE_NCQ_PRIO_SUPPORTED);
1639
ce143793
KB
1640 return 0;
1641}
1642
1643static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
1644 struct pqi_scsi_dev *device)
1645{
1646 int rc;
1647 u8 *buffer;
3d46a59a 1648
6c223761
KB
1649 buffer = kmalloc(64, GFP_KERNEL);
1650 if (!buffer)
1651 return -ENOMEM;
1652
1653 /* Send an inquiry to the device to see what it is. */
ce143793
KB
1654 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1655 if (rc)
1656 goto out;
6c223761
KB
1657
1658 scsi_sanitize_inquiry_string(&buffer[8], 8);
1659 scsi_sanitize_inquiry_string(&buffer[16], 16);
1660
1661 device->devtype = buffer[0] & 0x1f;
cbe0c7b1
KB
1662 memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1663 memcpy(device->model, &buffer[16], sizeof(device->model));
6c223761 1664
ce143793 1665 if (device->devtype == TYPE_DISK) {
bd10cf0b
KB
1666 if (device->is_external_raid_device) {
1667 device->raid_level = SA_RAID_UNKNOWN;
1668 device->volume_status = CISS_LV_OK;
1669 device->volume_offline = false;
1670 } else {
1671 pqi_get_raid_level(ctrl_info, device);
588a63fe 1672 pqi_get_raid_bypass_status(ctrl_info, device);
bd10cf0b
KB
1673 pqi_get_volume_status(ctrl_info, device);
1674 }
6c223761
KB
1675 }
1676
1677out:
1678 kfree(buffer);
1679
1680 return rc;
1681}
1682
be76f906
DB
1683/*
1684 * Prevent adding drive to OS for some corner cases such as a drive
1685 * undergoing a sanitize operation. Some OSes will continue to poll
1686 * the drive until the sanitize completes, which can take hours,
1687 * resulting in long bootup delays. Commands such as TUR, READ_CAP
1688 * are allowed, but READ/WRITE cause check condition. So the OS
1689 * cannot check/read the partition table.
1690 * Note: devices that have completed sanitize must be re-enabled
1691 * using the management utility.
1692 */
1693static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info,
1694 struct pqi_scsi_dev *device)
1695{
1696 u8 scsi_status;
1697 int rc;
1698 enum dma_data_direction dir;
1699 char *buffer;
1700 int buffer_length = 64;
1701 size_t sense_data_length;
1702 struct scsi_sense_hdr sshdr;
1703 struct pqi_raid_path_request request;
1704 struct pqi_raid_error_info error_info;
1705 bool offline = false; /* Assume keep online */
1706
1707 /* Do not check controllers. */
1708 if (pqi_is_hba_lunid(device->scsi3addr))
1709 return false;
1710
1711 /* Do not check LVs. */
1712 if (pqi_is_logical_device(device))
1713 return false;
1714
1715 buffer = kmalloc(buffer_length, GFP_KERNEL);
1716 if (!buffer)
1717 return false; /* Assume not offline */
1718
1719 /* Check for SANITIZE in progress using TUR */
1720 rc = pqi_build_raid_path_request(ctrl_info, &request,
1721 TEST_UNIT_READY, RAID_CTLR_LUNID, buffer,
1722 buffer_length, 0, &dir);
1723 if (rc)
1724 goto out; /* Assume not offline */
1725
1726 memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number));
1727
1728 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info);
1729
1730 if (rc)
1731 goto out; /* Assume not offline */
1732
1733 scsi_status = error_info.status;
1734 sense_data_length = get_unaligned_le16(&error_info.sense_data_length);
1735 if (sense_data_length == 0)
1736 sense_data_length =
1737 get_unaligned_le16(&error_info.response_data_length);
1738 if (sense_data_length) {
1739 if (sense_data_length > sizeof(error_info.data))
1740 sense_data_length = sizeof(error_info.data);
1741
1742 /*
1743 * Check for sanitize in progress: asc:0x04, ascq: 0x1b
1744 */
1745 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
1746 scsi_normalize_sense(error_info.data,
1747 sense_data_length, &sshdr) &&
1748 sshdr.sense_key == NOT_READY &&
1749 sshdr.asc == 0x04 &&
1750 sshdr.ascq == 0x1b) {
1751 device->device_offline = true;
1752 offline = true;
1753 goto out; /* Keep device offline */
1754 }
1755 }
1756
1757out:
1758 kfree(buffer);
1759 return offline;
1760}
1761
ce143793 1762static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1763 struct pqi_scsi_dev *device,
1764 struct bmic_identify_physical_device *id_phys)
1765{
1766 int rc;
1767
1768 if (device->is_expander_smp_device)
1769 return 0;
6c223761 1770
1771 if (pqi_is_logical_device(device))
1772 rc = pqi_get_logical_device_info(ctrl_info, device);
1773 else
1774 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
694c5d5b 1775
ce143793 1776 return rc;
1777}
1778
1779static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1780 struct pqi_scsi_dev *device)
1781{
1782 char *status;
1783 static const char unknown_state_str[] =
1784 "Volume is in an unknown state (%u)";
1785 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1786
1787 switch (device->volume_status) {
1788 case CISS_LV_OK:
1789 status = "Volume online";
1790 break;
1791 case CISS_LV_FAILED:
1792 status = "Volume failed";
1793 break;
1794 case CISS_LV_NOT_CONFIGURED:
1795 status = "Volume not configured";
1796 break;
1797 case CISS_LV_DEGRADED:
1798 status = "Volume degraded";
1799 break;
1800 case CISS_LV_READY_FOR_RECOVERY:
1801 status = "Volume ready for recovery operation";
1802 break;
1803 case CISS_LV_UNDERGOING_RECOVERY:
1804 status = "Volume undergoing recovery";
1805 break;
1806 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1807 status = "Wrong physical drive was replaced";
1808 break;
1809 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1810 status = "A physical drive not properly connected";
1811 break;
1812 case CISS_LV_HARDWARE_OVERHEATING:
1813 status = "Hardware is overheating";
1814 break;
1815 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1816 status = "Hardware has overheated";
1817 break;
1818 case CISS_LV_UNDERGOING_EXPANSION:
1819 status = "Volume undergoing expansion";
1820 break;
1821 case CISS_LV_NOT_AVAILABLE:
1822 status = "Volume waiting for transforming volume";
1823 break;
1824 case CISS_LV_QUEUED_FOR_EXPANSION:
1825 status = "Volume queued for expansion";
1826 break;
1827 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1828 status = "Volume disabled due to SCSI ID conflict";
1829 break;
1830 case CISS_LV_EJECTED:
1831 status = "Volume has been ejected";
1832 break;
1833 case CISS_LV_UNDERGOING_ERASE:
1834 status = "Volume undergoing background erase";
1835 break;
1836 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1837 status = "Volume ready for predictive spare rebuild";
1838 break;
1839 case CISS_LV_UNDERGOING_RPI:
1840 status = "Volume undergoing rapid parity initialization";
1841 break;
1842 case CISS_LV_PENDING_RPI:
1843 status = "Volume queued for rapid parity initialization";
1844 break;
1845 case CISS_LV_ENCRYPTED_NO_KEY:
1846 status = "Encrypted volume inaccessible - key not present";
1847 break;
1848 case CISS_LV_UNDERGOING_ENCRYPTION:
1849 status = "Volume undergoing encryption process";
1850 break;
1851 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1852 status = "Volume undergoing encryption re-keying process";
1853 break;
1854 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
d87d5474 1855 status = "Volume encrypted but encryption is disabled";
1856 break;
1857 case CISS_LV_PENDING_ENCRYPTION:
1858 status = "Volume pending migration to encrypted state";
1859 break;
1860 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1861 status = "Volume pending encryption rekeying";
1862 break;
1863 case CISS_LV_NOT_SUPPORTED:
1864 status = "Volume not supported on this controller";
1865 break;
1866 case CISS_LV_STATUS_UNAVAILABLE:
1867 status = "Volume status not available";
1868 break;
1869 default:
1870 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1871 unknown_state_str, device->volume_status);
1872 status = unknown_state_buffer;
1873 break;
1874 }
1875
1876 dev_info(&ctrl_info->pci_dev->dev,
1877 "scsi %d:%d:%d:%d %s\n",
1878 ctrl_info->scsi_host->host_no,
1879 device->bus, device->target, device->lun, status);
1880}
1881
1882static void pqi_rescan_worker(struct work_struct *work)
1883{
1884 struct pqi_ctrl_info *ctrl_info;
1885
1886 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1887 rescan_work);
1888
1889 pqi_scan_scsi_devices(ctrl_info);
1890}
1891
1892static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1893 struct pqi_scsi_dev *device)
1894{
1895 int rc;
1896
1897 if (pqi_is_logical_device(device))
1898 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1899 device->target, device->lun);
1900 else
1901 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1902
1903 return rc;
1904}
1905
18ff5f08 1906#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000)
1e46731e 1907
583891c9 1908static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
6c223761 1909{
1e46731e 1910 int rc;
904f2bfd 1911 int lun;
1e46731e 1912
1913 for (lun = 0; lun < device->multi_lun_device_lun_count; lun++) {
1914 rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
1915 PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
1916 if (rc)
1917 dev_err(&ctrl_info->pci_dev->dev,
1918 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
1919 ctrl_info->scsi_host->host_no, device->bus,
1920 device->target, lun,
1921 atomic_read(&device->scsi_cmds_outstanding[lun]));
1922 }
1e46731e 1923
1924 if (pqi_is_logical_device(device))
1925 scsi_remove_device(device->sdev);
1926 else
1927 pqi_remove_sas_device(device);
1928
1929 pqi_device_remove_start(device);
1930}
1931
1932/* Assumes the SCSI device list lock is held. */
1933
1934static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1935 int bus, int target, int lun)
1936{
1937 struct pqi_scsi_dev *device;
1938
1939 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1940 if (device->bus == bus && device->target == target && device->lun == lun)
1941 return device;
1942
1943 return NULL;
1944}
1945
583891c9 1946static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
1947{
1948 if (dev1->is_physical_device != dev2->is_physical_device)
1949 return false;
1950
1951 if (dev1->is_physical_device)
28ca6d87 1952 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;
6c223761 1953
583891c9 1954 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
1955}
1956
1957enum pqi_find_result {
1958 DEVICE_NOT_FOUND,
1959 DEVICE_CHANGED,
1960 DEVICE_SAME,
1961};
1962
1963static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
4d15ad38 1964 struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
1965{
1966 struct pqi_scsi_dev *device;
1967
1968 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1969 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
1970 *matching_device = device;
1971 if (pqi_device_equal(device_to_find, device)) {
1972 if (device_to_find->volume_offline)
1973 return DEVICE_CHANGED;
1974 return DEVICE_SAME;
1975 }
1976 return DEVICE_CHANGED;
1977 }
1978 }
1979
1980 return DEVICE_NOT_FOUND;
1981}
1982
1983static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1984{
1985 if (device->is_expander_smp_device)
1986 return "Enclosure SMP ";
1987
1988 return scsi_device_type(device->devtype);
1989}
1990
1991#define PQI_DEV_INFO_BUFFER_LENGTH 128
1992
1993static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1994 char *action, struct pqi_scsi_dev *device)
1995{
1996 ssize_t count;
1997 char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1998
a4256252 1999 count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
2000 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
2001
2002 if (device->target_lun_valid)
181aea89 2003 count += scnprintf(buffer + count,
2004 PQI_DEV_INFO_BUFFER_LENGTH - count,
2005 "%d:%d",
2006 device->target,
2007 device->lun);
2008 else
181aea89 2009 count += scnprintf(buffer + count,
2010 PQI_DEV_INFO_BUFFER_LENGTH - count,
2011 "-:-");
2012
2013 if (pqi_is_logical_device(device))
181aea89 2014 count += scnprintf(buffer + count,
2015 PQI_DEV_INFO_BUFFER_LENGTH - count,
2016 " %08x%08x",
2017 *((u32 *)&device->scsi3addr),
2018 *((u32 *)&device->scsi3addr[4]));
2019 else
181aea89 2020 count += scnprintf(buffer + count,
6de783f6 2021 PQI_DEV_INFO_BUFFER_LENGTH - count,
2022 " %016llx%016llx",
2023 get_unaligned_be64(&device->wwid[0]),
2024 get_unaligned_be64(&device->wwid[8]));
6de783f6 2025
181aea89 2026 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
6de783f6 2027 " %s %.8s %.16s ",
3d46a59a 2028 pqi_device_type(device),
6c223761 2029 device->vendor,
2030 device->model);
2031
2032 if (pqi_is_logical_device(device)) {
2033 if (device->devtype == TYPE_DISK)
181aea89 2034 count += scnprintf(buffer + count,
2035 PQI_DEV_INFO_BUFFER_LENGTH - count,
2036 "SSDSmartPathCap%c En%c %-12s",
2037 device->raid_bypass_configured ? '+' : '-',
2038 device->raid_bypass_enabled ? '+' : '-',
2039 pqi_raid_level_to_string(device->raid_level));
2040 } else {
181aea89 2041 count += scnprintf(buffer + count,
2042 PQI_DEV_INFO_BUFFER_LENGTH - count,
2043 "AIO%c", device->aio_enabled ? '+' : '-');
2044 if (device->devtype == TYPE_DISK ||
2045 device->devtype == TYPE_ZBC)
181aea89 2046 count += scnprintf(buffer + count,
2047 PQI_DEV_INFO_BUFFER_LENGTH - count,
2048 " qd=%-6d", device->queue_depth);
2049 }
2050
2051 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
2052}
2053
2054static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
2055{
2056 u32 raid_map1_size;
2057 u32 raid_map2_size;
2058
2059 if (raid_map1 == NULL || raid_map2 == NULL)
2060 return raid_map1 == raid_map2;
2061
2062 raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
2063 raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);
2064
2065 if (raid_map1_size != raid_map2_size)
2066 return false;
2067
2068 return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
2069}
2070
2071/* Assumes the SCSI device list lock is held. */
2072
2073static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
2074 struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
6c223761 2075{
2076 existing_device->device_type = new_device->device_type;
2077 existing_device->bus = new_device->bus;
2078 if (new_device->target_lun_valid) {
2079 existing_device->target = new_device->target;
2080 existing_device->lun = new_device->lun;
2081 existing_device->target_lun_valid = true;
2082 }
2083
2084 /* By definition, the scsi3addr and wwid fields are already the same. */
2085
2086 existing_device->is_physical_device = new_device->is_physical_device;
2087 memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
2088 memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
6c223761 2089 existing_device->sas_address = new_device->sas_address;
6c223761 2090 existing_device->queue_depth = new_device->queue_depth;
a9a68101 2091 existing_device->device_offline = false;
6c223761 2092
2093 if (pqi_is_logical_device(existing_device)) {
2094 existing_device->is_external_raid_device = new_device->is_external_raid_device;
2095
2096 if (existing_device->devtype == TYPE_DISK) {
2097 existing_device->raid_level = new_device->raid_level;
2098 existing_device->volume_status = new_device->volume_status;
2099 if (ctrl_info->logical_volume_rescan_needed)
2100 existing_device->rescan = true;
2101 memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
2102 if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
2103 kfree(existing_device->raid_map);
2104 existing_device->raid_map = new_device->raid_map;
2105 /* To prevent this from being freed later. */
2106 new_device->raid_map = NULL;
2107 }
2108 existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
2109 existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
2110 }
2111 } else {
2112 existing_device->aio_enabled = new_device->aio_enabled;
2113 existing_device->aio_handle = new_device->aio_handle;
2114 existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
2115 existing_device->active_path_index = new_device->active_path_index;
2116 existing_device->phy_id = new_device->phy_id;
2117 existing_device->path_map = new_device->path_map;
2118 existing_device->bay = new_device->bay;
2119 existing_device->box_index = new_device->box_index;
2120 existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
2121 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
2122 memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
2123 memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
2124
2125 existing_device->multi_lun_device_lun_count = new_device->multi_lun_device_lun_count;
2126 if (existing_device->multi_lun_device_lun_count == 0)
2127 existing_device->multi_lun_device_lun_count = 1;
2128 }
2129}
2130
2131static inline void pqi_free_device(struct pqi_scsi_dev *device)
2132{
2133 if (device) {
2134 kfree(device->raid_map);
2135 kfree(device);
2136 }
2137}
2138
2139/*
2140 * Called when exposing a new device to the OS fails in order to re-adjust
2141 * our internal SCSI device list to match the SCSI ML's view.
2142 */
2143
2144static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
2145 struct pqi_scsi_dev *device)
2146{
2147 unsigned long flags;
2148
2149 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2150 list_del(&device->scsi_device_list_entry);
2151 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2152
2153 /* Allow the device structure to be freed later. */
2154 device->keep_device = false;
2155}
2156
2157static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
2158{
2159 if (device->is_expander_smp_device)
2160 return device->sas_port != NULL;
2161
2162 return device->sdev != NULL;
2163}
2164
2165static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
2166 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
2167{
2168 int rc;
2169 unsigned int i;
2170 unsigned long flags;
2171 enum pqi_find_result find_result;
2172 struct pqi_scsi_dev *device;
2173 struct pqi_scsi_dev *next;
2174 struct pqi_scsi_dev *matching_device;
2175 LIST_HEAD(add_list);
2176 LIST_HEAD(delete_list);
2177
2178 /*
2179 * The idea here is to do as little work as possible while holding the
2180 * spinlock. That's why we go to great pains to defer anything other
2181 * than updating the internal device list until after we release the
2182 * spinlock.
2183 */
2184
2185 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2186
2187 /* Assume that all devices in the existing list have gone away. */
4d15ad38 2188 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
2189 device->device_gone = true;
2190
2191 for (i = 0; i < num_new_devices; i++) {
2192 device = new_device_list[i];
2193
2194 find_result = pqi_scsi_find_entry(ctrl_info, device,
694c5d5b 2195 &matching_device);
2196
2197 switch (find_result) {
2198 case DEVICE_SAME:
2199 /*
2200 * The newly found device is already in the existing
2201 * device list.
2202 */
2203 device->new_device = false;
2204 matching_device->device_gone = false;
27655e9d 2205 pqi_scsi_update_device(ctrl_info, matching_device, device);
2206 break;
2207 case DEVICE_NOT_FOUND:
2208 /*
2209 * The newly found device is NOT in the existing device
2210 * list.
2211 */
2212 device->new_device = true;
2213 break;
2214 case DEVICE_CHANGED:
2215 /*
2216 * The original device has gone away and we need to add
2217 * the new device.
2218 */
2219 device->new_device = true;
2220 break;
2221 }
2222 }
2223
2224 /* Process all devices that have gone away. */
2225 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2226 scsi_device_list_entry) {
2227 if (device->device_gone) {
819225b0 2228 list_del(&device->scsi_device_list_entry);
2229 list_add_tail(&device->delete_list_entry, &delete_list);
2230 }
2231 }
2232
2233 /* Process all new devices. */
2234 for (i = 0; i < num_new_devices; i++) {
2235 device = new_device_list[i];
2236 if (!device->new_device)
2237 continue;
2238 if (device->volume_offline)
2239 continue;
2240 list_add_tail(&device->scsi_device_list_entry,
2241 &ctrl_info->scsi_device_list);
2242 list_add_tail(&device->add_list_entry, &add_list);
2243 /* To prevent this device structure from being freed later. */
2244 device->keep_device = true;
2245 }
2246
2247 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2248
2249 /*
2250 * If OFA is in progress and there are devices that need to be deleted,
2251 * allow any pending reset operations to continue and unblock any SCSI
2252 * requests before removal.
2253 */
2254 if (pqi_ofa_in_progress(ctrl_info)) {
2255 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
2256 if (pqi_is_device_added(device))
2257 pqi_device_remove_start(device);
2258 pqi_ctrl_unblock_device_reset(ctrl_info);
2259 pqi_scsi_unblock_requests(ctrl_info);
2260 }
4fd22c13 2261
6c223761 2262 /* Remove all devices that have gone away. */
4d15ad38 2263 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
2264 if (device->volume_offline) {
2265 pqi_dev_info(ctrl_info, "offline", device);
2266 pqi_show_volume_status(ctrl_info, device);
4d15ad38 2267 } else {
819225b0 2268 pqi_dev_info(ctrl_info, "removed", device);
4d15ad38 2269 }
2270 if (pqi_is_device_added(device))
2271 pqi_remove_device(ctrl_info, device);
2272 list_del(&device->delete_list_entry);
2273 pqi_free_device(device);
2274 }
2275
2276 /*
2277 * Notify the SML of any existing device changes, such as
2278 * queue depth or device size.
6c223761 2279 */
2280 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
2281 if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
2282 device->advertised_queue_depth = device->queue_depth;
2283 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
2284 if (device->rescan) {
2285 scsi_rescan_device(&device->sdev->sdev_gendev);
2286 device->rescan = false;
2287 }
2288 }
2289 }
2290
2291 /* Expose any new devices. */
2292 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
3d46a59a 2293 if (!pqi_is_device_added(device)) {
6c223761 2294 rc = pqi_add_device(ctrl_info, device);
2295 if (rc == 0) {
2296 pqi_dev_info(ctrl_info, "added", device);
2297 } else {
2298 dev_warn(&ctrl_info->pci_dev->dev,
2299 "scsi %d:%d:%d:%d addition failed, device not added\n",
2300 ctrl_info->scsi_host->host_no,
2301 device->bus, device->target,
2302 device->lun);
2303 pqi_fixup_botched_add(ctrl_info, device);
2304 }
2305 }
6c223761 2306 }
2307
2308 ctrl_info->logical_volume_rescan_needed = false;
2309
2310}
2311
ce143793 2312static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
6c223761 2313{
2314 /*
2315 * Only support the HBA controller itself as a RAID
2316 * controller. If it's a RAID controller other than
2317 * the HBA itself (an external RAID controller, for
2318 * example), we don't support it.
2319 */
2320 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2321 !pqi_is_hba_lunid(device->scsi3addr))
583891c9 2322 return false;
6c223761 2323
ce143793 2324 return true;
2325}
2326
94086f5b 2327static inline bool pqi_skip_device(u8 *scsi3addr)
6c223761 2328{
2329 /* Ignore all masked devices. */
2330 if (MASKED_DEVICE(scsi3addr))
6c223761 2331 return true;
2332
2333 return false;
2334}
2335
2336static inline void pqi_mask_device(u8 *scsi3addr)
2337{
2338 scsi3addr[3] |= 0xc0;
2339}
2340
2341static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
2342{
2343 if (pqi_is_logical_device(device))
2344 return false;
2345
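	/*
	 * path_map carries one bit per reported path; x & (x - 1) clears the
	 * lowest set bit, so the expression below is nonzero only when more
	 * than one bit (i.e. more than one path) is set.
	 */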
2346 return (device->path_map & (device->path_map - 1)) != 0;
2347}
2348
2349static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2350{
583891c9 2351 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
2352}
2353
2354static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2355{
2356 int i;
2357 int rc;
8a994a04 2358 LIST_HEAD(new_device_list_head);
2359 struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
2360 struct report_log_lun_list *logdev_list = NULL;
2361 struct report_phys_lun_16byte_wwid *phys_lun;
2362 struct report_log_lun *log_lun;
2363 struct bmic_identify_physical_device *id_phys = NULL;
2364 u32 num_physicals;
2365 u32 num_logicals;
2366 struct pqi_scsi_dev **new_device_list = NULL;
2367 struct pqi_scsi_dev *device;
2368 struct pqi_scsi_dev *next;
2369 unsigned int num_new_devices;
2370 unsigned int num_valid_devices;
2371 bool is_physical_device;
2372 u8 *scsi3addr;
2373 unsigned int physical_index;
2374 unsigned int logical_index;
6c223761 2375 static char *out_of_memory_msg =
6de783f6 2376 "failed to allocate memory, device discovery stopped";
6c223761 2377
2378 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2379 if (rc)
2380 goto out;
2381
2382 if (physdev_list)
2383 num_physicals =
2384 get_unaligned_be32(&physdev_list->header.list_length)
2385 / sizeof(physdev_list->lun_entries[0]);
2386 else
2387 num_physicals = 0;
2388
2389 if (logdev_list)
2390 num_logicals =
2391 get_unaligned_be32(&logdev_list->header.list_length)
2392 / sizeof(logdev_list->lun_entries[0]);
2393 else
2394 num_logicals = 0;
2395
2396 if (num_physicals) {
2397 /*
2398 * We need this buffer for calls to pqi_get_physical_device_info()
2399 * below. We allocate it here instead of inside
2400 * pqi_get_physical_device_info() because it's a fairly large
2401 * buffer.
2402 */
2403 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2404 if (!id_phys) {
2405 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2406 out_of_memory_msg);
2407 rc = -ENOMEM;
2408 goto out;
2409 }
522bc026 2410
694c5d5b 2411 if (pqi_hide_vsep) {
522bc026 2412 for (i = num_physicals - 1; i >= 0; i--) {
2413 phys_lun = &physdev_list->lun_entries[i];
2414 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
2415 pqi_mask_device(phys_lun->lunid);
2416 break;
2417 }
2418 }
2419 }
2420 }
2421
2422 if (num_logicals &&
2423 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2424 ctrl_info->lv_drive_type_mix_valid = true;
2425
6c223761
KB
2426 num_new_devices = num_physicals + num_logicals;
2427
2428 new_device_list = kmalloc_array(num_new_devices,
2429 sizeof(*new_device_list),
2430 GFP_KERNEL);
2431 if (!new_device_list) {
2432 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2433 rc = -ENOMEM;
2434 goto out;
2435 }
2436
2437 for (i = 0; i < num_new_devices; i++) {
2438 device = kzalloc(sizeof(*device), GFP_KERNEL);
2439 if (!device) {
2440 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2441 out_of_memory_msg);
2442 rc = -ENOMEM;
2443 goto out;
2444 }
2445 list_add_tail(&device->new_device_list_entry,
2446 &new_device_list_head);
2447 }
2448
2449 device = NULL;
2450 num_valid_devices = 0;
2451 physical_index = 0;
2452 logical_index = 0;
2453
2454 for (i = 0; i < num_new_devices; i++) {
2455
2456 if ((!pqi_expose_ld_first && i < num_physicals) ||
2457 (pqi_expose_ld_first && i >= num_logicals)) {
6c223761 2458 is_physical_device = true;
2459 phys_lun = &physdev_list->lun_entries[physical_index++];
2460 log_lun = NULL;
2461 scsi3addr = phys_lun->lunid;
2462 } else {
2463 is_physical_device = false;
2464 phys_lun = NULL;
2465 log_lun = &logdev_list->lun_entries[logical_index++];
2466 scsi3addr = log_lun->lunid;
2467 }
2468
94086f5b 2469 if (is_physical_device && pqi_skip_device(scsi3addr))
2470 continue;
2471
2472 if (device)
2473 device = list_next_entry(device, new_device_list_entry);
2474 else
2475 device = list_first_entry(&new_device_list_head,
2476 struct pqi_scsi_dev, new_device_list_entry);
2477
2478 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2479 device->is_physical_device = is_physical_device;
3d46a59a 2480 if (is_physical_device) {
28ca6d87 2481 device->device_type = phys_lun->device_type;
ce143793 2482 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2483 device->is_expander_smp_device = true;
2484 } else {
2485 device->is_external_raid_device =
2486 pqi_is_external_raid_addr(scsi3addr);
3d46a59a 2487 }
6c223761 2488
2489 if (!pqi_is_supported_device(device))
2490 continue;
2491
2492 /* Do not present disks that the OS cannot fully probe */
2493 if (pqi_keep_device_offline(ctrl_info, device))
2494 continue;
2495
6c223761 2496 /* Gather information about the device. */
ce143793 2497 rc = pqi_get_device_info(ctrl_info, device, id_phys);
2498 if (rc == -ENOMEM) {
2499 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2500 out_of_memory_msg);
2501 goto out;
2502 }
2503 if (rc) {
2504 if (device->is_physical_device)
2505 dev_warn(&ctrl_info->pci_dev->dev,
2506 "obtaining device info failed, skipping physical device %016llx%016llx\n",
2507 get_unaligned_be64(&phys_lun->wwid[0]),
2508 get_unaligned_be64(&phys_lun->wwid[8]));
2509 else
2510 dev_warn(&ctrl_info->pci_dev->dev,
2511 "obtaining device info failed, skipping logical device %08x%08x\n",
2512 *((u32 *)&device->scsi3addr),
2513 *((u32 *)&device->scsi3addr[4]));
2514 rc = 0;
2515 continue;
2516 }
2517
2518 pqi_assign_bus_target_lun(device);
2519
6c223761 2520 if (device->is_physical_device) {
00598b05 2521 memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
28ca6d87 2522 if ((phys_lun->device_flags &
694c5d5b 2523 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
28ca6d87 2524 phys_lun->aio_handle) {
2525 device->aio_enabled = true;
2526 device->aio_handle =
28ca6d87 2527 phys_lun->aio_handle;
3d46a59a 2528 }
6c223761 2529 } else {
28ca6d87 2530 memcpy(device->volume_id, log_lun->volume_id,
2531 sizeof(device->volume_id));
2532 }
2533
291c2e00 2534 device->sas_address = get_unaligned_be64(&device->wwid[0]);
2535
2536 new_device_list[num_valid_devices++] = device;
2537 }
2538
2539 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2540
2541out:
2542 list_for_each_entry_safe(device, next, &new_device_list_head,
2543 new_device_list_entry) {
2544 if (device->keep_device)
2545 continue;
2546 list_del(&device->new_device_list_entry);
2547 pqi_free_device(device);
2548 }
2549
2550 kfree(new_device_list);
2551 kfree(physdev_list);
2552 kfree(logdev_list);
2553 kfree(id_phys);
2554
2555 return rc;
2556}
2557
2558static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2559{
2560 int rc;
2561 int mutex_acquired;
2562
2563 if (pqi_ctrl_offline(ctrl_info))
2564 return -ENXIO;
2565
2566 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2567
2568 if (!mutex_acquired) {
2569 if (pqi_ctrl_scan_blocked(ctrl_info))
2570 return -EBUSY;
5f310425 2571 pqi_schedule_rescan_worker_delayed(ctrl_info);
66f1c2b4 2572 return -EINPROGRESS;
530dd8a7 2573 }
6c223761 2574
2575 rc = pqi_update_scsi_devices(ctrl_info);
2576 if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2577 pqi_schedule_rescan_worker_delayed(ctrl_info);
2578
2579 mutex_unlock(&ctrl_info->scan_mutex);
2580
2581 return rc;
2582}
2583
2584static void pqi_scan_start(struct Scsi_Host *shost)
2585{
2586 struct pqi_ctrl_info *ctrl_info;
2587
2588 ctrl_info = shost_to_hba(shost);
2589
2590 pqi_scan_scsi_devices(ctrl_info);
2591}
2592
2593/* Returns TRUE if scan is finished. */
2594
2595static int pqi_scan_finished(struct Scsi_Host *shost,
2596 unsigned long elapsed_time)
2597{
2598 struct pqi_ctrl_info *ctrl_info;
2599
2600 ctrl_info = shost_priv(shost);
2601
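	/*
	 * pqi_scan_scsi_devices() holds scan_mutex for the duration of the
	 * device update, so a locked mutex means the scan is still running.
	 */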
2602 return !mutex_is_locked(&ctrl_info->scan_mutex);
2603}
2604
2605static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2606 struct raid_map *raid_map, u64 first_block)
2607{
2608 u32 volume_blk_size;
2609
2610 /*
2611 * Set the encryption tweak values based on logical block address.
2612 * If the block size is 512, the tweak value is equal to the LBA.
2613 * For other block sizes, tweak value is (LBA * block size) / 512.
2614 */
2615 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2616 if (volume_blk_size != 512)
2617 first_block = (first_block * volume_blk_size) / 512;
2618
2619 encryption_info->data_encryption_key_index =
2620 get_unaligned_le16(&raid_map->data_encryption_key_index);
2621 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2622 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2623}
2624
2625/*
588a63fe 2626 * Attempt to perform RAID bypass mapping for a logical volume I/O.
2627 */
2628
2629static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2630 struct pqi_scsi_dev_raid_map_data *rmd)
2631{
2632 bool is_supported = true;
2633
2634 switch (rmd->raid_level) {
2635 case SA_RAID_0:
2636 break;
2637 case SA_RAID_1:
2638 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2639 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2640 is_supported = false;
2641 break;
7a012c23 2642 case SA_RAID_TRIPLE:
2643 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2644 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2645 is_supported = false;
2646 break;
2647 case SA_RAID_5:
2648 if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2649 rmd->data_length > ctrl_info->max_write_raid_5_6))
2650 is_supported = false;
2651 break;
281a817f 2652 case SA_RAID_6:
2653 if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2654 rmd->data_length > ctrl_info->max_write_raid_5_6))
2655 is_supported = false;
2656 break;
2657 default:
2658 is_supported = false;
f6cc2a77 2659 break;
2660 }
2661
2662 return is_supported;
2663}
2664
2665#define PQI_RAID_BYPASS_INELIGIBLE 1
2666
281a817f 2667static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
583891c9 2668 struct pqi_scsi_dev_raid_map_data *rmd)
6c223761 2669{
2670 /* Check for valid opcode, get LBA and block count. */
2671 switch (scmd->cmnd[0]) {
2672 case WRITE_6:
281a817f 2673 rmd->is_write = true;
df561f66 2674 fallthrough;
6c223761 2675 case READ_6:
281a817f 2676 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
e018ef57 2677 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2678 rmd->block_cnt = (u32)scmd->cmnd[4];
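		/* In a 6-byte CDB, a transfer length of 0 means 256 blocks. */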
2679 if (rmd->block_cnt == 0)
2680 rmd->block_cnt = 256;
2681 break;
2682 case WRITE_10:
281a817f 2683 rmd->is_write = true;
df561f66 2684 fallthrough;
6c223761 2685 case READ_10:
2686 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2687 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2688 break;
2689 case WRITE_12:
281a817f 2690 rmd->is_write = true;
df561f66 2691 fallthrough;
6c223761 2692 case READ_12:
2693 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2694 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2695 break;
2696 case WRITE_16:
281a817f 2697 rmd->is_write = true;
df561f66 2698 fallthrough;
6c223761 2699 case READ_16:
2700 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2701 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2702 break;
2703 default:
2704 /* Process via normal I/O path. */
2705 return PQI_RAID_BYPASS_INELIGIBLE;
2706 }
2707
281a817f 2708 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
6c223761 2709
2710 return 0;
2711}
6c223761 2712
281a817f 2713static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
583891c9 2714 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2715{
2716#if BITS_PER_LONG == 32
2717 u64 tmpdiv;
2718#endif
2719
2720 rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2721
2722 /* Check for invalid block or wraparound. */
2723 if (rmd->last_block >=
2724 get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2725 rmd->last_block < rmd->first_block)
2726 return PQI_RAID_BYPASS_INELIGIBLE;
2727
281a817f 2728 rmd->data_disks_per_row =
583891c9 2729 get_unaligned_le16(&raid_map->data_disks_per_row);
2730 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2731 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2732
2733 /* Calculate stripe information for the request. */
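	/*
	 * Illustrative example: with strip_size = 128 and
	 * data_disks_per_row = 4, blocks_per_row is 512, so LBA 1000 falls
	 * in row 1 at row offset 488, which is column 3.
	 */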
281a817f 2734 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2735 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2736 return PQI_RAID_BYPASS_INELIGIBLE;
6c223761 2737#if BITS_PER_LONG == 32
2738 tmpdiv = rmd->first_block;
2739 do_div(tmpdiv, rmd->blocks_per_row);
2740 rmd->first_row = tmpdiv;
2741 tmpdiv = rmd->last_block;
2742 do_div(tmpdiv, rmd->blocks_per_row);
2743 rmd->last_row = tmpdiv;
2744 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2745 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2746 tmpdiv = rmd->first_row_offset;
2747 do_div(tmpdiv, rmd->strip_size);
2748 rmd->first_column = tmpdiv;
2749 tmpdiv = rmd->last_row_offset;
2750 do_div(tmpdiv, rmd->strip_size);
2751 rmd->last_column = tmpdiv;
6c223761 2752#else
2753 rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2754 rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2755 rmd->first_row_offset = (u32)(rmd->first_block -
583891c9 2756 (rmd->first_row * rmd->blocks_per_row));
281a817f 2757 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
583891c9 2758 rmd->blocks_per_row));
2759 rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2760 rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2761#endif
2762
2763 /* If this isn't a single row/column then give to the controller. */
281a817f 2764 if (rmd->first_row != rmd->last_row ||
583891c9 2765 rmd->first_column != rmd->last_column)
2766 return PQI_RAID_BYPASS_INELIGIBLE;
2767
2768 /* Proceeding with driver mapping. */
281a817f 2769 rmd->total_disks_per_row = rmd->data_disks_per_row +
6c223761 2770 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2771 rmd->map_row = ((u32)(rmd->first_row >>
2772 raid_map->parity_rotation_shift)) %
6c223761 2773 get_unaligned_le16(&raid_map->row_cnt);
281a817f 2774 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
583891c9 2775 rmd->first_column;
6c223761 2776
2777 return 0;
2778}
6c223761 2779
281a817f 2780static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
583891c9 2781 struct raid_map *raid_map)
281a817f 2782{
6c223761 2783#if BITS_PER_LONG == 32
281a817f 2784 u64 tmpdiv;
6c223761 2785#endif
6c223761 2786
2787 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2788 return PQI_RAID_BYPASS_INELIGIBLE;
2789
281a817f 2790 /* RAID 50/60 */
583891c9 2791 /* Verify first and last block are in same RAID group. */
281a817f 2792 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
6c223761 2793#if BITS_PER_LONG == 32
2794 tmpdiv = rmd->first_block;
2795 rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2796 tmpdiv = rmd->first_group;
2797 do_div(tmpdiv, rmd->blocks_per_row);
2798 rmd->first_group = tmpdiv;
2799 tmpdiv = rmd->last_block;
2800 rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2801 tmpdiv = rmd->last_group;
2802 do_div(tmpdiv, rmd->blocks_per_row);
2803 rmd->last_group = tmpdiv;
6c223761 2804#else
2805 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2806 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
6c223761 2807#endif
2808 if (rmd->first_group != rmd->last_group)
2809 return PQI_RAID_BYPASS_INELIGIBLE;
6c223761 2810
583891c9 2811 /* Verify request is in a single row of RAID 5/6. */
6c223761 2812#if BITS_PER_LONG == 32
2813 tmpdiv = rmd->first_block;
2814 do_div(tmpdiv, rmd->stripesize);
2815 rmd->first_row = tmpdiv;
2816 rmd->r5or6_first_row = tmpdiv;
2817 tmpdiv = rmd->last_block;
2818 do_div(tmpdiv, rmd->stripesize);
2819 rmd->r5or6_last_row = tmpdiv;
6c223761 2820#else
2821 rmd->first_row = rmd->r5or6_first_row =
2822 rmd->first_block / rmd->stripesize;
2823 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
6c223761 2824#endif
2825 if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2826 return PQI_RAID_BYPASS_INELIGIBLE;
6c223761 2827
583891c9 2828 /* Verify request is in a single column. */
6c223761 2829#if BITS_PER_LONG == 32
2830 tmpdiv = rmd->first_block;
2831 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2832 tmpdiv = rmd->first_row_offset;
2833 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2834 rmd->r5or6_first_row_offset = rmd->first_row_offset;
2835 tmpdiv = rmd->last_block;
2836 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2837 tmpdiv = rmd->r5or6_last_row_offset;
2838 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2839 tmpdiv = rmd->r5or6_first_row_offset;
2840 do_div(tmpdiv, rmd->strip_size);
2841 rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2842 tmpdiv = rmd->r5or6_last_row_offset;
2843 do_div(tmpdiv, rmd->strip_size);
2844 rmd->r5or6_last_column = tmpdiv;
6c223761 2845#else
281a817f 2846 rmd->first_row_offset = rmd->r5or6_first_row_offset =
2847 (u32)((rmd->first_block % rmd->stripesize) %
2848 rmd->blocks_per_row);
2849
2850 rmd->r5or6_last_row_offset =
2851 (u32)((rmd->last_block % rmd->stripesize) %
2852 rmd->blocks_per_row);
2853
2854 rmd->first_column =
583891c9 2855 rmd->r5or6_first_row_offset / rmd->strip_size;
2856 rmd->r5or6_first_column = rmd->first_column;
2857 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2858#endif
2859 if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2860 return PQI_RAID_BYPASS_INELIGIBLE;
2861
583891c9 2862 /* Request is eligible. */
2863 rmd->map_row =
2864 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2865 get_unaligned_le16(&raid_map->row_cnt);
6c223761 2866
2867 rmd->map_index = (rmd->first_group *
2868 (get_unaligned_le16(&raid_map->row_cnt) *
2869 rmd->total_disks_per_row)) +
2870 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
6c223761 2871
2872 if (rmd->is_write) {
2873 u32 index;
6c223761 2874
2875 /*
2876 * p_parity_it_nexus and q_parity_it_nexus are pointers to the
2877 * parity entries inside the device's raid_map.
2878 *
2879 * A device's RAID map is bounded by the number of RAID disks squared.
2880 *
2881 * The device's RAID map size is checked during device
2882 * initialization.
2883 */
2884 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2885 index *= rmd->total_disks_per_row;
2886 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2887
2888 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2889 if (rmd->raid_level == SA_RAID_6) {
2890 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2891 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2892 }
2893#if BITS_PER_LONG == 32
2894 tmpdiv = rmd->first_block;
2895 do_div(tmpdiv, rmd->blocks_per_row);
2896 rmd->row = tmpdiv;
2897#else
2898 rmd->row = rmd->first_block / rmd->blocks_per_row;
6c223761 2899#endif
2900 }
2901
2902 return 0;
2903}
2904
2905static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2906{
2907 /* Build the new CDB for the physical disk I/O. */
2908 if (rmd->disk_block > 0xffffffff) {
2909 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2910 rmd->cdb[1] = 0;
2911 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2912 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2913 rmd->cdb[14] = 0;
2914 rmd->cdb[15] = 0;
2915 rmd->cdb_length = 16;
2916 } else {
2917 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2918 rmd->cdb[1] = 0;
2919 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2920 rmd->cdb[6] = 0;
2921 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2922 rmd->cdb[9] = 0;
2923 rmd->cdb_length = 10;
2924 }
2925}
2926
7a012c23 2927static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
583891c9 2928 struct pqi_scsi_dev_raid_map_data *rmd)
2929{
2930 u32 index;
2931 u32 group;
2932
2933 group = rmd->map_index / rmd->data_disks_per_row;
2934
2935 index = rmd->map_index - (group * rmd->data_disks_per_row);
2936 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2937 index += rmd->data_disks_per_row;
2938 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2939 if (rmd->layout_map_count > 2) {
2940 index += rmd->data_disks_per_row;
2941 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2942 }
2943
2944 rmd->num_it_nexus_entries = rmd->layout_map_count;
2945}
2946
2947static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2948 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2949 struct pqi_queue_group *queue_group)
2950{
281a817f 2951 int rc;
2952 struct raid_map *raid_map;
2953 u32 group;
2954 u32 next_bypass_group;
2955 struct pqi_encryption_info *encryption_info_ptr;
2956 struct pqi_encryption_info encryption_info;
583891c9 2957 struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2958
2959 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2960 if (rc)
2961 return PQI_RAID_BYPASS_INELIGIBLE;
2962
2963 rmd.raid_level = device->raid_level;
6c223761 2964
6702d2c4 2965 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
2966 return PQI_RAID_BYPASS_INELIGIBLE;
2967
2968 if (unlikely(rmd.block_cnt == 0))
2969 return PQI_RAID_BYPASS_INELIGIBLE;
2970
2971 raid_map = device->raid_map;
6c223761 2972
2973 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2974 if (rc)
2975 return PQI_RAID_BYPASS_INELIGIBLE;
6c223761 2976
2977 if (device->raid_level == SA_RAID_1 ||
2978 device->raid_level == SA_RAID_TRIPLE) {
2979 if (rmd.is_write) {
2980 pqi_calc_aio_r1_nexus(raid_map, &rmd);
2981 } else {
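			/*
			 * Read requests are rotated across the mirror copies
			 * via next_bypass_group.
			 */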
5d8fbce0 2982 group = device->next_bypass_group[rmd.map_index];
2983 next_bypass_group = group + 1;
2984 if (next_bypass_group >= rmd.layout_map_count)
2985 next_bypass_group = 0;
5d8fbce0 2986 device->next_bypass_group[rmd.map_index] = next_bypass_group;
2987 rmd.map_index += group * rmd.data_disks_per_row;
2988 }
281a817f 2989 } else if ((device->raid_level == SA_RAID_5 ||
2990 device->raid_level == SA_RAID_6) &&
2991 (rmd.layout_map_count > 1 || rmd.is_write)) {
2992 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
2993 if (rc)
2994 return PQI_RAID_BYPASS_INELIGIBLE;
2995 }
2996
2997 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
2998 return PQI_RAID_BYPASS_INELIGIBLE;
2999
3000 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
3001 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
3002 rmd.first_row * rmd.strip_size +
3003 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
3004 rmd.disk_block_cnt = rmd.block_cnt;
3005
3006 /* Handle differing logical/physical block sizes. */
3007 if (raid_map->phys_blk_shift) {
3008 rmd.disk_block <<= raid_map->phys_blk_shift;
3009 rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
3010 }
3011
281a817f 3012 if (unlikely(rmd.disk_block_cnt > 0xffff))
3013 return PQI_RAID_BYPASS_INELIGIBLE;
3014
281a817f 3015 pqi_set_aio_cdb(&rmd);
6c223761 3016
583891c9 3017 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
3018 if (rmd.data_length > device->max_transfer_encrypted)
3019 return PQI_RAID_BYPASS_INELIGIBLE;
583891c9 3020 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
3021 encryption_info_ptr = &encryption_info;
3022 } else {
3023 encryption_info_ptr = NULL;
3024 }
3025
3026 if (rmd.is_write) {
3027 switch (device->raid_level) {
3028 case SA_RAID_1:
3029 case SA_RAID_TRIPLE:
3030 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
3031 encryption_info_ptr, device, &rmd);
3032 case SA_RAID_5:
3033 case SA_RAID_6:
3034 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
583891c9 3035 encryption_info_ptr, device, &rmd);
6702d2c4 3036 }
3037 }
3038
3039 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
3040 rmd.cdb, rmd.cdb_length, queue_group,
2a47834d 3041 encryption_info_ptr, true, false);
3042}
3043
3044#define PQI_STATUS_IDLE 0x0
3045
3046#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
3047#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
3048
3049#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
3050#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
3051#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
3052#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
3053#define PQI_DEVICE_STATE_ERROR 0x4
3054
3055#define PQI_MODE_READY_TIMEOUT_SECS 30
3056#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
3057
3058static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
3059{
3060 struct pqi_device_registers __iomem *pqi_registers;
3061 unsigned long timeout;
3062 u64 signature;
3063 u8 status;
3064
3065 pqi_registers = ctrl_info->pqi_registers;
42dc0426 3066 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
3067
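	/* The three readiness checks below share a single timeout window. */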
3068 while (1) {
3069 signature = readq(&pqi_registers->signature);
3070 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
3071 sizeof(signature)) == 0)
3072 break;
3073 if (time_after(jiffies, timeout)) {
3074 dev_err(&ctrl_info->pci_dev->dev,
3075 "timed out waiting for PQI signature\n");
3076 return -ETIMEDOUT;
3077 }
3078 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3079 }
3080
3081 while (1) {
3082 status = readb(&pqi_registers->function_and_status_code);
3083 if (status == PQI_STATUS_IDLE)
3084 break;
3085 if (time_after(jiffies, timeout)) {
3086 dev_err(&ctrl_info->pci_dev->dev,
3087 "timed out waiting for PQI IDLE\n");
3088 return -ETIMEDOUT;
3089 }
3090 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3091 }
3092
3093 while (1) {
3094 if (readl(&pqi_registers->device_status) ==
3095 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
3096 break;
3097 if (time_after(jiffies, timeout)) {
3098 dev_err(&ctrl_info->pci_dev->dev,
3099 "timed out waiting for PQI all registers ready\n");
3100 return -ETIMEDOUT;
3101 }
3102 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3103 }
3104
3105 return 0;
3106}
3107
3108static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
3109{
3110 struct pqi_scsi_dev *device;
3111
3112 device = io_request->scmd->device->hostdata;
588a63fe 3113 device->raid_bypass_enabled = false;
376fb880 3114 device->aio_enabled = false;
3115}
3116
d87d5474 3117static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
3118{
3119 struct pqi_ctrl_info *ctrl_info;
e58081a7 3120 struct pqi_scsi_dev *device;
6c223761 3121
3122 device = sdev->hostdata;
3123 if (device->device_offline)
3124 return;
3125
3126 device->device_offline = true;
3127 ctrl_info = shost_to_hba(sdev->host);
3128 pqi_schedule_rescan_worker(ctrl_info);
a9a68101 3129 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
3130 path, ctrl_info->scsi_host->host_no, device->bus,
3131 device->target, device->lun);
3132}
3133
3134static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
3135{
3136 u8 scsi_status;
3137 u8 host_byte;
3138 struct scsi_cmnd *scmd;
3139 struct pqi_raid_error_info *error_info;
3140 size_t sense_data_length;
3141 int residual_count;
3142 int xfer_count;
3143 struct scsi_sense_hdr sshdr;
3144
3145 scmd = io_request->scmd;
3146 if (!scmd)
3147 return;
3148
3149 error_info = io_request->error_info;
3150 scsi_status = error_info->status;
3151 host_byte = DID_OK;
3152
3153 switch (error_info->data_out_result) {
3154 case PQI_DATA_IN_OUT_GOOD:
3155 break;
3156 case PQI_DATA_IN_OUT_UNDERFLOW:
3157 xfer_count =
3158 get_unaligned_le32(&error_info->data_out_transferred);
3159 residual_count = scsi_bufflen(scmd) - xfer_count;
3160 scsi_set_resid(scmd, residual_count);
3161 if (xfer_count < scmd->underflow)
3162 host_byte = DID_SOFT_ERROR;
3163 break;
3164 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
3165 case PQI_DATA_IN_OUT_ABORTED:
3166 host_byte = DID_ABORT;
3167 break;
3168 case PQI_DATA_IN_OUT_TIMEOUT:
3169 host_byte = DID_TIME_OUT;
3170 break;
3171 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
3172 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
3173 case PQI_DATA_IN_OUT_BUFFER_ERROR:
3174 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
3175 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
3176 case PQI_DATA_IN_OUT_ERROR:
3177 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
3178 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
3179 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
3180 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
3181 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
3182 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
3183 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
3184 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
3185 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
3186 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
3187 default:
3188 host_byte = DID_ERROR;
3189 break;
3190 }
3191
3192 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
3193 if (sense_data_length == 0)
3194 sense_data_length =
3195 get_unaligned_le16(&error_info->response_data_length);
3196 if (sense_data_length) {
3197 if (sense_data_length > sizeof(error_info->data))
3198 sense_data_length = sizeof(error_info->data);
3199
3200 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3201 scsi_normalize_sense(error_info->data,
3202 sense_data_length, &sshdr) &&
3203 sshdr.sense_key == HARDWARE_ERROR &&
8ef860ae 3204 sshdr.asc == 0x3e) {
3205 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
3206 struct pqi_scsi_dev *device = scmd->device->hostdata;
3207
3208 switch (sshdr.ascq) {
3209 case 0x1: /* LOGICAL UNIT FAILURE */
3210 if (printk_ratelimit())
3211 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
3212 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3213 pqi_take_device_offline(scmd->device, "RAID");
3214 host_byte = DID_NO_CONNECT;
3215 break;
3216
3217 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
3218 if (printk_ratelimit())
3219 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
3220 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3221 break;
3222 }
3223 }
3224
3225 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3226 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3227 memcpy(scmd->sense_buffer, error_info->data,
3228 sense_data_length);
3229 }
3230
3231 scmd->result = scsi_status;
3232 set_host_byte(scmd, host_byte);
3233}
3234
3235static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3236{
3237 u8 scsi_status;
3238 u8 host_byte;
3239 struct scsi_cmnd *scmd;
3240 struct pqi_aio_error_info *error_info;
3241 size_t sense_data_length;
3242 int residual_count;
3243 int xfer_count;
3244 bool device_offline;
94a68c81 3245 struct pqi_scsi_dev *device;
3246
3247 scmd = io_request->scmd;
3248 error_info = io_request->error_info;
3249 host_byte = DID_OK;
3250 sense_data_length = 0;
3251 device_offline = false;
94a68c81 3252 device = scmd->device->hostdata;
3253
3254 switch (error_info->service_response) {
3255 case PQI_AIO_SERV_RESPONSE_COMPLETE:
3256 scsi_status = error_info->status;
3257 break;
3258 case PQI_AIO_SERV_RESPONSE_FAILURE:
3259 switch (error_info->status) {
3260 case PQI_AIO_STATUS_IO_ABORTED:
3261 scsi_status = SAM_STAT_TASK_ABORTED;
3262 break;
3263 case PQI_AIO_STATUS_UNDERRUN:
3264 scsi_status = SAM_STAT_GOOD;
3265 residual_count = get_unaligned_le32(
3266 &error_info->residual_count);
3267 scsi_set_resid(scmd, residual_count);
3268 xfer_count = scsi_bufflen(scmd) - residual_count;
3269 if (xfer_count < scmd->underflow)
3270 host_byte = DID_SOFT_ERROR;
3271 break;
3272 case PQI_AIO_STATUS_OVERRUN:
3273 scsi_status = SAM_STAT_GOOD;
3274 break;
3275 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3276 pqi_aio_path_disabled(io_request);
3277 if (pqi_is_multipath_device(device)) {
3278 pqi_device_remove_start(device);
3279 host_byte = DID_NO_CONNECT;
3280 scsi_status = SAM_STAT_CHECK_CONDITION;
3281 } else {
3282 scsi_status = SAM_STAT_GOOD;
3283 io_request->status = -EAGAIN;
3284 }
3285 break;
3286 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3287 case PQI_AIO_STATUS_INVALID_DEVICE:
3288 if (!io_request->raid_bypass) {
3289 device_offline = true;
3290 pqi_take_device_offline(scmd->device, "AIO");
3291 host_byte = DID_NO_CONNECT;
3292 }
3293 scsi_status = SAM_STAT_CHECK_CONDITION;
3294 break;
3295 case PQI_AIO_STATUS_IO_ERROR:
3296 default:
3297 scsi_status = SAM_STAT_CHECK_CONDITION;
3298 break;
3299 }
3300 break;
3301 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3302 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3303 scsi_status = SAM_STAT_GOOD;
3304 break;
3305 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3306 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3307 default:
3308 scsi_status = SAM_STAT_CHECK_CONDITION;
3309 break;
3310 }
3311
3312 if (error_info->data_present) {
3313 sense_data_length =
3314 get_unaligned_le16(&error_info->data_length);
3315 if (sense_data_length) {
3316 if (sense_data_length > sizeof(error_info->data))
3317 sense_data_length = sizeof(error_info->data);
3318 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3319 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3320 memcpy(scmd->sense_buffer, error_info->data,
3321 sense_data_length);
3322 }
3323 }
3324
3325 if (device_offline && sense_data_length == 0)
f2b1e9c6 3326 scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
6c223761
KB
3327
3328 scmd->result = scsi_status;
3329 set_host_byte(scmd, host_byte);
3330}
3331
3332static void pqi_process_io_error(unsigned int iu_type,
3333 struct pqi_io_request *io_request)
3334{
3335 switch (iu_type) {
3336 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3337 pqi_process_raid_io_error(io_request);
3338 break;
3339 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3340 pqi_process_aio_io_error(io_request);
3341 break;
3342 }
3343}
3344
18ff5f08 3345static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
3346 struct pqi_task_management_response *response)
3347{
3348 int rc;
3349
3350 switch (response->response_code) {
b17f0486
KB
3351 case SOP_TMF_COMPLETE:
3352 case SOP_TMF_FUNCTION_SUCCEEDED:
6c223761
KB
3353 rc = 0;
3354 break;
3406384b
MR
3355 case SOP_TMF_REJECTED:
3356 rc = -EAGAIN;
3357 break;
4e7d2602
MM
3358 case SOP_RC_INCORRECT_LOGICAL_UNIT:
3359 rc = -ENODEV;
3360 break;
6c223761
KB
3361 default:
3362 rc = -EIO;
3363 break;
3364 }
3365
18ff5f08
KB
3366 if (rc)
3367 dev_err(&ctrl_info->pci_dev->dev,
3368 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3369
6c223761
KB
3370 return rc;
3371}
3372
5d1f03e6
MB
3373static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
3374 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
6c223761 3375{
5d1f03e6 3376 pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
9e68cccc
KB
3377}
3378
3379static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
6c223761 3380{
9e68cccc 3381 int num_responses;
6c223761
KB
3382 pqi_index_t oq_pi;
3383 pqi_index_t oq_ci;
3384 struct pqi_io_request *io_request;
3385 struct pqi_io_response *response;
3386 u16 request_id;
3387
3388 num_responses = 0;
3389 oq_ci = queue_group->oq_ci_copy;
3390
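 /*
  * Response consumption follows the usual PQI ring discipline: the
  * controller advances the producer index (oq_pi) in host memory as it
  * posts responses, and the loop below consumes elements until the
  * local consumer index catches up.  The updated consumer index is only
  * written back to the controller register after the batch is drained
  * (see the end of this function).
  */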
3391 while (1) {
dac12fbc 3392 oq_pi = readl(queue_group->oq_pi);
9e68cccc 3393 if (oq_pi >= ctrl_info->num_elements_per_oq) {
5d1f03e6 3394 pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
9e68cccc
KB
3395 dev_err(&ctrl_info->pci_dev->dev,
3396 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3397 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3398 return -1;
3399 }
6c223761
KB
3400 if (oq_pi == oq_ci)
3401 break;
3402
3403 num_responses++;
3404 response = queue_group->oq_element_array +
3405 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3406
3407 request_id = get_unaligned_le16(&response->request_id);
9e68cccc 3408 if (request_id >= ctrl_info->max_io_slots) {
5d1f03e6 3409 pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
9e68cccc
KB
3410 dev_err(&ctrl_info->pci_dev->dev,
3411 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
3412 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3413 return -1;
3414 }
6c223761
KB
3415
3416 io_request = &ctrl_info->io_request_pool[request_id];
9e68cccc 3417 if (atomic_read(&io_request->refcount) == 0) {
5d1f03e6 3418 pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
9e68cccc
KB
3419 dev_err(&ctrl_info->pci_dev->dev,
3420 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
3421 request_id, oq_pi, oq_ci);
3422 return -1;
3423 }
6c223761
KB
3424
3425 switch (response->header.iu_type) {
3426 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3427 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2ba55c98
KB
3428 if (io_request->scmd)
3429 io_request->scmd->result = 0;
df561f66 3430 fallthrough;
6c223761
KB
3431 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3432 break;
b212c251
KB
3433 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3434 io_request->status =
3435 get_unaligned_le16(
583891c9 3436 &((struct pqi_vendor_general_response *)response)->status);
b212c251 3437 break;
6c223761 3438 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
18ff5f08
KB
3439 io_request->status = pqi_interpret_task_management_response(ctrl_info,
3440 (void *)response);
6c223761
KB
3441 break;
3442 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3443 pqi_aio_path_disabled(io_request);
3444 io_request->status = -EAGAIN;
3445 break;
3446 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3447 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3448 io_request->error_info = ctrl_info->error_buffer +
3449 (get_unaligned_le16(&response->error_index) *
3450 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
9e68cccc 3451 pqi_process_io_error(response->header.iu_type, io_request);
6c223761
KB
3452 break;
3453 default:
5d1f03e6 3454 pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
6c223761 3455 dev_err(&ctrl_info->pci_dev->dev,
9e68cccc
KB
3456 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
3457 response->header.iu_type, oq_pi, oq_ci);
3458 return -1;
6c223761
KB
3459 }
3460
9e68cccc 3461 io_request->io_complete_callback(io_request, io_request->context);
6c223761
KB
3462
3463 /*
3464 * Note that the I/O request structure CANNOT BE TOUCHED after
3465 * returning from the I/O completion callback!
3466 */
6c223761
KB
3467 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3468 }
3469
3470 if (num_responses) {
3471 queue_group->oq_ci_copy = oq_ci;
3472 writel(oq_ci, queue_group->oq_ci);
3473 }
3474
3475 return num_responses;
3476}
3477
3478static inline unsigned int pqi_num_elements_free(unsigned int pi,
df7a1fcf 3479 unsigned int ci, unsigned int elements_in_queue)
6c223761
KB
3480{
3481 unsigned int num_elements_used;
3482
3483 if (pi >= ci)
3484 num_elements_used = pi - ci;
3485 else
3486 num_elements_used = elements_in_queue - ci + pi;
3487
3488 return elements_in_queue - num_elements_used - 1;
3489}
3490
98f87667 3491static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
3492 struct pqi_event_acknowledge_request *iu, size_t iu_length)
3493{
3494 pqi_index_t iq_pi;
3495 pqi_index_t iq_ci;
3496 unsigned long flags;
3497 void *next_element;
6c223761
KB
3498 struct pqi_queue_group *queue_group;
3499
3500 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3501 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3502
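 /*
  * Event acknowledgements are posted on the RAID path of the default
  * queue group.  If the inbound queue is momentarily full, the loop
  * below drops the submit lock and retries until an element frees up
  * or the controller is detected offline.
  */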
6c223761
KB
3503 while (1) {
3504 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3505
3506 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
dac12fbc 3507 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
6c223761
KB
3508
3509 if (pqi_num_elements_free(iq_pi, iq_ci,
3510 ctrl_info->num_elements_per_iq))
3511 break;
3512
3513 spin_unlock_irqrestore(
3514 &queue_group->submit_lock[RAID_PATH], flags);
3515
98f87667 3516 if (pqi_ctrl_offline(ctrl_info))
6c223761 3517 return;
6c223761
KB
3518 }
3519
3520 next_element = queue_group->iq_element_array[RAID_PATH] +
3521 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3522
3523 memcpy(next_element, iu, iu_length);
3524
3525 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
6c223761
KB
3526 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3527
3528 /*
3529 * This write notifies the controller that an IU is available to be
3530 * processed.
3531 */
3532 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3533
3534 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
6c223761
KB
3535}
3536
3537static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3538 struct pqi_event *event)
3539{
3540 struct pqi_event_acknowledge_request request;
3541
3542 memset(&request, 0, sizeof(request));
3543
3544 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3545 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3546 &request.header.iu_length);
3547 request.event_type = event->event_type;
06b41e0d
KB
3548 put_unaligned_le16(event->event_id, &request.event_id);
3549 put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
6c223761 3550
98f87667 3551 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
6c223761
KB
3552}
3553
4fd22c13
MR
3554#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30
3555#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1
3556
3557static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3558 struct pqi_ctrl_info *ctrl_info)
6c223761 3559{
4fd22c13 3560 u8 status;
583891c9 3561 unsigned long timeout;
6c223761 3562
42dc0426 3563 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
6c223761 3564
4fd22c13
MR
3565 while (1) {
3566 status = pqi_read_soft_reset_status(ctrl_info);
3567 if (status & PQI_SOFT_RESET_INITIATE)
3568 return RESET_INITIATE_DRIVER;
3569
3570 if (status & PQI_SOFT_RESET_ABORT)
3571 return RESET_ABORT;
3572
4ccc354b
KB
3573 if (!sis_is_firmware_running(ctrl_info))
3574 return RESET_NORESPONSE;
3575
4fd22c13 3576 if (time_after(jiffies, timeout)) {
4ccc354b 3577 dev_warn(&ctrl_info->pci_dev->dev,
4fd22c13
MR
3578 "timed out waiting for soft reset status\n");
3579 return RESET_TIMEDOUT;
3580 }
3581
4fd22c13
MR
3582 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3583 }
3584}
3585
4ccc354b 3586static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
4fd22c13
MR
3587{
3588 int rc;
2790cd4d 3589 unsigned int delay_secs;
4ccc354b
KB
3590 enum pqi_soft_reset_status reset_status;
3591
3592 if (ctrl_info->soft_reset_handshake_supported)
3593 reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3594 else
3595 reset_status = RESET_INITIATE_FIRMWARE;
4fd22c13 3596
2790cd4d 3597 delay_secs = PQI_POST_RESET_DELAY_SECS;
4fd22c13
MR
3598
3599 switch (reset_status) {
4fd22c13 3600 case RESET_TIMEDOUT:
2790cd4d 3601 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
4ccc354b
KB
3602 fallthrough;
3603 case RESET_INITIATE_DRIVER:
4fd22c13 3604 dev_info(&ctrl_info->pci_dev->dev,
4ccc354b 3605 "Online Firmware Activation: resetting controller\n");
4fd22c13 3606 sis_soft_reset(ctrl_info);
df561f66 3607 fallthrough;
4fd22c13 3608 case RESET_INITIATE_FIRMWARE:
4ccc354b
KB
3609 ctrl_info->pqi_mode_enabled = false;
3610 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
2790cd4d 3611 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
4fd22c13 3612 pqi_ofa_free_host_buffer(ctrl_info);
4ccc354b 3613 pqi_ctrl_ofa_done(ctrl_info);
4fd22c13 3614 dev_info(&ctrl_info->pci_dev->dev,
4ccc354b
KB
3615 "Online Firmware Activation: %s\n",
3616 rc == 0 ? "SUCCESS" : "FAILED");
4fd22c13
MR
3617 break;
3618 case RESET_ABORT:
4fd22c13 3619 dev_info(&ctrl_info->pci_dev->dev,
4ccc354b
KB
3620 "Online Firmware Activation ABORTED\n");
3621 if (ctrl_info->soft_reset_handshake_supported)
3622 pqi_clear_soft_reset_status(ctrl_info);
3623 pqi_ofa_free_host_buffer(ctrl_info);
3624 pqi_ctrl_ofa_done(ctrl_info);
3625 pqi_ofa_ctrl_unquiesce(ctrl_info);
4fd22c13
MR
3626 break;
3627 case RESET_NORESPONSE:
4ccc354b
KB
3628 fallthrough;
3629 default:
3630 dev_err(&ctrl_info->pci_dev->dev,
3631 "unexpected Online Firmware Activation reset status: 0x%x\n",
3632 reset_status);
4fd22c13 3633 pqi_ofa_free_host_buffer(ctrl_info);
4ccc354b
KB
3634 pqi_ctrl_ofa_done(ctrl_info);
3635 pqi_ofa_ctrl_unquiesce(ctrl_info);
5d1f03e6 3636 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
4fd22c13
MR
3637 break;
3638 }
3639}
3640
2790cd4d 3641static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
4fd22c13 3642{
2790cd4d 3643 struct pqi_ctrl_info *ctrl_info;
4fd22c13 3644
2790cd4d 3645 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
4fd22c13 3646
2790cd4d
KB
3647 pqi_ctrl_ofa_start(ctrl_info);
3648 pqi_ofa_setup_host_buffer(ctrl_info);
3649 pqi_ofa_host_memory_update(ctrl_info);
3650}
4fd22c13 3651
2790cd4d
KB
3652static void pqi_ofa_quiesce_worker(struct work_struct *work)
3653{
3654 struct pqi_ctrl_info *ctrl_info;
3655 struct pqi_event *event;
4fd22c13 3656
2790cd4d
KB
3657 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3658
3659 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3660
3661 pqi_ofa_ctrl_quiesce(ctrl_info);
3662 pqi_acknowledge_event(ctrl_info, event);
3663 pqi_process_soft_reset(ctrl_info);
3664}
4fd22c13 3665
2790cd4d
KB
3666static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3667 struct pqi_event *event)
3668{
3669 bool ack_event;
3670
3671 ack_event = true;
3672
3673 switch (event->event_id) {
3674 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
4fd22c13 3675 dev_info(&ctrl_info->pci_dev->dev,
2790cd4d
KB
3676 "received Online Firmware Activation memory allocation request\n");
3677 schedule_work(&ctrl_info->ofa_memory_alloc_work);
3678 break;
3679 case PQI_EVENT_OFA_QUIESCE:
4fd22c13 3680 dev_info(&ctrl_info->pci_dev->dev,
2790cd4d
KB
3681 "received Online Firmware Activation quiesce request\n");
3682 schedule_work(&ctrl_info->ofa_quiesce_work);
3683 ack_event = false;
3684 break;
3685 case PQI_EVENT_OFA_CANCELED:
4fd22c13 3686 dev_info(&ctrl_info->pci_dev->dev,
2790cd4d
KB
3687 "received Online Firmware Activation cancel request: reason: %u\n",
3688 ctrl_info->ofa_cancel_reason);
3689 pqi_ofa_free_host_buffer(ctrl_info);
3690 pqi_ctrl_ofa_done(ctrl_info);
3691 break;
3692 default:
3693 dev_err(&ctrl_info->pci_dev->dev,
3694 "received unknown Online Firmware Activation request: event ID: %u\n",
3695 event->event_id);
3696 break;
4fd22c13
MR
3697 }
3698
2790cd4d 3699 return ack_event;
4fd22c13
MR
3700}
3701
6ce3cfb3
KB
3702static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
3703{
3704 unsigned long flags;
3705 struct pqi_scsi_dev *device;
3706
3707 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
3708
3709 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
3710 if (device->raid_bypass_enabled)
3711 device->raid_bypass_enabled = false;
3712
3713 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
3714}
3715
6c223761
KB
3716static void pqi_event_worker(struct work_struct *work)
3717{
3718 unsigned int i;
2790cd4d 3719 bool rescan_needed;
6c223761 3720 struct pqi_ctrl_info *ctrl_info;
6a50d6ad 3721 struct pqi_event *event;
2790cd4d 3722 bool ack_event;
6c223761
KB
3723
3724 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3725
7561a7e4 3726 pqi_ctrl_busy(ctrl_info);
ae0c189d 3727 pqi_wait_if_ctrl_blocked(ctrl_info);
5f310425
KB
3728 if (pqi_ctrl_offline(ctrl_info))
3729 goto out;
3730
2790cd4d 3731 rescan_needed = false;
6a50d6ad 3732 event = ctrl_info->events;
6c223761 3733 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
6a50d6ad
KB
3734 if (event->pending) {
3735 event->pending = false;
4fd22c13 3736 if (event->event_type == PQI_EVENT_TYPE_OFA) {
2790cd4d
KB
3737 ack_event = pqi_ofa_process_event(ctrl_info, event);
3738 } else {
3739 ack_event = true;
3740 rescan_needed = true;
27655e9d
MR
3741 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
3742 ctrl_info->logical_volume_rescan_needed = true;
6ce3cfb3
KB
3743 else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
3744 pqi_disable_raid_bypass(ctrl_info);
4fd22c13 3745 }
2790cd4d
KB
3746 if (ack_event)
3747 pqi_acknowledge_event(ctrl_info, event);
6c223761 3748 }
6a50d6ad 3749 event++;
6c223761
KB
3750 }
3751
4e7d2602
MM
3752#define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ)
3753
2790cd4d 3754 if (rescan_needed)
4e7d2602
MM
3755 pqi_schedule_rescan_worker_with_delay(ctrl_info,
3756 PQI_RESCAN_WORK_FOR_EVENT_DELAY);
2790cd4d 3757
5f310425 3758out:
7561a7e4 3759 pqi_ctrl_unbusy(ctrl_info);
6c223761
KB
3760}
3761
42dc0426 3762#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
6c223761 3763
74a0f573 3764static void pqi_heartbeat_timer_handler(struct timer_list *t)
6c223761
KB
3765{
3766 int num_interrupts;
98f87667 3767 u32 heartbeat_count;
583891c9 3768 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
6c223761 3769
98f87667
KB
3770 pqi_check_ctrl_health(ctrl_info);
3771 if (pqi_ctrl_offline(ctrl_info))
061ef06a
KB
3772 return;
3773
6c223761 3774 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
98f87667 3775 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
6c223761
KB
3776
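 /*
  * The heartbeat counter is only consulted when no interrupts have
  * arrived since the previous timer tick: a controller that is still
  * completing I/O is implicitly alive, while an idle controller whose
  * heartbeat counter has stopped advancing is taken offline.
  */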
3777 if (num_interrupts == ctrl_info->previous_num_interrupts) {
98f87667
KB
3778 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3779 dev_err(&ctrl_info->pci_dev->dev,
3780 "no heartbeat detected - last heartbeat count: %u\n",
3781 heartbeat_count);
5d1f03e6 3782 pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
6c223761
KB
3783 return;
3784 }
6c223761 3785 } else {
98f87667 3786 ctrl_info->previous_num_interrupts = num_interrupts;
6c223761
KB
3787 }
3788
98f87667 3789 ctrl_info->previous_heartbeat_count = heartbeat_count;
6c223761
KB
3790 mod_timer(&ctrl_info->heartbeat_timer,
3791 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3792}
3793
3794static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3795{
98f87667
KB
3796 if (!ctrl_info->heartbeat_counter)
3797 return;
3798
6c223761
KB
3799 ctrl_info->previous_num_interrupts =
3800 atomic_read(&ctrl_info->num_interrupts);
98f87667
KB
3801 ctrl_info->previous_heartbeat_count =
3802 pqi_read_heartbeat_counter(ctrl_info);
6c223761 3803
6c223761
KB
3804 ctrl_info->heartbeat_timer.expires =
3805 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
061ef06a 3806 add_timer(&ctrl_info->heartbeat_timer);
6c223761
KB
3807}
3808
3809static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3810{
98f87667 3811 del_timer_sync(&ctrl_info->heartbeat_timer);
6c223761
KB
3812}
3813
2790cd4d
KB
3814static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3815 struct pqi_event *event, struct pqi_event_response *response)
4fd22c13 3816{
2790cd4d
KB
3817 switch (event->event_id) {
3818 case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3819 ctrl_info->ofa_bytes_requested =
3820 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3821 break;
3822 case PQI_EVENT_OFA_CANCELED:
3823 ctrl_info->ofa_cancel_reason =
3824 get_unaligned_le16(&response->data.ofa_cancelled.reason);
3825 break;
4fd22c13
MR
3826 }
3827}
3828
9e68cccc 3829static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
6c223761 3830{
9e68cccc 3831 int num_events;
6c223761
KB
3832 pqi_index_t oq_pi;
3833 pqi_index_t oq_ci;
3834 struct pqi_event_queue *event_queue;
3835 struct pqi_event_response *response;
6a50d6ad 3836 struct pqi_event *event;
6c223761
KB
3837 int event_index;
3838
3839 event_queue = &ctrl_info->event_queue;
3840 num_events = 0;
6c223761
KB
3841 oq_ci = event_queue->oq_ci_copy;
3842
3843 while (1) {
dac12fbc 3844 oq_pi = readl(event_queue->oq_pi);
9e68cccc 3845 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
5d1f03e6 3846 pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
9e68cccc
KB
3847 dev_err(&ctrl_info->pci_dev->dev,
3848 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3849 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3850 return -1;
3851 }
3852
6c223761
KB
3853 if (oq_pi == oq_ci)
3854 break;
3855
3856 num_events++;
9e68cccc 3857 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
6c223761 3858
583891c9 3859 event_index = pqi_event_type_to_event_index(response->event_type);
6c223761 3860
9e68cccc
KB
3861 if (event_index >= 0 && response->request_acknowledge) {
3862 event = &ctrl_info->events[event_index];
3863 event->pending = true;
3864 event->event_type = response->event_type;
06b41e0d
KB
3865 event->event_id = get_unaligned_le16(&response->event_id);
3866 event->additional_event_id =
3867 get_unaligned_le32(&response->additional_event_id);
9e68cccc 3868 if (event->event_type == PQI_EVENT_TYPE_OFA)
2790cd4d 3869 pqi_ofa_capture_event_payload(ctrl_info, event, response);
6c223761
KB
3870 }
3871
3872 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3873 }
3874
3875 if (num_events) {
3876 event_queue->oq_ci_copy = oq_ci;
3877 writel(oq_ci, event_queue->oq_ci);
98f87667 3878 schedule_work(&ctrl_info->event_work);
6c223761
KB
3879 }
3880
3881 return num_events;
3882}
3883
061ef06a
KB
3884#define PQI_LEGACY_INTX_MASK 0x1
3885
583891c9 3886static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
061ef06a
KB
3887{
3888 u32 intx_mask;
3889 struct pqi_device_registers __iomem *pqi_registers;
3890 volatile void __iomem *register_addr;
3891
3892 pqi_registers = ctrl_info->pqi_registers;
3893
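 /*
  * The legacy INTx mask is controlled through a set/clear register
  * pair: writing the mask bit to legacy_intx_mask_clear unmasks
  * (enables) the interrupt, while writing the same bit to
  * legacy_intx_mask_set masks it.
  */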
3894 if (enable_intx)
3895 register_addr = &pqi_registers->legacy_intx_mask_clear;
3896 else
3897 register_addr = &pqi_registers->legacy_intx_mask_set;
3898
3899 intx_mask = readl(register_addr);
3900 intx_mask |= PQI_LEGACY_INTX_MASK;
3901 writel(intx_mask, register_addr);
3902}
3903
3904static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3905 enum pqi_irq_mode new_mode)
3906{
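 /*
  * Only what actually needs to change is touched for each transition:
  * entering INTx mode unmasks the legacy interrupt and switches the
  * SIS interface to INTx, while leaving INTx mode masks the legacy
  * interrupt again (and re-enables MSI-X when that is the new mode).
  */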
3907 switch (ctrl_info->irq_mode) {
3908 case IRQ_MODE_MSIX:
3909 switch (new_mode) {
3910 case IRQ_MODE_MSIX:
3911 break;
3912 case IRQ_MODE_INTX:
3913 pqi_configure_legacy_intx(ctrl_info, true);
061ef06a
KB
3914 sis_enable_intx(ctrl_info);
3915 break;
3916 case IRQ_MODE_NONE:
061ef06a
KB
3917 break;
3918 }
3919 break;
3920 case IRQ_MODE_INTX:
3921 switch (new_mode) {
3922 case IRQ_MODE_MSIX:
3923 pqi_configure_legacy_intx(ctrl_info, false);
061ef06a
KB
3924 sis_enable_msix(ctrl_info);
3925 break;
3926 case IRQ_MODE_INTX:
3927 break;
3928 case IRQ_MODE_NONE:
3929 pqi_configure_legacy_intx(ctrl_info, false);
061ef06a
KB
3930 break;
3931 }
3932 break;
3933 case IRQ_MODE_NONE:
3934 switch (new_mode) {
3935 case IRQ_MODE_MSIX:
3936 sis_enable_msix(ctrl_info);
3937 break;
3938 case IRQ_MODE_INTX:
3939 pqi_configure_legacy_intx(ctrl_info, true);
3940 sis_enable_intx(ctrl_info);
3941 break;
3942 case IRQ_MODE_NONE:
3943 break;
3944 }
3945 break;
3946 }
3947
3948 ctrl_info->irq_mode = new_mode;
3949}
3950
3951#define PQI_LEGACY_INTX_PENDING 0x1
3952
3953static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3954{
3955 bool valid_irq;
3956 u32 intx_status;
3957
3958 switch (ctrl_info->irq_mode) {
3959 case IRQ_MODE_MSIX:
3960 valid_irq = true;
3961 break;
3962 case IRQ_MODE_INTX:
583891c9 3963 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
061ef06a
KB
3964 if (intx_status & PQI_LEGACY_INTX_PENDING)
3965 valid_irq = true;
3966 else
3967 valid_irq = false;
3968 break;
3969 case IRQ_MODE_NONE:
3970 default:
3971 valid_irq = false;
3972 break;
3973 }
3974
3975 return valid_irq;
3976}
3977
6c223761
KB
3978static irqreturn_t pqi_irq_handler(int irq, void *data)
3979{
3980 struct pqi_ctrl_info *ctrl_info;
3981 struct pqi_queue_group *queue_group;
9e68cccc
KB
3982 int num_io_responses_handled;
3983 int num_events_handled;
6c223761
KB
3984
3985 queue_group = data;
3986 ctrl_info = queue_group->ctrl_info;
3987
061ef06a 3988 if (!pqi_is_valid_irq(ctrl_info))
6c223761
KB
3989 return IRQ_NONE;
3990
9e68cccc
KB
3991 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3992 if (num_io_responses_handled < 0)
3993 goto out;
6c223761 3994
9e68cccc
KB
3995 if (irq == ctrl_info->event_irq) {
3996 num_events_handled = pqi_process_event_intr(ctrl_info);
3997 if (num_events_handled < 0)
3998 goto out;
3999 } else {
4000 num_events_handled = 0;
4001 }
6c223761 4002
9e68cccc 4003 if (num_io_responses_handled + num_events_handled > 0)
6c223761
KB
4004 atomic_inc(&ctrl_info->num_interrupts);
4005
4006 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
4007 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
4008
9e68cccc 4009out:
6c223761
KB
4010 return IRQ_HANDLED;
4011}
4012
4013static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
4014{
d91d7820 4015 struct pci_dev *pci_dev = ctrl_info->pci_dev;
6c223761
KB
4016 int i;
4017 int rc;
4018
d91d7820 4019 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
6c223761
KB
4020
4021 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
d91d7820 4022 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
52198226 4023 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
6c223761 4024 if (rc) {
d91d7820 4025 dev_err(&pci_dev->dev,
6c223761 4026 "irq %u init failed with error %d\n",
d91d7820 4027 pci_irq_vector(pci_dev, i), rc);
6c223761
KB
4028 return rc;
4029 }
4030 ctrl_info->num_msix_vectors_initialized++;
4031 }
4032
4033 return 0;
4034}
4035
98bf061b
KB
4036static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
4037{
4038 int i;
4039
4040 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
4041 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
4042 &ctrl_info->queue_groups[i]);
4043
4044 ctrl_info->num_msix_vectors_initialized = 0;
4045}
4046
6c223761
KB
4047static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4048{
98bf061b 4049 int num_vectors_enabled;
cf15c3e7
MM
4050 unsigned int flags = PCI_IRQ_MSIX;
4051
4052 if (!pqi_disable_managed_interrupts)
4053 flags |= PCI_IRQ_AFFINITY;
6c223761 4054
98bf061b 4055 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
52198226 4056 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
cf15c3e7 4057 flags);
98bf061b 4058 if (num_vectors_enabled < 0) {
6c223761 4059 dev_err(&ctrl_info->pci_dev->dev,
98bf061b
KB
4060 "MSI-X init failed with error %d\n",
4061 num_vectors_enabled);
4062 return num_vectors_enabled;
6c223761
KB
4063 }
4064
98bf061b 4065 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
061ef06a 4066 ctrl_info->irq_mode = IRQ_MODE_MSIX;
6c223761
KB
4067 return 0;
4068}
4069
98bf061b
KB
4070static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4071{
4072 if (ctrl_info->num_msix_vectors_enabled) {
4073 pci_free_irq_vectors(ctrl_info->pci_dev);
4074 ctrl_info->num_msix_vectors_enabled = 0;
4075 }
4076}
4077
6c223761
KB
4078static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
4079{
4080 unsigned int i;
4081 size_t alloc_length;
4082 size_t element_array_length_per_iq;
4083 size_t element_array_length_per_oq;
4084 void *element_array;
dac12fbc 4085 void __iomem *next_queue_index;
6c223761
KB
4086 void *aligned_pointer;
4087 unsigned int num_inbound_queues;
4088 unsigned int num_outbound_queues;
4089 unsigned int num_queue_indexes;
4090 struct pqi_queue_group *queue_group;
4091
4092 element_array_length_per_iq =
4093 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
4094 ctrl_info->num_elements_per_iq;
4095 element_array_length_per_oq =
4096 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
4097 ctrl_info->num_elements_per_oq;
4098 num_inbound_queues = ctrl_info->num_queue_groups * 2;
4099 num_outbound_queues = ctrl_info->num_queue_groups;
4100 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
4101
4102 aligned_pointer = NULL;
4103
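 /*
  * The loops below perform a dry run starting from a NULL pointer
  * purely to compute how much aligned memory is required for all
  * element arrays and queue indexes.  After dma_alloc_coherent()
  * succeeds, the same walk is repeated over the real buffer to carve
  * out the individual queue structures in the same order.
  */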
4104 for (i = 0; i < num_inbound_queues; i++) {
4105 aligned_pointer = PTR_ALIGN(aligned_pointer,
4106 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4107 aligned_pointer += element_array_length_per_iq;
4108 }
4109
4110 for (i = 0; i < num_outbound_queues; i++) {
4111 aligned_pointer = PTR_ALIGN(aligned_pointer,
4112 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4113 aligned_pointer += element_array_length_per_oq;
4114 }
4115
4116 aligned_pointer = PTR_ALIGN(aligned_pointer,
4117 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4118 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4119 PQI_EVENT_OQ_ELEMENT_LENGTH;
4120
4121 for (i = 0; i < num_queue_indexes; i++) {
4122 aligned_pointer = PTR_ALIGN(aligned_pointer,
4123 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4124 aligned_pointer += sizeof(pqi_index_t);
4125 }
4126
4127 alloc_length = (size_t)aligned_pointer +
4128 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4129
e1d213bd
KB
4130 alloc_length += PQI_EXTRA_SGL_MEMORY;
4131
6c223761 4132 ctrl_info->queue_memory_base =
750afb08
LC
4133 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4134 &ctrl_info->queue_memory_base_dma_handle,
4135 GFP_KERNEL);
6c223761 4136
d87d5474 4137 if (!ctrl_info->queue_memory_base)
6c223761 4138 return -ENOMEM;
6c223761
KB
4139
4140 ctrl_info->queue_memory_length = alloc_length;
4141
4142 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
4143 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4144
4145 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4146 queue_group = &ctrl_info->queue_groups[i];
4147 queue_group->iq_element_array[RAID_PATH] = element_array;
4148 queue_group->iq_element_array_bus_addr[RAID_PATH] =
4149 ctrl_info->queue_memory_base_dma_handle +
4150 (element_array - ctrl_info->queue_memory_base);
4151 element_array += element_array_length_per_iq;
4152 element_array = PTR_ALIGN(element_array,
4153 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4154 queue_group->iq_element_array[AIO_PATH] = element_array;
4155 queue_group->iq_element_array_bus_addr[AIO_PATH] =
4156 ctrl_info->queue_memory_base_dma_handle +
4157 (element_array - ctrl_info->queue_memory_base);
4158 element_array += element_array_length_per_iq;
4159 element_array = PTR_ALIGN(element_array,
4160 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4161 }
4162
4163 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4164 queue_group = &ctrl_info->queue_groups[i];
4165 queue_group->oq_element_array = element_array;
4166 queue_group->oq_element_array_bus_addr =
4167 ctrl_info->queue_memory_base_dma_handle +
4168 (element_array - ctrl_info->queue_memory_base);
4169 element_array += element_array_length_per_oq;
4170 element_array = PTR_ALIGN(element_array,
4171 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4172 }
4173
4174 ctrl_info->event_queue.oq_element_array = element_array;
4175 ctrl_info->event_queue.oq_element_array_bus_addr =
4176 ctrl_info->queue_memory_base_dma_handle +
4177 (element_array - ctrl_info->queue_memory_base);
4178 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4179 PQI_EVENT_OQ_ELEMENT_LENGTH;
4180
dac12fbc 4181 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
6c223761
KB
4182 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4183
4184 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4185 queue_group = &ctrl_info->queue_groups[i];
4186 queue_group->iq_ci[RAID_PATH] = next_queue_index;
4187 queue_group->iq_ci_bus_addr[RAID_PATH] =
4188 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
4189 (next_queue_index -
4190 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
4191 next_queue_index += sizeof(pqi_index_t);
4192 next_queue_index = PTR_ALIGN(next_queue_index,
4193 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4194 queue_group->iq_ci[AIO_PATH] = next_queue_index;
4195 queue_group->iq_ci_bus_addr[AIO_PATH] =
4196 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
4197 (next_queue_index -
4198 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
4199 next_queue_index += sizeof(pqi_index_t);
4200 next_queue_index = PTR_ALIGN(next_queue_index,
4201 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4202 queue_group->oq_pi = next_queue_index;
4203 queue_group->oq_pi_bus_addr =
4204 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
4205 (next_queue_index -
4206 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
4207 next_queue_index += sizeof(pqi_index_t);
4208 next_queue_index = PTR_ALIGN(next_queue_index,
4209 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4210 }
4211
4212 ctrl_info->event_queue.oq_pi = next_queue_index;
4213 ctrl_info->event_queue.oq_pi_bus_addr =
4214 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
4215 (next_queue_index -
4216 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
4217
4218 return 0;
4219}
4220
4221static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
4222{
4223 unsigned int i;
4224 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4225 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4226
4227 /*
4228 * Initialize the backpointers to the controller structure in
4229 * each operational queue group structure.
4230 */
4231 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4232 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
4233
4234 /*
4235 * Assign IDs to all operational queues. Note that the IDs
4236 * assigned to operational IQs are independent of the IDs
4237 * assigned to operational OQs.
4238 */
4239 ctrl_info->event_queue.oq_id = next_oq_id++;
4240 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4241 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
4242 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
4243 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
4244 }
4245
4246 /*
4247 * Assign MSI-X table entry indexes to all queues. Note that the
4248 * interrupt for the event queue is shared with the first queue group.
4249 */
4250 ctrl_info->event_queue.int_msg_num = 0;
4251 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4252 ctrl_info->queue_groups[i].int_msg_num = i;
4253
4254 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4255 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
4256 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
4257 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
4258 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
4259 }
4260}
4261
4262static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4263{
4264 size_t alloc_length;
4265 struct pqi_admin_queues_aligned *admin_queues_aligned;
4266 struct pqi_admin_queues *admin_queues;
4267
4268 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4269 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4270
4271 ctrl_info->admin_queue_memory_base =
750afb08
LC
4272 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4273 &ctrl_info->admin_queue_memory_base_dma_handle,
4274 GFP_KERNEL);
6c223761
KB
4275
4276 if (!ctrl_info->admin_queue_memory_base)
4277 return -ENOMEM;
4278
4279 ctrl_info->admin_queue_memory_length = alloc_length;
4280
4281 admin_queues = &ctrl_info->admin_queues;
4282 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4283 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4284 admin_queues->iq_element_array =
4285 &admin_queues_aligned->iq_element_array;
4286 admin_queues->oq_element_array =
4287 &admin_queues_aligned->oq_element_array;
583891c9
KB
4288 admin_queues->iq_ci =
4289 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
dac12fbc
KB
4290 admin_queues->oq_pi =
4291 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
6c223761
KB
4292
4293 admin_queues->iq_element_array_bus_addr =
4294 ctrl_info->admin_queue_memory_base_dma_handle +
4295 (admin_queues->iq_element_array -
4296 ctrl_info->admin_queue_memory_base);
4297 admin_queues->oq_element_array_bus_addr =
4298 ctrl_info->admin_queue_memory_base_dma_handle +
4299 (admin_queues->oq_element_array -
4300 ctrl_info->admin_queue_memory_base);
4301 admin_queues->iq_ci_bus_addr =
4302 ctrl_info->admin_queue_memory_base_dma_handle +
583891c9
KB
4303 ((void __iomem *)admin_queues->iq_ci -
4304 (void __iomem *)ctrl_info->admin_queue_memory_base);
6c223761
KB
4305 admin_queues->oq_pi_bus_addr =
4306 ctrl_info->admin_queue_memory_base_dma_handle +
dac12fbc
KB
4307 ((void __iomem *)admin_queues->oq_pi -
4308 (void __iomem *)ctrl_info->admin_queue_memory_base);
6c223761
KB
4309
4310 return 0;
4311}
4312
42dc0426 4313#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
6c223761
KB
4314#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
4315
4316static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4317{
4318 struct pqi_device_registers __iomem *pqi_registers;
4319 struct pqi_admin_queues *admin_queues;
4320 unsigned long timeout;
4321 u8 status;
4322 u32 reg;
4323
4324 pqi_registers = ctrl_info->pqi_registers;
4325 admin_queues = &ctrl_info->admin_queues;
4326
4327 writeq((u64)admin_queues->iq_element_array_bus_addr,
4328 &pqi_registers->admin_iq_element_array_addr);
4329 writeq((u64)admin_queues->oq_element_array_bus_addr,
4330 &pqi_registers->admin_oq_element_array_addr);
4331 writeq((u64)admin_queues->iq_ci_bus_addr,
4332 &pqi_registers->admin_iq_ci_addr);
4333 writeq((u64)admin_queues->oq_pi_bus_addr,
4334 &pqi_registers->admin_oq_pi_addr);
4335
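 /*
  * The value written below packs the admin queue parameters into a
  * single 32-bit register: bits 7:0 hold the admin IQ element count,
  * bits 15:8 the admin OQ element count, and bits 23:16 the interrupt
  * message number.
  */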
4336 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
e655d469 4337 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
6c223761
KB
4338 (admin_queues->int_msg_num << 16);
4339 writel(reg, &pqi_registers->admin_iq_num_elements);
583891c9 4340
6c223761
KB
4341 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4342 &pqi_registers->function_and_status_code);
4343
4344 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4345 while (1) {
987d3560 4346 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
6c223761
KB
4347 status = readb(&pqi_registers->function_and_status_code);
4348 if (status == PQI_STATUS_IDLE)
4349 break;
4350 if (time_after(jiffies, timeout))
4351 return -ETIMEDOUT;
6c223761
KB
4352 }
4353
4354 /*
4355 * The offset registers are not initialized to the correct
4356 * offsets until *after* the create admin queue pair command
4357 * completes successfully.
4358 */
4359 admin_queues->iq_pi = ctrl_info->iomem_base +
4360 PQI_DEVICE_REGISTERS_OFFSET +
4361 readq(&pqi_registers->admin_iq_pi_offset);
4362 admin_queues->oq_ci = ctrl_info->iomem_base +
4363 PQI_DEVICE_REGISTERS_OFFSET +
4364 readq(&pqi_registers->admin_oq_ci_offset);
4365
4366 return 0;
4367}
4368
4369static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4370 struct pqi_general_admin_request *request)
4371{
4372 struct pqi_admin_queues *admin_queues;
4373 void *next_element;
4374 pqi_index_t iq_pi;
4375
4376 admin_queues = &ctrl_info->admin_queues;
4377 iq_pi = admin_queues->iq_pi_copy;
4378
4379 next_element = admin_queues->iq_element_array +
4380 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4381
4382 memcpy(next_element, request, sizeof(*request));
4383
4384 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4385 admin_queues->iq_pi_copy = iq_pi;
4386
4387 /*
4388 * This write notifies the controller that an IU is available to be
4389 * processed.
4390 */
4391 writel(iq_pi, admin_queues->iq_pi);
4392}
4393
13bede67
KB
4394#define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
4395
6c223761
KB
4396static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4397 struct pqi_general_admin_response *response)
4398{
4399 struct pqi_admin_queues *admin_queues;
4400 pqi_index_t oq_pi;
4401 pqi_index_t oq_ci;
4402 unsigned long timeout;
4403
4404 admin_queues = &ctrl_info->admin_queues;
4405 oq_ci = admin_queues->oq_ci_copy;
4406
42dc0426 4407 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
6c223761
KB
4408
4409 while (1) {
dac12fbc 4410 oq_pi = readl(admin_queues->oq_pi);
6c223761
KB
4411 if (oq_pi != oq_ci)
4412 break;
4413 if (time_after(jiffies, timeout)) {
4414 dev_err(&ctrl_info->pci_dev->dev,
4415 "timed out waiting for admin response\n");
4416 return -ETIMEDOUT;
4417 }
13bede67
KB
4418 if (!sis_is_firmware_running(ctrl_info))
4419 return -ENXIO;
6c223761
KB
4420 usleep_range(1000, 2000);
4421 }
4422
4423 memcpy(response, admin_queues->oq_element_array +
4424 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4425
4426 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4427 admin_queues->oq_ci_copy = oq_ci;
4428 writel(oq_ci, admin_queues->oq_ci);
4429
4430 return 0;
4431}
4432
4433static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4434 struct pqi_queue_group *queue_group, enum pqi_io_path path,
4435 struct pqi_io_request *io_request)
4436{
4437 struct pqi_io_request *next;
4438 void *next_element;
4439 pqi_index_t iq_pi;
4440 pqi_index_t iq_ci;
4441 size_t iu_length;
4442 unsigned long flags;
4443 unsigned int num_elements_needed;
4444 unsigned int num_elements_to_end_of_queue;
4445 size_t copy_count;
4446 struct pqi_iu_header *request;
4447
4448 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4449
376fb880
KB
4450 if (io_request) {
4451 io_request->queue_group = queue_group;
6c223761
KB
4452 list_add_tail(&io_request->request_list_entry,
4453 &queue_group->request_list[path]);
376fb880 4454 }
6c223761
KB
4455
4456 iq_pi = queue_group->iq_pi_copy[path];
4457
4458 list_for_each_entry_safe(io_request, next,
4459 &queue_group->request_list[path], request_list_entry) {
4460
4461 request = io_request->iu;
4462
4463 iu_length = get_unaligned_le16(&request->iu_length) +
4464 PQI_REQUEST_HEADER_LENGTH;
4465 num_elements_needed =
4466 DIV_ROUND_UP(iu_length,
4467 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4468
dac12fbc 4469 iq_ci = readl(queue_group->iq_ci[path]);
6c223761
KB
4470
4471 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4472 ctrl_info->num_elements_per_iq))
4473 break;
4474
4475 put_unaligned_le16(queue_group->oq_id,
4476 &request->response_queue_id);
4477
4478 next_element = queue_group->iq_element_array[path] +
4479 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4480
4481 num_elements_to_end_of_queue =
4482 ctrl_info->num_elements_per_iq - iq_pi;
4483
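 /*
  * An IU that spans the end of the circular element array is copied in
  * two pieces: first the bytes that fit before the end of the array,
  * then the remainder wrapping around to element 0.
  */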
4484 if (num_elements_needed <= num_elements_to_end_of_queue) {
4485 memcpy(next_element, request, iu_length);
4486 } else {
4487 copy_count = num_elements_to_end_of_queue *
4488 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4489 memcpy(next_element, request, copy_count);
4490 memcpy(queue_group->iq_element_array[path],
4491 (u8 *)request + copy_count,
4492 iu_length - copy_count);
4493 }
4494
4495 iq_pi = (iq_pi + num_elements_needed) %
4496 ctrl_info->num_elements_per_iq;
4497
4498 list_del(&io_request->request_list_entry);
4499 }
4500
4501 if (iq_pi != queue_group->iq_pi_copy[path]) {
4502 queue_group->iq_pi_copy[path] = iq_pi;
4503 /*
4504 * This write notifies the controller that one or more IUs are
4505 * available to be processed.
4506 */
4507 writel(iq_pi, queue_group->iq_pi[path]);
4508 }
4509
4510 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4511}
4512
1f37e992
KB
4513#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
4514
4515static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4516 struct completion *wait)
4517{
4518 int rc;
1f37e992
KB
4519
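 /*
  * The wait is broken into PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS
  * chunks so that controller health can be re-checked between
  * timeouts; a controller that has gone offline fails the wait with
  * -ENXIO instead of hanging forever.
  */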
4520 while (1) {
4521 if (wait_for_completion_io_timeout(wait,
42dc0426 4522 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
1f37e992
KB
4523 rc = 0;
4524 break;
4525 }
4526
4527 pqi_check_ctrl_health(ctrl_info);
4528 if (pqi_ctrl_offline(ctrl_info)) {
4529 rc = -ENXIO;
4530 break;
4531 }
1f37e992
KB
4532 }
4533
4534 return rc;
4535}
4536
6c223761
KB
4537static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4538 void *context)
4539{
4540 struct completion *waiting = context;
4541
4542 complete(waiting);
4543}
4544
694c5d5b
KB
4545static int pqi_process_raid_io_error_synchronous(
4546 struct pqi_raid_error_info *error_info)
26b390ab
KB
4547{
4548 int rc = -EIO;
4549
4550 switch (error_info->data_out_result) {
4551 case PQI_DATA_IN_OUT_GOOD:
4552 if (error_info->status == SAM_STAT_GOOD)
4553 rc = 0;
4554 break;
4555 case PQI_DATA_IN_OUT_UNDERFLOW:
4556 if (error_info->status == SAM_STAT_GOOD ||
4557 error_info->status == SAM_STAT_CHECK_CONDITION)
4558 rc = 0;
4559 break;
4560 case PQI_DATA_IN_OUT_ABORTED:
4561 rc = PQI_CMD_STATUS_ABORTED;
4562 break;
4563 }
4564
4565 return rc;
4566}
4567
ae0c189d
KB
4568static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4569{
4570 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4571}
4572
6c223761
KB
4573static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4574 struct pqi_iu_header *request, unsigned int flags,
ae0c189d 4575 struct pqi_raid_error_info *error_info)
6c223761 4576{
957c5ab1 4577 int rc = 0;
6c223761 4578 struct pqi_io_request *io_request;
6c223761 4579 size_t iu_length;
957c5ab1 4580 DECLARE_COMPLETION_ONSTACK(wait);
6c223761 4581
6c223761
KB
4582 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4583 if (down_interruptible(&ctrl_info->sync_request_sem))
4584 return -ERESTARTSYS;
4585 } else {
ae0c189d 4586 down(&ctrl_info->sync_request_sem);
6c223761
KB
4587 }
4588
7561a7e4 4589 pqi_ctrl_busy(ctrl_info);
ae0c189d
KB
4590 /*
4591 * Wait for other admin queue updates such as:
4592 * config table changes, OFA memory updates, ...
4593 */
4594 if (pqi_is_blockable_request(request))
4595 pqi_wait_if_ctrl_blocked(ctrl_info);
7561a7e4 4596
376fb880
KB
4597 if (pqi_ctrl_offline(ctrl_info)) {
4598 rc = -ENXIO;
4599 goto out;
4600 }
4601
b27ac2fa 4602 io_request = pqi_alloc_io_request(ctrl_info, NULL);
6c223761
KB
4603
4604 put_unaligned_le16(io_request->index,
4605 &(((struct pqi_raid_path_request *)request)->request_id));
4606
4607 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4608 ((struct pqi_raid_path_request *)request)->error_index =
4609 ((struct pqi_raid_path_request *)request)->request_id;
4610
4611 iu_length = get_unaligned_le16(&request->iu_length) +
4612 PQI_REQUEST_HEADER_LENGTH;
4613 memcpy(io_request->iu, request, iu_length);
4614
957c5ab1
KB
4615 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4616 io_request->context = &wait;
4617
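 /*
  * The request is posted on the RAID path of the default queue group;
  * pqi_raid_synchronous_complete() simply signals the on-stack
  * completion, so this thread blocks below until the controller
  * responds or is detected offline.
  */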
583891c9 4618 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
957c5ab1
KB
4619 io_request);
4620
ae0c189d 4621 pqi_wait_for_completion_io(ctrl_info, &wait);
6c223761
KB
4622
4623 if (error_info) {
4624 if (io_request->error_info)
583891c9 4625 memcpy(error_info, io_request->error_info, sizeof(*error_info));
6c223761
KB
4626 else
4627 memset(error_info, 0, sizeof(*error_info));
4628 } else if (rc == 0 && io_request->error_info) {
583891c9 4629 rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
6c223761
KB
4630 }
4631
4632 pqi_free_io_request(io_request);
4633
7561a7e4 4634out:
ae0c189d 4635 pqi_ctrl_unbusy(ctrl_info);
6c223761
KB
4636 up(&ctrl_info->sync_request_sem);
4637
4638 return rc;
4639}
4640
4641static int pqi_validate_admin_response(
4642 struct pqi_general_admin_response *response, u8 expected_function_code)
4643{
4644 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4645 return -EINVAL;
4646
4647 if (get_unaligned_le16(&response->header.iu_length) !=
4648 PQI_GENERAL_ADMIN_IU_LENGTH)
4649 return -EINVAL;
4650
4651 if (response->function_code != expected_function_code)
4652 return -EINVAL;
4653
4654 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4655 return -EINVAL;
4656
4657 return 0;
4658}
4659
4660static int pqi_submit_admin_request_synchronous(
4661 struct pqi_ctrl_info *ctrl_info,
4662 struct pqi_general_admin_request *request,
4663 struct pqi_general_admin_response *response)
4664{
4665 int rc;
4666
4667 pqi_submit_admin_request(ctrl_info, request);
4668
4669 rc = pqi_poll_for_admin_response(ctrl_info, response);
4670
4671 if (rc == 0)
ae0c189d 4672 rc = pqi_validate_admin_response(response, request->function_code);
6c223761
KB
4673
4674 return rc;
4675}
4676
4677static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4678{
4679 int rc;
4680 struct pqi_general_admin_request request;
4681 struct pqi_general_admin_response response;
4682 struct pqi_device_capability *capability;
4683 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4684
4685 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4686 if (!capability)
4687 return -ENOMEM;
4688
4689 memset(&request, 0, sizeof(request));
4690
4691 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4692 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4693 &request.header.iu_length);
4694 request.function_code =
4695 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4696 put_unaligned_le32(sizeof(*capability),
4697 &request.data.report_device_capability.buffer_length);
4698
4699 rc = pqi_map_single(ctrl_info->pci_dev,
4700 &request.data.report_device_capability.sg_descriptor,
4701 capability, sizeof(*capability),
6917a9cc 4702 DMA_FROM_DEVICE);
6c223761
KB
4703 if (rc)
4704 goto out;
4705
583891c9 4706 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
6c223761
KB
4707
4708 pqi_pci_unmap(ctrl_info->pci_dev,
4709 &request.data.report_device_capability.sg_descriptor, 1,
6917a9cc 4710 DMA_FROM_DEVICE);
6c223761
KB
4711
4712 if (rc)
4713 goto out;
4714
4715 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4716 rc = -EIO;
4717 goto out;
4718 }
4719
4720 ctrl_info->max_inbound_queues =
4721 get_unaligned_le16(&capability->max_inbound_queues);
4722 ctrl_info->max_elements_per_iq =
4723 get_unaligned_le16(&capability->max_elements_per_iq);
4724 ctrl_info->max_iq_element_length =
4725 get_unaligned_le16(&capability->max_iq_element_length)
4726 * 16;
4727 ctrl_info->max_outbound_queues =
4728 get_unaligned_le16(&capability->max_outbound_queues);
4729 ctrl_info->max_elements_per_oq =
4730 get_unaligned_le16(&capability->max_elements_per_oq);
4731 ctrl_info->max_oq_element_length =
4732 get_unaligned_le16(&capability->max_oq_element_length)
4733 * 16;
4734
4735 sop_iu_layer_descriptor =
4736 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4737
4738 ctrl_info->max_inbound_iu_length_per_firmware =
4739 get_unaligned_le16(
4740 &sop_iu_layer_descriptor->max_inbound_iu_length);
4741 ctrl_info->inbound_spanning_supported =
4742 sop_iu_layer_descriptor->inbound_spanning_supported;
4743 ctrl_info->outbound_spanning_supported =
4744 sop_iu_layer_descriptor->outbound_spanning_supported;
4745
4746out:
4747 kfree(capability);
4748
4749 return rc;
4750}
4751
4752static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4753{
4754 if (ctrl_info->max_iq_element_length <
4755 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4756 dev_err(&ctrl_info->pci_dev->dev,
4757 "max. inbound queue element length of %d is less than the required length of %d\n",
4758 ctrl_info->max_iq_element_length,
4759 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4760 return -EINVAL;
4761 }
4762
4763 if (ctrl_info->max_oq_element_length <
4764 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4765 dev_err(&ctrl_info->pci_dev->dev,
4766 "max. outbound queue element length of %d is less than the required length of %d\n",
4767 ctrl_info->max_oq_element_length,
4768 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4769 return -EINVAL;
4770 }
4771
4772 if (ctrl_info->max_inbound_iu_length_per_firmware <
4773 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4774 dev_err(&ctrl_info->pci_dev->dev,
4775 "max. inbound IU length of %u is less than the min. required length of %d\n",
4776 ctrl_info->max_inbound_iu_length_per_firmware,
4777 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4778 return -EINVAL;
4779 }
4780
77668f41
KB
4781 if (!ctrl_info->inbound_spanning_supported) {
4782 dev_err(&ctrl_info->pci_dev->dev,
4783 "the controller does not support inbound spanning\n");
4784 return -EINVAL;
4785 }
4786
4787 if (ctrl_info->outbound_spanning_supported) {
4788 dev_err(&ctrl_info->pci_dev->dev,
4789 "the controller supports outbound spanning but this driver does not\n");
4790 return -EINVAL;
4791 }
4792
6c223761
KB
4793 return 0;
4794}
4795
6c223761
KB
4796static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4797{
4798 int rc;
4799 struct pqi_event_queue *event_queue;
4800 struct pqi_general_admin_request request;
4801 struct pqi_general_admin_response response;
4802
4803 event_queue = &ctrl_info->event_queue;
4804
4805 /*
4806 * Create OQ (Outbound Queue - device to host queue) dedicated
4807 * to events.
4808 */
4809 memset(&request, 0, sizeof(request));
4810 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4811 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4812 &request.header.iu_length);
4813 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4814 put_unaligned_le16(event_queue->oq_id,
4815 &request.data.create_operational_oq.queue_id);
4816 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4817 &request.data.create_operational_oq.element_array_addr);
4818 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4819 &request.data.create_operational_oq.pi_addr);
4820 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4821 &request.data.create_operational_oq.num_elements);
4822 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4823 &request.data.create_operational_oq.element_length);
4824 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4825 put_unaligned_le16(event_queue->int_msg_num,
4826 &request.data.create_operational_oq.int_msg_num);
4827
4828 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4829 &response);
4830 if (rc)
4831 return rc;
4832
4833 event_queue->oq_ci = ctrl_info->iomem_base +
4834 PQI_DEVICE_REGISTERS_OFFSET +
4835 get_unaligned_le64(
4836 &response.data.create_operational_oq.oq_ci_offset);
4837
4838 return 0;
4839}
4840
061ef06a
KB
4841static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4842 unsigned int group_number)
6c223761 4843{
6c223761
KB
4844 int rc;
4845 struct pqi_queue_group *queue_group;
4846 struct pqi_general_admin_request request;
4847 struct pqi_general_admin_response response;
4848
061ef06a 4849 queue_group = &ctrl_info->queue_groups[group_number];
6c223761
KB
4850
4851 /*
4852 * Create IQ (Inbound Queue - host to device queue) for
4853 * RAID path.
4854 */
4855 memset(&request, 0, sizeof(request));
4856 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4857 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4858 &request.header.iu_length);
4859 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4860 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4861 &request.data.create_operational_iq.queue_id);
4862 put_unaligned_le64(
4863 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4864 &request.data.create_operational_iq.element_array_addr);
4865 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4866 &request.data.create_operational_iq.ci_addr);
4867 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4868 &request.data.create_operational_iq.num_elements);
4869 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4870 &request.data.create_operational_iq.element_length);
4871 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4872
4873 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4874 &response);
4875 if (rc) {
4876 dev_err(&ctrl_info->pci_dev->dev,
4877 "error creating inbound RAID queue\n");
4878 return rc;
4879 }
4880
4881 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4882 PQI_DEVICE_REGISTERS_OFFSET +
4883 get_unaligned_le64(
4884 &response.data.create_operational_iq.iq_pi_offset);
4885
4886 /*
4887 * Create IQ (Inbound Queue - host to device queue) for
4888 * Advanced I/O (AIO) path.
4889 */
4890 memset(&request, 0, sizeof(request));
4891 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4892 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4893 &request.header.iu_length);
4894 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4895 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4896 &request.data.create_operational_iq.queue_id);
4897 put_unaligned_le64((u64)queue_group->
4898 iq_element_array_bus_addr[AIO_PATH],
4899 &request.data.create_operational_iq.element_array_addr);
4900 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4901 &request.data.create_operational_iq.ci_addr);
4902 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4903 &request.data.create_operational_iq.num_elements);
4904 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4905 &request.data.create_operational_iq.element_length);
4906 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4907
4908 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4909 &response);
4910 if (rc) {
4911 dev_err(&ctrl_info->pci_dev->dev,
4912 "error creating inbound AIO queue\n");
339faa81 4913 return rc;
6c223761
KB
4914 }
4915
4916 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4917 PQI_DEVICE_REGISTERS_OFFSET +
4918 get_unaligned_le64(
4919 &response.data.create_operational_iq.iq_pi_offset);
4920
4921 /*
4922 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4923 * assumed to be for RAID path I/O unless we change the queue's
4924 * property.
4925 */
4926 memset(&request, 0, sizeof(request));
4927 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4928 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4929 &request.header.iu_length);
4930 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4931 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4932 &request.data.change_operational_iq_properties.queue_id);
4933 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4934 &request.data.change_operational_iq_properties.vendor_specific);
4935
4936 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4937 &response);
4938 if (rc) {
4939 dev_err(&ctrl_info->pci_dev->dev,
4940 "error changing queue property\n");
339faa81 4941 return rc;
6c223761
KB
4942 }
4943
4944 /*
4945 * Create OQ (Outbound Queue - device to host queue).
4946 */
4947 memset(&request, 0, sizeof(request));
4948 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4949 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4950 &request.header.iu_length);
4951 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4952 put_unaligned_le16(queue_group->oq_id,
4953 &request.data.create_operational_oq.queue_id);
4954 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4955 &request.data.create_operational_oq.element_array_addr);
4956 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4957 &request.data.create_operational_oq.pi_addr);
4958 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4959 &request.data.create_operational_oq.num_elements);
4960 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4961 &request.data.create_operational_oq.element_length);
4962 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4963 put_unaligned_le16(queue_group->int_msg_num,
4964 &request.data.create_operational_oq.int_msg_num);
4965
4966 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4967 &response);
4968 if (rc) {
4969 dev_err(&ctrl_info->pci_dev->dev,
4970 "error creating outbound queue\n");
339faa81 4971 return rc;
6c223761
KB
4972 }
4973
4974 queue_group->oq_ci = ctrl_info->iomem_base +
4975 PQI_DEVICE_REGISTERS_OFFSET +
4976 get_unaligned_le64(
4977 &response.data.create_operational_oq.oq_ci_offset);
4978
6c223761 4979 return 0;
6c223761
KB
4980}
4981
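/*
 * Overview (descriptive note): pqi_create_queues() below builds the event
 * queue first and then one queue group for each of
 * ctrl_info->num_queue_groups. As shown above, each group consists of a
 * RAID-path IQ, an AIO-path IQ (marked via the CHANGE_IQ_PROPERTY request),
 * and a single OQ shared by both paths.
 */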
4982static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4983{
4984 int rc;
4985 unsigned int i;
4986
4987 rc = pqi_create_event_queue(ctrl_info);
4988 if (rc) {
4989 dev_err(&ctrl_info->pci_dev->dev,
4990 "error creating event queue\n");
4991 return rc;
4992 }
4993
4994 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
061ef06a 4995 rc = pqi_create_queue_group(ctrl_info, i);
6c223761
KB
4996 if (rc) {
4997 dev_err(&ctrl_info->pci_dev->dev,
4998 "error creating queue group number %u/%u\n",
4999 i, ctrl_info->num_queue_groups);
5000 return rc;
5001 }
5002 }
5003
5004 return 0;
5005}
5006
5007#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
5f492a7a 5008 struct_size((struct pqi_event_config *)0, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
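/*
 * Note: struct_size(ptr, descriptors, n) expands to
 * sizeof(*ptr) + n * sizeof(ptr->descriptors[0]) with overflow checking,
 * i.e. room for one pqi_event_config header followed by
 * PQI_MAX_EVENT_DESCRIPTORS event descriptors.
 */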
6c223761 5009
6a50d6ad
KB
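/*
 * pqi_configure_events() makes two passes over the same DMA buffer: it
 * first issues REPORT_VENDOR_EVENT_CONFIG to read the controller's current
 * event configuration, then rewrites each supported descriptor's oq_id to
 * point at the event queue (or to 0 when disabling events) and sends the
 * buffer back with SET_VENDOR_EVENT_CONFIG.
 */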
5010static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
5011 bool enable_events)
6c223761
KB
5012{
5013 int rc;
5014 unsigned int i;
5015 struct pqi_event_config *event_config;
6a50d6ad 5016 struct pqi_event_descriptor *event_descriptor;
6c223761
KB
5017 struct pqi_general_management_request request;
5018
5019 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5020 GFP_KERNEL);
5021 if (!event_config)
5022 return -ENOMEM;
5023
5024 memset(&request, 0, sizeof(request));
5025
5026 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
5027 put_unaligned_le16(offsetof(struct pqi_general_management_request,
5028 data.report_event_configuration.sg_descriptors[1]) -
5029 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5030 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5031 &request.data.report_event_configuration.buffer_length);
5032
5033 rc = pqi_map_single(ctrl_info->pci_dev,
5034 request.data.report_event_configuration.sg_descriptors,
5035 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
6917a9cc 5036 DMA_FROM_DEVICE);
6c223761
KB
5037 if (rc)
5038 goto out;
5039
ae0c189d 5040 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
6c223761
KB
5041
5042 pqi_pci_unmap(ctrl_info->pci_dev,
5043 request.data.report_event_configuration.sg_descriptors, 1,
6917a9cc 5044 DMA_FROM_DEVICE);
6c223761
KB
5045
5046 if (rc)
5047 goto out;
5048
6a50d6ad
KB
5049 for (i = 0; i < event_config->num_event_descriptors; i++) {
5050 event_descriptor = &event_config->descriptors[i];
5051 if (enable_events &&
5052 pqi_is_supported_event(event_descriptor->event_type))
583891c9 5053 put_unaligned_le16(ctrl_info->event_queue.oq_id,
6a50d6ad
KB
5054 &event_descriptor->oq_id);
5055 else
5056 put_unaligned_le16(0, &event_descriptor->oq_id);
5057 }
6c223761
KB
5058
5059 memset(&request, 0, sizeof(request));
5060
5061 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
5062 put_unaligned_le16(offsetof(struct pqi_general_management_request,
5063 data.report_event_configuration.sg_descriptors[1]) -
5064 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5065 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5066 &request.data.report_event_configuration.buffer_length);
5067
5068 rc = pqi_map_single(ctrl_info->pci_dev,
5069 request.data.report_event_configuration.sg_descriptors,
5070 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
6917a9cc 5071 DMA_TO_DEVICE);
6c223761
KB
5072 if (rc)
5073 goto out;
5074
ae0c189d 5075 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
6c223761
KB
5076
5077 pqi_pci_unmap(ctrl_info->pci_dev,
5078 request.data.report_event_configuration.sg_descriptors, 1,
6917a9cc 5079 DMA_TO_DEVICE);
6c223761
KB
5080
5081out:
5082 kfree(event_config);
5083
5084 return rc;
5085}
5086
6a50d6ad
KB
5087static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
5088{
5089 return pqi_configure_events(ctrl_info, true);
5090}
5091
6c223761
KB
5092static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
5093{
5094 unsigned int i;
5095 struct device *dev;
5096 size_t sg_chain_buffer_length;
5097 struct pqi_io_request *io_request;
5098
5099 if (!ctrl_info->io_request_pool)
5100 return;
5101
5102 dev = &ctrl_info->pci_dev->dev;
5103 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5104 io_request = ctrl_info->io_request_pool;
5105
5106 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5107 kfree(io_request->iu);
5108 if (!io_request->sg_chain_buffer)
5109 break;
5110 dma_free_coherent(dev, sg_chain_buffer_length,
5111 io_request->sg_chain_buffer,
5112 io_request->sg_chain_buffer_dma_handle);
5113 io_request++;
5114 }
5115
5116 kfree(ctrl_info->io_request_pool);
5117 ctrl_info->io_request_pool = NULL;
5118}
5119
5120static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
5121{
694c5d5b
KB
5122 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
5123 ctrl_info->error_buffer_length,
5124 &ctrl_info->error_buffer_dma_handle,
5125 GFP_KERNEL);
6c223761
KB
5126 if (!ctrl_info->error_buffer)
5127 return -ENOMEM;
5128
5129 return 0;
5130}
5131
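/*
 * Each of the max_io_slots entries in the I/O request pool gets a
 * kmalloc'ed IU buffer plus a DMA-coherent scatter-gather chain buffer.
 * The pool itself is zero-allocated, so on a partial allocation failure
 * pqi_free_all_io_requests() above can stop at the first slot whose
 * sg_chain_buffer is still NULL.
 */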
5132static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
5133{
5134 unsigned int i;
5135 void *sg_chain_buffer;
5136 size_t sg_chain_buffer_length;
5137 dma_addr_t sg_chain_buffer_dma_handle;
5138 struct device *dev;
5139 struct pqi_io_request *io_request;
5140
583891c9
KB
5141 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
5142 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
6c223761
KB
5143
5144 if (!ctrl_info->io_request_pool) {
5145 dev_err(&ctrl_info->pci_dev->dev,
5146 "failed to allocate I/O request pool\n");
5147 goto error;
5148 }
5149
5150 dev = &ctrl_info->pci_dev->dev;
5151 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5152 io_request = ctrl_info->io_request_pool;
5153
5154 for (i = 0; i < ctrl_info->max_io_slots; i++) {
583891c9 5155 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
6c223761
KB
5156
5157 if (!io_request->iu) {
5158 dev_err(&ctrl_info->pci_dev->dev,
5159 "failed to allocate IU buffers\n");
5160 goto error;
5161 }
5162
5163 sg_chain_buffer = dma_alloc_coherent(dev,
5164 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
5165 GFP_KERNEL);
5166
5167 if (!sg_chain_buffer) {
5168 dev_err(&ctrl_info->pci_dev->dev,
5169 "failed to allocate PQI scatter-gather chain buffers\n");
5170 goto error;
5171 }
5172
5173 io_request->index = i;
5174 io_request->sg_chain_buffer = sg_chain_buffer;
583891c9 5175 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
6c223761
KB
5176 io_request++;
5177 }
5178
5179 return 0;
5180
5181error:
5182 pqi_free_all_io_requests(ctrl_info);
5183
5184 return -ENOMEM;
5185}
5186
5187/*
5188 * Calculate required resources that are sized based on max. outstanding
5189 * requests and max. transfer size.
5190 */
5191
5192static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
5193{
5194 u32 max_transfer_size;
5195 u32 max_sg_entries;
5196
5197 ctrl_info->scsi_ml_can_queue =
5198 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
5199 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
5200
5201 ctrl_info->error_buffer_length =
5202 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
5203
d727a776
KB
5204 if (reset_devices)
5205 max_transfer_size = min(ctrl_info->max_transfer_size,
5206 PQI_MAX_TRANSFER_SIZE_KDUMP);
5207 else
5208 max_transfer_size = min(ctrl_info->max_transfer_size,
5209 PQI_MAX_TRANSFER_SIZE);
6c223761
KB
5210
5211 max_sg_entries = max_transfer_size / PAGE_SIZE;
5212
5213 /* +1 to cover when the buffer is not page-aligned. */
5214 max_sg_entries++;
5215
5216 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
5217
5218 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
5219
5220 ctrl_info->sg_chain_buffer_length =
e1d213bd
KB
5221 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
5222 PQI_EXTRA_SGL_MEMORY;
6c223761
KB
5223 ctrl_info->sg_tablesize = max_sg_entries;
5224 ctrl_info->max_sectors = max_transfer_size / 512;
5225}
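/*
 * Worked example for the sizing above (assuming 4 KiB pages, a 1 MiB
 * max_transfer_size, and a controller max_sg_entries of at least 257):
 * max_sg_entries = (1 MiB / 4 KiB) + 1 = 257, the recomputed
 * max_transfer_size is (257 - 1) * 4 KiB = 1 MiB, and max_sectors is
 * 1 MiB / 512 = 2048.
 */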
5226
5227static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
5228{
6c223761
KB
5229 int num_queue_groups;
5230 u16 num_elements_per_iq;
5231 u16 num_elements_per_oq;
5232
d727a776
KB
5233 if (reset_devices) {
5234 num_queue_groups = 1;
5235 } else {
5236 int num_cpus;
5237 int max_queue_groups;
5238
5239 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
5240 ctrl_info->max_outbound_queues - 1);
5241 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
6c223761 5242
d727a776
KB
5243 num_cpus = num_online_cpus();
5244 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
5245 num_queue_groups = min(num_queue_groups, max_queue_groups);
5246 }
6c223761
KB
5247
5248 ctrl_info->num_queue_groups = num_queue_groups;
5249
77668f41
KB
5250 /*
5251 * Make sure that the max. inbound IU length is an even multiple
5252 * of our inbound element length.
5253 */
5254 ctrl_info->max_inbound_iu_length =
5255 (ctrl_info->max_inbound_iu_length_per_firmware /
5256 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
5257 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
6c223761
KB
5258
5259 num_elements_per_iq =
5260 (ctrl_info->max_inbound_iu_length /
5261 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
5262
5263 /* Add one because one element in each queue is unusable. */
5264 num_elements_per_iq++;
5265
5266 num_elements_per_iq = min(num_elements_per_iq,
5267 ctrl_info->max_elements_per_iq);
5268
5269 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5270 num_elements_per_oq = min(num_elements_per_oq,
5271 ctrl_info->max_elements_per_oq);
5272
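	/*
	 * Note: the OQ is sized at roughly twice the IQ depth
	 * (((n - 1) * 2) + 1), presumably because each group's single OQ
	 * receives completions from both its RAID-path and AIO-path
	 * inbound queues.
	 */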
5273 ctrl_info->num_elements_per_iq = num_elements_per_iq;
5274 ctrl_info->num_elements_per_oq = num_elements_per_oq;
5275
5276 ctrl_info->max_sg_per_iu =
5277 ((ctrl_info->max_inbound_iu_length -
5278 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5279 sizeof(struct pqi_sg_descriptor)) +
5280 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
6702d2c4
DB
5281
5282 ctrl_info->max_sg_per_r56_iu =
5283 ((ctrl_info->max_inbound_iu_length -
5284 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5285 sizeof(struct pqi_sg_descriptor)) +
5286 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
6c223761
KB
5287}
5288
583891c9
KB
5289static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5290 struct scatterlist *sg)
6c223761
KB
5291{
5292 u64 address = (u64)sg_dma_address(sg);
5293 unsigned int length = sg_dma_len(sg);
5294
5295 put_unaligned_le64(address, &sg_descriptor->address);
5296 put_unaligned_le32(length, &sg_descriptor->length);
5297 put_unaligned_le32(0, &sg_descriptor->flags);
5298}
5299
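/*
 * pqi_build_sg_list() fills the SG descriptors embedded in the IU. Once
 * max_sg_per_iu - 1 embedded slots have been used, the last embedded slot
 * is turned into a CISS_SG_CHAIN descriptor pointing at the request's
 * preallocated chain buffer, and the remaining entries continue there.
 * The returned count covers only the descriptors placed in the IU itself
 * (including the chain descriptor), which is what the callers add to the
 * IU length.
 */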
1a22bc4b
DB
5300static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5301 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5302 int max_sg_per_iu, bool *chained)
6c223761
KB
5303{
5304 int i;
6c223761 5305 unsigned int num_sg_in_iu;
6c223761 5306
1a22bc4b 5307 *chained = false;
6c223761 5308 i = 0;
1a22bc4b
DB
5309 num_sg_in_iu = 0;
5310 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */
6c223761
KB
5311
5312 while (1) {
5313 pqi_set_sg_descriptor(sg_descriptor, sg);
1a22bc4b 5314 if (!*chained)
6c223761
KB
5315 num_sg_in_iu++;
5316 i++;
5317 if (i == sg_count)
5318 break;
5319 sg_descriptor++;
5320 if (i == max_sg_per_iu) {
1a22bc4b 5321 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
6c223761 5322 &sg_descriptor->address);
1a22bc4b 5323 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
6c223761 5324 &sg_descriptor->length);
1a22bc4b
DB
5325 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5326 *chained = true;
6c223761
KB
5327 num_sg_in_iu++;
5328 sg_descriptor = io_request->sg_chain_buffer;
5329 }
5330 sg = sg_next(sg);
5331 }
5332
5333 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
6c223761 5334
1a22bc4b 5335 return num_sg_in_iu;
6c223761
KB
5336}
5337
6c223761
KB
5338static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5339 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
6c223761
KB
5340 struct pqi_io_request *io_request)
5341{
6c223761
KB
5342 u16 iu_length;
5343 int sg_count;
a60eec02
KB
5344 bool chained;
5345 unsigned int num_sg_in_iu;
6c223761
KB
5346 struct scatterlist *sg;
5347 struct pqi_sg_descriptor *sg_descriptor;
5348
5349 sg_count = scsi_dma_map(scmd);
5350 if (sg_count < 0)
5351 return sg_count;
a60eec02 5352
6c223761 5353 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
a60eec02 5354 PQI_REQUEST_HEADER_LENGTH;
a60eec02 5355
6c223761
KB
5356 if (sg_count == 0)
5357 goto out;
5358
a60eec02
KB
5359 sg = scsi_sglist(scmd);
5360 sg_descriptor = request->sg_descriptors;
a60eec02 5361
1a22bc4b
DB
5362 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5363 ctrl_info->max_sg_per_iu, &chained);
6c223761 5364
a60eec02 5365 request->partial = chained;
6c223761 5366 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
a60eec02
KB
5367
5368out:
6c223761 5369 put_unaligned_le16(iu_length, &request->header.iu_length);
6c223761
KB
5370
5371 return 0;
5372}
5373
7a012c23
DB
5374static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5375 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5376 struct pqi_io_request *io_request)
6c223761 5377{
7a012c23
DB
5378 u16 iu_length;
5379 int sg_count;
5380 bool chained;
5381 unsigned int num_sg_in_iu;
5382 struct scatterlist *sg;
5383 struct pqi_sg_descriptor *sg_descriptor;
5384
5385 sg_count = scsi_dma_map(scmd);
5386 if (sg_count < 0)
5387 return sg_count;
5388
5389 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5390 PQI_REQUEST_HEADER_LENGTH;
5391 num_sg_in_iu = 0;
5392
5393 if (sg_count == 0)
5394 goto out;
5395
5396 sg = scsi_sglist(scmd);
5397 sg_descriptor = request->sg_descriptors;
5398
5399 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5400 ctrl_info->max_sg_per_iu, &chained);
5401
5402 request->partial = chained;
5403 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5404
5405out:
5406 put_unaligned_le16(iu_length, &request->header.iu_length);
5407 request->num_sg_descriptors = num_sg_in_iu;
5408
5409 return 0;
5410}
5411
6702d2c4
DB
5412static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5413 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5414 struct pqi_io_request *io_request)
5415{
5416 u16 iu_length;
5417 int sg_count;
5418 bool chained;
5419 unsigned int num_sg_in_iu;
5420 struct scatterlist *sg;
5421 struct pqi_sg_descriptor *sg_descriptor;
5422
5423 sg_count = scsi_dma_map(scmd);
5424 if (sg_count < 0)
5425 return sg_count;
5426
5427 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5428 PQI_REQUEST_HEADER_LENGTH;
5429 num_sg_in_iu = 0;
5430
5431 if (sg_count != 0) {
5432 sg = scsi_sglist(scmd);
5433 sg_descriptor = request->sg_descriptors;
5434
5435 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5436 ctrl_info->max_sg_per_r56_iu, &chained);
5437
5438 request->partial = chained;
5439 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5440 }
5441
5442 put_unaligned_le16(iu_length, &request->header.iu_length);
5443 request->num_sg_descriptors = num_sg_in_iu;
5444
5445 return 0;
5446}
5447
6c223761
KB
5448static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5449 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5450 struct pqi_io_request *io_request)
5451{
6c223761
KB
5452 u16 iu_length;
5453 int sg_count;
a60eec02
KB
5454 bool chained;
5455 unsigned int num_sg_in_iu;
6c223761
KB
5456 struct scatterlist *sg;
5457 struct pqi_sg_descriptor *sg_descriptor;
5458
5459 sg_count = scsi_dma_map(scmd);
5460 if (sg_count < 0)
5461 return sg_count;
a60eec02
KB
5462
5463 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5464 PQI_REQUEST_HEADER_LENGTH;
5465 num_sg_in_iu = 0;
5466
6c223761
KB
5467 if (sg_count == 0)
5468 goto out;
5469
a60eec02
KB
5470 sg = scsi_sglist(scmd);
5471 sg_descriptor = request->sg_descriptors;
a60eec02 5472
1a22bc4b
DB
5473 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5474 ctrl_info->max_sg_per_iu, &chained);
6c223761 5475
a60eec02 5476 request->partial = chained;
6c223761 5477 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
a60eec02
KB
5478
5479out:
6c223761
KB
5480 put_unaligned_le16(iu_length, &request->header.iu_length);
5481 request->num_sg_descriptors = num_sg_in_iu;
5482
5483 return 0;
5484}
5485
5486static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5487 void *context)
5488{
5489 struct scsi_cmnd *scmd;
5490
5491 scmd = io_request->scmd;
5492 pqi_free_io_request(io_request);
5493 scsi_dma_unmap(scmd);
5494 pqi_scsi_done(scmd);
5495}
5496
376fb880
KB
5497static int pqi_raid_submit_scsi_cmd_with_io_request(
5498 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
6c223761
KB
5499 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5500 struct pqi_queue_group *queue_group)
5501{
5502 int rc;
5503 size_t cdb_length;
6c223761
KB
5504 struct pqi_raid_path_request *request;
5505
6c223761
KB
5506 io_request->io_complete_callback = pqi_raid_io_complete;
5507 io_request->scmd = scmd;
5508
6c223761 5509 request = io_request->iu;
583891c9 5510 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
6c223761
KB
5511
5512 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5513 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5514 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5515 put_unaligned_le16(io_request->index, &request->request_id);
5516 request->error_index = request->request_id;
583891c9 5517 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
904f2bfd 5518 request->ml_device_lun_number = (u8)scmd->device->lun;
6c223761
KB
5519
5520 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5521 memcpy(request->cdb, scmd->cmnd, cdb_length);
5522
5523 switch (cdb_length) {
5524 case 6:
5525 case 10:
5526 case 12:
5527 case 16:
583891c9 5528 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6c223761
KB
5529 break;
5530 case 20:
583891c9 5531 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
6c223761
KB
5532 break;
5533 case 24:
583891c9 5534 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
6c223761
KB
5535 break;
5536 case 28:
583891c9 5537 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
6c223761
KB
5538 break;
5539 case 32:
5540 default:
583891c9 5541 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
6c223761
KB
5542 break;
5543 }
5544
5545 switch (scmd->sc_data_direction) {
69695aea 5546 case DMA_FROM_DEVICE:
6c223761
KB
5547 request->data_direction = SOP_READ_FLAG;
5548 break;
69695aea 5549 case DMA_TO_DEVICE:
6c223761
KB
5550 request->data_direction = SOP_WRITE_FLAG;
5551 break;
5552 case DMA_NONE:
5553 request->data_direction = SOP_NO_DIRECTION_FLAG;
5554 break;
5555 case DMA_BIDIRECTIONAL:
5556 request->data_direction = SOP_BIDIRECTIONAL;
5557 break;
5558 default:
5559 dev_err(&ctrl_info->pci_dev->dev,
5560 "unknown data direction: %d\n",
5561 scmd->sc_data_direction);
6c223761
KB
5562 break;
5563 }
5564
5565 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5566 if (rc) {
5567 pqi_free_io_request(io_request);
5568 return SCSI_MLQUEUE_HOST_BUSY;
5569 }
5570
5571 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5572
5573 return 0;
5574}
5575
376fb880
KB
5576static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5577 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5578 struct pqi_queue_group *queue_group)
5579{
5580 struct pqi_io_request *io_request;
5581
b27ac2fa
DB
5582 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5583 if (!io_request)
5584 return SCSI_MLQUEUE_HOST_BUSY;
376fb880
KB
5585
5586 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
5587 device, scmd, queue_group);
5588}
5589
376fb880
KB
5590static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5591{
5592 struct scsi_cmnd *scmd;
03b288cf 5593 struct pqi_scsi_dev *device;
376fb880
KB
5594 struct pqi_ctrl_info *ctrl_info;
5595
5596 if (!io_request->raid_bypass)
5597 return false;
5598
5599 scmd = io_request->scmd;
5600 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5601 return false;
5602 if (host_byte(scmd->result) == DID_NO_CONNECT)
5603 return false;
5604
03b288cf 5605 device = scmd->device->hostdata;
5be9db06 5606 if (pqi_device_offline(device) || pqi_device_in_remove(device))
03b288cf
KB
5607 return false;
5608
376fb880
KB
5609 ctrl_info = shost_to_hba(scmd->device->host);
5610 if (pqi_ctrl_offline(ctrl_info))
5611 return false;
5612
5613 return true;
5614}
5615
6c223761
KB
5616static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5617 void *context)
5618{
5619 struct scsi_cmnd *scmd;
5620
5621 scmd = io_request->scmd;
5622 scsi_dma_unmap(scmd);
5be9db06 5623 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
6c223761 5624 set_host_byte(scmd, DID_IMM_RETRY);
c1ea387d 5625 pqi_cmd_priv(scmd)->this_residual++;
376fb880 5626 }
5be9db06 5627
6c223761
KB
5628 pqi_free_io_request(io_request);
5629 pqi_scsi_done(scmd);
5630}
5631
b4dc06a9 5632static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info,
2a47834d
GW
5633 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
5634{
5635 bool io_high_prio;
5636 int priority_class;
5637
5638 io_high_prio = false;
b4dc06a9 5639
2a47834d
GW
5640 if (device->ncq_prio_enable) {
5641 priority_class =
5642 IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
5643 if (priority_class == IOPRIO_CLASS_RT) {
b4dc06a9 5644 /* Set NCQ priority for read/write commands. */
2a47834d
GW
5645 switch (scmd->cmnd[0]) {
5646 case WRITE_16:
5647 case READ_16:
5648 case WRITE_12:
5649 case READ_12:
5650 case WRITE_10:
5651 case READ_10:
5652 case WRITE_6:
5653 case READ_6:
5654 io_high_prio = true;
5655 break;
2a47834d
GW
5656 }
5657 }
5658 }
5659
5660 return io_high_prio;
5661}
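/*
 * In practice this means that, when ncq_prio_enable is set for the device,
 * read/write commands submitted with the real-time I/O priority class
 * (e.g. under "ionice -c 1") are tagged as high priority on the AIO path.
 */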
5662
6c223761
KB
5663static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5664 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5665 struct pqi_queue_group *queue_group)
5666{
2a47834d
GW
5667 bool io_high_prio;
5668
b4dc06a9
KB
5669 io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd);
5670
6c223761 5671 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
2a47834d
GW
5672 scmd->cmnd, scmd->cmd_len, queue_group, NULL,
5673 false, io_high_prio);
6c223761
KB
5674}
5675
5676static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5677 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5678 unsigned int cdb_length, struct pqi_queue_group *queue_group,
2a47834d
GW
5679 struct pqi_encryption_info *encryption_info, bool raid_bypass,
5680 bool io_high_prio)
6c223761
KB
5681{
5682 int rc;
5683 struct pqi_io_request *io_request;
5684 struct pqi_aio_path_request *request;
904f2bfd 5685 struct pqi_scsi_dev *device;
6c223761 5686
904f2bfd 5687 device = scmd->device->hostdata;
b27ac2fa
DB
5688 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5689 if (!io_request)
5690 return SCSI_MLQUEUE_HOST_BUSY;
6c223761
KB
5691 io_request->io_complete_callback = pqi_aio_io_complete;
5692 io_request->scmd = scmd;
376fb880 5693 io_request->raid_bypass = raid_bypass;
6c223761
KB
5694
5695 request = io_request->iu;
9e98e60b 5696 memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
6c223761
KB
5697
5698 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5699 put_unaligned_le32(aio_handle, &request->nexus_id);
5700 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5701 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
2a47834d 5702 request->command_priority = io_high_prio;
6c223761
KB
5703 put_unaligned_le16(io_request->index, &request->request_id);
5704 request->error_index = request->request_id;
904f2bfd
KM
5705 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
5706 put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
6c223761
KB
5707 if (cdb_length > sizeof(request->cdb))
5708 cdb_length = sizeof(request->cdb);
5709 request->cdb_length = cdb_length;
5710 memcpy(request->cdb, cdb, cdb_length);
5711
5712 switch (scmd->sc_data_direction) {
5713 case DMA_TO_DEVICE:
5714 request->data_direction = SOP_READ_FLAG;
5715 break;
5716 case DMA_FROM_DEVICE:
5717 request->data_direction = SOP_WRITE_FLAG;
5718 break;
5719 case DMA_NONE:
5720 request->data_direction = SOP_NO_DIRECTION_FLAG;
5721 break;
5722 case DMA_BIDIRECTIONAL:
5723 request->data_direction = SOP_BIDIRECTIONAL;
5724 break;
5725 default:
5726 dev_err(&ctrl_info->pci_dev->dev,
5727 "unknown data direction: %d\n",
5728 scmd->sc_data_direction);
6c223761
KB
5729 break;
5730 }
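	/*
	 * Note: unlike the RAID path above, this mapping encodes
	 * DMA_TO_DEVICE as SOP_READ_FLAG and DMA_FROM_DEVICE as
	 * SOP_WRITE_FLAG; the RAID 1/5/6 write helpers below follow the
	 * same convention ("The direction is always write." paired with
	 * SOP_READ_FLAG), so this appears to be the expected encoding for
	 * AIO IUs rather than an inverted direction.
	 */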
5731
5732 if (encryption_info) {
5733 request->encryption_enable = true;
5734 put_unaligned_le16(encryption_info->data_encryption_key_index,
5735 &request->data_encryption_key_index);
5736 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5737 &request->encrypt_tweak_lower);
5738 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5739 &request->encrypt_tweak_upper);
5740 }
5741
5742 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5743 if (rc) {
5744 pqi_free_io_request(io_request);
5745 return SCSI_MLQUEUE_HOST_BUSY;
5746 }
5747
5748 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5749
5750 return 0;
5751}
5752
7a012c23
DB
5753static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5754 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5755 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5756 struct pqi_scsi_dev_raid_map_data *rmd)
7a012c23
DB
5757{
5758 int rc;
5759 struct pqi_io_request *io_request;
5760 struct pqi_aio_r1_path_request *r1_request;
5761
b27ac2fa
DB
5762 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5763 if (!io_request)
5764 return SCSI_MLQUEUE_HOST_BUSY;
5765
7a012c23
DB
5766 io_request->io_complete_callback = pqi_aio_io_complete;
5767 io_request->scmd = scmd;
5768 io_request->raid_bypass = true;
5769
5770 r1_request = io_request->iu;
5771 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5772
5773 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
7a012c23
DB
5774 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5775 r1_request->num_drives = rmd->num_it_nexus_entries;
5776 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5777 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5778 if (rmd->num_it_nexus_entries == 3)
5779 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5780
5781 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5782 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5783 put_unaligned_le16(io_request->index, &r1_request->request_id);
5784 r1_request->error_index = r1_request->request_id;
5785 if (rmd->cdb_length > sizeof(r1_request->cdb))
5786 rmd->cdb_length = sizeof(r1_request->cdb);
5787 r1_request->cdb_length = rmd->cdb_length;
5788 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5789
5790 /* The direction is always write. */
5791 r1_request->data_direction = SOP_READ_FLAG;
5792
5793 if (encryption_info) {
5794 r1_request->encryption_enable = true;
5795 put_unaligned_le16(encryption_info->data_encryption_key_index,
5796 &r1_request->data_encryption_key_index);
5797 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5798 &r1_request->encrypt_tweak_lower);
5799 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5800 &r1_request->encrypt_tweak_upper);
5801 }
5802
5803 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5804 if (rc) {
5805 pqi_free_io_request(io_request);
5806 return SCSI_MLQUEUE_HOST_BUSY;
5807 }
5808
5809 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5810
5811 return 0;
5812}
5813
6702d2c4
DB
5814static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5815 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5816 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5817 struct pqi_scsi_dev_raid_map_data *rmd)
5818{
5819 int rc;
5820 struct pqi_io_request *io_request;
5821 struct pqi_aio_r56_path_request *r56_request;
5822
b27ac2fa
DB
5823 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5824 if (!io_request)
5825 return SCSI_MLQUEUE_HOST_BUSY;
6702d2c4
DB
5826 io_request->io_complete_callback = pqi_aio_io_complete;
5827 io_request->scmd = scmd;
5828 io_request->raid_bypass = true;
5829
5830 r56_request = io_request->iu;
5831 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5832
5833 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5834 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5835 else
5836 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5837
5838 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5839 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5840 put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5841 if (rmd->raid_level == SA_RAID_6) {
5842 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5843 r56_request->xor_multiplier = rmd->xor_mult;
5844 }
5845 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5846 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5847 put_unaligned_le64(rmd->row, &r56_request->row);
5848
5849 put_unaligned_le16(io_request->index, &r56_request->request_id);
5850 r56_request->error_index = r56_request->request_id;
5851
5852 if (rmd->cdb_length > sizeof(r56_request->cdb))
5853 rmd->cdb_length = sizeof(r56_request->cdb);
5854 r56_request->cdb_length = rmd->cdb_length;
5855 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5856
5857 /* The direction is always write. */
5858 r56_request->data_direction = SOP_READ_FLAG;
5859
5860 if (encryption_info) {
5861 r56_request->encryption_enable = true;
5862 put_unaligned_le16(encryption_info->data_encryption_key_index,
5863 &r56_request->data_encryption_key_index);
5864 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5865 &r56_request->encrypt_tweak_lower);
5866 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5867 &r56_request->encrypt_tweak_upper);
5868 }
5869
5870 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5871 if (rc) {
5872 pqi_free_io_request(io_request);
5873 return SCSI_MLQUEUE_HOST_BUSY;
5874 }
5875
5876 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5877
5878 return 0;
5879}
5880
061ef06a
KB
5881static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5882 struct scsi_cmnd *scmd)
5883{
b27ac2fa
DB
5884 /*
 5885 * host_tagset is set to 1 during init, so blk_mq's unique tag encodes the hardware queue to use for this command.
5886 */
5887 return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
061ef06a
KB
5888}
5889
5be9db06
KB
5890static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5891{
12db0f93 5892 if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5be9db06
KB
5893 return false;
5894
c1ea387d 5895 return pqi_cmd_priv(scmd)->this_residual == 0;
5be9db06
KB
5896}
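/*
 * pqi_cmd_priv(scmd)->this_residual is incremented in pqi_aio_io_complete()
 * whenever a RAID-bypass request is retried with DID_IMM_RETRY, so the
 * retried command fails the check above and is resubmitted down the normal
 * RAID path instead of the bypass path.
 */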
5897
7561a7e4
KB
5898/*
5899 * This function gets called just before we hand the completed SCSI request
5900 * back to the SML.
5901 */
5902
5903void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5904{
5905 struct pqi_scsi_dev *device;
5906
1e46731e
MR
5907 if (!scmd->device) {
5908 set_host_byte(scmd, DID_NO_CONNECT);
5909 return;
5910 }
5911
7561a7e4 5912 device = scmd->device->hostdata;
1e46731e
MR
5913 if (!device) {
5914 set_host_byte(scmd, DID_NO_CONNECT);
5915 return;
5916 }
5917
904f2bfd 5918 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
7561a7e4
KB
5919}
5920
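/*
 * Write-stream detection: up to NUM_STREAMS_PER_LUN sequential write
 * streams are tracked per logical volume. A RAID 5/6 write that continues
 * one of the tracked streams (or that cannot use AIO RAID 5/6 writes at
 * all) returns true and is routed down the RAID path, presumably so the
 * firmware can coalesce full-stripe writes; otherwise the least recently
 * used slot is updated with this request and the bypass path stays open.
 */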
c7ffedb3 5921static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
7d81d2b8 5922 struct scsi_cmnd *scmd)
c7ffedb3
DB
5923{
5924 u32 oldest_jiffies;
5925 u8 lru_index;
5926 int i;
5927 int rc;
5928 struct pqi_scsi_dev *device;
5929 struct pqi_stream_data *pqi_stream_data;
5930 struct pqi_scsi_dev_raid_map_data rmd;
5931
5932 if (!ctrl_info->enable_stream_detection)
5933 return false;
5934
5935 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5936 if (rc)
5937 return false;
5938
5939 /* Check writes only. */
5940 if (!rmd.is_write)
5941 return false;
5942
5943 device = scmd->device->hostdata;
5944
5945 /* Check for RAID 5/6 streams. */
5946 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
5947 return false;
5948
5949 /*
 5950 * If the controller does not support AIO RAID 5/6 writes, the request
 5951 * must be sent down the non-AIO path.
5952 */
5953 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
5954 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
5955 return true;
5956
5957 lru_index = 0;
5958 oldest_jiffies = INT_MAX;
5959 for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
5960 pqi_stream_data = &device->stream_data[i];
5961 /*
 5962 * Check whether this request is adjacent to, or falls within,
 5963 * the previous request for this stream.
5964 */
5965 if ((pqi_stream_data->next_lba &&
5966 rmd.first_block >= pqi_stream_data->next_lba) &&
5967 rmd.first_block <= pqi_stream_data->next_lba +
5968 rmd.block_cnt) {
5969 pqi_stream_data->next_lba = rmd.first_block +
5970 rmd.block_cnt;
5971 pqi_stream_data->last_accessed = jiffies;
5972 return true;
5973 }
5974
5975 /* unused entry */
5976 if (pqi_stream_data->last_accessed == 0) {
5977 lru_index = i;
5978 break;
5979 }
5980
5981 /* Find entry with oldest last accessed time. */
5982 if (pqi_stream_data->last_accessed <= oldest_jiffies) {
5983 oldest_jiffies = pqi_stream_data->last_accessed;
5984 lru_index = i;
5985 }
5986 }
5987
5988 /* Set LRU entry. */
5989 pqi_stream_data = &device->stream_data[lru_index];
5990 pqi_stream_data->last_accessed = jiffies;
5991 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
5992
5993 return false;
5994}
5995
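/*
 * Command dispatch below: logical volumes attempt the RAID-bypass (AIO)
 * path when bypass is enabled, the command is bypass-eligible, and it is
 * not part of a detected parity write stream; if the bypass submission is
 * refused (any result other than success or SCSI_MLQUEUE_HOST_BUSY), the
 * command falls back to the normal RAID path. Physical devices use the AIO
 * path when aio_enabled is set, otherwise the RAID path.
 */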
5996static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
6c223761
KB
5997{
5998 int rc;
5999 struct pqi_ctrl_info *ctrl_info;
6000 struct pqi_scsi_dev *device;
061ef06a 6001 u16 hw_queue;
6c223761
KB
6002 struct pqi_queue_group *queue_group;
6003 bool raid_bypassed;
6004
6005 device = scmd->device->hostdata;
6c223761 6006
1e46731e
MR
6007 if (!device) {
6008 set_host_byte(scmd, DID_NO_CONNECT);
6009 pqi_scsi_done(scmd);
6010 return 0;
6011 }
6012
904f2bfd 6013 atomic_inc(&device->scsi_cmds_outstanding[scmd->device->lun]);
7561a7e4 6014
583891c9
KB
6015 ctrl_info = shost_to_hba(shost);
6016
1bdf6e93 6017 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
6c223761
KB
6018 set_host_byte(scmd, DID_NO_CONNECT);
6019 pqi_scsi_done(scmd);
6020 return 0;
6021 }
6022
5be9db06 6023 if (pqi_ctrl_blocked(ctrl_info)) {
7561a7e4
KB
6024 rc = SCSI_MLQUEUE_HOST_BUSY;
6025 goto out;
6026 }
6027
7d81d2b8
KB
6028 /*
6029 * This is necessary because the SML doesn't zero out this field during
6030 * error recovery.
6031 */
6032 scmd->result = 0;
6033
061ef06a
KB
6034 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
6035 queue_group = &ctrl_info->queue_groups[hw_queue];
6c223761
KB
6036
6037 if (pqi_is_logical_device(device)) {
6038 raid_bypassed = false;
588a63fe 6039 if (device->raid_bypass_enabled &&
5be9db06
KB
6040 pqi_is_bypass_eligible_request(scmd) &&
6041 !pqi_is_parity_write_stream(ctrl_info, scmd)) {
6042 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
8b664fef 6043 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
376fb880 6044 raid_bypassed = true;
8b664fef
KB
6045 atomic_inc(&device->raid_bypass_cnt);
6046 }
6c223761
KB
6047 }
6048 if (!raid_bypassed)
8b664fef 6049 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6c223761
KB
6050 } else {
6051 if (device->aio_enabled)
8b664fef 6052 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6c223761 6053 else
8b664fef 6054 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6c223761
KB
6055 }
6056
7561a7e4 6057out:
7561a7e4 6058 if (rc)
904f2bfd 6059 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
7561a7e4 6060
6c223761
KB
6061 return rc;
6062}
6063
6ce1ddf5 6064static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
7561a7e4 6065{
6ce1ddf5 6066 unsigned int i;
7561a7e4
KB
6067 unsigned int path;
6068 unsigned long flags;
6ce1ddf5
KB
6069 unsigned int queued_io_count;
6070 struct pqi_queue_group *queue_group;
6071 struct pqi_io_request *io_request;
7561a7e4 6072
6ce1ddf5
KB
6073 queued_io_count = 0;
6074
6075 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6076 queue_group = &ctrl_info->queue_groups[i];
6077 for (path = 0; path < 2; path++) {
6078 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
6079 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
6080 queued_io_count++;
6081 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
7561a7e4
KB
6082 }
6083 }
6084
6ce1ddf5 6085 return queued_io_count;
7561a7e4
KB
6086}
6087
6ce1ddf5 6088static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
7561a7e4 6089{
7561a7e4
KB
6090 unsigned int i;
6091 unsigned int path;
6ce1ddf5 6092 unsigned int nonempty_inbound_queue_count;
7561a7e4
KB
6093 struct pqi_queue_group *queue_group;
6094 pqi_index_t iq_pi;
6095 pqi_index_t iq_ci;
6096
6ce1ddf5
KB
6097 nonempty_inbound_queue_count = 0;
6098
7561a7e4
KB
6099 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6100 queue_group = &ctrl_info->queue_groups[i];
7561a7e4
KB
6101 for (path = 0; path < 2; path++) {
6102 iq_pi = queue_group->iq_pi_copy[path];
6ce1ddf5
KB
6103 iq_ci = readl(queue_group->iq_ci[path]);
6104 if (iq_ci != iq_pi)
6105 nonempty_inbound_queue_count++;
6106 }
6107 }
7561a7e4 6108
6ce1ddf5
KB
6109 return nonempty_inbound_queue_count;
6110}
6111
6112#define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10
6113
6114static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
6115{
6116 unsigned long start_jiffies;
6117 unsigned long warning_timeout;
6118 unsigned int queued_io_count;
6119 unsigned int nonempty_inbound_queue_count;
6120 bool displayed_warning;
6121
6122 displayed_warning = false;
6123 start_jiffies = jiffies;
42dc0426 6124 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6ce1ddf5
KB
6125
6126 while (1) {
6127 queued_io_count = pqi_queued_io_count(ctrl_info);
6128 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
6129 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
6130 break;
6131 pqi_check_ctrl_health(ctrl_info);
6132 if (pqi_ctrl_offline(ctrl_info))
6133 return -ENXIO;
6134 if (time_after(jiffies, warning_timeout)) {
6135 dev_warn(&ctrl_info->pci_dev->dev,
6136 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
6137 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
6138 displayed_warning = true;
42dc0426 6139 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
7561a7e4 6140 }
6ce1ddf5 6141 usleep_range(1000, 2000);
7561a7e4
KB
6142 }
6143
6ce1ddf5
KB
6144 if (displayed_warning)
6145 dev_warn(&ctrl_info->pci_dev->dev,
6146 "queued I/O drained after waiting for %u seconds\n",
6147 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
6148
7561a7e4
KB
6149 return 0;
6150}
6151
6152static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
6153 struct pqi_scsi_dev *device)
6154{
6155 unsigned int i;
6156 unsigned int path;
6157 struct pqi_queue_group *queue_group;
6158 unsigned long flags;
6159 struct pqi_io_request *io_request;
6160 struct pqi_io_request *next;
6161 struct scsi_cmnd *scmd;
6162 struct pqi_scsi_dev *scsi_device;
6163
6164 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6165 queue_group = &ctrl_info->queue_groups[i];
6166
6167 for (path = 0; path < 2; path++) {
6168 spin_lock_irqsave(
6169 &queue_group->submit_lock[path], flags);
6170
6171 list_for_each_entry_safe(io_request, next,
6172 &queue_group->request_list[path],
6173 request_list_entry) {
583891c9 6174
7561a7e4
KB
6175 scmd = io_request->scmd;
6176 if (!scmd)
6177 continue;
6178
6179 scsi_device = scmd->device->hostdata;
6180 if (scsi_device != device)
6181 continue;
6182
6183 list_del(&io_request->request_list_entry);
6184 set_host_byte(scmd, DID_RESET);
b622a601
MB
6185 pqi_free_io_request(io_request);
6186 scsi_dma_unmap(scmd);
7561a7e4
KB
6187 pqi_scsi_done(scmd);
6188 }
6189
6190 spin_unlock_irqrestore(
6191 &queue_group->submit_lock[path], flags);
6192 }
6193 }
6194}
6195
18ff5f08 6196#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
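/*
 * pqi_device_wait_for_pending_io() polls the per-LUN outstanding command
 * count, warning every PQI_PENDING_IO_WARNING_TIMEOUT_SECS seconds and
 * giving up with -ETIMEDOUT once timeout_msecs have elapsed; controller
 * health is only checked when the controller is not being gracefully
 * removed.
 */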
4fd22c13 6197
061ef06a 6198static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
904f2bfd 6199 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
061ef06a 6200{
18ff5f08
KB
6201 int cmds_outstanding;
6202 unsigned long start_jiffies;
6203 unsigned long warning_timeout;
6204 unsigned long msecs_waiting;
1e46731e 6205
18ff5f08 6206 start_jiffies = jiffies;
42dc0426 6207 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
1e46731e 6208
904f2bfd 6209 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
331f7e99
SB
6210 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
6211 pqi_check_ctrl_health(ctrl_info);
6212 if (pqi_ctrl_offline(ctrl_info))
6213 return -ENXIO;
6214 }
18ff5f08 6215 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
6ce1ddf5 6216 if (msecs_waiting >= timeout_msecs) {
18ff5f08
KB
6217 dev_err(&ctrl_info->pci_dev->dev,
6218 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
6219 ctrl_info->scsi_host->host_no, device->bus, device->target,
904f2bfd 6220 lun, msecs_waiting / 1000, cmds_outstanding);
18ff5f08 6221 return -ETIMEDOUT;
061ef06a 6222 }
18ff5f08
KB
6223 if (time_after(jiffies, warning_timeout)) {
6224 dev_warn(&ctrl_info->pci_dev->dev,
6225 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
6226 ctrl_info->scsi_host->host_no, device->bus, device->target,
904f2bfd 6227 lun, msecs_waiting / 1000, cmds_outstanding);
42dc0426 6228 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
4fd22c13 6229 }
061ef06a
KB
6230 usleep_range(1000, 2000);
6231 }
6232
6233 return 0;
6234}
6235
14bb215d
KB
6236static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
6237 void *context)
6c223761 6238{
14bb215d 6239 struct completion *waiting = context;
6c223761 6240
14bb215d
KB
6241 complete(waiting);
6242}
6c223761 6243
c2922f17 6244#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
14bb215d
KB
6245
6246static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
904f2bfd 6247 struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
14bb215d
KB
6248{
6249 int rc;
18ff5f08 6250 unsigned int wait_secs;
6ce1ddf5 6251 int cmds_outstanding;
18ff5f08
KB
6252
6253 wait_secs = 0;
14bb215d
KB
6254
6255 while (1) {
6256 if (wait_for_completion_io_timeout(wait,
42dc0426 6257 PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
14bb215d
KB
6258 rc = 0;
6259 break;
6c223761
KB
6260 }
6261
14bb215d
KB
6262 pqi_check_ctrl_health(ctrl_info);
6263 if (pqi_ctrl_offline(ctrl_info)) {
4e8415e3 6264 rc = -ENXIO;
14bb215d
KB
6265 break;
6266 }
18ff5f08
KB
6267
6268 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
904f2bfd 6269 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
18ff5f08 6270 dev_warn(&ctrl_info->pci_dev->dev,
6ce1ddf5 6271 "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
904f2bfd 6272 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
6c223761 6273 }
6c223761 6274
14bb215d 6275 return rc;
6c223761
KB
6276}
6277
18ff5f08
KB
6278#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
6279
904f2bfd 6280static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6c223761
KB
6281{
6282 int rc;
6283 struct pqi_io_request *io_request;
6284 DECLARE_COMPLETION_ONSTACK(wait);
6285 struct pqi_task_management_request *request;
904f2bfd 6286 struct pqi_scsi_dev *device;
6c223761 6287
904f2bfd 6288 device = scmd->device->hostdata;
b27ac2fa 6289 io_request = pqi_alloc_io_request(ctrl_info, NULL);
14bb215d 6290 io_request->io_complete_callback = pqi_lun_reset_complete;
6c223761
KB
6291 io_request->context = &wait;
6292
6293 request = io_request->iu;
6294 memset(request, 0, sizeof(*request));
6295
6296 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6297 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6298 &request->header.iu_length);
6299 put_unaligned_le16(io_request->index, &request->request_id);
6300 memcpy(request->lun_number, device->scsi3addr,
6301 sizeof(request->lun_number));
904f2bfd
KM
6302 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
6303 request->ml_device_lun_number = (u8)scmd->device->lun;
6c223761 6304 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
c2922f17 6305 if (ctrl_info->tmf_iu_timeout_supported)
18ff5f08 6306 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
6c223761 6307
583891c9 6308 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6c223761
KB
6309 io_request);
6310
904f2bfd 6311 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, (u8)scmd->device->lun, &wait);
14bb215d 6312 if (rc == 0)
6c223761 6313 rc = io_request->status;
6c223761
KB
6314
6315 pqi_free_io_request(io_request);
6c223761
KB
6316
6317 return rc;
6318}
6319
18ff5f08
KB
6320#define PQI_LUN_RESET_RETRIES 3
6321#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000)
6322#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
6323#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
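/*
 * Reset policy: pqi_lun_reset_with_retries() issues the LUN reset TMF up to
 * PQI_LUN_RESET_RETRIES additional times (10 seconds apart) after the first
 * attempt, then waits for outstanding I/O to drain (up to 10 minutes after
 * a successful reset, or 2 minutes if every attempt failed) before
 * reporting SUCCESS or FAILED to the error handler.
 */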
6c223761 6324
904f2bfd 6325static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
6c223761 6326{
18ff5f08
KB
6327 int reset_rc;
6328 int wait_rc;
3406384b 6329 unsigned int retries;
18ff5f08 6330 unsigned long timeout_msecs;
904f2bfd 6331 struct pqi_scsi_dev *device;
6c223761 6332
904f2bfd 6333 device = scmd->device->hostdata;
3406384b 6334 for (retries = 0;;) {
904f2bfd 6335 reset_rc = pqi_lun_reset(ctrl_info, scmd);
4e7d2602 6336 if (reset_rc == 0 || reset_rc == -ENODEV || ++retries > PQI_LUN_RESET_RETRIES)
3406384b
MR
6337 break;
6338 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6339 }
429fab70 6340
18ff5f08
KB
6341 timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
6342 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
4fd22c13 6343
904f2bfd 6344 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, scmd->device->lun, timeout_msecs);
18ff5f08
KB
6345 if (wait_rc && reset_rc == 0)
6346 reset_rc = wait_rc;
6c223761 6347
18ff5f08 6348 return reset_rc == 0 ? SUCCESS : FAILED;
6c223761
KB
6349}
6350
904f2bfd 6351static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
4fd22c13
MR
6352{
6353 int rc;
904f2bfd 6354 struct pqi_scsi_dev *device;
4fd22c13 6355
904f2bfd 6356 device = scmd->device->hostdata;
4fd22c13
MR
6357 pqi_ctrl_block_requests(ctrl_info);
6358 pqi_ctrl_wait_until_quiesced(ctrl_info);
6359 pqi_fail_io_queued_for_device(ctrl_info, device);
6360 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
4fd22c13
MR
6361 if (rc)
6362 rc = FAILED;
6363 else
904f2bfd 6364 rc = pqi_lun_reset_with_retries(ctrl_info, scmd);
37f33181 6365 pqi_ctrl_unblock_requests(ctrl_info);
429fab70 6366
4fd22c13
MR
6367 return rc;
6368}
6369
6c223761
KB
6370static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6371{
6372 int rc;
7561a7e4 6373 struct Scsi_Host *shost;
6c223761
KB
6374 struct pqi_ctrl_info *ctrl_info;
6375 struct pqi_scsi_dev *device;
6376
7561a7e4
KB
6377 shost = scmd->device->host;
6378 ctrl_info = shost_to_hba(shost);
6c223761
KB
6379 device = scmd->device->hostdata;
6380
37f33181
KB
6381 mutex_lock(&ctrl_info->lun_reset_mutex);
6382
6c223761 6383 dev_err(&ctrl_info->pci_dev->dev,
f0e473e0
MB
6384 "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n",
6385 shost->host_no,
904f2bfd 6386 device->bus, device->target, (u32)scmd->device->lun,
f0e473e0 6387 scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff);
6c223761 6388
7561a7e4 6389 pqi_check_ctrl_health(ctrl_info);
37f33181 6390 if (pqi_ctrl_offline(ctrl_info))
7561a7e4 6391 rc = FAILED;
37f33181 6392 else
904f2bfd 6393 rc = pqi_device_reset(ctrl_info, scmd);
429fab70 6394
6c223761
KB
6395 dev_err(&ctrl_info->pci_dev->dev,
6396 "reset of scsi %d:%d:%d:%d: %s\n",
904f2bfd 6397 shost->host_no, device->bus, device->target, (u32)scmd->device->lun,
6c223761
KB
6398 rc == SUCCESS ? "SUCCESS" : "FAILED");
6399
37f33181
KB
6400 mutex_unlock(&ctrl_info->lun_reset_mutex);
6401
6c223761
KB
6402 return rc;
6403}
6404
6405static int pqi_slave_alloc(struct scsi_device *sdev)
6406{
6407 struct pqi_scsi_dev *device;
6408 unsigned long flags;
6409 struct pqi_ctrl_info *ctrl_info;
6410 struct scsi_target *starget;
6411 struct sas_rphy *rphy;
6412
6413 ctrl_info = shost_to_hba(sdev->host);
6414
6415 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6416
6417 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6418 starget = scsi_target(sdev);
6419 rphy = target_to_rphy(starget);
6420 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6421 if (device) {
d4dc6aea
KB
6422 if (device->target_lun_valid) {
6423 device->ignore_device = true;
6424 } else {
6425 device->target = sdev_id(sdev);
6426 device->lun = sdev->lun;
6427 device->target_lun_valid = true;
6428 }
6c223761
KB
6429 }
6430 } else {
6431 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6432 sdev_id(sdev), sdev->lun);
6433 }
6434
94086f5b 6435 if (device) {
6c223761
KB
6436 sdev->hostdata = device;
6437 device->sdev = sdev;
6438 if (device->queue_depth) {
6439 device->advertised_queue_depth = device->queue_depth;
6440 scsi_change_queue_depth(sdev,
6441 device->advertised_queue_depth);
6442 }
99a12b48 6443 if (pqi_is_logical_device(device)) {
b6e2ef67 6444 pqi_disable_write_same(sdev);
99a12b48 6445 } else {
2b447f81 6446 sdev->allow_restart = 1;
99a12b48
KB
6447 if (device->device_type == SA_DEVICE_TYPE_NVME)
6448 pqi_disable_write_same(sdev);
6449 }
6c223761
KB
6450 }
6451
6452 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6453
6454 return 0;
6455}
6456
a4e1d0b7 6457static void pqi_map_queues(struct Scsi_Host *shost)
52198226
CH
6458{
6459 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6460
a4e1d0b7
BVA
6461 blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
6462 ctrl_info->pci_dev, 0);
52198226
CH
6463}
6464
d4dc6aea
KB
6465static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
6466{
6467 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
6468}
6469
ce143793
KB
6470static int pqi_slave_configure(struct scsi_device *sdev)
6471{
d4dc6aea 6472 int rc = 0;
ce143793
KB
6473 struct pqi_scsi_dev *device;
6474
6475 device = sdev->hostdata;
6476 device->devtype = sdev->type;
6477
d4dc6aea
KB
6478 if (pqi_is_tape_changer_device(device) && device->ignore_device) {
6479 rc = -ENXIO;
6480 device->ignore_device = false;
6481 }
6482
6483 return rc;
ce143793
KB
6484}
6485
2d80f405
KB
6486static void pqi_slave_destroy(struct scsi_device *sdev)
6487{
6488 struct pqi_ctrl_info *ctrl_info;
6489 struct pqi_scsi_dev *device;
6490 int mutex_acquired;
6491 unsigned long flags;
6492
6493 ctrl_info = shost_to_hba(sdev->host);
6494
6495 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
6496 if (!mutex_acquired)
6497 return;
6498
6499 device = sdev->hostdata;
6500 if (!device) {
6501 mutex_unlock(&ctrl_info->scan_mutex);
6502 return;
6503 }
6504
6505 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6506 list_del(&device->scsi_device_list_entry);
6507 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6508
6509 mutex_unlock(&ctrl_info->scan_mutex);
6510
6511 pqi_dev_info(ctrl_info, "removed", device);
6512 pqi_free_device(device);
6513}
6514
8b664fef 6515static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6c223761
KB
6516{
6517 struct pci_dev *pci_dev;
6518 u32 subsystem_vendor;
6519 u32 subsystem_device;
6520 cciss_pci_info_struct pciinfo;
6521
6522 if (!arg)
6523 return -EINVAL;
6524
6525 pci_dev = ctrl_info->pci_dev;
6526
6527 pciinfo.domain = pci_domain_nr(pci_dev->bus);
6528 pciinfo.bus = pci_dev->bus->number;
6529 pciinfo.dev_fn = pci_dev->devfn;
6530 subsystem_vendor = pci_dev->subsystem_vendor;
6531 subsystem_device = pci_dev->subsystem_device;
8b664fef 6532 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6c223761
KB
6533
6534 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
6535 return -EFAULT;
6536
6537 return 0;
6538}
6539
6540static int pqi_getdrivver_ioctl(void __user *arg)
6541{
6542 u32 version;
6543
6544 if (!arg)
6545 return -EINVAL;
6546
6547 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6548 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
6549
6550 if (copy_to_user(arg, &version, sizeof(version)))
6551 return -EFAULT;
6552
6553 return 0;
6554}
6555
6556struct ciss_error_info {
6557 u8 scsi_status;
6558 int command_status;
6559 size_t sense_data_length;
6560};
6561
6562static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6563 struct ciss_error_info *ciss_error_info)
6564{
6565 int ciss_cmd_status;
6566 size_t sense_data_length;
6567
6568 switch (pqi_error_info->data_out_result) {
6569 case PQI_DATA_IN_OUT_GOOD:
6570 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6571 break;
6572 case PQI_DATA_IN_OUT_UNDERFLOW:
6573 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6574 break;
6575 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6576 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6577 break;
6578 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6579 case PQI_DATA_IN_OUT_BUFFER_ERROR:
6580 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6581 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6582 case PQI_DATA_IN_OUT_ERROR:
6583 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6584 break;
6585 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6586 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6587 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6588 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6589 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6590 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6591 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6592 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6593 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6594 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6595 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6596 break;
6597 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6598 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6599 break;
6600 case PQI_DATA_IN_OUT_ABORTED:
6601 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6602 break;
6603 case PQI_DATA_IN_OUT_TIMEOUT:
6604 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6605 break;
6606 default:
6607 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6608 break;
6609 }
6610
6611 sense_data_length =
6612 get_unaligned_le16(&pqi_error_info->sense_data_length);
6613 if (sense_data_length == 0)
6614 sense_data_length =
6615 get_unaligned_le16(&pqi_error_info->response_data_length);
6616 if (sense_data_length)
6617 if (sense_data_length > sizeof(pqi_error_info->data))
6618 sense_data_length = sizeof(pqi_error_info->data);
6619
6620 ciss_error_info->scsi_status = pqi_error_info->status;
6621 ciss_error_info->command_status = ciss_cmd_status;
6622 ciss_error_info->sense_data_length = sense_data_length;
6623}
6624
6625static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6626{
6627 int rc;
6628 char *kernel_buffer = NULL;
6629 u16 iu_length;
6630 size_t sense_data_length;
6631 IOCTL_Command_struct iocommand;
6632 struct pqi_raid_path_request request;
6633 struct pqi_raid_error_info pqi_error_info;
6634 struct ciss_error_info ciss_error_info;
6635
6636 if (pqi_ctrl_offline(ctrl_info))
6637 return -ENXIO;
2790cd4d
KB
6638 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6639 return -EBUSY;
6c223761
KB
6640 if (!arg)
6641 return -EINVAL;
6642 if (!capable(CAP_SYS_RAWIO))
6643 return -EPERM;
6644 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6645 return -EFAULT;
6646 if (iocommand.buf_size < 1 &&
6647 iocommand.Request.Type.Direction != XFER_NONE)
6648 return -EINVAL;
6649 if (iocommand.Request.CDBLen > sizeof(request.cdb))
6650 return -EINVAL;
6651 if (iocommand.Request.Type.Type != TYPE_CMD)
6652 return -EINVAL;
6653
6654 switch (iocommand.Request.Type.Direction) {
6655 case XFER_NONE:
6656 case XFER_WRITE:
6657 case XFER_READ:
41555d54 6658 case XFER_READ | XFER_WRITE:
6c223761
KB
6659 break;
6660 default:
6661 return -EINVAL;
6662 }
6663
6664 if (iocommand.buf_size > 0) {
6665 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6666 if (!kernel_buffer)
6667 return -ENOMEM;
6668 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6669 if (copy_from_user(kernel_buffer, iocommand.buf,
6670 iocommand.buf_size)) {
6671 rc = -EFAULT;
6672 goto out;
6673 }
6674 } else {
6675 memset(kernel_buffer, 0, iocommand.buf_size);
6676 }
6677 }
6678
6679 memset(&request, 0, sizeof(request));
6680
6681 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6682 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6683 PQI_REQUEST_HEADER_LENGTH;
6684 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6685 sizeof(request.lun_number));
6686 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6687 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6688
6689 switch (iocommand.Request.Type.Direction) {
6690 case XFER_NONE:
6691 request.data_direction = SOP_NO_DIRECTION_FLAG;
6692 break;
6693 case XFER_WRITE:
6694 request.data_direction = SOP_WRITE_FLAG;
6695 break;
6696 case XFER_READ:
6697 request.data_direction = SOP_READ_FLAG;
6698 break;
41555d54
KB
6699 case XFER_READ | XFER_WRITE:
6700 request.data_direction = SOP_BIDIRECTIONAL;
6701 break;
6c223761
KB
6702 }
6703
6704 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6705
6706 if (iocommand.buf_size > 0) {
6707 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6708
6709 rc = pqi_map_single(ctrl_info->pci_dev,
6710 &request.sg_descriptors[0], kernel_buffer,
6917a9cc 6711 iocommand.buf_size, DMA_BIDIRECTIONAL);
6c223761
KB
6712 if (rc)
6713 goto out;
6714
6715 iu_length += sizeof(request.sg_descriptors[0]);
6716 }
6717
6718 put_unaligned_le16(iu_length, &request.header.iu_length);
6719
21432010 6720 if (ctrl_info->raid_iu_timeout_supported)
6721 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6722
6c223761 6723 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
ae0c189d 6724 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6c223761
KB
6725
6726 if (iocommand.buf_size > 0)
6727 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6917a9cc 6728 DMA_BIDIRECTIONAL);
6c223761
KB
6729
6730 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6731
6732 if (rc == 0) {
6733 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6734 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6735 iocommand.error_info.CommandStatus =
6736 ciss_error_info.command_status;
6737 sense_data_length = ciss_error_info.sense_data_length;
6738 if (sense_data_length) {
6739 if (sense_data_length >
6740 sizeof(iocommand.error_info.SenseInfo))
6741 sense_data_length =
6742 sizeof(iocommand.error_info.SenseInfo);
6743 memcpy(iocommand.error_info.SenseInfo,
6744 pqi_error_info.data, sense_data_length);
6745 iocommand.error_info.SenseLen = sense_data_length;
6746 }
6747 }
6748
6749 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6750 rc = -EFAULT;
6751 goto out;
6752 }
6753
6754 if (rc == 0 && iocommand.buf_size > 0 &&
6755 (iocommand.Request.Type.Direction & XFER_READ)) {
6756 if (copy_to_user(iocommand.buf, kernel_buffer,
6757 iocommand.buf_size)) {
6758 rc = -EFAULT;
6759 }
6760 }
6761
6762out:
6763 kfree(kernel_buffer);
6764
6765 return rc;
6766}
6767
6f4e626f
NC
6768static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6769 void __user *arg)
6c223761
KB
6770{
6771 int rc;
6772 struct pqi_ctrl_info *ctrl_info;
6773
6774 ctrl_info = shost_to_hba(sdev->host);
6775
6776 switch (cmd) {
6777 case CCISS_DEREGDISK:
6778 case CCISS_REGNEWDISK:
6779 case CCISS_REGNEWD:
6780 rc = pqi_scan_scsi_devices(ctrl_info);
6781 break;
6782 case CCISS_GETPCIINFO:
6783 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6784 break;
6785 case CCISS_GETDRIVVER:
6786 rc = pqi_getdrivver_ioctl(arg);
6787 break;
6788 case CCISS_PASSTHRU:
6789 rc = pqi_passthru_ioctl(ctrl_info, arg);
6790 break;
6791 default:
6792 rc = -EINVAL;
6793 break;
6794 }
6795
6796 return rc;
6797}
6798
6d90615f 6799static ssize_t pqi_firmware_version_show(struct device *dev,
6c223761
KB
6800 struct device_attribute *attr, char *buffer)
6801{
6c223761
KB
6802 struct Scsi_Host *shost;
6803 struct pqi_ctrl_info *ctrl_info;
6804
6805 shost = class_to_shost(dev);
6806 ctrl_info = shost_to_hba(shost);
6807
a4256252 6808 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6d90615f
MB
6809}
6810
6811static ssize_t pqi_driver_version_show(struct device *dev,
6812 struct device_attribute *attr, char *buffer)
6813{
a4256252 6814 return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6d90615f 6815}
6c223761 6816
6d90615f
MB
6817static ssize_t pqi_serial_number_show(struct device *dev,
6818 struct device_attribute *attr, char *buffer)
6819{
6820 struct Scsi_Host *shost;
6821 struct pqi_ctrl_info *ctrl_info;
6822
6823 shost = class_to_shost(dev);
6824 ctrl_info = shost_to_hba(shost);
6825
a4256252 6826 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6d90615f
MB
6827}
6828
6829static ssize_t pqi_model_show(struct device *dev,
6830 struct device_attribute *attr, char *buffer)
6831{
6832 struct Scsi_Host *shost;
6833 struct pqi_ctrl_info *ctrl_info;
6834
6835 shost = class_to_shost(dev);
6836 ctrl_info = shost_to_hba(shost);
6837
a4256252 6838 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6d90615f
MB
6839}
6840
6841static ssize_t pqi_vendor_show(struct device *dev,
6842 struct device_attribute *attr, char *buffer)
6843{
6844 struct Scsi_Host *shost;
6845 struct pqi_ctrl_info *ctrl_info;
6846
6847 shost = class_to_shost(dev);
6848 ctrl_info = shost_to_hba(shost);
6849
a4256252 6850 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6c223761
KB
6851}
6852
6853static ssize_t pqi_host_rescan_store(struct device *dev,
6854 struct device_attribute *attr, const char *buffer, size_t count)
6855{
6856 struct Scsi_Host *shost = class_to_shost(dev);
6857
6858 pqi_scan_start(shost);
6859
6860 return count;
6861}
6862
3c50976f
KB
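/*
 * The lockup_action host attribute lists every supported action on one line,
 * with the currently selected action shown in square brackets.
 */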
6863static ssize_t pqi_lockup_action_show(struct device *dev,
6864 struct device_attribute *attr, char *buffer)
6865{
6866 int count = 0;
6867 unsigned int i;
6868
6869 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6870 if (pqi_lockup_actions[i].action == pqi_lockup_action)
181aea89 6871 count += scnprintf(buffer + count, PAGE_SIZE - count,
3c50976f
KB
6872 "[%s] ", pqi_lockup_actions[i].name);
6873 else
181aea89 6874 count += scnprintf(buffer + count, PAGE_SIZE - count,
3c50976f
KB
6875 "%s ", pqi_lockup_actions[i].name);
6876 }
6877
181aea89 6878 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
3c50976f
KB
6879
6880 return count;
6881}
6882
6883static ssize_t pqi_lockup_action_store(struct device *dev,
6884 struct device_attribute *attr, const char *buffer, size_t count)
6885{
6886 unsigned int i;
6887 char *action_name;
6888 char action_name_buffer[32];
6889
6890 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6891 action_name = strstrip(action_name_buffer);
6892
6893 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6894 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6895 pqi_lockup_action = pqi_lockup_actions[i].action;
6896 return count;
6897 }
6898 }
6899
6900 return -EINVAL;
6901}
6902
5be746d7
DB
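/*
 * The enable_stream_detection, enable_r5_writes, and enable_r6_writes host
 * attributes accept 0 or 1; any non-zero value written is treated as 1.
 */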
6903static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
6904 struct device_attribute *attr, char *buffer)
6905{
6906 struct Scsi_Host *shost = class_to_shost(dev);
6907 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6908
6909 return scnprintf(buffer, 10, "%x\n",
6910 ctrl_info->enable_stream_detection);
6911}
6912
6913static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
6914 struct device_attribute *attr, const char *buffer, size_t count)
6915{
6916 struct Scsi_Host *shost = class_to_shost(dev);
6917 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6918 u8 set_stream_detection = 0;
6919
6920 if (kstrtou8(buffer, 0, &set_stream_detection))
6921 return -EINVAL;
6922
6923 if (set_stream_detection > 0)
6924 set_stream_detection = 1;
6925
6926 ctrl_info->enable_stream_detection = set_stream_detection;
6927
6928 return count;
6929}
6930
6702d2c4
DB
6931static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
6932 struct device_attribute *attr, char *buffer)
6933{
6934 struct Scsi_Host *shost = class_to_shost(dev);
6935 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6936
6937 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
6938}
6939
6940static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
6941 struct device_attribute *attr, const char *buffer, size_t count)
6942{
6943 struct Scsi_Host *shost = class_to_shost(dev);
6944 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6945 u8 set_r5_writes = 0;
6946
6947 if (kstrtou8(buffer, 0, &set_r5_writes))
6948 return -EINVAL;
6949
6950 if (set_r5_writes > 0)
6951 set_r5_writes = 1;
6952
6953 ctrl_info->enable_r5_writes = set_r5_writes;
6954
6955 return count;
6956}
6957
6958static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
6959 struct device_attribute *attr, char *buffer)
6960{
6961 struct Scsi_Host *shost = class_to_shost(dev);
6962 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6963
6964 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
6965}
6966
6967static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
6968 struct device_attribute *attr, const char *buffer, size_t count)
6969{
6970 struct Scsi_Host *shost = class_to_shost(dev);
6971 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6972 u8 set_r6_writes = 0;
6973
6974 if (kstrtou8(buffer, 0, &set_r6_writes))
6975 return -EINVAL;
6976
6977 if (set_r6_writes > 0)
6978 set_r6_writes = 1;
6979
6980 ctrl_info->enable_r6_writes = set_r6_writes;
6981
6982 return count;
6983}
6984
6d90615f
MB
6985static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
6986static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
6987static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
6988static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
6989static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
cbe0c7b1 6990static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
583891c9
KB
6991static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
6992 pqi_lockup_action_store);
5be746d7
DB
6993static DEVICE_ATTR(enable_stream_detection, 0644,
6994 pqi_host_enable_stream_detection_show,
6995 pqi_host_enable_stream_detection_store);
6702d2c4
DB
6996static DEVICE_ATTR(enable_r5_writes, 0644,
6997 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
6998static DEVICE_ATTR(enable_r6_writes, 0644,
6999 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
6c223761 7000
64fc9015
BVA
7001static struct attribute *pqi_shost_attrs[] = {
7002 &dev_attr_driver_version.attr,
7003 &dev_attr_firmware_version.attr,
7004 &dev_attr_model.attr,
7005 &dev_attr_serial_number.attr,
7006 &dev_attr_vendor.attr,
7007 &dev_attr_rescan.attr,
7008 &dev_attr_lockup_action.attr,
7009 &dev_attr_enable_stream_detection.attr,
7010 &dev_attr_enable_r5_writes.attr,
7011 &dev_attr_enable_r6_writes.attr,
6c223761
KB
7012 NULL
7013};
7014
64fc9015
BVA
7015ATTRIBUTE_GROUPS(pqi_shost);
7016
cd128244
DC
7017static ssize_t pqi_unique_id_show(struct device *dev,
7018 struct device_attribute *attr, char *buffer)
7019{
7020 struct pqi_ctrl_info *ctrl_info;
7021 struct scsi_device *sdev;
7022 struct pqi_scsi_dev *device;
7023 unsigned long flags;
5b083b30 7024 u8 unique_id[16];
cd128244
DC
7025
7026 sdev = to_scsi_device(dev);
7027 ctrl_info = shost_to_hba(sdev->host);
7028
331f7e99
SB
7029 if (pqi_ctrl_offline(ctrl_info))
7030 return -ENODEV;
7031
cd128244
DC
7032 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7033
7034 device = sdev->hostdata;
7035 if (!device) {
8b664fef 7036 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
cd128244
DC
7037 return -ENODEV;
7038 }
5b083b30 7039
28ca6d87
MM
7040 if (device->is_physical_device)
7041 memcpy(unique_id, device->wwid, sizeof(device->wwid));
7042 else
5b083b30 7043 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
cd128244
DC
7044
7045 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7046
a4256252 7047 return scnprintf(buffer, PAGE_SIZE,
583891c9
KB
7048 "%02X%02X%02X%02X%02X%02X%02X%02X"
7049 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
5b083b30
KB
7050 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
7051 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
7052 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
7053 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
cd128244
DC
7054}
7055
7056static ssize_t pqi_lunid_show(struct device *dev,
7057 struct device_attribute *attr, char *buffer)
7058{
7059 struct pqi_ctrl_info *ctrl_info;
7060 struct scsi_device *sdev;
7061 struct pqi_scsi_dev *device;
7062 unsigned long flags;
7063 u8 lunid[8];
7064
7065 sdev = to_scsi_device(dev);
7066 ctrl_info = shost_to_hba(sdev->host);
7067
331f7e99
SB
7068 if (pqi_ctrl_offline(ctrl_info))
7069 return -ENODEV;
7070
cd128244
DC
7071 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7072
7073 device = sdev->hostdata;
7074 if (!device) {
8b664fef 7075 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
cd128244
DC
7076 return -ENODEV;
7077 }
694c5d5b 7078
cd128244
DC
7079 memcpy(lunid, device->scsi3addr, sizeof(lunid));
7080
7081 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7082
a4256252 7083 return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
cd128244
DC
7084}
7085
694c5d5b
KB
7086#define MAX_PATHS 8
7087
cd128244
DC
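/*
 * path_info emits one line per discovered path (up to MAX_PATHS). The
 * device's path_map is a bitmask of valid paths and active_path_index
 * identifies the path currently in use; connector, box, and bay details
 * are reported only for physical devices.
 */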
7088static ssize_t pqi_path_info_show(struct device *dev,
7089 struct device_attribute *attr, char *buf)
7090{
7091 struct pqi_ctrl_info *ctrl_info;
7092 struct scsi_device *sdev;
7093 struct pqi_scsi_dev *device;
7094 unsigned long flags;
7095 int i;
7096 int output_len = 0;
7097 u8 box;
7098 u8 bay;
694c5d5b 7099 u8 path_map_index;
cd128244 7100 char *active;
694c5d5b 7101 u8 phys_connector[2];
cd128244
DC
7102
7103 sdev = to_scsi_device(dev);
7104 ctrl_info = shost_to_hba(sdev->host);
7105
331f7e99
SB
7106 if (pqi_ctrl_offline(ctrl_info))
7107 return -ENODEV;
7108
cd128244
DC
7109 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7110
7111 device = sdev->hostdata;
7112 if (!device) {
8b664fef 7113 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
cd128244
DC
7114 return -ENODEV;
7115 }
7116
7117 bay = device->bay;
7118 for (i = 0; i < MAX_PATHS; i++) {
694c5d5b 7119 path_map_index = 1 << i;
cd128244
DC
7120 if (i == device->active_path_index)
7121 active = "Active";
7122 else if (device->path_map & path_map_index)
7123 active = "Inactive";
7124 else
7125 continue;
7126
7127 output_len += scnprintf(buf + output_len,
7128 PAGE_SIZE - output_len,
7129 "[%d:%d:%d:%d] %20.20s ",
7130 ctrl_info->scsi_host->host_no,
7131 device->bus, device->target,
7132 device->lun,
7133 scsi_device_type(device->devtype));
7134
7135 if (device->devtype == TYPE_RAID ||
7136 pqi_is_logical_device(device))
7137 goto end_buffer;
7138
7139 memcpy(&phys_connector, &device->phys_connector[i],
7140 sizeof(phys_connector));
7141 if (phys_connector[0] < '0')
7142 phys_connector[0] = '0';
7143 if (phys_connector[1] < '0')
7144 phys_connector[1] = '0';
7145
7146 output_len += scnprintf(buf + output_len,
7147 PAGE_SIZE - output_len,
7148 "PORT: %.2s ", phys_connector);
7149
7150 box = device->box[i];
7151 if (box != 0 && box != 0xFF)
7152 output_len += scnprintf(buf + output_len,
7153 PAGE_SIZE - output_len,
7154 "BOX: %hhu ", box);
7155
7156 if ((device->devtype == TYPE_DISK ||
7157 device->devtype == TYPE_ZBC) &&
7158 pqi_expose_device(device))
7159 output_len += scnprintf(buf + output_len,
7160 PAGE_SIZE - output_len,
7161 "BAY: %hhu ", bay);
7162
7163end_buffer:
7164 output_len += scnprintf(buf + output_len,
7165 PAGE_SIZE - output_len,
7166 "%s\n", active);
7167 }
7168
7169 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
694c5d5b 7170
cd128244
DC
7171 return output_len;
7172}
7173
6c223761
KB
7174static ssize_t pqi_sas_address_show(struct device *dev,
7175 struct device_attribute *attr, char *buffer)
7176{
7177 struct pqi_ctrl_info *ctrl_info;
7178 struct scsi_device *sdev;
7179 struct pqi_scsi_dev *device;
7180 unsigned long flags;
7181 u64 sas_address;
7182
7183 sdev = to_scsi_device(dev);
7184 ctrl_info = shost_to_hba(sdev->host);
7185
331f7e99
SB
7186 if (pqi_ctrl_offline(ctrl_info))
7187 return -ENODEV;
7188
6c223761
KB
7189 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7190
7191 device = sdev->hostdata;
00598b05 7192 if (!device) {
8b664fef 7193 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6c223761
KB
7194 return -ENODEV;
7195 }
694c5d5b 7196
6c223761
KB
7197 sas_address = device->sas_address;
7198
7199 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7200
a4256252 7201 return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
6c223761
KB
7202}
7203
7204static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
7205 struct device_attribute *attr, char *buffer)
7206{
7207 struct pqi_ctrl_info *ctrl_info;
7208 struct scsi_device *sdev;
7209 struct pqi_scsi_dev *device;
7210 unsigned long flags;
7211
7212 sdev = to_scsi_device(dev);
7213 ctrl_info = shost_to_hba(sdev->host);
7214
331f7e99
SB
7215 if (pqi_ctrl_offline(ctrl_info))
7216 return -ENODEV;
7217
6c223761
KB
7218 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7219
7220 device = sdev->hostdata;
8b664fef
KB
7221 if (!device) {
7222 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7223 return -ENODEV;
7224 }
7225
588a63fe 7226 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
6c223761
KB
7227 buffer[1] = '\n';
7228 buffer[2] = '\0';
7229
7230 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7231
7232 return 2;
7233}
7234
a9f93392
KB
7235static ssize_t pqi_raid_level_show(struct device *dev,
7236 struct device_attribute *attr, char *buffer)
7237{
7238 struct pqi_ctrl_info *ctrl_info;
7239 struct scsi_device *sdev;
7240 struct pqi_scsi_dev *device;
7241 unsigned long flags;
7242 char *raid_level;
7243
7244 sdev = to_scsi_device(dev);
7245 ctrl_info = shost_to_hba(sdev->host);
7246
331f7e99
SB
7247 if (pqi_ctrl_offline(ctrl_info))
7248 return -ENODEV;
7249
a9f93392
KB
7250 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7251
7252 device = sdev->hostdata;
8b664fef
KB
7253 if (!device) {
7254 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7255 return -ENODEV;
7256 }
a9f93392 7257
cbe42ac1 7258 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
a9f93392
KB
7259 raid_level = pqi_raid_level_to_string(device->raid_level);
7260 else
7261 raid_level = "N/A";
7262
7263 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7264
a4256252 7265 return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
a9f93392
KB
7266}
7267
8b664fef
KB
7268static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
7269 struct device_attribute *attr, char *buffer)
7270{
7271 struct pqi_ctrl_info *ctrl_info;
7272 struct scsi_device *sdev;
7273 struct pqi_scsi_dev *device;
7274 unsigned long flags;
7275 int raid_bypass_cnt;
7276
7277 sdev = to_scsi_device(dev);
7278 ctrl_info = shost_to_hba(sdev->host);
7279
331f7e99
SB
7280 if (pqi_ctrl_offline(ctrl_info))
7281 return -ENODEV;
7282
8b664fef
KB
7283 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7284
7285 device = sdev->hostdata;
7286 if (!device) {
7287 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7288 return -ENODEV;
7289 }
7290
7291 raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
7292
7293 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7294
a4256252 7295 return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
8b664fef
KB
7296}
7297
2a47834d
GW
7298static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
7299 struct device_attribute *attr, char *buf)
7300{
7301 struct pqi_ctrl_info *ctrl_info;
7302 struct scsi_device *sdev;
7303 struct pqi_scsi_dev *device;
7304 unsigned long flags;
7305 int output_len = 0;
7306
7307 sdev = to_scsi_device(dev);
7308 ctrl_info = shost_to_hba(sdev->host);
7309
331f7e99
SB
7310 if (pqi_ctrl_offline(ctrl_info))
7311 return -ENODEV;
7312
2a47834d
GW
7313 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7314
7315 device = sdev->hostdata;
7316 if (!device) {
7317 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7318 return -ENODEV;
7319 }
7320
7321 output_len = snprintf(buf, PAGE_SIZE, "%d\n",
7322 device->ncq_prio_enable);
7323 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7324
7325 return output_len;
7326}
7327
7328static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
7329 struct device_attribute *attr,
7330 const char *buf, size_t count)
7331{
7332 struct pqi_ctrl_info *ctrl_info;
7333 struct scsi_device *sdev;
7334 struct pqi_scsi_dev *device;
7335 unsigned long flags;
7336 u8 ncq_prio_enable = 0;
7337
7338 if (kstrtou8(buf, 0, &ncq_prio_enable))
7339 return -EINVAL;
7340
7341 sdev = to_scsi_device(dev);
7342 ctrl_info = shost_to_hba(sdev->host);
7343
7344 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7345
7346 device = sdev->hostdata;
7347
7348 if (!device) {
7349 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7350 return -ENODEV;
7351 }
7352
7353 if (!device->ncq_prio_support ||
7354 !device->is_physical_device) {
7355 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7356 return -EINVAL;
7357 }
7358
7359 device->ncq_prio_enable = ncq_prio_enable;
7360
7361 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7362
7363 return strlen(buf);
7364}
7365
cd128244
DC
7366static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
7367static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
7368static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
cbe0c7b1 7369static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
8b664fef 7370static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
a9f93392 7371static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
8b664fef 7372static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
2a47834d
GW
7373static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
7374 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
6c223761 7375
64fc9015
BVA
7376static struct attribute *pqi_sdev_attrs[] = {
7377 &dev_attr_lunid.attr,
7378 &dev_attr_unique_id.attr,
7379 &dev_attr_path_info.attr,
7380 &dev_attr_sas_address.attr,
7381 &dev_attr_ssd_smart_path_enabled.attr,
7382 &dev_attr_raid_level.attr,
7383 &dev_attr_raid_bypass_cnt.attr,
2a47834d 7384 &dev_attr_sas_ncq_prio_enable.attr,
6c223761
KB
7385 NULL
7386};
7387
64fc9015
BVA
7388ATTRIBUTE_GROUPS(pqi_sdev);
7389
6c223761
KB
7390static struct scsi_host_template pqi_driver_template = {
7391 .module = THIS_MODULE,
7392 .name = DRIVER_NAME_SHORT,
7393 .proc_name = DRIVER_NAME_SHORT,
7394 .queuecommand = pqi_scsi_queue_command,
7395 .scan_start = pqi_scan_start,
7396 .scan_finished = pqi_scan_finished,
7397 .this_id = -1,
6c223761
KB
7398 .eh_device_reset_handler = pqi_eh_device_reset_handler,
7399 .ioctl = pqi_ioctl,
7400 .slave_alloc = pqi_slave_alloc,
ce143793 7401 .slave_configure = pqi_slave_configure,
2d80f405 7402 .slave_destroy = pqi_slave_destroy,
52198226 7403 .map_queues = pqi_map_queues,
64fc9015
BVA
7404 .sdev_groups = pqi_sdev_groups,
7405 .shost_groups = pqi_shost_groups,
c1ea387d 7406 .cmd_size = sizeof(struct pqi_cmd_priv),
6c223761
KB
7407};
7408
7409static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7410{
7411 int rc;
7412 struct Scsi_Host *shost;
7413
7414 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7415 if (!shost) {
583891c9 7416 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
6c223761
KB
7417 return -ENOMEM;
7418 }
7419
7420 shost->io_port = 0;
7421 shost->n_io_port = 0;
7422 shost->this_id = -1;
7423 shost->max_channel = PQI_MAX_BUS;
7424 shost->max_cmd_len = MAX_COMMAND_SIZE;
904f2bfd 7425 shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
6c223761
KB
7426 shost->max_id = ~0;
7427 shost->max_sectors = ctrl_info->max_sectors;
7428 shost->can_queue = ctrl_info->scsi_ml_can_queue;
7429 shost->cmd_per_lun = shost->can_queue;
7430 shost->sg_tablesize = ctrl_info->sg_tablesize;
7431 shost->transportt = pqi_sas_transport_template;
52198226 7432 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
6c223761
KB
7433 shost->unique_id = shost->irq;
7434 shost->nr_hw_queues = ctrl_info->num_queue_groups;
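	/* Use a single host-wide tag space shared across all hardware queues. */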
c6d3ee20 7435 shost->host_tagset = 1;
6c223761
KB
7436 shost->hostdata[0] = (unsigned long)ctrl_info;
7437
7438 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7439 if (rc) {
583891c9 7440 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
6c223761
KB
7441 goto free_host;
7442 }
7443
7444 rc = pqi_add_sas_host(shost, ctrl_info);
7445 if (rc) {
583891c9 7446 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
6c223761
KB
7447 goto remove_host;
7448 }
7449
7450 ctrl_info->scsi_host = shost;
7451
7452 return 0;
7453
7454remove_host:
7455 scsi_remove_host(shost);
7456free_host:
7457 scsi_host_put(shost);
7458
7459 return rc;
7460}
7461
7462static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7463{
7464 struct Scsi_Host *shost;
7465
7466 pqi_delete_sas_host(ctrl_info);
7467
7468 shost = ctrl_info->scsi_host;
7469 if (!shost)
7470 return;
7471
7472 scsi_remove_host(shost);
7473 scsi_host_put(shost);
7474}
7475
336b6819
KB
7476static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7477{
7478 int rc = 0;
7479 struct pqi_device_registers __iomem *pqi_registers;
7480 unsigned long timeout;
7481 unsigned int timeout_msecs;
7482 union pqi_reset_register reset_reg;
6c223761 7483
336b6819
KB
7484 pqi_registers = ctrl_info->pqi_registers;
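	/* max_reset_timeout is reported by the controller in 100 ms units. */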
7485 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7486 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7487
7488 while (1) {
7489 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7490 reset_reg.all_bits = readl(&pqi_registers->device_reset);
7491 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7492 break;
85b41834 7493 if (!sis_is_firmware_running(ctrl_info)) {
336b6819
KB
7494 rc = -ENXIO;
7495 break;
7496 }
7497 if (time_after(jiffies, timeout)) {
7498 rc = -ETIMEDOUT;
7499 break;
7500 }
7501 }
7502
7503 return rc;
7504}
6c223761
KB
7505
7506static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7507{
7508 int rc;
336b6819
KB
7509 union pqi_reset_register reset_reg;
7510
7511 if (ctrl_info->pqi_reset_quiesce_supported) {
7512 rc = sis_pqi_reset_quiesce(ctrl_info);
7513 if (rc) {
7514 dev_err(&ctrl_info->pci_dev->dev,
583891c9 7515 "PQI reset failed during quiesce with error %d\n", rc);
336b6819
KB
7516 return rc;
7517 }
7518 }
6c223761 7519
336b6819
KB
7520 reset_reg.all_bits = 0;
7521 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7522 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
6c223761 7523
336b6819 7524 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
6c223761 7525
336b6819 7526 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
6c223761
KB
7527 if (rc)
7528 dev_err(&ctrl_info->pci_dev->dev,
336b6819 7529 "PQI reset failed with error %d\n", rc);
6c223761
KB
7530
7531 return rc;
7532}
7533
6d90615f
MB
7534static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7535{
7536 int rc;
7537 struct bmic_sense_subsystem_info *sense_info;
7538
7539 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7540 if (!sense_info)
7541 return -ENOMEM;
7542
7543 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7544 if (rc)
7545 goto out;
7546
7547 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7548 sizeof(sense_info->ctrl_serial_number));
7549 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7550
7551out:
7552 kfree(sense_info);
7553
7554 return rc;
7555}
7556
7557static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
6c223761
KB
7558{
7559 int rc;
7560 struct bmic_identify_controller *identify;
7561
7562 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7563 if (!identify)
7564 return -ENOMEM;
7565
7566 rc = pqi_identify_controller(ctrl_info, identify);
7567 if (rc)
7568 goto out;
7569
598bef8d
KB
7570 if (get_unaligned_le32(&identify->extra_controller_flags) &
7571 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7572 memcpy(ctrl_info->firmware_version,
7573 identify->firmware_version_long,
7574 sizeof(identify->firmware_version_long));
7575 } else {
7576 memcpy(ctrl_info->firmware_version,
7577 identify->firmware_version_short,
7578 sizeof(identify->firmware_version_short));
7579 ctrl_info->firmware_version
7580 [sizeof(identify->firmware_version_short)] = '\0';
7581 snprintf(ctrl_info->firmware_version +
7582 strlen(ctrl_info->firmware_version),
7583 sizeof(ctrl_info->firmware_version) -
7584 sizeof(identify->firmware_version_short),
7585 "-%u",
7586 get_unaligned_le16(&identify->firmware_build_number));
7587 }
6c223761 7588
6d90615f
MB
7589 memcpy(ctrl_info->model, identify->product_id,
7590 sizeof(identify->product_id));
7591 ctrl_info->model[sizeof(identify->product_id)] = '\0';
7592
7593 memcpy(ctrl_info->vendor, identify->vendor_id,
7594 sizeof(identify->vendor_id));
7595 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7596
1d393227
GW
7597 dev_info(&ctrl_info->pci_dev->dev,
7598 "Firmware version: %s\n", ctrl_info->firmware_version);
7599
6c223761
KB
7600out:
7601 kfree(identify);
7602
7603 return rc;
7604}
7605
b212c251
KB
7606struct pqi_config_table_section_info {
7607 struct pqi_ctrl_info *ctrl_info;
7608 void *section;
7609 u32 section_offset;
7610 void __iomem *section_iomem_addr;
7611};
7612
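/*
 * Layout of the firmware features section: features_supported[] holds three
 * consecutive bitmaps of num_elements bytes each. The first holds the bits
 * the firmware supports, the second (at offset num_elements) the bits the
 * host requests, and the third (at offset 2 * num_elements) the bits the
 * firmware has enabled. Two 16-bit "max known feature" words follow the
 * enabled bitmap; the helpers below rely on these offsets.
 */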
7613static inline bool pqi_is_firmware_feature_supported(
7614 struct pqi_config_table_firmware_features *firmware_features,
7615 unsigned int bit_position)
98f87667 7616{
b212c251 7617 unsigned int byte_index;
98f87667 7618
b212c251 7619 byte_index = bit_position / BITS_PER_BYTE;
98f87667 7620
b212c251
KB
7621 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7622 return false;
98f87667 7623
b212c251
KB
7624 return firmware_features->features_supported[byte_index] &
7625 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7626}
7627
7628static inline bool pqi_is_firmware_feature_enabled(
7629 struct pqi_config_table_firmware_features *firmware_features,
7630 void __iomem *firmware_features_iomem_addr,
7631 unsigned int bit_position)
7632{
7633 unsigned int byte_index;
7634 u8 __iomem *features_enabled_iomem_addr;
7635
7636 byte_index = (bit_position / BITS_PER_BYTE) +
7637 (le16_to_cpu(firmware_features->num_elements) * 2);
7638
7639 features_enabled_iomem_addr = firmware_features_iomem_addr +
7640 offsetof(struct pqi_config_table_firmware_features,
7641 features_supported) + byte_index;
7642
7643 return *((__force u8 *)features_enabled_iomem_addr) &
7644 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7645}
7646
7647static inline void pqi_request_firmware_feature(
7648 struct pqi_config_table_firmware_features *firmware_features,
7649 unsigned int bit_position)
7650{
7651 unsigned int byte_index;
7652
7653 byte_index = (bit_position / BITS_PER_BYTE) +
7654 le16_to_cpu(firmware_features->num_elements);
7655
7656 firmware_features->features_supported[byte_index] |=
7657 (1 << (bit_position % BITS_PER_BYTE));
7658}
7659
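/*
 * Notify the firmware, via a vendor-general request, that the host has
 * updated the given range of configuration table sections.
 */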
7660static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7661 u16 first_section, u16 last_section)
7662{
7663 struct pqi_vendor_general_request request;
7664
7665 memset(&request, 0, sizeof(request));
7666
7667 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7668 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7669 &request.header.iu_length);
7670 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7671 &request.function_code);
7672 put_unaligned_le16(first_section,
7673 &request.data.config_table_update.first_section);
7674 put_unaligned_le16(last_section,
7675 &request.data.config_table_update.last_section);
7676
ae0c189d 7677 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
b212c251
KB
7678}
7679
7680static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7681 struct pqi_config_table_firmware_features *firmware_features,
7682 void __iomem *firmware_features_iomem_addr)
7683{
7684 void *features_requested;
7685 void __iomem *features_requested_iomem_addr;
f6cc2a77 7686 void __iomem *host_max_known_feature_iomem_addr;
b212c251
KB
7687
7688 features_requested = firmware_features->features_supported +
7689 le16_to_cpu(firmware_features->num_elements);
7690
7691 features_requested_iomem_addr = firmware_features_iomem_addr +
7692 (features_requested - (void *)firmware_features);
7693
7694 memcpy_toio(features_requested_iomem_addr, features_requested,
7695 le16_to_cpu(firmware_features->num_elements));
7696
f6cc2a77
KB
7697 if (pqi_is_firmware_feature_supported(firmware_features,
7698 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7699 host_max_known_feature_iomem_addr =
7700 features_requested_iomem_addr +
7701 (le16_to_cpu(firmware_features->num_elements) * 2) +
7702 sizeof(__le16);
7703 writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
7704 host_max_known_feature_iomem_addr);
7705 }
7706
b212c251
KB
7707 return pqi_config_table_update(ctrl_info,
7708 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7709 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7710}
7711
7712struct pqi_firmware_feature {
7713 char *feature_name;
7714 unsigned int feature_bit;
7715 bool supported;
7716 bool enabled;
7717 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7718 struct pqi_firmware_feature *firmware_feature);
7719};
7720
7721static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7722 struct pqi_firmware_feature *firmware_feature)
7723{
7724 if (!firmware_feature->supported) {
7725 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7726 firmware_feature->feature_name);
7727 return;
7728 }
7729
7730 if (firmware_feature->enabled) {
7731 dev_info(&ctrl_info->pci_dev->dev,
7732 "%s enabled\n", firmware_feature->feature_name);
7733 return;
7734 }
7735
7736 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7737 firmware_feature->feature_name);
7738}
7739
21432010 7740static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7741 struct pqi_firmware_feature *firmware_feature)
7742{
7743 switch (firmware_feature->feature_bit) {
f6cc2a77
KB
7744 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7745 ctrl_info->enable_r1_writes = firmware_feature->enabled;
7746 break;
7747 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7748 ctrl_info->enable_r5_writes = firmware_feature->enabled;
7749 break;
7750 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7751 ctrl_info->enable_r6_writes = firmware_feature->enabled;
7752 break;
21432010 7753 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7754 ctrl_info->soft_reset_handshake_supported =
4ccc354b
KB
7755 firmware_feature->enabled &&
7756 pqi_read_soft_reset_status(ctrl_info);
21432010 7757 break;
7758 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
583891c9 7759 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
21432010 7760 break;
c2922f17 7761 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
583891c9 7762 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
c2922f17 7763 break;
5d1f03e6
MB
7764 case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
7765 ctrl_info->firmware_triage_supported = firmware_feature->enabled;
9ee5d6e9 7766 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
5d1f03e6 7767 break;
28ca6d87
MM
7768 case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
7769 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
7770 break;
904f2bfd
KM
7771 case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
7772 ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
7773 break;
21432010 7774 }
7775
7776 pqi_firmware_feature_status(ctrl_info, firmware_feature);
7777}
7778
b212c251
KB
7779static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7780 struct pqi_firmware_feature *firmware_feature)
7781{
7782 if (firmware_feature->feature_status)
7783 firmware_feature->feature_status(ctrl_info, firmware_feature);
7784}
7785
7786static DEFINE_MUTEX(pqi_firmware_features_mutex);
7787
7788static struct pqi_firmware_feature pqi_firmware_features[] = {
7789 {
7790 .feature_name = "Online Firmware Activation",
7791 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7792 .feature_status = pqi_firmware_feature_status,
7793 },
7794 {
7795 .feature_name = "Serial Management Protocol",
7796 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7797 .feature_status = pqi_firmware_feature_status,
7798 },
f6cc2a77
KB
7799 {
7800 .feature_name = "Maximum Known Feature",
7801 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7802 .feature_status = pqi_firmware_feature_status,
7803 },
7804 {
7805 .feature_name = "RAID 0 Read Bypass",
7806 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
b212c251
KB
7807 .feature_status = pqi_firmware_feature_status,
7808 },
7809 {
f6cc2a77
KB
7810 .feature_name = "RAID 1 Read Bypass",
7811 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7812 .feature_status = pqi_firmware_feature_status,
7813 },
7814 {
7815 .feature_name = "RAID 5 Read Bypass",
7816 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
b212c251
KB
7817 .feature_status = pqi_firmware_feature_status,
7818 },
f6cc2a77
KB
7819 {
7820 .feature_name = "RAID 6 Read Bypass",
7821 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7822 .feature_status = pqi_firmware_feature_status,
7823 },
7824 {
7825 .feature_name = "RAID 0 Write Bypass",
7826 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7827 .feature_status = pqi_firmware_feature_status,
7828 },
7829 {
7830 .feature_name = "RAID 1 Write Bypass",
7831 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
7832 .feature_status = pqi_ctrl_update_feature_flags,
7833 },
7834 {
7835 .feature_name = "RAID 5 Write Bypass",
7836 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
7837 .feature_status = pqi_ctrl_update_feature_flags,
7838 },
7839 {
7840 .feature_name = "RAID 6 Write Bypass",
7841 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
7842 .feature_status = pqi_ctrl_update_feature_flags,
7843 },
4fd22c13
MR
7844 {
7845 .feature_name = "New Soft Reset Handshake",
7846 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
21432010 7847 .feature_status = pqi_ctrl_update_feature_flags,
7848 },
7849 {
7850 .feature_name = "RAID IU Timeout",
7851 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
7852 .feature_status = pqi_ctrl_update_feature_flags,
4fd22c13 7853 },
c2922f17
MB
7854 {
7855 .feature_name = "TMF IU Timeout",
7856 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
7857 .feature_status = pqi_ctrl_update_feature_flags,
7858 },
f6cc2a77
KB
7859 {
7860 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
7861 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
7862 .feature_status = pqi_firmware_feature_status,
7863 },
5d1f03e6
MB
7864 {
7865 .feature_name = "Firmware Triage",
7866 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
7867 .feature_status = pqi_ctrl_update_feature_flags,
7868 },
28ca6d87
MM
7869 {
7870 .feature_name = "RPL Extended Formats 4 and 5",
7871 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
7872 .feature_status = pqi_ctrl_update_feature_flags,
7873 },
904f2bfd
KM
7874 {
7875 .feature_name = "Multi-LUN Target",
7876 .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
7877 .feature_status = pqi_ctrl_update_feature_flags,
7878 },
b212c251
KB
7879};
7880
7881static void pqi_process_firmware_features(
7882 struct pqi_config_table_section_info *section_info)
7883{
7884 int rc;
7885 struct pqi_ctrl_info *ctrl_info;
7886 struct pqi_config_table_firmware_features *firmware_features;
7887 void __iomem *firmware_features_iomem_addr;
7888 unsigned int i;
7889 unsigned int num_features_supported;
7890
7891 ctrl_info = section_info->ctrl_info;
7892 firmware_features = section_info->section;
7893 firmware_features_iomem_addr = section_info->section_iomem_addr;
7894
7895 for (i = 0, num_features_supported = 0;
7896 i < ARRAY_SIZE(pqi_firmware_features); i++) {
7897 if (pqi_is_firmware_feature_supported(firmware_features,
7898 pqi_firmware_features[i].feature_bit)) {
7899 pqi_firmware_features[i].supported = true;
7900 num_features_supported++;
7901 } else {
7902 pqi_firmware_feature_update(ctrl_info,
7903 &pqi_firmware_features[i]);
7904 }
7905 }
7906
7907 if (num_features_supported == 0)
7908 return;
7909
7910 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7911 if (!pqi_firmware_features[i].supported)
7912 continue;
7913 pqi_request_firmware_feature(firmware_features,
7914 pqi_firmware_features[i].feature_bit);
7915 }
7916
7917 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
7918 firmware_features_iomem_addr);
7919 if (rc) {
7920 dev_err(&ctrl_info->pci_dev->dev,
7921 "failed to enable firmware features in PQI configuration table\n");
7922 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7923 if (!pqi_firmware_features[i].supported)
7924 continue;
7925 pqi_firmware_feature_update(ctrl_info,
7926 &pqi_firmware_features[i]);
7927 }
7928 return;
7929 }
7930
7931 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7932 if (!pqi_firmware_features[i].supported)
7933 continue;
7934 if (pqi_is_firmware_feature_enabled(firmware_features,
7935 firmware_features_iomem_addr,
4fd22c13 7936 pqi_firmware_features[i].feature_bit)) {
583891c9 7937 pqi_firmware_features[i].enabled = true;
4fd22c13 7938 }
b212c251
KB
7939 pqi_firmware_feature_update(ctrl_info,
7940 &pqi_firmware_features[i]);
7941 }
7942}
7943
7944static void pqi_init_firmware_features(void)
7945{
7946 unsigned int i;
7947
7948 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7949 pqi_firmware_features[i].supported = false;
7950 pqi_firmware_features[i].enabled = false;
7951 }
7952}
7953
7954static void pqi_process_firmware_features_section(
7955 struct pqi_config_table_section_info *section_info)
7956{
7957 mutex_lock(&pqi_firmware_features_mutex);
7958 pqi_init_firmware_features();
7959 pqi_process_firmware_features(section_info);
7960 mutex_unlock(&pqi_firmware_features_mutex);
7961}
7962
f6cc2a77
KB
7963/*
7964 * Reset all controller settings that can be initialized during the processing
7965 * of the PQI Configuration Table.
7966 */
7967
4ccc354b
KB
7968static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
7969{
7970 ctrl_info->heartbeat_counter = NULL;
7971 ctrl_info->soft_reset_status = NULL;
7972 ctrl_info->soft_reset_handshake_supported = false;
7973 ctrl_info->enable_r1_writes = false;
7974 ctrl_info->enable_r5_writes = false;
7975 ctrl_info->enable_r6_writes = false;
7976 ctrl_info->raid_iu_timeout_supported = false;
7977 ctrl_info->tmf_iu_timeout_supported = false;
5d1f03e6 7978 ctrl_info->firmware_triage_supported = false;
28ca6d87 7979 ctrl_info->rpl_extended_format_4_5_supported = false;
904f2bfd 7980 ctrl_info->multi_lun_device_supported = false;
4ccc354b
KB
7981}
7982
98f87667
KB
7983static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
7984{
7985 u32 table_length;
7986 u32 section_offset;
f6cc2a77 7987 bool firmware_feature_section_present;
98f87667
KB
7988 void __iomem *table_iomem_addr;
7989 struct pqi_config_table *config_table;
7990 struct pqi_config_table_section_header *section;
b212c251 7991 struct pqi_config_table_section_info section_info;
f6cc2a77 7992 struct pqi_config_table_section_info feature_section_info;
98f87667
KB
7993
7994 table_length = ctrl_info->config_table_length;
b212c251
KB
7995 if (table_length == 0)
7996 return 0;
98f87667
KB
7997
7998 config_table = kmalloc(table_length, GFP_KERNEL);
7999 if (!config_table) {
8000 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 8001 "failed to allocate memory for PQI configuration table\n");
98f87667
KB
8002 return -ENOMEM;
8003 }
8004
8005 /*
8006 * Copy the config table contents from I/O memory space into the
8007 * temporary buffer.
8008 */
583891c9 8009 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
98f87667
KB
8010 memcpy_fromio(config_table, table_iomem_addr, table_length);
8011
f6cc2a77 8012 firmware_feature_section_present = false;
b212c251 8013 section_info.ctrl_info = ctrl_info;
583891c9 8014 section_offset = get_unaligned_le32(&config_table->first_section_offset);
98f87667
KB
8015
8016 while (section_offset) {
8017 section = (void *)config_table + section_offset;
8018
b212c251
KB
8019 section_info.section = section;
8020 section_info.section_offset = section_offset;
583891c9 8021 section_info.section_iomem_addr = table_iomem_addr + section_offset;
b212c251 8022
98f87667 8023 switch (get_unaligned_le16(&section->section_id)) {
b212c251 8024 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
f6cc2a77
KB
8025 firmware_feature_section_present = true;
8026 feature_section_info = section_info;
b212c251 8027 break;
98f87667 8028 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
5a259e32
KB
8029 if (pqi_disable_heartbeat)
8030 dev_warn(&ctrl_info->pci_dev->dev,
8031 "heartbeat disabled by module parameter\n");
8032 else
8033 ctrl_info->heartbeat_counter =
8034 table_iomem_addr +
8035 section_offset +
583891c9 8036 offsetof(struct pqi_config_table_heartbeat,
5a259e32 8037 heartbeat_counter);
98f87667 8038 break;
4fd22c13
MR
8039 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
8040 ctrl_info->soft_reset_status =
8041 table_iomem_addr +
8042 section_offset +
8043 offsetof(struct pqi_config_table_soft_reset,
583891c9 8044 soft_reset_status);
4fd22c13 8045 break;
98f87667
KB
8046 }
8047
583891c9 8048 section_offset = get_unaligned_le16(&section->next_section_offset);
98f87667
KB
8049 }
8050
f6cc2a77
KB
8051 /*
8052 * We process the firmware feature section after all other sections
8053 * have been processed so that the feature bit callbacks can take
8054 * into account the settings configured by other sections.
8055 */
8056 if (firmware_feature_section_present)
8057 pqi_process_firmware_features_section(&feature_section_info);
8058
98f87667
KB
8059 kfree(config_table);
8060
8061 return 0;
8062}
8063
162d7753
KB
8064/* Switches the controller from PQI mode back into SIS mode. */
8065
8066static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
8067{
8068 int rc;
8069
061ef06a 8070 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
162d7753
KB
8071 rc = pqi_reset(ctrl_info);
8072 if (rc)
8073 return rc;
4f078e24
KB
8074 rc = sis_reenable_sis_mode(ctrl_info);
8075 if (rc) {
8076 dev_err(&ctrl_info->pci_dev->dev,
8077 "re-enabling SIS mode failed with error %d\n", rc);
8078 return rc;
8079 }
162d7753
KB
8080 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8081
8082 return 0;
8083}
8084
8085/*
8086 * If the controller isn't already in SIS mode, this function forces it into
8087 * SIS mode.
8088 */
8089
8090static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
ff6abb73
KB
8091{
8092 if (!sis_is_firmware_running(ctrl_info))
8093 return -ENXIO;
8094
162d7753
KB
8095 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
8096 return 0;
8097
8098 if (sis_is_kernel_up(ctrl_info)) {
8099 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8100 return 0;
ff6abb73
KB
8101 }
8102
162d7753 8103 return pqi_revert_to_sis_mode(ctrl_info);
ff6abb73
KB
8104}
8105
3ada501d
MR
8106static void pqi_perform_lockup_action(void)
8107{
8108 switch (pqi_lockup_action) {
8109 case PANIC:
8110 panic("FATAL: Smart Family Controller lockup detected");
8111 break;
8112 case REBOOT:
8113 emergency_restart();
8114 break;
8115 case NONE:
8116 default:
8117 break;
8118 }
8119}
8120
6c223761
KB
8121static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
8122{
8123 int rc;
2708a256 8124 u32 product_id;
6c223761 8125
0530736e 8126 if (reset_devices) {
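		/*
		 * Kdump/kexec boot: wait for any in-progress firmware triage
		 * to complete, then soft reset the controller and allow it
		 * time to come back up before initialization continues.
		 */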
9ee5d6e9
MR
8127 if (pqi_is_fw_triage_supported(ctrl_info)) {
8128 rc = sis_wait_for_fw_triage_completion(ctrl_info);
8129 if (rc)
8130 return rc;
8131 }
0530736e 8132 sis_soft_reset(ctrl_info);
42dc0426 8133 ssleep(PQI_POST_RESET_DELAY_SECS);
0530736e
KB
8134 } else {
8135 rc = pqi_force_sis_mode(ctrl_info);
8136 if (rc)
8137 return rc;
8138 }
6c223761
KB
8139
8140 /*
8141 * Wait until the controller is ready to start accepting SIS
8142 * commands.
8143 */
8144 rc = sis_wait_for_ctrl_ready(ctrl_info);
3ada501d
MR
8145 if (rc) {
8146 if (reset_devices) {
8147 dev_err(&ctrl_info->pci_dev->dev,
8148 "kdump init failed with error %d\n", rc);
8149 pqi_lockup_action = REBOOT;
8150 pqi_perform_lockup_action();
8151 }
6c223761 8152 return rc;
3ada501d 8153 }
6c223761
KB
8154
8155 /*
8156 * Get the controller properties. This allows us to determine
8157 * whether or not it supports PQI mode.
8158 */
8159 rc = sis_get_ctrl_properties(ctrl_info);
8160 if (rc) {
8161 dev_err(&ctrl_info->pci_dev->dev,
8162 "error obtaining controller properties\n");
8163 return rc;
8164 }
8165
8166 rc = sis_get_pqi_capabilities(ctrl_info);
8167 if (rc) {
8168 dev_err(&ctrl_info->pci_dev->dev,
8169 "error obtaining controller capabilities\n");
8170 return rc;
8171 }
8172
2708a256
KB
8173 product_id = sis_get_product_id(ctrl_info);
8174 ctrl_info->product_id = (u8)product_id;
8175 ctrl_info->product_revision = (u8)(product_id >> 8);
8176
d727a776
KB
8177 if (reset_devices) {
8178 if (ctrl_info->max_outstanding_requests >
8179 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
583891c9 8180 ctrl_info->max_outstanding_requests =
d727a776
KB
8181 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
8182 } else {
8183 if (ctrl_info->max_outstanding_requests >
8184 PQI_MAX_OUTSTANDING_REQUESTS)
583891c9 8185 ctrl_info->max_outstanding_requests =
d727a776
KB
8186 PQI_MAX_OUTSTANDING_REQUESTS;
8187 }
6c223761
KB
8188
8189 pqi_calculate_io_resources(ctrl_info);
8190
8191 rc = pqi_alloc_error_buffer(ctrl_info);
8192 if (rc) {
8193 dev_err(&ctrl_info->pci_dev->dev,
8194 "failed to allocate PQI error buffer\n");
8195 return rc;
8196 }
8197
8198 /*
8199 * If the function we are about to call succeeds, the
8200 * controller will transition from legacy SIS mode
8201 * into PQI mode.
8202 */
8203 rc = sis_init_base_struct_addr(ctrl_info);
8204 if (rc) {
8205 dev_err(&ctrl_info->pci_dev->dev,
8206 "error initializing PQI mode\n");
8207 return rc;
8208 }
8209
8210 /* Wait for the controller to complete the SIS -> PQI transition. */
8211 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8212 if (rc) {
8213 dev_err(&ctrl_info->pci_dev->dev,
8214 "transition to PQI mode failed\n");
8215 return rc;
8216 }
8217
8218 /* From here on, we are running in PQI mode. */
8219 ctrl_info->pqi_mode_enabled = true;
ff6abb73 8220 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6c223761
KB
8221
8222 rc = pqi_alloc_admin_queues(ctrl_info);
8223 if (rc) {
8224 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 8225 "failed to allocate admin queues\n");
6c223761
KB
8226 return rc;
8227 }
8228
8229 rc = pqi_create_admin_queues(ctrl_info);
8230 if (rc) {
8231 dev_err(&ctrl_info->pci_dev->dev,
8232 "error creating admin queues\n");
8233 return rc;
8234 }
8235
8236 rc = pqi_report_device_capability(ctrl_info);
8237 if (rc) {
8238 dev_err(&ctrl_info->pci_dev->dev,
8239 "obtaining device capability failed\n");
8240 return rc;
8241 }
8242
8243 rc = pqi_validate_device_capability(ctrl_info);
8244 if (rc)
8245 return rc;
8246
8247 pqi_calculate_queue_resources(ctrl_info);
8248
8249 rc = pqi_enable_msix_interrupts(ctrl_info);
8250 if (rc)
8251 return rc;
8252
8253 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
8254 ctrl_info->max_msix_vectors =
8255 ctrl_info->num_msix_vectors_enabled;
8256 pqi_calculate_queue_resources(ctrl_info);
8257 }
8258
8259 rc = pqi_alloc_io_resources(ctrl_info);
8260 if (rc)
8261 return rc;
8262
8263 rc = pqi_alloc_operational_queues(ctrl_info);
d87d5474
KB
8264 if (rc) {
8265 dev_err(&ctrl_info->pci_dev->dev,
8266 "failed to allocate operational queues\n");
6c223761 8267 return rc;
d87d5474 8268 }
6c223761
KB
8269
8270 pqi_init_operational_queues(ctrl_info);
8271
0777a3fb 8272 rc = pqi_create_queues(ctrl_info);
6c223761
KB
8273 if (rc)
8274 return rc;
8275
0777a3fb 8276 rc = pqi_request_irqs(ctrl_info);
6c223761
KB
8277 if (rc)
8278 return rc;
8279
061ef06a
KB
8280 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8281
8282 ctrl_info->controller_online = true;
b212c251
KB
8283
8284 rc = pqi_process_config_table(ctrl_info);
8285 if (rc)
8286 return rc;
8287
061ef06a 8288 pqi_start_heartbeat_timer(ctrl_info);
6c223761 8289
f6cc2a77
KB
8290 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8291 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8292 if (rc) { /* Supported features not returned correctly. */
8293 dev_err(&ctrl_info->pci_dev->dev,
8294 "error obtaining advanced RAID bypass configuration\n");
8295 return rc;
8296 }
8297 ctrl_info->ciss_report_log_flags |=
8298 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8299 }
8300
6a50d6ad 8301 rc = pqi_enable_events(ctrl_info);
6c223761
KB
8302 if (rc) {
8303 dev_err(&ctrl_info->pci_dev->dev,
6a50d6ad 8304 "error enabling events\n");
6c223761
KB
8305 return rc;
8306 }
8307
6c223761
KB
8308 /* Register with the SCSI subsystem. */
8309 rc = pqi_register_scsi(ctrl_info);
8310 if (rc)
8311 return rc;
8312
6d90615f
MB
8313 rc = pqi_get_ctrl_product_details(ctrl_info);
8314 if (rc) {
8315 dev_err(&ctrl_info->pci_dev->dev,
8316 "error obtaining product details\n");
8317 return rc;
8318 }
8319
8320 rc = pqi_get_ctrl_serial_number(ctrl_info);
6c223761
KB
8321 if (rc) {
8322 dev_err(&ctrl_info->pci_dev->dev,
6d90615f 8323 "error obtaining ctrl serial number\n");
6c223761
KB
8324 return rc;
8325 }
8326
171c2865
DC
8327 rc = pqi_set_diag_rescan(ctrl_info);
8328 if (rc) {
8329 dev_err(&ctrl_info->pci_dev->dev,
8330 "error enabling multi-lun rescan\n");
8331 return rc;
8332 }
8333
6c223761
KB
8334 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8335 if (rc) {
8336 dev_err(&ctrl_info->pci_dev->dev,
8337 "error updating host wellness\n");
8338 return rc;
8339 }
8340
8341 pqi_schedule_update_time_worker(ctrl_info);
8342
8343 pqi_scan_scsi_devices(ctrl_info);
8344
8345 return 0;
8346}
8347
061ef06a
KB
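/*
 * Reset the cached and hardware producer/consumer indices of the admin,
 * operational, and event queues so the existing queue memory can be reused
 * when the controller is re-initialized.
 */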
8348static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
8349{
8350 unsigned int i;
8351 struct pqi_admin_queues *admin_queues;
8352 struct pqi_event_queue *event_queue;
8353
8354 admin_queues = &ctrl_info->admin_queues;
8355 admin_queues->iq_pi_copy = 0;
8356 admin_queues->oq_ci_copy = 0;
dac12fbc 8357 writel(0, admin_queues->oq_pi);
061ef06a
KB
8358
8359 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
8360 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
8361 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
8362 ctrl_info->queue_groups[i].oq_ci_copy = 0;
8363
dac12fbc
KB
8364 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
8365 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
8366 writel(0, ctrl_info->queue_groups[i].oq_pi);
8367 }
8368
8369 event_queue = &ctrl_info->event_queue;
dac12fbc 8370 writel(0, event_queue->oq_pi);
8371 event_queue->oq_ci_copy = 0;
8372}
8373
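/*
 * Resume-time counterpart of the full controller init: it repeats the
 * SIS -> PQI mode transition and recreates the admin/operational queues,
 * but reuses the queue memory, I/O resources, and SCSI host that were set
 * up during the original initialization.
 */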
8374static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
8375{
8376 int rc;
8377
8378 rc = pqi_force_sis_mode(ctrl_info);
8379 if (rc)
8380 return rc;
8381
8382 /*
8383 * Wait until the controller is ready to start accepting SIS
8384 * commands.
8385 */
8386 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
8387 if (rc)
8388 return rc;
8389
8390 /*
8391 * Get the controller properties. This allows us to determine
8392 * whether or not it supports PQI mode.
8393 */
8394 rc = sis_get_ctrl_properties(ctrl_info);
8395 if (rc) {
8396 dev_err(&ctrl_info->pci_dev->dev,
8397 "error obtaining controller properties\n");
8398 return rc;
8399 }
8400
8401 rc = sis_get_pqi_capabilities(ctrl_info);
8402 if (rc) {
8403 dev_err(&ctrl_info->pci_dev->dev,
8404 "error obtaining controller capabilities\n");
8405 return rc;
8406 }
8407
8408 /*
8409 * If the function we are about to call succeeds, the
8410 * controller will transition from legacy SIS mode
8411 * into PQI mode.
8412 */
8413 rc = sis_init_base_struct_addr(ctrl_info);
8414 if (rc) {
8415 dev_err(&ctrl_info->pci_dev->dev,
8416 "error initializing PQI mode\n");
8417 return rc;
8418 }
8419
8420 /* Wait for the controller to complete the SIS -> PQI transition. */
8421 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8422 if (rc) {
8423 dev_err(&ctrl_info->pci_dev->dev,
8424 "transition to PQI mode failed\n");
8425 return rc;
8426 }
8427
8428 /* From here on, we are running in PQI mode. */
8429 ctrl_info->pqi_mode_enabled = true;
8430 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8431
8432 pqi_reinit_queues(ctrl_info);
8433
8434 rc = pqi_create_admin_queues(ctrl_info);
8435 if (rc) {
8436 dev_err(&ctrl_info->pci_dev->dev,
8437 "error creating admin queues\n");
8438 return rc;
8439 }
8440
8441 rc = pqi_create_queues(ctrl_info);
8442 if (rc)
8443 return rc;
8444
8445 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8446
8447 ctrl_info->controller_online = true;
8448 pqi_ctrl_unblock_requests(ctrl_info);
8449
8450 pqi_ctrl_reset_config(ctrl_info);
8451
8452 rc = pqi_process_config_table(ctrl_info);
8453 if (rc)
8454 return rc;
8455
8456 pqi_start_heartbeat_timer(ctrl_info);
8457
8458 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8459 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8460 if (rc) {
8461 dev_err(&ctrl_info->pci_dev->dev,
8462 "error obtaining advanced RAID bypass configuration\n");
8463 return rc;
8464 }
8465 ctrl_info->ciss_report_log_flags |=
8466 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8467 }
8468
8469 rc = pqi_enable_events(ctrl_info);
8470 if (rc) {
8471 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 8472 "error enabling events\n");
8473 return rc;
8474 }
8475
6d90615f 8476 rc = pqi_get_ctrl_product_details(ctrl_info);
8477 if (rc) {
8478 dev_err(&ctrl_info->pci_dev->dev,
694c5d5b 8479 "error obtaining product details\n");
8480 return rc;
8481 }
8482
8483 rc = pqi_set_diag_rescan(ctrl_info);
8484 if (rc) {
8485 dev_err(&ctrl_info->pci_dev->dev,
8486 "error enabling multi-lun rescan\n");
8487 return rc;
8488 }
8489
8490 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8491 if (rc) {
8492 dev_err(&ctrl_info->pci_dev->dev,
8493 "error updating host wellness\n");
8494 return rc;
8495 }
8496
8497 if (pqi_ofa_in_progress(ctrl_info))
8498 pqi_ctrl_unblock_scan(ctrl_info);
8499
8500 pqi_scan_scsi_devices(ctrl_info);
8501
8502 return 0;
8503}
8504
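/*
 * Helper below: program the completion timeout range in the PCIe Device
 * Control 2 register; the PCIBIOS status from the capability write is
 * converted to a standard errno for the caller.
 */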
583891c9 8505static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
a81ed5f3 8506{
8507 int rc;
8508
8509 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
a81ed5f3 8510 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
8511
8512 return pcibios_err_to_errno(rc);
8513}
8514
8515static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8516{
8517 int rc;
8518 u64 mask;
8519
8520 rc = pci_enable_device(ctrl_info->pci_dev);
8521 if (rc) {
8522 dev_err(&ctrl_info->pci_dev->dev,
8523 "failed to enable PCI device\n");
8524 return rc;
8525 }
8526
8527 if (sizeof(dma_addr_t) > 4)
8528 mask = DMA_BIT_MASK(64);
8529 else
8530 mask = DMA_BIT_MASK(32);
8531
1d94f06e 8532 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
8533 if (rc) {
8534 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8535 goto disable_device;
8536 }
8537
8538 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8539 if (rc) {
8540 dev_err(&ctrl_info->pci_dev->dev,
8541 "failed to obtain PCI resources\n");
8542 goto disable_device;
8543 }
8544
4bdc0d67 8545 ctrl_info->iomem_base = ioremap(pci_resource_start(
8546 ctrl_info->pci_dev, 0),
8547 sizeof(struct pqi_ctrl_registers));
8548 if (!ctrl_info->iomem_base) {
8549 dev_err(&ctrl_info->pci_dev->dev,
8550 "failed to map memory for controller registers\n");
8551 rc = -ENOMEM;
8552 goto release_regions;
8553 }
8554
8555#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
8556
8557 /* Increase the PCIe completion timeout. */
8558 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8559 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8560 if (rc) {
8561 dev_err(&ctrl_info->pci_dev->dev,
8562 "failed to set PCIe completion timeout\n");
8563 goto release_regions;
8564 }
8565
8566 /* Enable bus mastering. */
8567 pci_set_master(ctrl_info->pci_dev);
8568
8569 ctrl_info->registers = ctrl_info->iomem_base;
8570 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8571
8572 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8573
8574 return 0;
8575
8576release_regions:
8577 pci_release_regions(ctrl_info->pci_dev);
8578disable_device:
8579 pci_disable_device(ctrl_info->pci_dev);
8580
8581 return rc;
8582}
8583
8584static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
8585{
8586 iounmap(ctrl_info->iomem_base);
8587 pci_release_regions(ctrl_info->pci_dev);
8588 if (pci_is_enabled(ctrl_info->pci_dev))
8589 pci_disable_device(ctrl_info->pci_dev);
8590 pci_set_drvdata(ctrl_info->pci_dev, NULL);
8591}
8592
8593static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
8594{
8595 struct pqi_ctrl_info *ctrl_info;
8596
8597 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
8598 GFP_KERNEL, numa_node);
8599 if (!ctrl_info)
8600 return NULL;
8601
8602 mutex_init(&ctrl_info->scan_mutex);
7561a7e4 8603 mutex_init(&ctrl_info->lun_reset_mutex);
4fd22c13 8604 mutex_init(&ctrl_info->ofa_mutex);
8605
8606 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8607 spin_lock_init(&ctrl_info->scsi_device_list_lock);
8608
8609 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
8610 atomic_set(&ctrl_info->num_interrupts, 0);
8611
8612 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
8613 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
8614
74a0f573 8615 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
5f310425 8616 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
98f87667 8617
8618 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
8619 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
8620
8621 sema_init(&ctrl_info->sync_request_sem,
8622 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
7561a7e4 8623 init_waitqueue_head(&ctrl_info->block_requests_wait);
8624
8625 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
061ef06a 8626 ctrl_info->irq_mode = IRQ_MODE_NONE;
8627 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
8628
8629 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
8630 ctrl_info->max_transfer_encrypted_sas_sata =
8631 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
8632 ctrl_info->max_transfer_encrypted_nvme =
8633 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
8634 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
8635 ctrl_info->max_write_raid_1_10_2drive = ~0;
8636 ctrl_info->max_write_raid_1_10_3drive = ~0;
cf15c3e7 8637 ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;
f6cc2a77 8638
8639 return ctrl_info;
8640}
8641
8642static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
8643{
8644 kfree(ctrl_info);
8645}
8646
8647static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
8648{
8649 pqi_free_irqs(ctrl_info);
8650 pqi_disable_msix_interrupts(ctrl_info);
8651}
8652
8653static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
8654{
8655 pqi_free_interrupts(ctrl_info);
8656 if (ctrl_info->queue_memory_base)
8657 dma_free_coherent(&ctrl_info->pci_dev->dev,
8658 ctrl_info->queue_memory_length,
8659 ctrl_info->queue_memory_base,
8660 ctrl_info->queue_memory_base_dma_handle);
8661 if (ctrl_info->admin_queue_memory_base)
8662 dma_free_coherent(&ctrl_info->pci_dev->dev,
8663 ctrl_info->admin_queue_memory_length,
8664 ctrl_info->admin_queue_memory_base,
8665 ctrl_info->admin_queue_memory_base_dma_handle);
8666 pqi_free_all_io_requests(ctrl_info);
8667 if (ctrl_info->error_buffer)
8668 dma_free_coherent(&ctrl_info->pci_dev->dev,
8669 ctrl_info->error_buffer_length,
8670 ctrl_info->error_buffer,
8671 ctrl_info->error_buffer_dma_handle);
8672 if (ctrl_info->iomem_base)
8673 pqi_cleanup_pci_init(ctrl_info);
8674 pqi_free_ctrl_info(ctrl_info);
8675}
8676
8677static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
8678{
8679 ctrl_info->controller_online = false;
8680 pqi_stop_heartbeat_timer(ctrl_info);
8681 pqi_ctrl_block_requests(ctrl_info);
8682 pqi_cancel_rescan_worker(ctrl_info);
8683 pqi_cancel_update_time_worker(ctrl_info);
8684 if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
8685 pqi_fail_all_outstanding_requests(ctrl_info);
8686 ctrl_info->pqi_mode_enabled = false;
8687 }
e57a1f9b 8688 pqi_unregister_scsi(ctrl_info);
8689 if (ctrl_info->pqi_mode_enabled)
8690 pqi_revert_to_sis_mode(ctrl_info);
8691 pqi_free_ctrl_resources(ctrl_info);
8692}
8693
8694static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
8695{
8696 pqi_ctrl_block_scan(ctrl_info);
8697 pqi_scsi_block_requests(ctrl_info);
8698 pqi_ctrl_block_device_reset(ctrl_info);
8699 pqi_ctrl_block_requests(ctrl_info);
8700 pqi_ctrl_wait_until_quiesced(ctrl_info);
4fd22c13 8701 pqi_stop_heartbeat_timer(ctrl_info);
8702}
8703
8704static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
8705{
4fd22c13 8706 pqi_start_heartbeat_timer(ctrl_info);
8707 pqi_ctrl_unblock_requests(ctrl_info);
8708 pqi_ctrl_unblock_device_reset(ctrl_info);
8709 pqi_scsi_unblock_requests(ctrl_info);
8710 pqi_ctrl_unblock_scan(ctrl_info);
8711}
8712
2790cd4d 8713static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
4fd22c13 8714{
4fd22c13 8715 int i;
2790cd4d 8716 u32 sg_count;
8717 struct device *dev;
8718 struct pqi_ofa_memory *ofap;
8719 struct pqi_sg_descriptor *mem_descriptor;
8720 dma_addr_t dma_handle;
8721
8722 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8723
8724 sg_count = DIV_ROUND_UP(total_size, chunk_size);
8725 if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
8726 goto out;
8727
2790cd4d 8728 ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
8729 if (!ctrl_info->pqi_ofa_chunk_virt_addr)
8730 goto out;
8731
2790cd4d 8732 dev = &ctrl_info->pci_dev->dev;
4fd22c13 8733
2790cd4d 8734 for (i = 0; i < sg_count; i++) {
4fd22c13 8735 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
2790cd4d 8736 dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
4fd22c13 8737 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
2790cd4d 8738 goto out_free_chunks;
4fd22c13 8739 mem_descriptor = &ofap->sg_descriptor[i];
8740 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
8741 put_unaligned_le32(chunk_size, &mem_descriptor->length);
8742 }
8743
8744 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
8745 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
2790cd4d 8746 put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
8747
8748 return 0;
8749
8750out_free_chunks:
8751 while (--i >= 0) {
8752 mem_descriptor = &ofap->sg_descriptor[i];
8753 dma_free_coherent(dev, chunk_size,
8754 ctrl_info->pqi_ofa_chunk_virt_addr[i],
8755 get_unaligned_le64(&mem_descriptor->address));
8756 }
8757 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8758
8759out:
8760 return -ENOMEM;
8761}
8762
8763static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
8764{
8765 u32 total_size;
2790cd4d 8766 u32 chunk_size;
4fd22c13 8767 u32 min_chunk_size;
4fd22c13 8768
8769 if (ctrl_info->ofa_bytes_requested == 0)
8770 return 0;
4fd22c13 8771
8772 total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
8773 min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
8774 min_chunk_size = PAGE_ALIGN(min_chunk_size);
4fd22c13 8775
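/*
 * Start with one chunk covering the whole request and keep halving the
 * chunk size (page-aligned) until the allocation succeeds or the chunk
 * size would no longer fit within PQI_OFA_MAX_SG_DESCRIPTORS descriptors.
 */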
8776 for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
8777 if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
4fd22c13 8778 return 0;
8779 chunk_size /= 2;
8780 chunk_size = PAGE_ALIGN(chunk_size);
8781 }
8782
8783 return -ENOMEM;
8784}
8785
2790cd4d 8786static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
4fd22c13 8787{
4fd22c13 8788 struct device *dev;
2790cd4d 8789 struct pqi_ofa_memory *ofap;
8790
8791 dev = &ctrl_info->pci_dev->dev;
4fd22c13 8792
8793 ofap = dma_alloc_coherent(dev, sizeof(*ofap),
8794 &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
8795 if (!ofap)
8796 return;
8797
2790cd4d 8798 ctrl_info->pqi_ofa_mem_virt_addr = ofap;
8799
8800 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
8801 dev_err(dev,
8802 "failed to allocate host buffer for Online Firmware Activation\n");
8803 dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
8804 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8805 return;
4fd22c13 8806 }
694c5d5b 8807
8808 put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
8809 memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
8810}
8811
8812static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
8813{
8814 unsigned int i;
8815 struct device *dev;
4fd22c13 8816 struct pqi_ofa_memory *ofap;
8817 struct pqi_sg_descriptor *mem_descriptor;
8818 unsigned int num_memory_descriptors;
8819
8820 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8821 if (!ofap)
8822 return;
8823
8824 dev = &ctrl_info->pci_dev->dev;
8825
8826 if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
8827 goto out;
8828
8829 mem_descriptor = ofap->sg_descriptor;
8830 num_memory_descriptors =
8831 get_unaligned_le16(&ofap->num_memory_descriptors);
4fd22c13 8832
8833 for (i = 0; i < num_memory_descriptors; i++) {
8834 dma_free_coherent(dev,
8835 get_unaligned_le32(&mem_descriptor[i].length),
8836 ctrl_info->pqi_ofa_chunk_virt_addr[i],
8837 get_unaligned_le64(&mem_descriptor[i].address));
8838 }
8839 kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8840
8841out:
8842 dma_free_coherent(dev, sizeof(*ofap), ofap,
8843 ctrl_info->pqi_ofa_mem_dma_handle);
8844 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8845}
8846
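/*
 * The function below sends a vendor-specific HOST_MEMORY_UPDATE request
 * that tells the controller where the host OFA buffer lives; if no buffer
 * was allocated, the address and length fields are left zeroed.
 */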
8847static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
8848{
2790cd4d 8849 u32 buffer_length;
4fd22c13 8850 struct pqi_vendor_general_request request;
8851 struct pqi_ofa_memory *ofap;
8852
8853 memset(&request, 0, sizeof(request));
8854
8855 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
8856 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
8857 &request.header.iu_length);
8858 put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
8859 &request.function_code);
8860
8861 ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8862
4fd22c13 8863 if (ofap) {
2790cd4d 8864 buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
8865 get_unaligned_le16(&ofap->num_memory_descriptors) *
8866 sizeof(struct pqi_sg_descriptor);
8867
8868 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
8869 &request.data.ofa_memory_allocation.buffer_address);
2790cd4d 8870 put_unaligned_le32(buffer_length,
4fd22c13 8871 &request.data.ofa_memory_allocation.buffer_length);
8872 }
8873
ae0c189d 8874 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
8875}
8876
2790cd4d 8877static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
4fd22c13 8878{
8879 ssleep(delay_secs);
8880
8881 return pqi_ctrl_init_resume(ctrl_info);
8882}
8883
8884static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
8885 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
8886 .status = SAM_STAT_CHECK_CONDITION,
8887};
8888
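/*
 * Complete every request still held in the I/O request pool: SCSI commands
 * are failed with DID_NO_CONNECT (or simply freed if their device is
 * already offline), while driver-internal requests are completed with
 * -ENXIO and a synthetic CHECK CONDITION error-info block.
 */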
8889static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
8890{
8891 unsigned int i;
376fb880 8892 struct pqi_io_request *io_request;
376fb880 8893 struct scsi_cmnd *scmd;
4f3cefc3 8894 struct scsi_device *sdev;
376fb880 8895
8896 for (i = 0; i < ctrl_info->max_io_slots; i++) {
8897 io_request = &ctrl_info->io_request_pool[i];
8898 if (atomic_read(&io_request->refcount) == 0)
8899 continue;
376fb880 8900
8901 scmd = io_request->scmd;
8902 if (scmd) {
8903 sdev = scmd->device;
8904 if (!sdev || !scsi_device_online(sdev)) {
8905 pqi_free_io_request(io_request);
8906 continue;
8907 } else {
8908 set_host_byte(scmd, DID_NO_CONNECT);
8909 }
8910 } else {
8911 io_request->status = -ENXIO;
8912 io_request->error_info =
8913 &pqi_ctrl_offline_raid_error_info;
376fb880 8914 }
8915
8916 io_request->io_complete_callback(io_request,
8917 io_request->context);
8918 }
8919}
8920
5f310425 8921static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
376fb880 8922{
8923 pqi_perform_lockup_action();
8924 pqi_stop_heartbeat_timer(ctrl_info);
8925 pqi_free_interrupts(ctrl_info);
8926 pqi_cancel_rescan_worker(ctrl_info);
8927 pqi_cancel_update_time_worker(ctrl_info);
8928 pqi_ctrl_wait_until_quiesced(ctrl_info);
8929 pqi_fail_all_outstanding_requests(ctrl_info);
8930 pqi_ctrl_unblock_requests(ctrl_info);
8931}
8932
8933static void pqi_ctrl_offline_worker(struct work_struct *work)
8934{
8935 struct pqi_ctrl_info *ctrl_info;
8936
8937 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
8938 pqi_take_ctrl_offline_deferred(ctrl_info);
8939}
8940
8941static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
8942 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
376fb880 8943{
8944 if (!ctrl_info->controller_online)
8945 return;
8946
376fb880 8947 ctrl_info->controller_online = false;
8948 ctrl_info->pqi_mode_enabled = false;
8949 pqi_ctrl_block_requests(ctrl_info);
5a259e32 8950 if (!pqi_disable_ctrl_shutdown)
5d1f03e6 8951 sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
8952 pci_disable_device(ctrl_info->pci_dev);
8953 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
5f310425 8954 schedule_work(&ctrl_info->ctrl_offline_work);
8955}
8956
d91d7820 8957static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
8958 const struct pci_device_id *id)
8959{
8960 char *ctrl_description;
8961
37b36847 8962 if (id->driver_data)
6c223761 8963 ctrl_description = (char *)id->driver_data;
37b36847 8964 else
6aa26b5a 8965 ctrl_description = "Microchip Smart Family Controller";
6c223761 8966
d91d7820 8967 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
8968}
8969
8970static int pqi_pci_probe(struct pci_dev *pci_dev,
8971 const struct pci_device_id *id)
8972{
8973 int rc;
c52efc92 8974 int node;
8975 struct pqi_ctrl_info *ctrl_info;
8976
d91d7820 8977 pqi_print_ctrl_info(pci_dev, id);
8978
8979 if (pqi_disable_device_id_wildcards &&
8980 id->subvendor == PCI_ANY_ID &&
8981 id->subdevice == PCI_ANY_ID) {
d91d7820 8982 dev_warn(&pci_dev->dev,
8983 "controller not probed because device ID wildcards are disabled\n");
8984 return -ENODEV;
8985 }
8986
8987 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
d91d7820 8988 dev_warn(&pci_dev->dev,
8989 "controller device ID matched using wildcards\n");
8990
d91d7820 8991 node = dev_to_node(&pci_dev->dev);
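/* If the device reports no NUMA affinity, fall back to CPU 0's node (or node 0). */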
62dc51fb 8992 if (node == NUMA_NO_NODE) {
8993 node = cpu_to_node(0);
8994 if (node == NUMA_NO_NODE)
8995 node = 0;
8996 set_dev_node(&pci_dev->dev, node);
62dc51fb 8997 }
8998
8999 ctrl_info = pqi_alloc_ctrl_info(node);
9000 if (!ctrl_info) {
d91d7820 9001 dev_err(&pci_dev->dev,
9002 "failed to allocate controller info block\n");
9003 return -ENOMEM;
9004 }
9005
d91d7820 9006 ctrl_info->pci_dev = pci_dev;
6c223761
KB
9007
9008 rc = pqi_pci_init(ctrl_info);
9009 if (rc)
9010 goto error;
9011
9012 rc = pqi_ctrl_init(ctrl_info);
9013 if (rc)
9014 goto error;
9015
9016 return 0;
9017
9018error:
9019 pqi_remove_ctrl(ctrl_info);
9020
9021 return rc;
9022}
9023
d91d7820 9024static void pqi_pci_remove(struct pci_dev *pci_dev)
9025{
9026 struct pqi_ctrl_info *ctrl_info;
331f7e99 9027 u16 vendor_id;
6c223761 9028
d91d7820 9029 ctrl_info = pci_get_drvdata(pci_dev);
9030 if (!ctrl_info)
9031 return;
9032
9033 pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
9034 if (vendor_id == 0xffff)
9035 ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
9036 else
9037 ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
9038
9039 pqi_remove_ctrl(ctrl_info);
9040}
9041
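/*
 * Shutdown/suspend sanity check: if any I/O slot is still in use at this
 * point, one of the two WARN_ON()s below fires and indicates whether the
 * straggler was a SCSI command from the midlayer or a driver-initiated
 * request.
 */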
9042static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
9043{
9044 unsigned int i;
9045 struct pqi_io_request *io_request;
9046 struct scsi_cmnd *scmd;
9047
9048 for (i = 0; i < ctrl_info->max_io_slots; i++) {
9049 io_request = &ctrl_info->io_request_pool[i];
9050 if (atomic_read(&io_request->refcount) == 0)
9051 continue;
9052 scmd = io_request->scmd;
9053 WARN_ON(scmd != NULL); /* IO command from SML */
9054 WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
9055 }
9056}
9057
d91d7820 9058static void pqi_shutdown(struct pci_dev *pci_dev)
9059{
9060 int rc;
9061 struct pqi_ctrl_info *ctrl_info;
70ba20be 9062 enum bmic_flush_cache_shutdown_event shutdown_event;
6c223761 9063
d91d7820 9064 ctrl_info = pci_get_drvdata(pci_dev);
9065 if (!ctrl_info) {
9066 dev_err(&pci_dev->dev,
9067 "cache could not be flushed\n");
9068 return;
9069 }
9070
0530736e 9071 pqi_wait_until_ofa_finished(ctrl_info);
0530736e 9072
9fa82023 9073 pqi_scsi_block_requests(ctrl_info);
0530736e 9074 pqi_ctrl_block_device_reset(ctrl_info);
9075 pqi_ctrl_block_requests(ctrl_info);
9076 pqi_ctrl_wait_until_quiesced(ctrl_info);
6c223761 9077
9078 if (system_state == SYSTEM_RESTART)
9079 shutdown_event = RESTART;
9080 else
9081 shutdown_event = SHUTDOWN;
9082
9083 /*
9084 * Write all data in the controller's battery-backed cache to
9085 * storage.
9086 */
70ba20be 9087 rc = pqi_flush_cache(ctrl_info, shutdown_event);
9088 if (rc)
9089 dev_err(&pci_dev->dev,
9090 "unable to flush controller cache\n");
9091
9092 pqi_crash_if_pending_command(ctrl_info);
9093 pqi_reset(ctrl_info);
9094}
9095
9096static void pqi_process_lockup_action_param(void)
9097{
9098 unsigned int i;
9099
9100 if (!pqi_lockup_action_param)
9101 return;
9102
9103 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
9104 if (strcmp(pqi_lockup_action_param,
9105 pqi_lockup_actions[i].name) == 0) {
9106 pqi_lockup_action = pqi_lockup_actions[i].action;
9107 return;
9108 }
9109 }
9110
9111 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
9112 DRIVER_NAME_SHORT, pqi_lockup_action_param);
9113}
9114
9115#define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30
9116#define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60)
9117
9118static void pqi_process_ctrl_ready_timeout_param(void)
9119{
9120 if (pqi_ctrl_ready_timeout_secs == 0)
9121 return;
9122
9123 if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
9124 pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
9125 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
9126 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
9127 } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
9128 pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
9129 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
9130 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
9131 }
9132
9133 sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
9134}
9135
9136static void pqi_process_module_params(void)
9137{
9138 pqi_process_lockup_action_param();
6d567dfe 9139 pqi_process_ctrl_ready_timeout_param();
9140}
9141
9142#if defined(CONFIG_PM)
9143
9144static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
9145{
9146 if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
9147 return RESTART;
c66e078a 9148
9149 return SUSPEND;
9150}
9151
c66e078a 9152static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
061ef06a 9153{
c66e078a 9154 struct pci_dev *pci_dev;
9155 struct pqi_ctrl_info *ctrl_info;
9156
c66e078a 9157 pci_dev = to_pci_dev(dev);
9158 ctrl_info = pci_get_drvdata(pci_dev);
9159
4fd22c13 9160 pqi_wait_until_ofa_finished(ctrl_info);
9161
9162 pqi_ctrl_block_scan(ctrl_info);
9163 pqi_scsi_block_requests(ctrl_info);
9164 pqi_ctrl_block_device_reset(ctrl_info);
9165 pqi_ctrl_block_requests(ctrl_info);
9166 pqi_ctrl_wait_until_quiesced(ctrl_info);
061ef06a 9167
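/* Only the suspend path flushes the controller cache; the freeze path skips the flush. */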
9168 if (suspend) {
9169 enum bmic_flush_cache_shutdown_event shutdown_event;
9fa82023 9170
9171 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9172 pqi_flush_cache(ctrl_info, shutdown_event);
9173 }
061ef06a 9174
9175 pqi_stop_heartbeat_timer(ctrl_info);
9176 pqi_crash_if_pending_command(ctrl_info);
9177 pqi_free_irqs(ctrl_info);
9178
9179 ctrl_info->controller_online = false;
9180 ctrl_info->pqi_mode_enabled = false;
9181
9182 return 0;
9183}
9184
9185static __maybe_unused int pqi_suspend(struct device *dev)
9186{
9187 return pqi_suspend_or_freeze(dev, true);
9188}
9189
9190static int pqi_resume_or_restore(struct device *dev)
9191{
9192 int rc;
c66e078a 9193 struct pci_dev *pci_dev;
9194 struct pqi_ctrl_info *ctrl_info;
9195
c66e078a 9196 pci_dev = to_pci_dev(dev);
9197 ctrl_info = pci_get_drvdata(pci_dev);
9198
9199 rc = pqi_request_irqs(ctrl_info);
9200 if (rc)
9201 return rc;
061ef06a 9202
9203 pqi_ctrl_unblock_device_reset(ctrl_info);
9204 pqi_ctrl_unblock_requests(ctrl_info);
9205 pqi_scsi_unblock_requests(ctrl_info);
9206 pqi_ctrl_unblock_scan(ctrl_info);
9207
9208 ssleep(PQI_POST_RESET_DELAY_SECS);
9209
9210 return pqi_ctrl_init_resume(ctrl_info);
9211}
9212
9213static int pqi_freeze(struct device *dev)
9214{
9215 return pqi_suspend_or_freeze(dev, false);
9216}
9217
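/*
 * Thaw follows freeze (hibernation image creation): the controller was not
 * reset or flushed on the freeze path, so it is enough to re-request the
 * IRQs, mark the controller online and PQI mode enabled again, and unblock
 * I/O; no reinitialization is required.
 */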
9218static int pqi_thaw(struct device *dev)
9219{
9220 int rc;
9221 struct pci_dev *pci_dev;
9222 struct pqi_ctrl_info *ctrl_info;
9223
9224 pci_dev = to_pci_dev(dev);
9225 ctrl_info = pci_get_drvdata(pci_dev);
9226
9227 rc = pqi_request_irqs(ctrl_info);
9228 if (rc)
9229 return rc;
9230
9231 ctrl_info->controller_online = true;
9232 ctrl_info->pqi_mode_enabled = true;
9233
9234 pqi_ctrl_unblock_device_reset(ctrl_info);
9235 pqi_ctrl_unblock_requests(ctrl_info);
9236 pqi_scsi_unblock_requests(ctrl_info);
9237 pqi_ctrl_unblock_scan(ctrl_info);
9238
9239 return 0;
9240}
9241
9242static int pqi_poweroff(struct device *dev)
9243{
9244 struct pci_dev *pci_dev;
9245 struct pqi_ctrl_info *ctrl_info;
9246 enum bmic_flush_cache_shutdown_event shutdown_event;
9247
9248 pci_dev = to_pci_dev(dev);
9249 ctrl_info = pci_get_drvdata(pci_dev);
9250
9251 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9252 pqi_flush_cache(ctrl_info, shutdown_event);
9253
9254 return 0;
9255}
9256
9257static const struct dev_pm_ops pqi_pm_ops = {
9258 .suspend = pqi_suspend,
9259 .resume = pqi_resume_or_restore,
9260 .freeze = pqi_freeze,
9261 .thaw = pqi_thaw,
9262 .poweroff = pqi_poweroff,
9263 .restore = pqi_resume_or_restore,
9264};
9265
9266#endif /* CONFIG_PM */
9267
9268/* Define the PCI IDs for the controllers that we support. */
9269static const struct pci_device_id pqi_pci_id_table[] = {
9270 {
9271 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9272 0x105b, 0x1211)
9273 },
9274 {
9275 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9276 0x105b, 0x1321)
9277 },
9278 {
9279 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9280 0x152d, 0x8a22)
9281 },
9282 {
9283 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9284 0x152d, 0x8a23)
9285 },
9286 {
9287 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9288 0x152d, 0x8a24)
9289 },
9290 {
9291 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9292 0x152d, 0x8a36)
9293 },
9294 {
9295 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9296 0x152d, 0x8a37)
9297 },
9298 {
9299 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9300 0x193d, 0x1104)
9301 },
9302 {
9303 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9304 0x193d, 0x1105)
9305 },
9306 {
9307 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9308 0x193d, 0x1106)
9309 },
9310 {
9311 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9312 0x193d, 0x1107)
9313 },
9314 {
9315 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9316 0x193d, 0x1108)
9317 },
9318 {
9319 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9320 0x193d, 0x1109)
9321 },
9322 {
9323 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9324 0x193d, 0x110b)
9325 },
9326 {
9327 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9328 0x193d, 0x8460)
9329 },
9330 {
9331 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9332 0x193d, 0x8461)
9333 },
9334 {
9335 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9336 0x193d, 0xc460)
9337 },
9338 {
9339 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9340 0x193d, 0xc461)
9341 },
9342 {
9343 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9344 0x193d, 0xf460)
9345 },
9346 {
9347 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9348 0x193d, 0xf461)
9349 },
9350 {
9351 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9352 0x1bd4, 0x0045)
9353 },
9354 {
9355 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9356 0x1bd4, 0x0046)
9357 },
9358 {
9359 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9360 0x1bd4, 0x0047)
9361 },
9362 {
9363 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9364 0x1bd4, 0x0048)
9365 },
9366 {
9367 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9368 0x1bd4, 0x004a)
9369 },
9370 {
9371 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9372 0x1bd4, 0x004b)
9373 },
9374 {
9375 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9376 0x1bd4, 0x004c)
9377 },
9378 {
9379 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9380 0x1bd4, 0x004f)
9381 },
9382 {
9383 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9384 0x1bd4, 0x0051)
9385 },
9386 {
9387 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9388 0x1bd4, 0x0052)
9389 },
9390 {
9391 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9392 0x1bd4, 0x0053)
9393 },
9394 {
9395 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9396 0x1bd4, 0x0054)
9397 },
9398 {
9399 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9400 0x1bd4, 0x006b)
9401 },
9402 {
9403 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9404 0x1bd4, 0x006c)
9405 },
9406 {
9407 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9408 0x1bd4, 0x006d)
9409 },
9410 {
9411 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9412 0x1bd4, 0x006f)
9413 },
9414 {
9415 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9416 0x1bd4, 0x0070)
9417 },
9418 {
9419 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9420 0x1bd4, 0x0071)
9421 },
9422 {
9423 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9424 0x1bd4, 0x0072)
9425 },
9426 {
9427 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9428 0x1bd4, 0x0086)
9429 },
9430 {
9431 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9432 0x1bd4, 0x0087)
9433 },
9434 {
9435 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9436 0x1bd4, 0x0088)
9437 },
9438 {
9439 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9440 0x1bd4, 0x0089)
9441 },
9442 {
9443 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9444 0x19e5, 0xd227)
9445 },
9446 {
9447 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9448 0x19e5, 0xd228)
9449 },
9450 {
9451 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9452 0x19e5, 0xd229)
9453 },
9454 {
9455 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9456 0x19e5, 0xd22a)
9457 },
9458 {
9459 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9460 0x19e5, 0xd22b)
9461 },
9462 {
9463 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9464 0x19e5, 0xd22c)
9465 },
9466 {
9467 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9468 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
9469 },
9470 {
9471 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
55790064 9472 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
6c223761 9473 },
9474 {
9475 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9476 PCI_VENDOR_ID_ADAPTEC2, 0x0659)
9477 },
9478 {
9479 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9480 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
9481 },
9482 {
9483 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9484 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
9485 },
9486 {
9487 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9488 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
9489 },
9490 {
9491 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9492 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
9493 },
9494 {
9495 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9496 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
9497 },
9498 {
9499 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9500 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
9501 },
9502 {
9503 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9504 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
6c223761 9505 },
9506 {
9507 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9508 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
9509 },
9510 {
9511 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9512 PCI_VENDOR_ID_ADAPTEC2, 0x0808)
9513 },
9514 {
9515 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9516 PCI_VENDOR_ID_ADAPTEC2, 0x0809)
9517 },
9518 {
9519 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9520 PCI_VENDOR_ID_ADAPTEC2, 0x080a)
9521 },
9522 {
9523 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9524 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
9525 },
9526 {
9527 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9528 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
9529 },
9530 {
9531 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9532 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
9533 },
9534 {
9535 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9536 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
9537 },
9538 {
9539 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9540 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
9541 },
9542 {
9543 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9544 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
9545 },
9546 {
9547 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9548 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
9549 },
9550 {
9551 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9552 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
9553 },
9554 {
9555 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9556 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
6c223761 9557 },
9558 {
9559 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9560 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
9561 },
9562 {
9563 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9564 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
9565 },
9566 {
9567 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9568 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
9569 },
9570 {
9571 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9572 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
9573 },
9574 {
9575 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9576 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
9577 },
9578 {
9579 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9580 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
6c223761 9581 },
9582 {
9583 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9584 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
9585 },
9586 {
9587 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9588 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
9589 },
9590 {
9591 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 9592 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
6c223761 9593 },
9594 {
9595 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9596 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
9597 },
9598 {
9599 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9600 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
9601 },
9602 {
9603 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9604 PCI_VENDOR_ID_ADAPTEC2, 0x1304)
9605 },
9606 {
9607 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9608 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
9609 },
9610 {
9611 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9612 PCI_VENDOR_ID_ADAPTEC2, 0x1400)
9613 },
9614 {
9615 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9616 PCI_VENDOR_ID_ADAPTEC2, 0x1402)
9617 },
9618 {
9619 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9620 PCI_VENDOR_ID_ADAPTEC2, 0x1410)
9621 },
9622 {
9623 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9624 PCI_VENDOR_ID_ADAPTEC2, 0x1411)
9625 },
9626 {
9627 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9628 PCI_VENDOR_ID_ADAPTEC2, 0x1412)
9629 },
9630 {
9631 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9632 PCI_VENDOR_ID_ADAPTEC2, 0x1420)
9633 },
9634 {
9635 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9636 PCI_VENDOR_ID_ADAPTEC2, 0x1430)
9637 },
9638 {
9639 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9640 PCI_VENDOR_ID_ADAPTEC2, 0x1440)
9641 },
9642 {
9643 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9644 PCI_VENDOR_ID_ADAPTEC2, 0x1441)
9645 },
9646 {
9647 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9648 PCI_VENDOR_ID_ADAPTEC2, 0x1450)
9649 },
9650 {
9651 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9652 PCI_VENDOR_ID_ADAPTEC2, 0x1452)
9653 },
9654 {
9655 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9656 PCI_VENDOR_ID_ADAPTEC2, 0x1460)
9657 },
9658 {
9659 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9660 PCI_VENDOR_ID_ADAPTEC2, 0x1461)
9661 },
9662 {
9663 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9664 PCI_VENDOR_ID_ADAPTEC2, 0x1462)
9665 },
9666 {
9667 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9668 PCI_VENDOR_ID_ADAPTEC2, 0x1463)
9669 },
9670 {
9671 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9672 PCI_VENDOR_ID_ADAPTEC2, 0x1470)
9673 },
9674 {
9675 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9676 PCI_VENDOR_ID_ADAPTEC2, 0x1471)
9677 },
9678 {
9679 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9680 PCI_VENDOR_ID_ADAPTEC2, 0x1472)
9681 },
9682 {
9683 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9684 PCI_VENDOR_ID_ADAPTEC2, 0x1473)
9685 },
9686 {
9687 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9688 PCI_VENDOR_ID_ADAPTEC2, 0x1474)
9689 },
9690 {
9691 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9692 PCI_VENDOR_ID_ADAPTEC2, 0x1475)
9693 },
9694 {
9695 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9696 PCI_VENDOR_ID_ADAPTEC2, 0x1480)
9697 },
9698 {
9699 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9700 PCI_VENDOR_ID_ADAPTEC2, 0x1490)
9701 },
9702 {
9703 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9704 PCI_VENDOR_ID_ADAPTEC2, 0x1491)
9705 },
9706 {
9707 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9708 PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
9709 },
9710 {
9711 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9712 PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
9713 },
9714 {
9715 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9716 PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
9717 },
9718 {
9719 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9720 PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
9721 },
9722 {
9723 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9724 PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
9725 },
9726 {
9727 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9728 PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
9729 },
9730 {
9731 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9732 PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
9733 },
9734 {
9735 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9736 PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
9737 },
9738 {
9739 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9740 PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
9741 },
9742 {
9743 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9744 PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
9745 },
9746 {
9747 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9748 PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
9749 },
9750 {
9751 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9752 PCI_VENDOR_ID_ADAPTEC2, 0x14c3)
9753 },
9754 {
9755 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9756 PCI_VENDOR_ID_ADAPTEC2, 0x14c4)
9757 },
9758 {
9759 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9760 PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
9761 },
9762 {
9763 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9764 PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
9765 },
9766 {
9767 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9768 PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
9769 },
9770 {
9771 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9772 PCI_VENDOR_ID_ADVANTECH, 0x8312)
9773 },
9774 {
9775 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9776 PCI_VENDOR_ID_DELL, 0x1fe0)
9777 },
9778 {
9779 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9780 PCI_VENDOR_ID_HP, 0x0600)
9781 },
9782 {
9783 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9784 PCI_VENDOR_ID_HP, 0x0601)
9785 },
9786 {
9787 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9788 PCI_VENDOR_ID_HP, 0x0602)
9789 },
9790 {
9791 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9792 PCI_VENDOR_ID_HP, 0x0603)
9793 },
9794 {
9795 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
55790064 9796 PCI_VENDOR_ID_HP, 0x0609)
9797 },
9798 {
9799 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9800 PCI_VENDOR_ID_HP, 0x0650)
9801 },
9802 {
9803 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9804 PCI_VENDOR_ID_HP, 0x0651)
9805 },
9806 {
9807 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9808 PCI_VENDOR_ID_HP, 0x0652)
9809 },
9810 {
9811 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9812 PCI_VENDOR_ID_HP, 0x0653)
9813 },
9814 {
9815 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9816 PCI_VENDOR_ID_HP, 0x0654)
9817 },
9818 {
9819 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9820 PCI_VENDOR_ID_HP, 0x0655)
9821 },
9822 {
9823 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9824 PCI_VENDOR_ID_HP, 0x0700)
9825 },
9826 {
9827 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9828 PCI_VENDOR_ID_HP, 0x0701)
9829 },
9830 {
9831 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9832 PCI_VENDOR_ID_HP, 0x1001)
9833 },
9834 {
9835 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9836 PCI_VENDOR_ID_HP, 0x1002)
9837 },
9838 {
9839 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9840 PCI_VENDOR_ID_HP, 0x1100)
9841 },
9842 {
9843 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9844 PCI_VENDOR_ID_HP, 0x1101)
9845 },
9846 {
9847 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9848 0x1590, 0x0294)
9849 },
9850 {
9851 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9852 0x1590, 0x02db)
9853 },
9854 {
9855 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9856 0x1590, 0x02dc)
9857 },
9858 {
9859 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9860 0x1590, 0x032e)
9861 },
9862 {
9863 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9864 0x1590, 0x036f)
9865 },
9866 {
9867 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9868 0x1590, 0x0381)
9869 },
9870 {
9871 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9872 0x1590, 0x0382)
9873 },
9874 {
9875 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9876 0x1590, 0x0383)
9877 },
9878 {
9879 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9880 0x1d8d, 0x0800)
9881 },
9882 {
9883 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9884 0x1d8d, 0x0908)
9885 },
9886 {
9887 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9888 0x1d8d, 0x0806)
9889 },
9890 {
9891 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9892 0x1d8d, 0x0916)
9893 },
9894 {
9895 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9896 PCI_VENDOR_ID_GIGABYTE, 0x1000)
9897 },
9898 {
9899 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9900 0x1dfc, 0x3161)
9901 },
9902 {
9903 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9904 0x1f0c, 0x3161)
9905 },
9906 {
9907 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9908 0x1cf2, 0x5445)
9909 },
9910 {
9911 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9912 0x1cf2, 0x5446)
9913 },
9914 {
9915 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9916 0x1cf2, 0x5447)
9917 },
9918 {
9919 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9920 0x1cf2, 0x5449)
9921 },
9922 {
9923 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9924 0x1cf2, 0x544a)
9925 },
9926 {
9927 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9928 0x1cf2, 0x544b)
9929 },
9930 {
9931 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9932 0x1cf2, 0x544d)
9933 },
9934 {
9935 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9936 0x1cf2, 0x544e)
9937 },
9938 {
9939 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9940 0x1cf2, 0x544f)
9941 },
9942 {
9943 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9944 0x1cf2, 0x0b27)
9945 },
9946 {
9947 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9948 0x1cf2, 0x0b29)
9949 },
9950 {
9951 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9952 0x1cf2, 0x0b45)
9953 },
9954 {
9955 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9956 0x1cc4, 0x0101)
9957 },
9958 {
9959 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9960 0x1cc4, 0x0201)
9961 },
9962 {
9963 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9964 PCI_VENDOR_ID_LENOVO, 0x0220)
9965 },
9966 {
9967 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9968 PCI_VENDOR_ID_LENOVO, 0x0221)
9969 },
9970 {
9971 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9972 PCI_VENDOR_ID_LENOVO, 0x0520)
9973 },
9974 {
9975 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9976 PCI_VENDOR_ID_LENOVO, 0x0522)
9977 },
9978 {
9979 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9980 PCI_VENDOR_ID_LENOVO, 0x0620)
9981 },
9982 {
9983 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9984 PCI_VENDOR_ID_LENOVO, 0x0621)
9985 },
9986 {
9987 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9988 PCI_VENDOR_ID_LENOVO, 0x0622)
9989 },
9990 {
9991 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9992 PCI_VENDOR_ID_LENOVO, 0x0623)
9993 },
9994 {
9995 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9996 0x1e93, 0x1000)
9997 },
9998 {
9999 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10000 0x1e93, 0x1001)
10001 },
10002 {
10003 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10004 0x1e93, 0x1002)
10005 },
10006 {
10007 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10008 PCI_ANY_ID, PCI_ANY_ID)
10009 },
10010 { 0 }
10011};
10012
10013MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
10014
10015static struct pci_driver pqi_pci_driver = {
10016 .name = DRIVER_NAME_SHORT,
10017 .id_table = pqi_pci_id_table,
10018 .probe = pqi_pci_probe,
10019 .remove = pqi_pci_remove,
10020 .shutdown = pqi_shutdown,
061ef06a 10021#if defined(CONFIG_PM)
10022 .driver = {
10023 .pm = &pqi_pm_ops
10024 },
061ef06a 10025#endif
10026};
10027
10028static int __init pqi_init(void)
10029{
10030 int rc;
10031
10032 pr_info(DRIVER_NAME "\n");
10033 pqi_verify_structures();
10034 sis_verify_structures();
6c223761 10035
8b664fef 10036 pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
10037 if (!pqi_sas_transport_template)
10038 return -ENODEV;
10039
10040 pqi_process_module_params();
10041
10042 rc = pci_register_driver(&pqi_pci_driver);
10043 if (rc)
10044 sas_release_transport(pqi_sas_transport_template);
10045
10046 return rc;
10047}
10048
10049static void __exit pqi_cleanup(void)
10050{
10051 pci_unregister_driver(&pqi_pci_driver);
10052 sas_release_transport(pqi_sas_transport_template);
10053}
10054
10055module_init(pqi_init);
10056module_exit(pqi_cleanup);
10057
5e693586 10058static void pqi_verify_structures(void)
10059{
10060 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10061 sis_host_to_ctrl_doorbell) != 0x20);
10062 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10063 sis_interrupt_mask) != 0x34);
10064 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10065 sis_ctrl_to_host_doorbell) != 0x9c);
10066 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10067 sis_ctrl_to_host_doorbell_clear) != 0xa0);
10068 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10069 sis_driver_scratch) != 0xb0);
10070 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10071 sis_product_identifier) != 0xb4);
10072 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10073 sis_firmware_status) != 0xbc);
10074 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10075 sis_ctrl_shutdown_reason_code) != 0xcc);
10076 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10077 sis_mailbox) != 0x1000);
10078 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10079 pqi_registers) != 0x4000);
10080
10081 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10082 iu_type) != 0x0);
10083 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10084 iu_length) != 0x2);
10085 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10086 response_queue_id) != 0x4);
10087 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
ae0c189d 10088 driver_flags) != 0x6);
10089 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
10090
10091 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10092 status) != 0x0);
10093 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10094 service_response) != 0x1);
10095 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10096 data_present) != 0x2);
10097 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10098 reserved) != 0x3);
10099 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10100 residual_count) != 0x4);
10101 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10102 data_length) != 0x8);
10103 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10104 reserved1) != 0xa);
10105 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10106 data) != 0xc);
10107 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
10108
10109 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10110 data_in_result) != 0x0);
10111 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10112 data_out_result) != 0x1);
10113 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10114 reserved) != 0x2);
10115 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10116 status) != 0x5);
10117 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10118 status_qualifier) != 0x6);
10119 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10120 sense_data_length) != 0x8);
10121 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10122 response_data_length) != 0xa);
10123 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10124 data_in_transferred) != 0xc);
10125 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10126 data_out_transferred) != 0x10);
10127 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10128 data) != 0x14);
10129 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
10130
10131 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10132 signature) != 0x0);
10133 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10134 function_and_status_code) != 0x8);
10135 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10136 max_admin_iq_elements) != 0x10);
10137 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10138 max_admin_oq_elements) != 0x11);
10139 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10140 admin_iq_element_length) != 0x12);
10141 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10142 admin_oq_element_length) != 0x13);
10143 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10144 max_reset_timeout) != 0x14);
10145 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10146 legacy_intx_status) != 0x18);
10147 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10148 legacy_intx_mask_set) != 0x1c);
10149 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10150 legacy_intx_mask_clear) != 0x20);
10151 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10152 device_status) != 0x40);
10153 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10154 admin_iq_pi_offset) != 0x48);
10155 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10156 admin_oq_ci_offset) != 0x50);
10157 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10158 admin_iq_element_array_addr) != 0x58);
10159 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10160 admin_oq_element_array_addr) != 0x60);
10161 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10162 admin_iq_ci_addr) != 0x68);
10163 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10164 admin_oq_pi_addr) != 0x70);
10165 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10166 admin_iq_num_elements) != 0x78);
10167 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10168 admin_oq_num_elements) != 0x79);
10169 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10170 admin_queue_int_msg_num) != 0x7a);
10171 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10172 device_error) != 0x80);
10173 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10174 error_details) != 0x88);
10175 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10176 device_reset) != 0x90);
10177 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10178 power_action) != 0x94);
10179 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
10180
10181 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10182 header.iu_type) != 0);
10183 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10184 header.iu_length) != 2);
10185 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
ae0c189d 10186 header.driver_flags) != 6);
10187 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10188 request_id) != 8);
10189 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10190 function_code) != 10);
10191 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10192 data.report_device_capability.buffer_length) != 44);
10193 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10194 data.report_device_capability.sg_descriptor) != 48);
10195 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10196 data.create_operational_iq.queue_id) != 12);
10197 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10198 data.create_operational_iq.element_array_addr) != 16);
10199 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10200 data.create_operational_iq.ci_addr) != 24);
10201 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10202 data.create_operational_iq.num_elements) != 32);
10203 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10204 data.create_operational_iq.element_length) != 34);
10205 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10206 data.create_operational_iq.queue_protocol) != 36);
10207 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10208 data.create_operational_oq.queue_id) != 12);
10209 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10210 data.create_operational_oq.element_array_addr) != 16);
10211 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10212 data.create_operational_oq.pi_addr) != 24);
10213 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10214 data.create_operational_oq.num_elements) != 32);
10215 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10216 data.create_operational_oq.element_length) != 34);
10217 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10218 data.create_operational_oq.queue_protocol) != 36);
10219 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10220 data.create_operational_oq.int_msg_num) != 40);
10221 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10222 data.create_operational_oq.coalescing_count) != 42);
10223 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10224 data.create_operational_oq.min_coalescing_time) != 44);
10225 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10226 data.create_operational_oq.max_coalescing_time) != 48);
10227 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10228 data.delete_operational_queue.queue_id) != 12);
10229 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
10230 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10231 data.create_operational_iq) != 64 - 11);
10232 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10233 data.create_operational_oq) != 64 - 11);
10234 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10235 data.delete_operational_queue) != 64 - 11);
10236
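	/* General admin response IU layout. */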
10237 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10238 header.iu_type) != 0);
10239 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10240 header.iu_length) != 2);
10241 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10242 header.driver_flags) != 6);
10243 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10244 request_id) != 8);
10245 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10246 function_code) != 10);
10247 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10248 status) != 11);
10249 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10250 data.create_operational_iq.status_descriptor) != 12);
10251 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10252 data.create_operational_iq.iq_pi_offset) != 16);
10253 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10254 data.create_operational_oq.status_descriptor) != 12);
10255 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10256 data.create_operational_oq.oq_ci_offset) != 16);
10257 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
10258
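	/* RAID path request IU: sized to exactly one operational inbound queue element. */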
10259 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10260 header.iu_type) != 0);
10261 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10262 header.iu_length) != 2);
10263 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10264 header.response_queue_id) != 4);
10265 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10266 header.driver_flags) != 6);
10267 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10268 request_id) != 8);
10269 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10270 nexus_id) != 10);
10271 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10272 buffer_length) != 12);
10273 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10274 lun_number) != 16);
10275 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10276 protocol_specific) != 24);
10277 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10278 error_index) != 27);
10279 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10280 cdb) != 32);
10281 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10282 timeout) != 60);
10283 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10284 sg_descriptors) != 64);
10285 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
10286 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10287
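	/* AIO path request IU: also sized to one operational inbound queue element. */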
10288 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10289 header.iu_type) != 0);
10290 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10291 header.iu_length) != 2);
10292 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10293 header.response_queue_id) != 4);
10294 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10295 header.driver_flags) != 6);
10296 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10297 request_id) != 8);
10298 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10299 nexus_id) != 12);
10300 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10301 buffer_length) != 16);
10302 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10303 data_encryption_key_index) != 22);
10304 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10305 encrypt_tweak_lower) != 24);
10306 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10307 encrypt_tweak_upper) != 28);
10308 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10309 cdb) != 32);
10310 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10311 error_index) != 48);
10312 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10313 num_sg_descriptors) != 50);
10314 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10315 cdb_length) != 51);
10316 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10317 lun_number) != 52);
10318 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10319 sg_descriptors) != 64);
10320 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
10321 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10322
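	/* I/O response IU layout. */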
10323 BUILD_BUG_ON(offsetof(struct pqi_io_response,
10324 header.iu_type) != 0);
10325 BUILD_BUG_ON(offsetof(struct pqi_io_response,
10326 header.iu_length) != 2);
10327 BUILD_BUG_ON(offsetof(struct pqi_io_response,
10328 request_id) != 8);
10329 BUILD_BUG_ON(offsetof(struct pqi_io_response,
10330 error_index) != 10);
10331
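	/* General management request IU (report/set event configuration). */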
10332 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10333 header.iu_type) != 0);
10334 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10335 header.iu_length) != 2);
10336 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10337 header.response_queue_id) != 4);
10338 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10339 request_id) != 8);
10340 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10341 data.report_event_configuration.buffer_length) != 12);
10342 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10343 data.report_event_configuration.sg_descriptors) != 16);
10344 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10345 data.set_event_configuration.global_event_oq_id) != 10);
10346 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10347 data.set_event_configuration.buffer_length) != 12);
10348 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10349 data.set_event_configuration.sg_descriptors) != 16);
10350
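	/* IU layer descriptor layout. */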
10351 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10352 max_inbound_iu_length) != 6);
10353 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10354 max_outbound_iu_length) != 14);
10355 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
10356
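	/* Device capability structure layout. */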
10357 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10358 data_length) != 0);
10359 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10360 iq_arbitration_priority_support_bitmask) != 8);
10361 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10362 maximum_aw_a) != 9);
10363 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10364 maximum_aw_b) != 10);
10365 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10366 maximum_aw_c) != 11);
10367 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10368 max_inbound_queues) != 16);
10369 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10370 max_elements_per_iq) != 18);
10371 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10372 max_iq_element_length) != 24);
10373 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10374 min_iq_element_length) != 26);
10375 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10376 max_outbound_queues) != 30);
10377 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10378 max_elements_per_oq) != 32);
10379 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10380 intr_coalescing_time_granularity) != 34);
10381 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10382 max_oq_element_length) != 36);
10383 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10384 min_oq_element_length) != 38);
10385 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10386 iu_layer_descriptors) != 64);
10387 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
10388
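	/* Event descriptor and event configuration layouts. */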
10389 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10390 event_type) != 0);
10391 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10392 oq_id) != 2);
10393 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
10394
10395 BUILD_BUG_ON(offsetof(struct pqi_event_config,
10396 num_event_descriptors) != 2);
10397 BUILD_BUG_ON(offsetof(struct pqi_event_config,
10398 descriptors) != 4);
10399
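	/* The supported-event count must match the pqi_supported_event_types table. */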
10400 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
10401 ARRAY_SIZE(pqi_supported_event_types));
10402
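	/* Event response IU layout. */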
10403 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10404 header.iu_type) != 0);
10405 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10406 header.iu_length) != 2);
10407 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10408 event_type) != 8);
10409 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10410 event_id) != 10);
10411 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10412 additional_event_id) != 12);
10413 BUILD_BUG_ON(offsetof(struct pqi_event_response,
10414 data) != 16);
10415 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
10416
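	/* Event acknowledgment request IU layout. */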
10417 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10418 header.iu_type) != 0);
10419 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10420 header.iu_length) != 2);
10421 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10422 event_type) != 8);
10423 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10424 event_id) != 10);
10425 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10426 additional_event_id) != 12);
10427 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
10428
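	/* Task management request IU layout. */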
10429 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10430 header.iu_type) != 0);
10431 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10432 header.iu_length) != 2);
10433 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10434 request_id) != 8);
10435 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10436 nexus_id) != 10);
10437 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10438 timeout) != 14);
10439 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10440 lun_number) != 16);
10441 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10442 protocol_specific) != 24);
10443 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10444 outbound_queue_id_to_manage) != 26);
10445 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10446 request_id_to_manage) != 28);
10447 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10448 task_management_function) != 30);
10449 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
10450
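	/* Task management response IU layout. */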
10451 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10452 header.iu_type) != 0);
10453 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10454 header.iu_length) != 2);
10455 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10456 request_id) != 8);
10457 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10458 nexus_id) != 10);
10459 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10460 additional_response_info) != 12);
10461 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10462 response_code) != 15);
10463 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
10464
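	/* BMIC IDENTIFY CONTROLLER buffer: selected field offsets. */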
10465 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10466 configured_logical_drive_count) != 0);
10467 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10468 configuration_signature) != 1);
10469 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10470 firmware_version_short) != 5);
10471 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10472 extended_logical_unit_count) != 154);
10473 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10474 firmware_build_number) != 190);
10475 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10476 vendor_id) != 200);
10477 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10478 product_id) != 208);
10479 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10480 extra_controller_flags) != 286);
10481 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10482 controller_mode) != 292);
10483 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10484 spare_part_number) != 293);
10485 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10486 firmware_version_long) != 325);
10487
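	/* BMIC IDENTIFY PHYSICAL DEVICE buffer: selected field offsets and total size. */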
10488 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10489 phys_bay_in_box) != 115);
10490 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10491 device_type) != 120);
10492 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10493 redundant_path_present_map) != 1736);
10494 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10495 active_path_number) != 1738);
10496 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10497 alternate_paths_phys_connector) != 1739);
10498 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10499 alternate_paths_phys_box_on_port) != 1755);
10500 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10501 current_queue_depth_limit) != 1796);
10502 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
10503
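	/* BMIC sense feature buffer/page headers and the I/O page AIO subpage. */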
10504 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
10505 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10506 page_code) != 0);
10507 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10508 subpage_code) != 1);
10509 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10510 buffer_length) != 2);
10511
10512 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
10513 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10514 page_code) != 0);
10515 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10516 subpage_code) != 1);
10517 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10518 page_length) != 2);
10519
10520 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
10521 != 18);
10522 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10523 header) != 0);
10524 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10525 firmware_read_support) != 4);
10526 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10527 driver_read_support) != 5);
10528 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10529 firmware_write_support) != 6);
10530 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10531 driver_write_support) != 7);
10532 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10533 max_transfer_encrypted_sas_sata) != 8);
10534 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10535 max_transfer_encrypted_nvme) != 10);
10536 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10537 max_write_raid_5_6) != 12);
10538 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10539 max_write_raid_1_10_2drive) != 14);
10540 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10541 max_write_raid_1_10_3drive) != 16);
10542
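	/* Queue element counts and lengths: within PQI limits and correctly aligned. */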
10543 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
10544 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
10545 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
10546 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10547 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
10548 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10549 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
10550 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
10551 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10552 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
10553 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
10554 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10555
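	/* Reserved I/O slots must be fewer than the maximum outstanding requests. */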
10556 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
10557 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
10558 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
10559}