// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.2.16-012"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		2
#define DRIVER_RELEASE		16
#define DRIVER_REVISION		12

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
	u32 bytes_requested);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_secs);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-6",
	"RAID-1(Triple)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_TRIPLE		6	/* also used for RAID 1+0 Triple */
#define SA_RAID_MAX		SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_device_reset = true;
}

static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_device_reset;
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = true;
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = false;
}

static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_ofa;
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
	return device->in_remove;
}

static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_shutdown = true;
}

static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_shutdown;
}

static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
	unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;
	if (pqi_ctrl_in_ofa(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_work_sync(&ctrl_info->event_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->soft_reset_status)
		return 0;

	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info, u8 clear)
{
	u8 status;

	if (!ctrl_info->soft_reset_status)
		return;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~clear;
	writeb(status, ctrl_info->soft_reset_status);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

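/*
 * Illustrative sketch (not an actual call site in this file): a caller
 * that maps a buffer with pqi_map_single() is expected to pass the same
 * direction back to pqi_pci_unmap() once the request completes:
 *
 *	rc = pqi_map_single(pci_dev, &sg_descriptor, buffer,
 *		buffer_length, DMA_FROM_DEVICE);
 *	if (rc)
 *		return rc;
 *	... submit the request that references sg_descriptor ...
 *	pqi_pci_unmap(pci_dev, &sg_descriptor, 1, DMA_FROM_DEVICE);
 *
 * Note that pqi_map_single() succeeds without mapping anything when
 * buffer is NULL, buffer_length is 0, or the direction is DMA_NONE,
 * and pqi_pci_unmap() mirrors this by ignoring DMA_NONE.
 */
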
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
		else
			cdb[1] = ctrl_info->ciss_report_log_flags;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
	case BMIC_SENSE_FEATURE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

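/*
 * pqi_build_raid_path_request() leaves the caller holding exactly one
 * mapped sg descriptor (sg_descriptors[0]) and reports the DMA direction
 * it chose through *dir:
 *
 *	SOP_READ_FLAG         -> DMA_FROM_DEVICE
 *	SOP_WRITE_FLAG        -> DMA_TO_DEVICE
 *	SOP_NO_DIRECTION_FLAG -> DMA_NONE
 *	anything else         -> DMA_BIDIRECTIONAL
 *
 * The caller hands *dir back to pqi_pci_unmap() after the request has
 * completed.
 */
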
static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

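/*
 * The io_request pool is managed without locks: next_io_request_slot is
 * only a starting hint (hence "benignly racy"), and a slot is owned by
 * the first thread whose atomic_inc_return() on its refcount observes 1.
 * A thread that loses the race (sees a value > 1) backs its increment
 * out and probes the next slot, so two threads can never hold the same
 * io_request even if they read the same hint.
 */
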
static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
		buffer, buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		error_info, timeout_msecs);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL, NO_TIMEOUT);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info, NO_TIMEOUT);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
{
	u32 bytes;

	bytes = get_unaligned_le16(limit);
	if (bytes == 0)
		bytes = ~0;
	else
		bytes *= 1024;

	return bytes;
}

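/*
 * The BMIC sense-feature limits are reported in KiB units, with 0
 * meaning "unlimited".  For example, a reported limit of 0 becomes ~0
 * while a reported limit of 8 becomes 8192 bytes.
 */
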
#pragma pack(1)

struct bmic_sense_feature_buffer {
	struct bmic_sense_feature_buffer_header header;
	struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
};

#pragma pack()

#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH	\
	offsetofend(struct bmic_sense_feature_buffer, \
		aio_subpage.max_write_raid_1_10_3drive)

#define MINIMUM_AIO_SUBPAGE_LENGTH	\
	(offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
		max_write_raid_1_10_3drive) - \
	sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))

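/*
 * The minimum lengths above are computed with offsetofend() so that they
 * cover every field up to and including max_write_raid_1_10_3drive, the
 * last field consumed below.  The working assumption is that firmware
 * may legitimately return a longer page (extra fields are ignored), but
 * anything shorter cannot be trusted and is rejected.
 */
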
static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;
	struct bmic_sense_feature_buffer *buffer;

	buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
		buffer, sizeof(*buffer), 0, &dir);
	if (rc)
		goto error;

	request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
	request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	if (rc)
		goto error;

	if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->header.buffer_length) <
			MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
		buffer->aio_subpage.header.page_code !=
			BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->aio_subpage.header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
			MINIMUM_AIO_SUBPAGE_LENGTH) {
		goto error;
	}

	ctrl_info->max_transfer_encrypted_sas_sata =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_sas_sata);

	ctrl_info->max_transfer_encrypted_nvme =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_nvme);

	ctrl_info->max_write_raid_5_6 =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_5_6);

	ctrl_info->max_write_raid_1_10_2drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_2drive);

	ctrl_info->max_write_raid_1_10_3drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_3drive);

error:
	kfree(buffer);

	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA		(1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}

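/*
 * pqi_set_diag_rescan() performs a read-modify-write of the BMIC diag
 * options: BMIC_SENSE_DIAG_OPTIONS fetches the current option bits,
 * PQI_FETCH_PTRAID_DATA is OR-ed in, and BMIC_SET_DIAG_OPTIONS writes
 * the result back, leaving all other option bits untouched.
 */
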
static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

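/*
 * Host wellness buffers are tag-delimited: the payload opens with "<HW>",
 * each data item carries a two-byte type tag plus a little-endian length
 * ("DV" for the driver version above, "TD" for the time below), and the
 * "DW" (don't-write) and "ZZ" (end) tags close the buffer.
 */
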
#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

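/*
 * The wellness time payload is BCD-encoded, one byte per field: hour,
 * minute, second, a reserved zero byte, month, day, century, and year
 * within the century.  For example, 2021-03-05 14:30:59 is encoded as
 * 0x14 0x30 0x59 0x00 0x03 0x05 0x20 0x21.
 */
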
#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
	size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length =
		get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

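/*
 * pqi_report_phys_logical_luns() sizes its buffer in two passes: a
 * header-only request discovers list_length, then the full list is
 * fetched at that size.  Because devices can arrive between the two
 * requests, the length in the second response is re-checked and the
 * fetch is retried from "again:" whenever the list grew in the interim.
 */
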
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

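/*
 * The 32-bit LUN ID at the start of scsi3addr is decoded per bus type:
 * external RAID volumes use bits 16-29 as the target and the low byte
 * as the LUN, while internal logical volumes always use target 0 with
 * bits 0-13 as the LUN.  For example, an external volume whose lunid is
 * 0x00020005 maps to target 2, LUN 5.
 */
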
static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_TRIPLE) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(Triple) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

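/*
 * layout_map_count encodes the number of mirrored copies for RAID-1
 * levels (2 for RAID-1/10, 3 for RAID-1 Triple) and, for RAID-5/6, the
 * number of parity groups (a count greater than 1 indicates RAID-50/60),
 * which is why the checks above key off it.
 */
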
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map),
		0, NULL, NO_TIMEOUT);
	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {

		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size,
			0, NULL, NO_TIMEOUT);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"requested %u bytes, received %u bytes\n",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			rc = -EINVAL;
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	if (!ctrl_info->lv_drive_type_mix_valid) {
		device->max_transfer_encrypted = ~0;
		return;
	}

	switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
	case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_sas_sata;
		break;
	case LV_DRIVE_TYPE_MIX_NVME_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_nvme;
		break;
	case LV_DRIVE_TYPE_MIX_UNKNOWN:
	case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
	default:
		device->max_transfer_encrypted =
			min(ctrl_info->max_transfer_encrypted_sas_sata,
				ctrl_info->max_transfer_encrypted_nvme);
		break;
	}
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS		4
#define RAID_BYPASS_CONFIGURED		0x1
#define RAID_BYPASS_ENABLED		0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0) {
		device->raid_bypass_enabled = true;
		if (get_unaligned_le16(&device->raid_map->flags) &
			RAID_MAP_ENCRYPTION_ENABLED)
			pqi_set_max_transfer_encrypted(ctrl_info, device);
	}

out:
	kfree(buffer);
}

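/*
 * The bypass-status VPD byte carries two independent bits: configured
 * and enabled.  RAID bypass is switched on only when both bits are set
 * and a valid RAID map can be fetched; if that map reports
 * RAID_MAP_ENCRYPTION_ENABLED, the per-drive-type encrypted transfer
 * ceilings gathered by pqi_get_advanced_raid_bypass_config() are
 * applied via pqi_set_max_transfer_encrypted().
 */
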
/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return rc;
	}

	scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
	scsi_sanitize_inquiry_string(&id_phys->model[8], 16);

	memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
	memcpy(device->model, &id_phys->model[8], sizeof(device->model));

	device->box_index = id_phys->box_index;
	device->phys_box_on_bus = id_phys->phys_box_on_bus;
	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;

	return 0;
}

static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}

ce143793 | 1509 | static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, |
6c223761 KB |
1510 | struct pqi_scsi_dev *device, |
1511 | struct bmic_identify_physical_device *id_phys) | |
1512 | { | |
1513 | int rc; | |
1514 | ||
ce143793 KB |
1515 | if (device->is_expander_smp_device) |
1516 | return 0; | |
6c223761 | 1517 | |
ce143793 KB |
1518 | if (pqi_is_logical_device(device)) |
1519 | rc = pqi_get_logical_device_info(ctrl_info, device); | |
1520 | else | |
1521 | rc = pqi_get_physical_device_info(ctrl_info, device, id_phys); | |
694c5d5b | 1522 | |
ce143793 | 1523 | return rc; |
6c223761 KB |
1524 | } |
1525 | ||
1526 | static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, | |
1527 | struct pqi_scsi_dev *device) | |
1528 | { | |
1529 | char *status; | |
1530 | static const char unknown_state_str[] = | |
1531 | "Volume is in an unknown state (%u)"; | |
1532 | char unknown_state_buffer[sizeof(unknown_state_str) + 10]; | |
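/* The extra 10 bytes cover the widest possible %u expansion. */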
1533 | ||
1534 | switch (device->volume_status) { | |
1535 | case CISS_LV_OK: | |
1536 | status = "Volume online"; | |
1537 | break; | |
1538 | case CISS_LV_FAILED: | |
1539 | status = "Volume failed"; | |
1540 | break; | |
1541 | case CISS_LV_NOT_CONFIGURED: | |
1542 | status = "Volume not configured"; | |
1543 | break; | |
1544 | case CISS_LV_DEGRADED: | |
1545 | status = "Volume degraded"; | |
1546 | break; | |
1547 | case CISS_LV_READY_FOR_RECOVERY: | |
1548 | status = "Volume ready for recovery operation"; | |
1549 | break; | |
1550 | case CISS_LV_UNDERGOING_RECOVERY: | |
1551 | status = "Volume undergoing recovery"; | |
1552 | break; | |
1553 | case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED: | |
1554 | status = "Wrong physical drive was replaced"; | |
1555 | break; | |
1556 | case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM: | |
1557 | status = "A physical drive not properly connected"; | |
1558 | break; | |
1559 | case CISS_LV_HARDWARE_OVERHEATING: | |
1560 | status = "Hardware is overheating"; | |
1561 | break; | |
1562 | case CISS_LV_HARDWARE_HAS_OVERHEATED: | |
1563 | status = "Hardware has overheated"; | |
1564 | break; | |
1565 | case CISS_LV_UNDERGOING_EXPANSION: | |
1566 | status = "Volume undergoing expansion"; | |
1567 | break; | |
1568 | case CISS_LV_NOT_AVAILABLE: | |
1569 | status = "Volume waiting for transforming volume"; | |
1570 | break; | |
1571 | case CISS_LV_QUEUED_FOR_EXPANSION: | |
1572 | status = "Volume queued for expansion"; | |
1573 | break; | |
1574 | case CISS_LV_DISABLED_SCSI_ID_CONFLICT: | |
1575 | status = "Volume disabled due to SCSI ID conflict"; | |
1576 | break; | |
1577 | case CISS_LV_EJECTED: | |
1578 | status = "Volume has been ejected"; | |
1579 | break; | |
1580 | case CISS_LV_UNDERGOING_ERASE: | |
1581 | status = "Volume undergoing background erase"; | |
1582 | break; | |
1583 | case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD: | |
1584 | status = "Volume ready for predictive spare rebuild"; | |
1585 | break; | |
1586 | case CISS_LV_UNDERGOING_RPI: | |
1587 | status = "Volume undergoing rapid parity initialization"; | |
1588 | break; | |
1589 | case CISS_LV_PENDING_RPI: | |
1590 | status = "Volume queued for rapid parity initialization"; | |
1591 | break; | |
1592 | case CISS_LV_ENCRYPTED_NO_KEY: | |
1593 | status = "Encrypted volume inaccessible - key not present"; | |
1594 | break; | |
1595 | case CISS_LV_UNDERGOING_ENCRYPTION: | |
1596 | status = "Volume undergoing encryption process"; | |
1597 | break; | |
1598 | case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING: | |
1599 | status = "Volume undergoing encryption re-keying process"; | |
1600 | break; | |
1601 | case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: | |
d87d5474 | 1602 | status = "Volume encrypted but encryption is disabled"; |
6c223761 KB |
1603 | break; |
1604 | case CISS_LV_PENDING_ENCRYPTION: | |
1605 | status = "Volume pending migration to encrypted state"; | |
1606 | break; | |
1607 | case CISS_LV_PENDING_ENCRYPTION_REKEYING: | |
1608 | status = "Volume pending encryption rekeying"; | |
1609 | break; | |
1610 | case CISS_LV_NOT_SUPPORTED: | |
1611 | status = "Volume not supported on this controller"; | |
1612 | break; | |
1613 | case CISS_LV_STATUS_UNAVAILABLE: | |
1614 | status = "Volume status not available"; | |
1615 | break; | |
1616 | default: | |
1617 | snprintf(unknown_state_buffer, sizeof(unknown_state_buffer), | |
1618 | unknown_state_str, device->volume_status); | |
1619 | status = unknown_state_buffer; | |
1620 | break; | |
1621 | } | |
1622 | ||
1623 | dev_info(&ctrl_info->pci_dev->dev, | |
1624 | "scsi %d:%d:%d:%d %s\n", | |
1625 | ctrl_info->scsi_host->host_no, | |
1626 | device->bus, device->target, device->lun, status); | |
1627 | } | |
1628 | ||
6c223761 KB |
1629 | static void pqi_rescan_worker(struct work_struct *work) |
1630 | { | |
1631 | struct pqi_ctrl_info *ctrl_info; | |
1632 | ||
1633 | ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, | |
1634 | rescan_work); | |
1635 | ||
1636 | pqi_scan_scsi_devices(ctrl_info); | |
1637 | } | |
1638 | ||
1639 | static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, | |
1640 | struct pqi_scsi_dev *device) | |
1641 | { | |
1642 | int rc; | |
1643 | ||
1644 | if (pqi_is_logical_device(device)) | |
1645 | rc = scsi_add_device(ctrl_info->scsi_host, device->bus, | |
1646 | device->target, device->lun); | |
1647 | else | |
1648 | rc = pqi_add_sas_device(ctrl_info->sas_host, device); | |
1649 | ||
1650 | return rc; | |
1651 | } | |
1652 | ||
1e46731e MR |
1653 | #define PQI_PENDING_IO_TIMEOUT_SECS 20 |
1654 | ||
583891c9 | 1655 | static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) |
6c223761 | 1656 | { |
1e46731e MR |
1657 | int rc; |
1658 | ||
1659 | pqi_device_remove_start(device); | |
1660 | ||
4d15ad38 | 1661 | rc = pqi_device_wait_for_pending_io(ctrl_info, device, PQI_PENDING_IO_TIMEOUT_SECS); |
1e46731e MR |
1662 | if (rc) |
1663 | dev_err(&ctrl_info->pci_dev->dev, | |
4d15ad38 | 1664 | "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n", |
1e46731e MR |
1665 | ctrl_info->scsi_host->host_no, device->bus, |
1666 | device->target, device->lun, | |
1667 | atomic_read(&device->scsi_cmds_outstanding)); | |
1668 | ||
6c223761 KB |
1669 | if (pqi_is_logical_device(device)) |
1670 | scsi_remove_device(device->sdev); | |
1671 | else | |
1672 | pqi_remove_sas_device(device); | |
1673 | } | |
1674 | ||
1675 | /* Assumes the SCSI device list lock is held. */ | |
1676 | ||
1677 | static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, | |
1678 | int bus, int target, int lun) | |
1679 | { | |
1680 | struct pqi_scsi_dev *device; | |
1681 | ||
4d15ad38 KB |
1682 | list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) |
1683 | if (device->bus == bus && device->target == target && device->lun == lun) | |
6c223761 KB |
1684 | return device; |
1685 | ||
1686 | return NULL; | |
1687 | } | |
1688 | ||
583891c9 | 1689 | static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2) |
6c223761 KB |
1690 | { |
1691 | if (dev1->is_physical_device != dev2->is_physical_device) | |
1692 | return false; | |
1693 | ||
1694 | if (dev1->is_physical_device) | |
1695 | return dev1->wwid == dev2->wwid; | |
1696 | ||
583891c9 | 1697 | return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0; |
6c223761 KB |
1698 | } |
1699 | ||
1700 | enum pqi_find_result { | |
1701 | DEVICE_NOT_FOUND, | |
1702 | DEVICE_CHANGED, | |
1703 | DEVICE_SAME, | |
1704 | }; | |
1705 | ||
1706 | static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, | |
4d15ad38 | 1707 | struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device) |
6c223761 KB |
1708 | { |
1709 | struct pqi_scsi_dev *device; | |
1710 | ||
4d15ad38 KB |
1711 | list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { |
1712 | if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) { | |
6c223761 KB |
1713 | *matching_device = device; |
1714 | if (pqi_device_equal(device_to_find, device)) { | |
1715 | if (device_to_find->volume_offline) | |
1716 | return DEVICE_CHANGED; | |
1717 | return DEVICE_SAME; | |
1718 | } | |
1719 | return DEVICE_CHANGED; | |
1720 | } | |
1721 | } | |
1722 | ||
1723 | return DEVICE_NOT_FOUND; | |
1724 | } | |
1725 | ||
3d46a59a DB |
1726 | static inline const char *pqi_device_type(struct pqi_scsi_dev *device) |
1727 | { | |
1728 | if (device->is_expander_smp_device) | |
1729 | return "Enclosure SMP "; | |
1730 | ||
1731 | return scsi_device_type(device->devtype); | |
1732 | } | |
1733 | ||
6de783f6 KB |
1734 | #define PQI_DEV_INFO_BUFFER_LENGTH 128 |
1735 | ||
6c223761 KB |
1736 | static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, |
1737 | char *action, struct pqi_scsi_dev *device) | |
1738 | { | |
6de783f6 KB |
1739 | ssize_t count; |
1740 | char buffer[PQI_DEV_INFO_BUFFER_LENGTH]; | |
1741 | ||
1742 | count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH, | |
1743 | "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); | |
1744 | ||
1745 | if (device->target_lun_valid) | |
181aea89 | 1746 | count += scnprintf(buffer + count, |
6de783f6 KB |
1747 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
1748 | "%d:%d", | |
1749 | device->target, | |
1750 | device->lun); | |
1751 | else | |
181aea89 | 1752 | count += scnprintf(buffer + count, |
6de783f6 KB |
1753 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
1754 | "-:-"); | |
1755 | ||
1756 | if (pqi_is_logical_device(device)) | |
181aea89 | 1757 | count += scnprintf(buffer + count, |
6de783f6 KB |
1758 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
1759 | " %08x%08x", | |
1760 | *((u32 *)&device->scsi3addr), | |
1761 | *((u32 *)&device->scsi3addr[4])); | |
1762 | else | |
181aea89 | 1763 | count += scnprintf(buffer + count, |
6de783f6 KB |
1764 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
1765 | " %016llx", device->sas_address); | |
1766 | ||
181aea89 | 1767 | count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, |
6de783f6 | 1768 | " %s %.8s %.16s ", |
3d46a59a | 1769 | pqi_device_type(device), |
6c223761 | 1770 | device->vendor, |
6de783f6 KB |
1771 | device->model); |
1772 | ||
1773 | if (pqi_is_logical_device(device)) { | |
1774 | if (device->devtype == TYPE_DISK) | |
181aea89 | 1775 | count += scnprintf(buffer + count, |
6de783f6 KB |
1776 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
1777 | "SSDSmartPathCap%c En%c %-12s", | |
588a63fe KB |
1778 | device->raid_bypass_configured ? '+' : '-', |
1779 | device->raid_bypass_enabled ? '+' : '-', | |
6de783f6 KB |
1780 | pqi_raid_level_to_string(device->raid_level)); |
1781 | } else { | |
181aea89 | 1782 | count += scnprintf(buffer + count, |
6de783f6 KB |
1783 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
1784 | "AIO%c", device->aio_enabled ? '+' : '-'); | |
1785 | if (device->devtype == TYPE_DISK || | |
1786 | device->devtype == TYPE_ZBC) | |
181aea89 | 1787 | count += scnprintf(buffer + count, |
6de783f6 KB |
1788 | PQI_DEV_INFO_BUFFER_LENGTH - count, |
1789 | " qd=%-6d", device->queue_depth); | |
1790 | } | |
1791 | ||
1792 | dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); | |
6c223761 KB |
1793 | } |
1794 | ||
1795 | /* Assumes the SCSI device list lock is held. */ | |
1796 | ||
1797 | static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device, | |
1798 | struct pqi_scsi_dev *new_device) | |
1799 | { | |
1800 | existing_device->devtype = new_device->devtype; | |
1801 | existing_device->device_type = new_device->device_type; | |
1802 | existing_device->bus = new_device->bus; | |
1803 | if (new_device->target_lun_valid) { | |
1804 | existing_device->target = new_device->target; | |
1805 | existing_device->lun = new_device->lun; | |
1806 | existing_device->target_lun_valid = true; | |
1807 | } | |
1808 | ||
244ca45e MR |
1809 | if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION || |
1810 | existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) && | |
1811 | new_device->volume_status == CISS_LV_OK) | |
1812 | existing_device->rescan = true; | |
1813 | ||
6c223761 KB |
1814 | /* By definition, the scsi3addr and wwid fields are already the same. */ |
1815 | ||
1816 | existing_device->is_physical_device = new_device->is_physical_device; | |
bd10cf0b KB |
1817 | existing_device->is_external_raid_device = |
1818 | new_device->is_external_raid_device; | |
3d46a59a DB |
1819 | existing_device->is_expander_smp_device = |
1820 | new_device->is_expander_smp_device; | |
6c223761 KB |
1821 | existing_device->aio_enabled = new_device->aio_enabled; |
1822 | memcpy(existing_device->vendor, new_device->vendor, | |
1823 | sizeof(existing_device->vendor)); | |
1824 | memcpy(existing_device->model, new_device->model, | |
1825 | sizeof(existing_device->model)); | |
1826 | existing_device->sas_address = new_device->sas_address; | |
1827 | existing_device->raid_level = new_device->raid_level; | |
1828 | existing_device->queue_depth = new_device->queue_depth; | |
1829 | existing_device->aio_handle = new_device->aio_handle; | |
1830 | existing_device->volume_status = new_device->volume_status; | |
1831 | existing_device->active_path_index = new_device->active_path_index; | |
1832 | existing_device->path_map = new_device->path_map; | |
1833 | existing_device->bay = new_device->bay; | |
2d2ad4bc GW |
1834 | existing_device->box_index = new_device->box_index; |
1835 | existing_device->phys_box_on_bus = new_device->phys_box_on_bus; | |
583891c9 | 1836 | existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type; |
6c223761 KB |
1837 | memcpy(existing_device->box, new_device->box, |
1838 | sizeof(existing_device->box)); | |
1839 | memcpy(existing_device->phys_connector, new_device->phys_connector, | |
1840 | sizeof(existing_device->phys_connector)); | |
7a012c23 | 1841 | existing_device->next_bypass_group = 0; |
6c223761 KB |
1842 | kfree(existing_device->raid_map); |
1843 | existing_device->raid_map = new_device->raid_map; | |
588a63fe KB |
1844 | existing_device->raid_bypass_configured = |
1845 | new_device->raid_bypass_configured; | |
1846 | existing_device->raid_bypass_enabled = | |
1847 | new_device->raid_bypass_enabled; | |
a9a68101 | 1848 | existing_device->device_offline = false; |
6c223761 KB |
1849 | |
1850 | /* To prevent this from being freed later. */ | |
1851 | new_device->raid_map = NULL; | |
1852 | } | |
1853 | ||
1854 | static inline void pqi_free_device(struct pqi_scsi_dev *device) | |
1855 | { | |
1856 | if (device) { | |
1857 | kfree(device->raid_map); | |
1858 | kfree(device); | |
1859 | } | |
1860 | } | |
1861 | ||
1862 | /* | |
1863 | * Called when exposing a new device to the OS fails in order to re-adjust | |
1864 | * our internal SCSI device list to match the SCSI ML's view. | |
1865 | */ | |
1866 | ||
1867 | static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, | |
1868 | struct pqi_scsi_dev *device) | |
1869 | { | |
1870 | unsigned long flags; | |
1871 | ||
1872 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
1873 | list_del(&device->scsi_device_list_entry); | |
1874 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
1875 | ||
1876 | /* Allow the device structure to be freed later. */ | |
1877 | device->keep_device = false; | |
1878 | } | |
1879 | ||
3d46a59a DB |
1880 | static inline bool pqi_is_device_added(struct pqi_scsi_dev *device) |
1881 | { | |
1882 | if (device->is_expander_smp_device) | |
1883 | return device->sas_port != NULL; | |
1884 | ||
1885 | return device->sdev != NULL; | |
1886 | } | |
1887 | ||
6c223761 KB |
1888 | static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, |
1889 | struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) | |
1890 | { | |
1891 | int rc; | |
1892 | unsigned int i; | |
1893 | unsigned long flags; | |
1894 | enum pqi_find_result find_result; | |
1895 | struct pqi_scsi_dev *device; | |
1896 | struct pqi_scsi_dev *next; | |
1897 | struct pqi_scsi_dev *matching_device; | |
8a994a04 KB |
1898 | LIST_HEAD(add_list); |
1899 | LIST_HEAD(delete_list); | |
6c223761 KB |
1900 | |
1901 | /* | |
1902 | * The idea here is to do as little work as possible while holding the | |
1903 | * spinlock. That's why we go to great pains to defer anything other | |
1904 | * than updating the internal device list until after we release the | |
1905 | * spinlock. | |
1906 | */ | |
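/*
 * Rough sketch of the three phases below:
 *   1. Under scsi_device_list_lock: mark every current device as gone,
 *      classify each newly reported device (DEVICE_SAME, DEVICE_CHANGED,
 *      or DEVICE_NOT_FOUND), and build the delete/add lists.
 *   2. Drop the lock.
 *   3. Remove departed devices, propagate queue-depth changes and
 *      rescan hints to the SCSI ML, then expose the new devices.
 */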
1907 | ||
1908 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
1909 | ||
1910 | /* Assume that all devices in the existing list have gone away. */ | |
4d15ad38 | 1911 | list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) |
6c223761 KB |
1912 | device->device_gone = true; |
1913 | ||
1914 | for (i = 0; i < num_new_devices; i++) { | |
1915 | device = new_device_list[i]; | |
1916 | ||
1917 | find_result = pqi_scsi_find_entry(ctrl_info, device, | |
694c5d5b | 1918 | &matching_device); |
6c223761 KB |
1919 | |
1920 | switch (find_result) { | |
1921 | case DEVICE_SAME: | |
1922 | /* | |
1923 | * The newly found device is already in the existing | |
1924 | * device list. | |
1925 | */ | |
1926 | device->new_device = false; | |
1927 | matching_device->device_gone = false; | |
1928 | pqi_scsi_update_device(matching_device, device); | |
1929 | break; | |
1930 | case DEVICE_NOT_FOUND: | |
1931 | /* | |
1932 | * The newly found device is NOT in the existing device | |
1933 | * list. | |
1934 | */ | |
1935 | device->new_device = true; | |
1936 | break; | |
1937 | case DEVICE_CHANGED: | |
1938 | /* | |
1939 | * The original device has gone away and we need to add | |
1940 | * the new device. | |
1941 | */ | |
1942 | device->new_device = true; | |
1943 | break; | |
6c223761 KB |
1944 | } |
1945 | } | |
1946 | ||
1947 | /* Process all devices that have gone away. */ | |
1948 | list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, | |
1949 | scsi_device_list_entry) { | |
1950 | if (device->device_gone) { | |
4d15ad38 | 1951 | list_del_init(&device->scsi_device_list_entry); |
6c223761 KB |
1952 | list_add_tail(&device->delete_list_entry, &delete_list); |
1953 | } | |
1954 | } | |
1955 | ||
1956 | /* Process all new devices. */ | |
1957 | for (i = 0; i < num_new_devices; i++) { | |
1958 | device = new_device_list[i]; | |
1959 | if (!device->new_device) | |
1960 | continue; | |
1961 | if (device->volume_offline) | |
1962 | continue; | |
1963 | list_add_tail(&device->scsi_device_list_entry, | |
1964 | &ctrl_info->scsi_device_list); | |
1965 | list_add_tail(&device->add_list_entry, &add_list); | |
1966 | /* To prevent this device structure from being freed later. */ | |
1967 | device->keep_device = true; | |
1968 | } | |
1969 | ||
6c223761 KB |
1970 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
1971 | ||
4fd22c13 MR |
1972 | if (pqi_ctrl_in_ofa(ctrl_info)) |
1973 | pqi_ctrl_ofa_done(ctrl_info); | |
1974 | ||
6c223761 | 1975 | /* Remove all devices that have gone away. */ |
4d15ad38 | 1976 | list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) { |
6c223761 KB |
1977 | if (device->volume_offline) { |
1978 | pqi_dev_info(ctrl_info, "offline", device); | |
1979 | pqi_show_volume_status(ctrl_info, device); | |
6c223761 KB |
1980 | } |
1981 | list_del(&device->delete_list_entry); | |
4d15ad38 KB |
1982 | if (pqi_is_device_added(device)) { |
1983 | pqi_remove_device(ctrl_info, device); | |
1984 | } else { | |
1985 | if (!device->volume_offline) | |
1986 | pqi_dev_info(ctrl_info, "removed", device); | |
1987 | pqi_free_device(device); | |
1988 | } | |
6c223761 KB |
1989 | } |
1990 | ||
1991 | /* | |
1992 | * Notify the SCSI ML if the queue depth of any existing device has | |
1993 | * changed. | |
1994 | */ | |
583891c9 KB |
1995 | list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { |
1996 | if (device->sdev && device->queue_depth != device->advertised_queue_depth) { | |
1997 | device->advertised_queue_depth = device->queue_depth; | |
1998 | scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); | |
244ca45e MR |
1999 | if (device->rescan) { |
2000 | scsi_rescan_device(&device->sdev->sdev_gendev); | |
2001 | device->rescan = false; | |
2002 | } | |
6c223761 KB |
2003 | } |
2004 | } | |
2005 | ||
2006 | /* Expose any new devices. */ | |
2007 | list_for_each_entry_safe(device, next, &add_list, add_list_entry) { | |
3d46a59a | 2008 | if (!pqi_is_device_added(device)) { |
6c223761 | 2009 | rc = pqi_add_device(ctrl_info, device); |
ce143793 KB |
2010 | if (rc == 0) { |
2011 | pqi_dev_info(ctrl_info, "added", device); | |
2012 | } else { | |
6c223761 KB |
2013 | dev_warn(&ctrl_info->pci_dev->dev, |
2014 | "scsi %d:%d:%d:%d addition failed, device not added\n", | |
2015 | ctrl_info->scsi_host->host_no, | |
2016 | device->bus, device->target, | |
2017 | device->lun); | |
2018 | pqi_fixup_botched_add(ctrl_info, device); | |
6c223761 KB |
2019 | } |
2020 | } | |
6c223761 KB |
2021 | } |
2022 | } | |
2023 | ||
ce143793 | 2024 | static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device) |
6c223761 | 2025 | { |
ce143793 KB |
2026 | /* |
2027 | * Only support the HBA controller itself as a RAID | |
2028 | * controller. If it's a RAID controller other than | |
2029 | * the HBA itself (an external RAID controller, for | |
2030 | * example), we don't support it. | |
2031 | */ | |
2032 | if (device->device_type == SA_DEVICE_TYPE_CONTROLLER && | |
2033 | !pqi_is_hba_lunid(device->scsi3addr)) | |
583891c9 | 2034 | return false; |
6c223761 | 2035 | |
ce143793 | 2036 | return true; |
6c223761 KB |
2037 | } |
2038 | ||
94086f5b | 2039 | static inline bool pqi_skip_device(u8 *scsi3addr) |
6c223761 | 2040 | { |
94086f5b KB |
2041 | /* Ignore all masked devices. */ |
2042 | if (MASKED_DEVICE(scsi3addr)) | |
6c223761 | 2043 | return true; |
6c223761 KB |
2044 | |
2045 | return false; | |
2046 | } | |
2047 | ||
522bc026 DC |
2048 | static inline void pqi_mask_device(u8 *scsi3addr) |
2049 | { | |
2050 | scsi3addr[3] |= 0xc0; | |
2051 | } | |
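/*
 * This presumably sets the address bits that MASKED_DEVICE() tests, so
 * a LUN masked here (e.g. the VSEP entry hidden in
 * pqi_update_scsi_devices() below) is later skipped by pqi_skip_device().
 */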
2052 | ||
3d46a59a DB |
2053 | static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device) |
2054 | { | |
ce143793 KB |
2055 | switch (device->device_type) { |
2056 | case SA_DEVICE_TYPE_SAS: | |
2057 | case SA_DEVICE_TYPE_EXPANDER_SMP: | |
2058 | case SA_DEVICE_TYPE_SES: | |
3d46a59a DB |
2059 | return true; |
2060 | } | |
2061 | ||
2062 | return false; | |
2063 | } | |
2064 | ||
cd128244 DC |
2065 | static inline bool pqi_expose_device(struct pqi_scsi_dev *device) |
2066 | { | |
583891c9 | 2067 | return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); |
cd128244 DC |
2068 | } |
2069 | ||
6c223761 KB |
2070 | static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) |
2071 | { | |
2072 | int i; | |
2073 | int rc; | |
8a994a04 | 2074 | LIST_HEAD(new_device_list_head); |
6c223761 KB |
2075 | struct report_phys_lun_extended *physdev_list = NULL; |
2076 | struct report_log_lun_extended *logdev_list = NULL; | |
2077 | struct report_phys_lun_extended_entry *phys_lun_ext_entry; | |
2078 | struct report_log_lun_extended_entry *log_lun_ext_entry; | |
2079 | struct bmic_identify_physical_device *id_phys = NULL; | |
2080 | u32 num_physicals; | |
2081 | u32 num_logicals; | |
2082 | struct pqi_scsi_dev **new_device_list = NULL; | |
2083 | struct pqi_scsi_dev *device; | |
2084 | struct pqi_scsi_dev *next; | |
2085 | unsigned int num_new_devices; | |
2086 | unsigned int num_valid_devices; | |
2087 | bool is_physical_device; | |
2088 | u8 *scsi3addr; | |
5e6a9760 GW |
2089 | unsigned int physical_index; |
2090 | unsigned int logical_index; | |
6c223761 | 2091 | static char *out_of_memory_msg = |
6de783f6 | 2092 | "failed to allocate memory, device discovery stopped"; |
6c223761 | 2093 | |
6c223761 KB |
2094 | rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); |
2095 | if (rc) | |
2096 | goto out; | |
2097 | ||
2098 | if (physdev_list) | |
2099 | num_physicals = | |
2100 | get_unaligned_be32(&physdev_list->header.list_length) | |
2101 | / sizeof(physdev_list->lun_entries[0]); | |
2102 | else | |
2103 | num_physicals = 0; | |
2104 | ||
2105 | if (logdev_list) | |
2106 | num_logicals = | |
2107 | get_unaligned_be32(&logdev_list->header.list_length) | |
2108 | / sizeof(logdev_list->lun_entries[0]); | |
2109 | else | |
2110 | num_logicals = 0; | |
2111 | ||
2112 | if (num_physicals) { | |
2113 | /* | |
2114 | * We need this buffer for calls to pqi_get_physical_device_info() |
2115 | * below. We allocate it here instead of inside |
2116 | * pqi_get_physical_device_info() because it's a fairly large |
2117 | * buffer. | |
2118 | */ | |
2119 | id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); | |
2120 | if (!id_phys) { | |
2121 | dev_warn(&ctrl_info->pci_dev->dev, "%s\n", | |
2122 | out_of_memory_msg); | |
2123 | rc = -ENOMEM; | |
2124 | goto out; | |
2125 | } | |
522bc026 | 2126 | |
694c5d5b | 2127 | if (pqi_hide_vsep) { |
522bc026 DC |
2128 | for (i = num_physicals - 1; i >= 0; i--) { |
2129 | phys_lun_ext_entry = | |
2130 | &physdev_list->lun_entries[i]; | |
583891c9 KB |
2131 | if (CISS_GET_DRIVE_NUMBER(phys_lun_ext_entry->lunid) == PQI_VSEP_CISS_BTL) { |
2132 | pqi_mask_device(phys_lun_ext_entry->lunid); | |
522bc026 DC |
2133 | break; |
2134 | } | |
2135 | } | |
2136 | } | |
6c223761 KB |
2137 | } |
2138 | ||
f6cc2a77 KB |
2139 | if (num_logicals && |
2140 | (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX)) | |
2141 | ctrl_info->lv_drive_type_mix_valid = true; | |
2142 | ||
6c223761 KB |
2143 | num_new_devices = num_physicals + num_logicals; |
2144 | ||
6da2ec56 KC |
2145 | new_device_list = kmalloc_array(num_new_devices, |
2146 | sizeof(*new_device_list), | |
2147 | GFP_KERNEL); | |
6c223761 KB |
2148 | if (!new_device_list) { |
2149 | dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); | |
2150 | rc = -ENOMEM; | |
2151 | goto out; | |
2152 | } | |
2153 | ||
2154 | for (i = 0; i < num_new_devices; i++) { | |
2155 | device = kzalloc(sizeof(*device), GFP_KERNEL); | |
2156 | if (!device) { | |
2157 | dev_warn(&ctrl_info->pci_dev->dev, "%s\n", | |
2158 | out_of_memory_msg); | |
2159 | rc = -ENOMEM; | |
2160 | goto out; | |
2161 | } | |
2162 | list_add_tail(&device->new_device_list_entry, | |
2163 | &new_device_list_head); | |
2164 | } | |
2165 | ||
2166 | device = NULL; | |
2167 | num_valid_devices = 0; | |
5e6a9760 GW |
2168 | physical_index = 0; |
2169 | logical_index = 0; | |
6c223761 KB |
2170 | |
2171 | for (i = 0; i < num_new_devices; i++) { | |
2172 | ||
5e6a9760 GW |
2173 | if ((!pqi_expose_ld_first && i < num_physicals) || |
2174 | (pqi_expose_ld_first && i >= num_logicals)) { | |
6c223761 | 2175 | is_physical_device = true; |
5e6a9760 GW |
2176 | phys_lun_ext_entry = |
2177 | &physdev_list->lun_entries[physical_index++]; | |
6c223761 KB |
2178 | log_lun_ext_entry = NULL; |
2179 | scsi3addr = phys_lun_ext_entry->lunid; | |
2180 | } else { | |
2181 | is_physical_device = false; | |
2182 | phys_lun_ext_entry = NULL; | |
2183 | log_lun_ext_entry = | |
5e6a9760 | 2184 | &logdev_list->lun_entries[logical_index++]; |
6c223761 KB |
2185 | scsi3addr = log_lun_ext_entry->lunid; |
2186 | } | |
2187 | ||
94086f5b | 2188 | if (is_physical_device && pqi_skip_device(scsi3addr)) |
6c223761 KB |
2189 | continue; |
2190 | ||
2191 | if (device) | |
2192 | device = list_next_entry(device, new_device_list_entry); | |
2193 | else | |
2194 | device = list_first_entry(&new_device_list_head, | |
2195 | struct pqi_scsi_dev, new_device_list_entry); | |
2196 | ||
2197 | memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); | |
2198 | device->is_physical_device = is_physical_device; | |
3d46a59a | 2199 | if (is_physical_device) { |
ce143793 KB |
2200 | device->device_type = phys_lun_ext_entry->device_type; |
2201 | if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP) | |
3d46a59a DB |
2202 | device->is_expander_smp_device = true; |
2203 | } else { | |
bd10cf0b KB |
2204 | device->is_external_raid_device = |
2205 | pqi_is_external_raid_addr(scsi3addr); | |
3d46a59a | 2206 | } |
6c223761 | 2207 | |
ce143793 KB |
2208 | if (!pqi_is_supported_device(device)) |
2209 | continue; | |
2210 | ||
6c223761 | 2211 | /* Gather information about the device. */ |
ce143793 | 2212 | rc = pqi_get_device_info(ctrl_info, device, id_phys); |
6c223761 KB |
2213 | if (rc == -ENOMEM) { |
2214 | dev_warn(&ctrl_info->pci_dev->dev, "%s\n", | |
2215 | out_of_memory_msg); | |
2216 | goto out; | |
2217 | } | |
2218 | if (rc) { | |
6de783f6 KB |
2219 | if (device->is_physical_device) |
2220 | dev_warn(&ctrl_info->pci_dev->dev, | |
2221 | "obtaining device info failed, skipping physical device %016llx\n", | |
583891c9 | 2222 | get_unaligned_be64(&phys_lun_ext_entry->wwid)); |
6de783f6 KB |
2223 | else |
2224 | dev_warn(&ctrl_info->pci_dev->dev, | |
2225 | "obtaining device info failed, skipping logical device %08x%08x\n", | |
2226 | *((u32 *)&device->scsi3addr), | |
2227 | *((u32 *)&device->scsi3addr[4])); | |
6c223761 KB |
2228 | rc = 0; |
2229 | continue; | |
2230 | } | |
2231 | ||
6c223761 KB |
2232 | pqi_assign_bus_target_lun(device); |
2233 | ||
6c223761 KB |
2234 | if (device->is_physical_device) { |
2235 | device->wwid = phys_lun_ext_entry->wwid; | |
2236 | if ((phys_lun_ext_entry->device_flags & | |
694c5d5b | 2237 | CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) && |
3d46a59a | 2238 | phys_lun_ext_entry->aio_handle) { |
583891c9 KB |
2239 | device->aio_enabled = true; |
2240 | device->aio_handle = | |
2241 | phys_lun_ext_entry->aio_handle; | |
3d46a59a | 2242 | } |
6c223761 KB |
2243 | } else { |
2244 | memcpy(device->volume_id, log_lun_ext_entry->volume_id, | |
2245 | sizeof(device->volume_id)); | |
2246 | } | |
2247 | ||
3d46a59a DB |
2248 | if (pqi_is_device_with_sas_address(device)) |
2249 | device->sas_address = get_unaligned_be64(&device->wwid); | |
6c223761 KB |
2250 | |
2251 | new_device_list[num_valid_devices++] = device; | |
2252 | } | |
2253 | ||
2254 | pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); | |
2255 | ||
2256 | out: | |
2257 | list_for_each_entry_safe(device, next, &new_device_list_head, | |
2258 | new_device_list_entry) { | |
2259 | if (device->keep_device) | |
2260 | continue; | |
2261 | list_del(&device->new_device_list_entry); | |
2262 | pqi_free_device(device); | |
2263 | } | |
2264 | ||
2265 | kfree(new_device_list); | |
2266 | kfree(physdev_list); | |
2267 | kfree(logdev_list); | |
2268 | kfree(id_phys); | |
2269 | ||
2270 | return rc; | |
2271 | } | |
2272 | ||
6c223761 KB |
2273 | static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) |
2274 | { | |
530dd8a7 | 2275 | int rc = 0; |
6c223761 KB |
2276 | |
2277 | if (pqi_ctrl_offline(ctrl_info)) | |
2278 | return -ENXIO; | |
2279 | ||
530dd8a7 | 2280 | if (!mutex_trylock(&ctrl_info->scan_mutex)) { |
5f310425 | 2281 | pqi_schedule_rescan_worker_delayed(ctrl_info); |
9946a398 | 2282 | rc = -EINPROGRESS; |
530dd8a7 MR |
2283 | } else { |
2284 | rc = pqi_update_scsi_devices(ctrl_info); | |
2285 | if (rc) | |
2286 | pqi_schedule_rescan_worker_delayed(ctrl_info); | |
2287 | mutex_unlock(&ctrl_info->scan_mutex); | |
2288 | } | |
6c223761 KB |
2289 | |
2290 | return rc; | |
2291 | } | |
2292 | ||
2293 | static void pqi_scan_start(struct Scsi_Host *shost) | |
2294 | { | |
4fd22c13 MR |
2295 | struct pqi_ctrl_info *ctrl_info; |
2296 | ||
2297 | ctrl_info = shost_to_hba(shost); | |
2298 | if (pqi_ctrl_in_ofa(ctrl_info)) | |
2299 | return; | |
2300 | ||
2301 | pqi_scan_scsi_devices(ctrl_info); | |
6c223761 KB |
2302 | } |
2303 | ||
2304 | /* Returns TRUE if scan is finished. */ | |
2305 | ||
2306 | static int pqi_scan_finished(struct Scsi_Host *shost, | |
2307 | unsigned long elapsed_time) | |
2308 | { | |
2309 | struct pqi_ctrl_info *ctrl_info; | |
2310 | ||
2311 | ctrl_info = shost_priv(shost); | |
2312 | ||
2313 | return !mutex_is_locked(&ctrl_info->scan_mutex); | |
2314 | } | |
2315 | ||
061ef06a KB |
2316 | static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info) |
2317 | { | |
2318 | mutex_lock(&ctrl_info->scan_mutex); | |
2319 | mutex_unlock(&ctrl_info->scan_mutex); | |
2320 | } | |
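/*
 * scan_mutex doubles as the "scan in progress" flag: pqi_scan_finished()
 * just tests mutex_is_locked(), and pqi_wait_until_scan_finished() waits
 * for the holder with a bare lock/unlock pair. The same idiom is reused
 * for the LUN-reset and OFA mutexes below.
 */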
2321 | ||
2322 | static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info) | |
2323 | { | |
2324 | mutex_lock(&ctrl_info->lun_reset_mutex); | |
2325 | mutex_unlock(&ctrl_info->lun_reset_mutex); | |
2326 | } | |
2327 | ||
4fd22c13 MR |
2328 | static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) |
2329 | { | |
2330 | mutex_lock(&ctrl_info->ofa_mutex); | |
2331 | mutex_unlock(&ctrl_info->ofa_mutex); | |
2332 | } | |
2333 | ||
583891c9 KB |
2334 | static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info, |
2335 | struct raid_map *raid_map, u64 first_block) | |
6c223761 KB |
2336 | { |
2337 | u32 volume_blk_size; | |
2338 | ||
2339 | /* | |
2340 | * Set the encryption tweak values based on logical block address. | |
2341 | * If the block size is 512, the tweak value is equal to the LBA. | |
2342 | * For other block sizes, the tweak value is (LBA * block size) / 512. |
2343 | */ | |
2344 | volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); | |
2345 | if (volume_blk_size != 512) | |
2346 | first_block = (first_block * volume_blk_size) / 512; | |
2347 | ||
2348 | encryption_info->data_encryption_key_index = | |
2349 | get_unaligned_le16(&raid_map->data_encryption_key_index); | |
2350 | encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); | |
2351 | encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); | |
2352 | } | |
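/*
 * Illustrative example: with a 4096-byte volume block size and
 * first_block = 100, the tweak becomes (100 * 4096) / 512 = 800, so
 * encrypt_tweak_lower = 800 and encrypt_tweak_upper = 0. With 512-byte
 * blocks the tweak is simply the LBA itself.
 */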
2353 | ||
2354 | /* | |
588a63fe | 2355 | * Attempt to perform RAID bypass mapping for a logical volume I/O. |
6c223761 KB |
2356 | */ |
2357 | ||
6702d2c4 DB |
2358 | static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info, |
2359 | struct pqi_scsi_dev_raid_map_data *rmd) | |
281a817f DB |
2360 | { |
2361 | bool is_supported = true; | |
2362 | ||
2363 | switch (rmd->raid_level) { | |
2364 | case SA_RAID_0: | |
2365 | break; | |
2366 | case SA_RAID_1: | |
f6cc2a77 KB |
2367 | if (rmd->is_write && (!ctrl_info->enable_r1_writes || |
2368 | rmd->data_length > ctrl_info->max_write_raid_1_10_2drive)) | |
2369 | is_supported = false; | |
2370 | break; | |
7a012c23 | 2371 | case SA_RAID_TRIPLE: |
f6cc2a77 KB |
2372 | if (rmd->is_write && (!ctrl_info->enable_r1_writes || |
2373 | rmd->data_length > ctrl_info->max_write_raid_1_10_3drive)) | |
281a817f DB |
2374 | is_supported = false; |
2375 | break; | |
2376 | case SA_RAID_5: | |
f6cc2a77 KB |
2377 | if (rmd->is_write && (!ctrl_info->enable_r5_writes || |
2378 | rmd->data_length > ctrl_info->max_write_raid_5_6)) | |
6702d2c4 DB |
2379 | is_supported = false; |
2380 | break; | |
281a817f | 2381 | case SA_RAID_6: |
f6cc2a77 KB |
2382 | if (rmd->is_write && (!ctrl_info->enable_r6_writes || |
2383 | rmd->data_length > ctrl_info->max_write_raid_5_6)) | |
281a817f DB |
2384 | is_supported = false; |
2385 | break; | |
281a817f DB |
2386 | default: |
2387 | is_supported = false; | |
f6cc2a77 | 2388 | break; |
281a817f DB |
2389 | } |
2390 | ||
2391 | return is_supported; | |
2392 | } | |
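/*
 * As written above: reads are bypass-eligible for RAID 0/1/TRIPLE/5/6;
 * writes additionally require the matching enable_r*_writes knob and
 * must fit the per-level max_write_* limit (RAID 0 writes have no extra
 * restriction at this check).
 */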
2393 | ||
6c223761 KB |
2394 | #define PQI_RAID_BYPASS_INELIGIBLE 1 |
2395 | ||
281a817f | 2396 | static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd, |
583891c9 | 2397 | struct pqi_scsi_dev_raid_map_data *rmd) |
6c223761 | 2398 | { |
6c223761 KB |
2399 | /* Check for valid opcode, get LBA and block count. */ |
2400 | switch (scmd->cmnd[0]) { | |
2401 | case WRITE_6: | |
281a817f | 2402 | rmd->is_write = true; |
df561f66 | 2403 | fallthrough; |
6c223761 | 2404 | case READ_6: |
281a817f | 2405 | rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | |
e018ef57 | 2406 | (scmd->cmnd[2] << 8) | scmd->cmnd[3]); |
281a817f DB |
2407 | rmd->block_cnt = (u32)scmd->cmnd[4]; |
2408 | if (rmd->block_cnt == 0) | |
2409 | rmd->block_cnt = 256; | |
6c223761 KB |
2410 | break; |
2411 | case WRITE_10: | |
281a817f | 2412 | rmd->is_write = true; |
df561f66 | 2413 | fallthrough; |
6c223761 | 2414 | case READ_10: |
281a817f DB |
2415 | rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); |
2416 | rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); | |
6c223761 KB |
2417 | break; |
2418 | case WRITE_12: | |
281a817f | 2419 | rmd->is_write = true; |
df561f66 | 2420 | fallthrough; |
6c223761 | 2421 | case READ_12: |
281a817f DB |
2422 | rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); |
2423 | rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]); | |
6c223761 KB |
2424 | break; |
2425 | case WRITE_16: | |
281a817f | 2426 | rmd->is_write = true; |
df561f66 | 2427 | fallthrough; |
6c223761 | 2428 | case READ_16: |
281a817f DB |
2429 | rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]); |
2430 | rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]); | |
6c223761 KB |
2431 | break; |
2432 | default: | |
2433 | /* Process via normal I/O path. */ | |
2434 | return PQI_RAID_BYPASS_INELIGIBLE; | |
2435 | } | |
2436 | ||
281a817f | 2437 | put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length); |
6c223761 | 2438 | |
281a817f DB |
2439 | return 0; |
2440 | } | |
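/*
 * Illustrative decode: a READ_6 CDB of 08 01 02 03 10 00 yields
 * first_block = ((0x01 & 0x1f) << 16) | (0x02 << 8) | 0x03 = 0x010203
 * and block_cnt = 0x10; a transfer-length byte of 0 means 256 blocks
 * (the 6-byte CDB convention).
 */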
6c223761 | 2441 | |
281a817f | 2442 | static int pqi_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
583891c9 | 2443 | struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map) |
281a817f DB |
2444 | { |
2445 | #if BITS_PER_LONG == 32 | |
2446 | u64 tmpdiv; | |
2447 | #endif | |
2448 | ||
2449 | rmd->last_block = rmd->first_block + rmd->block_cnt - 1; | |
6c223761 KB |
2450 | |
2451 | /* Check for invalid block or wraparound. */ | |
281a817f DB |
2452 | if (rmd->last_block >= |
2453 | get_unaligned_le64(&raid_map->volume_blk_cnt) || | |
2454 | rmd->last_block < rmd->first_block) | |
6c223761 KB |
2455 | return PQI_RAID_BYPASS_INELIGIBLE; |
2456 | ||
281a817f | 2457 | rmd->data_disks_per_row = |
583891c9 | 2458 | get_unaligned_le16(&raid_map->data_disks_per_row); |
281a817f DB |
2459 | rmd->strip_size = get_unaligned_le16(&raid_map->strip_size); |
2460 | rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); | |
6c223761 KB |
2461 | |
2462 | /* Calculate stripe information for the request. */ | |
281a817f | 2463 | rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size; |
6c223761 | 2464 | #if BITS_PER_LONG == 32 |
281a817f DB |
2465 | tmpdiv = rmd->first_block; |
2466 | do_div(tmpdiv, rmd->blocks_per_row); | |
2467 | rmd->first_row = tmpdiv; | |
2468 | tmpdiv = rmd->last_block; | |
2469 | do_div(tmpdiv, rmd->blocks_per_row); | |
2470 | rmd->last_row = tmpdiv; | |
2471 | rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row)); | |
2472 | rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row)); | |
2473 | tmpdiv = rmd->first_row_offset; | |
2474 | do_div(tmpdiv, rmd->strip_size); | |
2475 | rmd->first_column = tmpdiv; | |
2476 | tmpdiv = rmd->last_row_offset; | |
2477 | do_div(tmpdiv, rmd->strip_size); | |
2478 | rmd->last_column = tmpdiv; | |
6c223761 | 2479 | #else |
281a817f DB |
2480 | rmd->first_row = rmd->first_block / rmd->blocks_per_row; |
2481 | rmd->last_row = rmd->last_block / rmd->blocks_per_row; | |
2482 | rmd->first_row_offset = (u32)(rmd->first_block - | |
583891c9 | 2483 | (rmd->first_row * rmd->blocks_per_row)); |
281a817f | 2484 | rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * |
583891c9 | 2485 | rmd->blocks_per_row)); |
281a817f DB |
2486 | rmd->first_column = rmd->first_row_offset / rmd->strip_size; |
2487 | rmd->last_column = rmd->last_row_offset / rmd->strip_size; | |
6c223761 KB |
2488 | #endif |
2489 | ||
2490 | /* If this isn't a single row/column, let the controller handle it. */ |
281a817f | 2491 | if (rmd->first_row != rmd->last_row || |
583891c9 | 2492 | rmd->first_column != rmd->last_column) |
6c223761 KB |
2493 | return PQI_RAID_BYPASS_INELIGIBLE; |
2494 | ||
2495 | /* Proceeding with driver mapping. */ | |
281a817f | 2496 | rmd->total_disks_per_row = rmd->data_disks_per_row + |
6c223761 | 2497 | get_unaligned_le16(&raid_map->metadata_disks_per_row); |
281a817f DB |
2498 | rmd->map_row = ((u32)(rmd->first_row >> |
2499 | raid_map->parity_rotation_shift)) % | |
6c223761 | 2500 | get_unaligned_le16(&raid_map->row_cnt); |
281a817f | 2501 | rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) + |
583891c9 | 2502 | rmd->first_column; |
6c223761 | 2503 | |
281a817f DB |
2504 | return 0; |
2505 | } | |
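/*
 * Illustrative numbers: with strip_size = 128 and data_disks_per_row = 3,
 * blocks_per_row = 384. A request for blocks 500..507 maps to
 * first_row = last_row = 1, row offsets 116..123, and
 * first_column = last_column = 0, so it stays on one drive and remains
 * bypass-eligible.
 */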
2506 | ||
281a817f | 2507 | static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd, |
583891c9 | 2508 | struct raid_map *raid_map) |
281a817f DB |
2509 | { |
2510 | #if BITS_PER_LONG == 32 | |
2511 | u64 tmpdiv; | |
2512 | #endif | |
2513 | /* RAID 50/60 */ | |
583891c9 | 2514 | /* Verify first and last block are in same RAID group. */ |
281a817f | 2515 | rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count; |
6c223761 | 2516 | #if BITS_PER_LONG == 32 |
281a817f DB |
2517 | tmpdiv = rmd->first_block; |
2518 | rmd->first_group = do_div(tmpdiv, rmd->stripesize); | |
2519 | tmpdiv = rmd->first_group; | |
2520 | do_div(tmpdiv, rmd->blocks_per_row); | |
2521 | rmd->first_group = tmpdiv; | |
2522 | tmpdiv = rmd->last_block; | |
2523 | rmd->last_group = do_div(tmpdiv, rmd->stripesize); | |
2524 | tmpdiv = rmd->last_group; | |
2525 | do_div(tmpdiv, rmd->blocks_per_row); | |
2526 | rmd->last_group = tmpdiv; | |
6c223761 | 2527 | #else |
281a817f DB |
2528 | rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row; |
2529 | rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row; | |
6c223761 | 2530 | #endif |
281a817f DB |
2531 | if (rmd->first_group != rmd->last_group) |
2532 | return PQI_RAID_BYPASS_INELIGIBLE; | |
6c223761 | 2533 | |
583891c9 | 2534 | /* Verify request is in a single row of RAID 5/6. */ |
6c223761 | 2535 | #if BITS_PER_LONG == 32 |
281a817f DB |
2536 | tmpdiv = rmd->first_block; |
2537 | do_div(tmpdiv, rmd->stripesize); | |
2538 | rmd->first_row = tmpdiv; | |
2539 | rmd->r5or6_first_row = tmpdiv; | |
2540 | tmpdiv = rmd->last_block; | |
2541 | do_div(tmpdiv, rmd->stripesize); | |
2542 | rmd->r5or6_last_row = tmpdiv; | |
6c223761 | 2543 | #else |
281a817f DB |
2544 | rmd->first_row = rmd->r5or6_first_row = |
2545 | rmd->first_block / rmd->stripesize; | |
2546 | rmd->r5or6_last_row = rmd->last_block / rmd->stripesize; | |
6c223761 | 2547 | #endif |
281a817f DB |
2548 | if (rmd->r5or6_first_row != rmd->r5or6_last_row) |
2549 | return PQI_RAID_BYPASS_INELIGIBLE; | |
6c223761 | 2550 | |
583891c9 | 2551 | /* Verify request is in a single column. */ |
6c223761 | 2552 | #if BITS_PER_LONG == 32 |
281a817f DB |
2553 | tmpdiv = rmd->first_block; |
2554 | rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize); | |
2555 | tmpdiv = rmd->first_row_offset; | |
2556 | rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row); | |
2557 | rmd->r5or6_first_row_offset = rmd->first_row_offset; | |
2558 | tmpdiv = rmd->last_block; | |
2559 | rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize); | |
2560 | tmpdiv = rmd->r5or6_last_row_offset; | |
2561 | rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row); | |
2562 | tmpdiv = rmd->r5or6_first_row_offset; | |
2563 | do_div(tmpdiv, rmd->strip_size); | |
2564 | rmd->first_column = rmd->r5or6_first_column = tmpdiv; | |
2565 | tmpdiv = rmd->r5or6_last_row_offset; | |
2566 | do_div(tmpdiv, rmd->strip_size); | |
2567 | rmd->r5or6_last_column = tmpdiv; | |
6c223761 | 2568 | #else |
281a817f | 2569 | rmd->first_row_offset = rmd->r5or6_first_row_offset = |
583891c9 KB |
2570 | (u32)((rmd->first_block % rmd->stripesize) % |
2571 | rmd->blocks_per_row); | |
281a817f DB |
2572 | |
2573 | rmd->r5or6_last_row_offset = | |
2574 | (u32)((rmd->last_block % rmd->stripesize) % | |
2575 | rmd->blocks_per_row); | |
2576 | ||
2577 | rmd->first_column = | |
583891c9 | 2578 | rmd->r5or6_first_row_offset / rmd->strip_size; |
281a817f DB |
2579 | rmd->r5or6_first_column = rmd->first_column; |
2580 | rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size; | |
2581 | #endif | |
2582 | if (rmd->r5or6_first_column != rmd->r5or6_last_column) | |
2583 | return PQI_RAID_BYPASS_INELIGIBLE; | |
2584 | ||
583891c9 | 2585 | /* Request is eligible. */ |
281a817f DB |
2586 | rmd->map_row = |
2587 | ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) % | |
2588 | get_unaligned_le16(&raid_map->row_cnt); | |
6c223761 | 2589 | |
281a817f DB |
2590 | rmd->map_index = (rmd->first_group * |
2591 | (get_unaligned_le16(&raid_map->row_cnt) * | |
2592 | rmd->total_disks_per_row)) + | |
2593 | (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column; | |
6c223761 | 2594 | |
6702d2c4 DB |
2595 | if (rmd->is_write) { |
2596 | u32 index; | |
2597 | ||
2598 | /* | |
2599 | * p_parity_it_nexus and q_parity_it_nexus are the AIO handles of |
2600 | * the parity entries inside the device's raid_map. |
2601 | * |
2602 | * A device's RAID map is bounded by the number of RAID disks squared. |
2603 | * |
2604 | * The device's RAID map size is checked during device |
2605 | * initialization. |
2606 | */ | |
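/*
 * Illustrative example: RAID 5 with 3 data drives plus 1 parity drive
 * gives total_disks_per_row = 4 and metadata_disks_per_row = 1; for
 * map_index = 1, index = DIV_ROUND_UP(2, 4) * 4 - 1 = 3, the parity
 * entry that ends map row 0. For RAID 6 the Q parity handle comes from
 * the following entry (index + 1).
 */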
2607 | index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row); | |
2608 | index *= rmd->total_disks_per_row; | |
2609 | index -= get_unaligned_le16(&raid_map->metadata_disks_per_row); | |
2610 | ||
2611 | rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle; | |
2612 | if (rmd->raid_level == SA_RAID_6) { | |
2613 | rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle; | |
2614 | rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1]; | |
2615 | } | |
2616 | if (rmd->blocks_per_row == 0) | |
2617 | return PQI_RAID_BYPASS_INELIGIBLE; | |
2618 | #if BITS_PER_LONG == 32 | |
2619 | tmpdiv = rmd->first_block; | |
2620 | do_div(tmpdiv, rmd->blocks_per_row); | |
2621 | rmd->row = tmpdiv; | |
2622 | #else | |
2623 | rmd->row = rmd->first_block / rmd->blocks_per_row; | |
2624 | #endif | |
2625 | } | |
2626 | ||
281a817f DB |
2627 | return 0; |
2628 | } | |
2629 | ||
2630 | static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd) | |
2631 | { | |
2632 | /* Build the new CDB for the physical disk I/O. */ | |
2633 | if (rmd->disk_block > 0xffffffff) { | |
2634 | rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16; | |
2635 | rmd->cdb[1] = 0; | |
2636 | put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]); | |
2637 | put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]); | |
2638 | rmd->cdb[14] = 0; | |
2639 | rmd->cdb[15] = 0; | |
2640 | rmd->cdb_length = 16; | |
2641 | } else { | |
2642 | rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10; | |
2643 | rmd->cdb[1] = 0; | |
2644 | put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]); | |
2645 | rmd->cdb[6] = 0; | |
2646 | put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]); | |
2647 | rmd->cdb[9] = 0; | |
2648 | rmd->cdb_length = 10; | |
2649 | } | |
2650 | } | |
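/*
 * Note: the 10-byte form carries only a 16-bit block count; the caller
 * has already rejected requests with disk_block_cnt > 0xffff, and any
 * start block above 32 bits takes the 16-byte CDB path above.
 */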
2651 | ||
7a012c23 | 2652 | static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map, |
583891c9 | 2653 | struct pqi_scsi_dev_raid_map_data *rmd) |
7a012c23 DB |
2654 | { |
2655 | u32 index; | |
2656 | u32 group; | |
2657 | ||
2658 | group = rmd->map_index / rmd->data_disks_per_row; | |
2659 | ||
2660 | index = rmd->map_index - (group * rmd->data_disks_per_row); | |
2661 | rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle; | |
2662 | index += rmd->data_disks_per_row; | |
2663 | rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle; | |
2664 | if (rmd->layout_map_count > 2) { | |
2665 | index += rmd->data_disks_per_row; | |
2666 | rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle; | |
2667 | } | |
2668 | ||
2669 | rmd->num_it_nexus_entries = rmd->layout_map_count; | |
2670 | } | |
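/*
 * Illustrative example (assuming a triple mirror whose raid_map has
 * data_disks_per_row = 2 and layout_map_count = 3): map_index = 3 gives
 * group = 1 and index = 1, so the IT nexus handles come from
 * disk_data[1], disk_data[3], and disk_data[5], one copy per mirror
 * group, one map row apart.
 */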
2671 | ||
281a817f DB |
2672 | static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, |
2673 | struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, | |
2674 | struct pqi_queue_group *queue_group) | |
2675 | { | |
281a817f | 2676 | int rc; |
7a012c23 DB |
2677 | struct raid_map *raid_map; |
2678 | u32 group; | |
2679 | u32 next_bypass_group; | |
281a817f DB |
2680 | struct pqi_encryption_info *encryption_info_ptr; |
2681 | struct pqi_encryption_info encryption_info; | |
583891c9 | 2682 | struct pqi_scsi_dev_raid_map_data rmd = { 0 }; |
281a817f DB |
2683 | |
2684 | rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); | |
2685 | if (rc) | |
2686 | return PQI_RAID_BYPASS_INELIGIBLE; | |
2687 | ||
2688 | rmd.raid_level = device->raid_level; | |
2689 | ||
6702d2c4 | 2690 | if (!pqi_aio_raid_level_supported(ctrl_info, &rmd)) |
281a817f DB |
2691 | return PQI_RAID_BYPASS_INELIGIBLE; |
2692 | ||
2693 | if (unlikely(rmd.block_cnt == 0)) | |
2694 | return PQI_RAID_BYPASS_INELIGIBLE; | |
2695 | ||
2696 | raid_map = device->raid_map; | |
6c223761 | 2697 | |
281a817f DB |
2698 | rc = pqi_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map); |
2699 | if (rc) | |
2700 | return PQI_RAID_BYPASS_INELIGIBLE; | |
6c223761 | 2701 | |
7a012c23 DB |
2702 | if (device->raid_level == SA_RAID_1 || |
2703 | device->raid_level == SA_RAID_TRIPLE) { | |
2704 | if (rmd.is_write) { | |
2705 | pqi_calc_aio_r1_nexus(raid_map, &rmd); | |
2706 | } else { | |
2707 | group = device->next_bypass_group; | |
2708 | next_bypass_group = group + 1; | |
2709 | if (next_bypass_group >= rmd.layout_map_count) | |
2710 | next_bypass_group = 0; | |
2711 | device->next_bypass_group = next_bypass_group; | |
2712 | rmd.map_index += group * rmd.data_disks_per_row; | |
2713 | } | |
281a817f | 2714 | } else if ((device->raid_level == SA_RAID_5 || |
6702d2c4 DB |
2715 | device->raid_level == SA_RAID_6) && |
2716 | (rmd.layout_map_count > 1 || rmd.is_write)) { | |
281a817f DB |
2717 | rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map); |
2718 | if (rc) | |
2719 | return PQI_RAID_BYPASS_INELIGIBLE; | |
6c223761 KB |
2720 | } |
2721 | ||
281a817f DB |
2722 | if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES)) |
2723 | return PQI_RAID_BYPASS_INELIGIBLE; | |
2724 | ||
2725 | rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle; | |
2726 | rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + | |
2727 | rmd.first_row * rmd.strip_size + | |
2728 | (rmd.first_row_offset - rmd.first_column * rmd.strip_size); | |
2729 | rmd.disk_block_cnt = rmd.block_cnt; | |
6c223761 KB |
2730 | |
2731 | /* Handle differing logical/physical block sizes. */ | |
2732 | if (raid_map->phys_blk_shift) { | |
281a817f DB |
2733 | rmd.disk_block <<= raid_map->phys_blk_shift; |
2734 | rmd.disk_block_cnt <<= raid_map->phys_blk_shift; | |
6c223761 KB |
2735 | } |
2736 | ||
281a817f | 2737 | if (unlikely(rmd.disk_block_cnt > 0xffff)) |
6c223761 KB |
2738 | return PQI_RAID_BYPASS_INELIGIBLE; |
2739 | ||
281a817f | 2740 | pqi_set_aio_cdb(&rmd); |
6c223761 | 2741 | |
583891c9 | 2742 | if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) { |
f6cc2a77 KB |
2743 | if (rmd.data_length > device->max_transfer_encrypted) |
2744 | return PQI_RAID_BYPASS_INELIGIBLE; | |
583891c9 | 2745 | pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block); |
6c223761 KB |
2746 | encryption_info_ptr = &encryption_info; |
2747 | } else { | |
2748 | encryption_info_ptr = NULL; | |
2749 | } | |
2750 | ||
6702d2c4 DB |
2751 | if (rmd.is_write) { |
2752 | switch (device->raid_level) { | |
7a012c23 DB |
2753 | case SA_RAID_1: |
2754 | case SA_RAID_TRIPLE: | |
2755 | return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group, | |
2756 | encryption_info_ptr, device, &rmd); | |
6702d2c4 DB |
2757 | case SA_RAID_5: |
2758 | case SA_RAID_6: | |
2759 | return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group, | |
583891c9 | 2760 | encryption_info_ptr, device, &rmd); |
6702d2c4 | 2761 | } |
6702d2c4 DB |
2762 | } |
2763 | ||
f6cc2a77 KB |
2764 | return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle, |
2765 | rmd.cdb, rmd.cdb_length, queue_group, | |
2766 | encryption_info_ptr, true); | |
6c223761 KB |
2767 | } |
2768 | ||
2769 | #define PQI_STATUS_IDLE 0x0 | |
2770 | ||
2771 | #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 | |
2772 | #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 | |
2773 | ||
2774 | #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 | |
2775 | #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 | |
2776 | #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 | |
2777 | #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 | |
2778 | #define PQI_DEVICE_STATE_ERROR 0x4 | |
2779 | ||
2780 | #define PQI_MODE_READY_TIMEOUT_SECS 30 | |
2781 | #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 | |
2782 | ||
2783 | static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) | |
2784 | { | |
2785 | struct pqi_device_registers __iomem *pqi_registers; | |
2786 | unsigned long timeout; | |
2787 | u64 signature; | |
2788 | u8 status; | |
2789 | ||
2790 | pqi_registers = ctrl_info->pqi_registers; | |
4fd22c13 | 2791 | timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies; |
6c223761 KB |
2792 | |
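/* A single deadline spans all three polling loops below. */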
2793 | while (1) { | |
2794 | signature = readq(&pqi_registers->signature); | |
2795 | if (memcmp(&signature, PQI_DEVICE_SIGNATURE, | |
2796 | sizeof(signature)) == 0) | |
2797 | break; | |
2798 | if (time_after(jiffies, timeout)) { | |
2799 | dev_err(&ctrl_info->pci_dev->dev, | |
2800 | "timed out waiting for PQI signature\n"); | |
2801 | return -ETIMEDOUT; | |
2802 | } | |
2803 | msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); | |
2804 | } | |
2805 | ||
2806 | while (1) { | |
2807 | status = readb(&pqi_registers->function_and_status_code); | |
2808 | if (status == PQI_STATUS_IDLE) | |
2809 | break; | |
2810 | if (time_after(jiffies, timeout)) { | |
2811 | dev_err(&ctrl_info->pci_dev->dev, | |
2812 | "timed out waiting for PQI IDLE\n"); | |
2813 | return -ETIMEDOUT; | |
2814 | } | |
2815 | msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); | |
2816 | } | |
2817 | ||
2818 | while (1) { | |
2819 | if (readl(&pqi_registers->device_status) == | |
2820 | PQI_DEVICE_STATE_ALL_REGISTERS_READY) | |
2821 | break; | |
2822 | if (time_after(jiffies, timeout)) { | |
2823 | dev_err(&ctrl_info->pci_dev->dev, | |
2824 | "timed out waiting for PQI all registers ready\n"); | |
2825 | return -ETIMEDOUT; | |
2826 | } | |
2827 | msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); | |
2828 | } | |
2829 | ||
2830 | return 0; | |
2831 | } | |
2832 | ||
2833 | static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) | |
2834 | { | |
2835 | struct pqi_scsi_dev *device; | |
2836 | ||
2837 | device = io_request->scmd->device->hostdata; | |
588a63fe | 2838 | device->raid_bypass_enabled = false; |
376fb880 | 2839 | device->aio_enabled = false; |
6c223761 KB |
2840 | } |
2841 | ||
d87d5474 | 2842 | static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) |
6c223761 KB |
2843 | { |
2844 | struct pqi_ctrl_info *ctrl_info; | |
e58081a7 | 2845 | struct pqi_scsi_dev *device; |
6c223761 | 2846 | |
03b288cf KB |
2847 | device = sdev->hostdata; |
2848 | if (device->device_offline) | |
2849 | return; | |
2850 | ||
2851 | device->device_offline = true; | |
03b288cf KB |
2852 | ctrl_info = shost_to_hba(sdev->host); |
2853 | pqi_schedule_rescan_worker(ctrl_info); | |
a9a68101 | 2854 | dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", |
03b288cf KB |
2855 | path, ctrl_info->scsi_host->host_no, device->bus, |
2856 | device->target, device->lun); | |
6c223761 KB |
2857 | } |
2858 | ||
2859 | static void pqi_process_raid_io_error(struct pqi_io_request *io_request) | |
2860 | { | |
2861 | u8 scsi_status; | |
2862 | u8 host_byte; | |
2863 | struct scsi_cmnd *scmd; | |
2864 | struct pqi_raid_error_info *error_info; | |
2865 | size_t sense_data_length; | |
2866 | int residual_count; | |
2867 | int xfer_count; | |
2868 | struct scsi_sense_hdr sshdr; | |
2869 | ||
2870 | scmd = io_request->scmd; | |
2871 | if (!scmd) | |
2872 | return; | |
2873 | ||
2874 | error_info = io_request->error_info; | |
2875 | scsi_status = error_info->status; | |
2876 | host_byte = DID_OK; | |
2877 | ||
f5b63206 KB |
2878 | switch (error_info->data_out_result) { |
2879 | case PQI_DATA_IN_OUT_GOOD: | |
2880 | break; | |
2881 | case PQI_DATA_IN_OUT_UNDERFLOW: | |
6c223761 KB |
2882 | xfer_count = |
2883 | get_unaligned_le32(&error_info->data_out_transferred); | |
2884 | residual_count = scsi_bufflen(scmd) - xfer_count; | |
2885 | scsi_set_resid(scmd, residual_count); | |
2886 | if (xfer_count < scmd->underflow) | |
2887 | host_byte = DID_SOFT_ERROR; | |
f5b63206 KB |
2888 | break; |
2889 | case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: | |
2890 | case PQI_DATA_IN_OUT_ABORTED: | |
2891 | host_byte = DID_ABORT; | |
2892 | break; | |
2893 | case PQI_DATA_IN_OUT_TIMEOUT: | |
2894 | host_byte = DID_TIME_OUT; | |
2895 | break; | |
2896 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: | |
2897 | case PQI_DATA_IN_OUT_PROTOCOL_ERROR: | |
2898 | case PQI_DATA_IN_OUT_BUFFER_ERROR: | |
2899 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: | |
2900 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: | |
2901 | case PQI_DATA_IN_OUT_ERROR: | |
2902 | case PQI_DATA_IN_OUT_HARDWARE_ERROR: | |
2903 | case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: | |
2904 | case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: | |
2905 | case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: | |
2906 | case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: | |
2907 | case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: | |
2908 | case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: | |
2909 | case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: | |
2910 | case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: | |
2911 | case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: | |
2912 | default: | |
2913 | host_byte = DID_ERROR; | |
2914 | break; | |
6c223761 KB |
2915 | } |
2916 | ||
2917 | sense_data_length = get_unaligned_le16(&error_info->sense_data_length); | |
2918 | if (sense_data_length == 0) | |
2919 | sense_data_length = | |
2920 | get_unaligned_le16(&error_info->response_data_length); | |
2921 | if (sense_data_length) { | |
2922 | if (sense_data_length > sizeof(error_info->data)) | |
2923 | sense_data_length = sizeof(error_info->data); | |
2924 | ||
2925 | if (scsi_status == SAM_STAT_CHECK_CONDITION && | |
2926 | scsi_normalize_sense(error_info->data, | |
2927 | sense_data_length, &sshdr) && | |
2928 | sshdr.sense_key == HARDWARE_ERROR && | |
8ef860ae | 2929 | sshdr.asc == 0x3e) { |
441b7195 EV |
2930 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); |
2931 | struct pqi_scsi_dev *device = scmd->device->hostdata; | |
2932 | ||
8ef860ae EV |
2933 | switch (sshdr.ascq) { |
2934 | case 0x1: /* LOGICAL UNIT FAILURE */ | |
2935 | if (printk_ratelimit()) | |
2936 | scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n", | |
2937 | ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); | |
2938 | pqi_take_device_offline(scmd->device, "RAID"); | |
2939 | host_byte = DID_NO_CONNECT; | |
2940 | break; | |
2941 | ||
2942 | default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */ | |
2943 | if (printk_ratelimit()) | |
2944 | scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n", | |
2945 | sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); | |
2946 | break; | |
2947 | } | |
6c223761 KB |
2948 | } |
2949 | ||
2950 | if (sense_data_length > SCSI_SENSE_BUFFERSIZE) | |
2951 | sense_data_length = SCSI_SENSE_BUFFERSIZE; | |
2952 | memcpy(scmd->sense_buffer, error_info->data, | |
2953 | sense_data_length); | |
2954 | } | |
2955 | ||
2956 | scmd->result = scsi_status; | |
2957 | set_host_byte(scmd, host_byte); | |
2958 | } | |
2959 | ||
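/*
 * A small sketch of the underflow accounting in
 * pqi_process_raid_io_error(): the residual is the part of the buffer
 * the controller did not transfer, and a transfer shorter than the
 * command's underflow limit is flagged as a soft error. The struct and
 * values here are illustrative only.
 */
#include <stdio.h>

struct xfer_result {
	int residual;
	int soft_error;	/* maps to DID_SOFT_ERROR in the driver */
};

static struct xfer_result account_underflow(int bufflen, int transferred,
	int underflow)
{
	struct xfer_result r;

	r.residual = bufflen - transferred;
	r.soft_error = transferred < underflow;
	return r;
}

int main(void)
{
	struct xfer_result r = account_underflow(4096, 512, 1024);

	printf("residual=%d soft_error=%d\n", r.residual, r.soft_error);
	return 0;
}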
2960 | static void pqi_process_aio_io_error(struct pqi_io_request *io_request) | |
2961 | { | |
2962 | u8 scsi_status; | |
2963 | u8 host_byte; | |
2964 | struct scsi_cmnd *scmd; | |
2965 | struct pqi_aio_error_info *error_info; | |
2966 | size_t sense_data_length; | |
2967 | int residual_count; | |
2968 | int xfer_count; | |
2969 | bool device_offline; | |
2970 | ||
2971 | scmd = io_request->scmd; | |
2972 | error_info = io_request->error_info; | |
2973 | host_byte = DID_OK; | |
2974 | sense_data_length = 0; | |
2975 | device_offline = false; | |
2976 | ||
2977 | switch (error_info->service_response) { | |
2978 | case PQI_AIO_SERV_RESPONSE_COMPLETE: | |
2979 | scsi_status = error_info->status; | |
2980 | break; | |
2981 | case PQI_AIO_SERV_RESPONSE_FAILURE: | |
2982 | switch (error_info->status) { | |
2983 | case PQI_AIO_STATUS_IO_ABORTED: | |
2984 | scsi_status = SAM_STAT_TASK_ABORTED; | |
2985 | break; | |
2986 | case PQI_AIO_STATUS_UNDERRUN: | |
2987 | scsi_status = SAM_STAT_GOOD; | |
2988 | residual_count = get_unaligned_le32( | |
2989 | &error_info->residual_count); | |
2990 | scsi_set_resid(scmd, residual_count); | |
2991 | xfer_count = scsi_bufflen(scmd) - residual_count; | |
2992 | if (xfer_count < scmd->underflow) | |
2993 | host_byte = DID_SOFT_ERROR; | |
2994 | break; | |
2995 | case PQI_AIO_STATUS_OVERRUN: | |
2996 | scsi_status = SAM_STAT_GOOD; | |
2997 | break; | |
2998 | case PQI_AIO_STATUS_AIO_PATH_DISABLED: | |
2999 | pqi_aio_path_disabled(io_request); | |
3000 | scsi_status = SAM_STAT_GOOD; | |
3001 | io_request->status = -EAGAIN; | |
3002 | break; | |
3003 | case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: | |
3004 | case PQI_AIO_STATUS_INVALID_DEVICE: | |
376fb880 KB |
3005 | if (!io_request->raid_bypass) { |
3006 | device_offline = true; | |
3007 | pqi_take_device_offline(scmd->device, "AIO"); | |
3008 | host_byte = DID_NO_CONNECT; | |
3009 | } | |
6c223761 KB |
3010 | scsi_status = SAM_STAT_CHECK_CONDITION; |
3011 | break; | |
3012 | case PQI_AIO_STATUS_IO_ERROR: | |
3013 | default: | |
3014 | scsi_status = SAM_STAT_CHECK_CONDITION; | |
3015 | break; | |
3016 | } | |
3017 | break; | |
3018 | case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: | |
3019 | case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: | |
3020 | scsi_status = SAM_STAT_GOOD; | |
3021 | break; | |
3022 | case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: | |
3023 | case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: | |
3024 | default: | |
3025 | scsi_status = SAM_STAT_CHECK_CONDITION; | |
3026 | break; | |
3027 | } | |
3028 | ||
3029 | if (error_info->data_present) { | |
3030 | sense_data_length = | |
3031 | get_unaligned_le16(&error_info->data_length); | |
3032 | if (sense_data_length) { | |
3033 | if (sense_data_length > sizeof(error_info->data)) | |
3034 | sense_data_length = sizeof(error_info->data); | |
3035 | if (sense_data_length > SCSI_SENSE_BUFFERSIZE) | |
3036 | sense_data_length = SCSI_SENSE_BUFFERSIZE; | |
3037 | memcpy(scmd->sense_buffer, error_info->data, | |
3038 | sense_data_length); | |
3039 | } | |
3040 | } | |
3041 | ||
3042 | if (device_offline && sense_data_length == 0) | |
3043 | scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, | |
3044 | 0x3e, 0x1); | |
3045 | ||
3046 | scmd->result = scsi_status; | |
3047 | set_host_byte(scmd, host_byte); | |
3048 | } | |
3049 | ||
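/*
 * Sketch of the two-stage clamp both error handlers apply to sense
 * data: the copy length is limited first by the error buffer element,
 * then by the midlayer's sense buffer. The sizes below are
 * illustrative; in the driver they are sizeof(error_info->data) and
 * SCSI_SENSE_BUFFERSIZE.
 */
#include <stddef.h>
#include <stdio.h>

static size_t clamp_sense_length(size_t reported, size_t element_size,
	size_t sense_buffer_size)
{
	if (reported > element_size)
		reported = element_size;
	if (reported > sense_buffer_size)
		reported = sense_buffer_size;
	return reported;
}

int main(void)
{
	/* A reported length of 512 ends up clamped to 96. */
	printf("%zu\n", clamp_sense_length(512, 256, 96));
	return 0;
}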
3050 | static void pqi_process_io_error(unsigned int iu_type, | |
3051 | struct pqi_io_request *io_request) | |
3052 | { | |
3053 | switch (iu_type) { | |
3054 | case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: | |
3055 | pqi_process_raid_io_error(io_request); | |
3056 | break; | |
3057 | case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: | |
3058 | pqi_process_aio_io_error(io_request); | |
3059 | break; | |
3060 | } | |
3061 | } | |
3062 | ||
3063 | static int pqi_interpret_task_management_response( | |
3064 | struct pqi_task_management_response *response) | |
3065 | { | |
3066 | int rc; | |
3067 | ||
3068 | switch (response->response_code) { | |
b17f0486 KB |
3069 | case SOP_TMF_COMPLETE: |
3070 | case SOP_TMF_FUNCTION_SUCCEEDED: | |
6c223761 KB |
3071 | rc = 0; |
3072 | break; | |
3406384b MR |
3073 | case SOP_TMF_REJECTED: |
3074 | rc = -EAGAIN; | |
3075 | break; | |
6c223761 KB |
3076 | default: |
3077 | rc = -EIO; | |
3078 | break; | |
3079 | } | |
3080 | ||
3081 | return rc; | |
3082 | } | |
3083 | ||
9e68cccc | 3084 | static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info) |
6c223761 | 3085 | { |
9e68cccc KB |
3086 | pqi_take_ctrl_offline(ctrl_info); |
3087 | } | |
3088 | ||
3089 | static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) | |
6c223761 | 3090 | { |
9e68cccc | 3091 | int num_responses; |
6c223761 KB |
3092 | pqi_index_t oq_pi; |
3093 | pqi_index_t oq_ci; | |
3094 | struct pqi_io_request *io_request; | |
3095 | struct pqi_io_response *response; | |
3096 | u16 request_id; | |
3097 | ||
3098 | num_responses = 0; | |
3099 | oq_ci = queue_group->oq_ci_copy; | |
3100 | ||
3101 | while (1) { | |
dac12fbc | 3102 | oq_pi = readl(queue_group->oq_pi); |
9e68cccc KB |
3103 | if (oq_pi >= ctrl_info->num_elements_per_oq) { |
3104 | pqi_invalid_response(ctrl_info); | |
3105 | dev_err(&ctrl_info->pci_dev->dev, | |
3106 | "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", | |
3107 | oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); | |
3108 | return -1; | |
3109 | } | |
6c223761 KB |
3110 | if (oq_pi == oq_ci) |
3111 | break; | |
3112 | ||
3113 | num_responses++; | |
3114 | response = queue_group->oq_element_array + | |
3115 | (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); | |
3116 | ||
3117 | request_id = get_unaligned_le16(&response->request_id); | |
9e68cccc KB |
3118 | if (request_id >= ctrl_info->max_io_slots) { |
3119 | pqi_invalid_response(ctrl_info); | |
3120 | dev_err(&ctrl_info->pci_dev->dev, | |
3121 | "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n", | |
3122 | request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); | |
3123 | return -1; | |
3124 | } | |
6c223761 KB |
3125 | |
3126 | io_request = &ctrl_info->io_request_pool[request_id]; | |
9e68cccc KB |
3127 | if (atomic_read(&io_request->refcount) == 0) { |
3128 | pqi_invalid_response(ctrl_info); | |
3129 | dev_err(&ctrl_info->pci_dev->dev, | |
3130 | "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n", | |
3131 | request_id, oq_pi, oq_ci); | |
3132 | return -1; | |
3133 | } | |
6c223761 KB |
3134 | |
3135 | switch (response->header.iu_type) { | |
3136 | case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: | |
3137 | case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: | |
2ba55c98 KB |
3138 | if (io_request->scmd) |
3139 | io_request->scmd->result = 0; | |
df561f66 | 3140 | fallthrough; |
6c223761 KB |
3141 | case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: |
3142 | break; | |
b212c251 KB |
3143 | case PQI_RESPONSE_IU_VENDOR_GENERAL: |
3144 | io_request->status = | |
3145 | get_unaligned_le16( | |
583891c9 | 3146 | &((struct pqi_vendor_general_response *)response)->status); |
b212c251 | 3147 | break; |
6c223761 KB |
3148 | case PQI_RESPONSE_IU_TASK_MANAGEMENT: |
3149 | io_request->status = | |
3150 | pqi_interpret_task_management_response( | |
3151 | (void *)response); | |
3152 | break; | |
3153 | case PQI_RESPONSE_IU_AIO_PATH_DISABLED: | |
3154 | pqi_aio_path_disabled(io_request); | |
3155 | io_request->status = -EAGAIN; | |
3156 | break; | |
3157 | case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: | |
3158 | case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: | |
3159 | io_request->error_info = ctrl_info->error_buffer + | |
3160 | (get_unaligned_le16(&response->error_index) * | |
3161 | PQI_ERROR_BUFFER_ELEMENT_LENGTH); | |
9e68cccc | 3162 | pqi_process_io_error(response->header.iu_type, io_request); |
6c223761 KB |
3163 | break; |
3164 | default: | |
9e68cccc | 3165 | pqi_invalid_response(ctrl_info); |
6c223761 | 3166 | dev_err(&ctrl_info->pci_dev->dev, |
9e68cccc KB |
3167 | "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n", |
3168 | response->header.iu_type, oq_pi, oq_ci); | |
3169 | return -1; | |
6c223761 KB |
3170 | } |
3171 | ||
9e68cccc | 3172 | io_request->io_complete_callback(io_request, io_request->context); |
6c223761 KB |
3173 | |
3174 | /* | |
3175 | * Note that the I/O request structure CANNOT BE TOUCHED after | |
3176 | * returning from the I/O completion callback! | |
3177 | */ | |
6c223761 KB |
3178 | oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; |
3179 | } | |
3180 | ||
3181 | if (num_responses) { | |
3182 | queue_group->oq_ci_copy = oq_ci; | |
3183 | writel(oq_ci, queue_group->oq_ci); | |
3184 | } | |
3185 | ||
3186 | return num_responses; | |
3187 | } | |
3188 | ||
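/*
 * A user-space sketch of the consumer side of an outbound ring as
 * implemented in pqi_process_io_intr(): snapshot the producer index,
 * reject out-of-range values, drain elements until ci catches up, then
 * publish the new consumer index once at the end. RING_ELEMENTS and the
 * arrays are illustrative, not driver structures.
 */
#include <stdio.h>

#define RING_ELEMENTS 8

static unsigned int ring[RING_ELEMENTS];
static unsigned int oq_pi;	/* advanced by the "controller" */
static unsigned int oq_ci;	/* owned by the "driver" */

static int drain_ring(void)
{
	int num = 0;
	unsigned int pi = oq_pi;	/* readl() in the driver */

	if (pi >= RING_ELEMENTS)
		return -1;	/* invalid response: take ctrl offline */

	while (oq_ci != pi) {
		printf("consumed element %u (value %u)\n", oq_ci, ring[oq_ci]);
		oq_ci = (oq_ci + 1) % RING_ELEMENTS;
		num++;
	}
	/* writel(oq_ci, ...) in the driver tells HW the slots are free */
	return num;
}

int main(void)
{
	ring[0] = 100;
	ring[1] = 101;
	oq_pi = 2;
	printf("drained %d\n", drain_ring());
	return 0;
}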
3189 | static inline unsigned int pqi_num_elements_free(unsigned int pi, | |
df7a1fcf | 3190 | unsigned int ci, unsigned int elements_in_queue) |
6c223761 KB |
3191 | { |
3192 | unsigned int num_elements_used; | |
3193 | ||
3194 | if (pi >= ci) | |
3195 | num_elements_used = pi - ci; | |
3196 | else | |
3197 | num_elements_used = elements_in_queue - ci + pi; | |
3198 | ||
3199 | return elements_in_queue - num_elements_used - 1; | |
3200 | } | |
3201 | ||
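/*
 * The free-space formula above reserves one slot so that pi == ci always
 * means "empty" rather than "full". A quick user-space check of the
 * wrapped and unwrapped cases (the queue size is illustrative):
 */
#include <assert.h>
#include <stdio.h>

static unsigned int ring_free(unsigned int pi, unsigned int ci,
	unsigned int elements)
{
	unsigned int used = (pi >= ci) ? pi - ci : elements - ci + pi;

	return elements - used - 1;
}

int main(void)
{
	assert(ring_free(0, 0, 8) == 7);	/* empty: one slot reserved */
	assert(ring_free(5, 2, 8) == 4);	/* unwrapped */
	assert(ring_free(1, 3, 8) == 1);	/* producer wrapped past end */
	printf("ring_free checks passed\n");
	return 0;
}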
98f87667 | 3202 | static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, |
6c223761 KB |
3203 | struct pqi_event_acknowledge_request *iu, size_t iu_length) |
3204 | { | |
3205 | pqi_index_t iq_pi; | |
3206 | pqi_index_t iq_ci; | |
3207 | unsigned long flags; | |
3208 | void *next_element; | |
6c223761 KB |
3209 | struct pqi_queue_group *queue_group; |
3210 | ||
3211 | queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; | |
3212 | put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); | |
3213 | ||
6c223761 KB |
3214 | while (1) { |
3215 | spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); | |
3216 | ||
3217 | iq_pi = queue_group->iq_pi_copy[RAID_PATH]; | |
dac12fbc | 3218 | iq_ci = readl(queue_group->iq_ci[RAID_PATH]); |
6c223761 KB |
3219 | |
3220 | if (pqi_num_elements_free(iq_pi, iq_ci, | |
3221 | ctrl_info->num_elements_per_iq)) | |
3222 | break; | |
3223 | ||
3224 | spin_unlock_irqrestore( | |
3225 | &queue_group->submit_lock[RAID_PATH], flags); | |
3226 | ||
98f87667 | 3227 | if (pqi_ctrl_offline(ctrl_info)) |
6c223761 | 3228 | return; |
6c223761 KB |
3229 | } |
3230 | ||
3231 | next_element = queue_group->iq_element_array[RAID_PATH] + | |
3232 | (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | |
3233 | ||
3234 | memcpy(next_element, iu, iu_length); | |
3235 | ||
3236 | iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; | |
6c223761 KB |
3237 | queue_group->iq_pi_copy[RAID_PATH] = iq_pi; |
3238 | ||
3239 | /* | |
3240 | * This write notifies the controller that an IU is available to be | |
3241 | * processed. | |
3242 | */ | |
3243 | writel(iq_pi, queue_group->iq_pi[RAID_PATH]); | |
3244 | ||
3245 | spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); | |
6c223761 KB |
3246 | } |
3247 | ||
3248 | static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, | |
3249 | struct pqi_event *event) | |
3250 | { | |
3251 | struct pqi_event_acknowledge_request request; | |
3252 | ||
3253 | memset(&request, 0, sizeof(request)); | |
3254 | ||
3255 | request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; | |
3256 | put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, | |
3257 | &request.header.iu_length); | |
3258 | request.event_type = event->event_type; | |
3259 | request.event_id = event->event_id; | |
3260 | request.additional_event_id = event->additional_event_id; | |
3261 | ||
98f87667 | 3262 | pqi_send_event_ack(ctrl_info, &request, sizeof(request)); |
6c223761 KB |
3263 | } |
3264 | ||
4fd22c13 MR |
3265 | #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30 |
3266 | #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1 | |
3267 | ||
3268 | static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( | |
3269 | struct pqi_ctrl_info *ctrl_info) | |
6c223761 | 3270 | { |
4fd22c13 | 3271 | u8 status; |
583891c9 | 3272 | unsigned long timeout; |
6c223761 | 3273 | |
4fd22c13 | 3274 | timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies; |
6c223761 | 3275 | |
4fd22c13 MR |
3276 | while (1) { |
3277 | status = pqi_read_soft_reset_status(ctrl_info); | |
3278 | if (status & PQI_SOFT_RESET_INITIATE) | |
3279 | return RESET_INITIATE_DRIVER; | |
3280 | ||
3281 | if (status & PQI_SOFT_RESET_ABORT) | |
3282 | return RESET_ABORT; | |
3283 | ||
3284 | if (time_after(jiffies, timeout)) { | |
3285 | dev_err(&ctrl_info->pci_dev->dev, | |
3286 | "timed out waiting for soft reset status\n"); | |
3287 | return RESET_TIMEDOUT; | |
3288 | } | |
3289 | ||
3290 | if (!sis_is_firmware_running(ctrl_info)) | |
3291 | return RESET_NORESPONSE; | |
3292 | ||
3293 | ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS); | |
3294 | } | |
3295 | } | |
3296 | ||
3297 | static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info, | |
694c5d5b | 3298 | enum pqi_soft_reset_status reset_status) |
4fd22c13 MR |
3299 | { |
3300 | int rc; | |
3301 | ||
3302 | switch (reset_status) { | |
3303 | case RESET_INITIATE_DRIVER: | |
4fd22c13 MR |
3304 | case RESET_TIMEDOUT: |
3305 | dev_info(&ctrl_info->pci_dev->dev, | |
3306 | "resetting controller %u\n", ctrl_info->ctrl_id); | |
3307 | sis_soft_reset(ctrl_info); | |
df561f66 | 3308 | fallthrough; |
4fd22c13 MR |
3309 | case RESET_INITIATE_FIRMWARE: |
3310 | rc = pqi_ofa_ctrl_restart(ctrl_info); | |
3311 | pqi_ofa_free_host_buffer(ctrl_info); | |
3312 | dev_info(&ctrl_info->pci_dev->dev, | |
3313 | "Online Firmware Activation for controller %u: %s\n", | |
3314 | ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED"); | |
3315 | break; | |
3316 | case RESET_ABORT: | |
3317 | pqi_ofa_ctrl_unquiesce(ctrl_info); | |
3318 | dev_info(&ctrl_info->pci_dev->dev, | |
3319 | "Online Firmware Activation for controller %u: %s\n", | |
3320 | ctrl_info->ctrl_id, "ABORTED"); | |
3321 | break; | |
3322 | case RESET_NORESPONSE: | |
3323 | pqi_ofa_free_host_buffer(ctrl_info); | |
3324 | pqi_take_ctrl_offline(ctrl_info); | |
3325 | break; | |
3326 | } | |
3327 | } | |
3328 | ||
3329 | static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, | |
3330 | struct pqi_event *event) | |
3331 | { | |
3332 | u16 event_id; | |
3333 | enum pqi_soft_reset_status status; | |
3334 | ||
3335 | event_id = get_unaligned_le16(&event->event_id); | |
3336 | ||
3337 | mutex_lock(&ctrl_info->ofa_mutex); | |
3338 | ||
3339 | if (event_id == PQI_EVENT_OFA_QUIESCE) { | |
3340 | dev_info(&ctrl_info->pci_dev->dev, | |
694c5d5b KB |
3341 | "Received Online Firmware Activation quiesce event for controller %u\n", |
3342 | ctrl_info->ctrl_id); | |
4fd22c13 MR |
3343 | pqi_ofa_ctrl_quiesce(ctrl_info); |
3344 | pqi_acknowledge_event(ctrl_info, event); | |
3345 | if (ctrl_info->soft_reset_handshake_supported) { | |
3346 | status = pqi_poll_for_soft_reset_status(ctrl_info); | |
3347 | pqi_process_soft_reset(ctrl_info, status); | |
3348 | } else { | |
3349 | pqi_process_soft_reset(ctrl_info, | |
3350 | RESET_INITIATE_FIRMWARE); | |
3351 | } | |
3352 | ||
3353 | } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) { | |
3354 | pqi_acknowledge_event(ctrl_info, event); | |
3355 | pqi_ofa_setup_host_buffer(ctrl_info, | |
3356 | le32_to_cpu(event->ofa_bytes_requested)); | |
3357 | pqi_ofa_host_memory_update(ctrl_info); | |
583891c9 | 3358 | } else if (event_id == PQI_EVENT_OFA_CANCELED) { |
4fd22c13 MR |
3359 | pqi_ofa_free_host_buffer(ctrl_info); |
3360 | pqi_acknowledge_event(ctrl_info, event); | |
3361 | dev_info(&ctrl_info->pci_dev->dev, | |
694c5d5b KB |
3362 | "Online Firmware Activation(%u) cancel reason : %u\n", |
3363 | ctrl_info->ctrl_id, event->ofa_cancel_reason); | |
4fd22c13 MR |
3364 | } |
3365 | ||
3366 | mutex_unlock(&ctrl_info->ofa_mutex); | |
3367 | } | |
3368 | ||
6c223761 KB |
3369 | static void pqi_event_worker(struct work_struct *work) |
3370 | { | |
3371 | unsigned int i; | |
3372 | struct pqi_ctrl_info *ctrl_info; | |
6a50d6ad | 3373 | struct pqi_event *event; |
6c223761 KB |
3374 | |
3375 | ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); | |
3376 | ||
7561a7e4 KB |
3377 | pqi_ctrl_busy(ctrl_info); |
3378 | pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT); | |
5f310425 KB |
3379 | if (pqi_ctrl_offline(ctrl_info)) |
3380 | goto out; | |
3381 | ||
3382 | pqi_schedule_rescan_worker_delayed(ctrl_info); | |
7561a7e4 | 3383 | |
6a50d6ad | 3384 | event = ctrl_info->events; |
6c223761 | 3385 | for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { |
6a50d6ad KB |
3386 | if (event->pending) { |
3387 | event->pending = false; | |
4fd22c13 MR |
3388 | if (event->event_type == PQI_EVENT_TYPE_OFA) { |
3389 | pqi_ctrl_unbusy(ctrl_info); | |
3390 | pqi_ofa_process_event(ctrl_info, event); | |
3391 | return; | |
3392 | } | |
6a50d6ad | 3393 | pqi_acknowledge_event(ctrl_info, event); |
6c223761 | 3394 | } |
6a50d6ad | 3395 | event++; |
6c223761 KB |
3396 | } |
3397 | ||
5f310425 | 3398 | out: |
7561a7e4 | 3399 | pqi_ctrl_unbusy(ctrl_info); |
6c223761 KB |
3400 | } |
3401 | ||
4fd22c13 | 3402 | #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ) |
6c223761 | 3403 | |
74a0f573 | 3404 | static void pqi_heartbeat_timer_handler(struct timer_list *t) |
6c223761 KB |
3405 | { |
3406 | int num_interrupts; | |
98f87667 | 3407 | u32 heartbeat_count; |
583891c9 | 3408 | struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer); |
6c223761 | 3409 | |
98f87667 KB |
3410 | pqi_check_ctrl_health(ctrl_info); |
3411 | if (pqi_ctrl_offline(ctrl_info)) | |
061ef06a KB |
3412 | return; |
3413 | ||
6c223761 | 3414 | num_interrupts = atomic_read(&ctrl_info->num_interrupts); |
98f87667 | 3415 | heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); |
6c223761 KB |
3416 | |
3417 | if (num_interrupts == ctrl_info->previous_num_interrupts) { | |
98f87667 KB |
3418 | if (heartbeat_count == ctrl_info->previous_heartbeat_count) { |
3419 | dev_err(&ctrl_info->pci_dev->dev, | |
3420 | "no heartbeat detected - last heartbeat count: %u\n", | |
3421 | heartbeat_count); | |
6c223761 KB |
3422 | pqi_take_ctrl_offline(ctrl_info); |
3423 | return; | |
3424 | } | |
6c223761 | 3425 | } else { |
98f87667 | 3426 | ctrl_info->previous_num_interrupts = num_interrupts; |
6c223761 KB |
3427 | } |
3428 | ||
98f87667 | 3429 | ctrl_info->previous_heartbeat_count = heartbeat_count; |
6c223761 KB |
3430 | mod_timer(&ctrl_info->heartbeat_timer, |
3431 | jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); | |
3432 | } | |
3433 | ||
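/*
 * Sketch of the liveness test in pqi_heartbeat_timer_handler(): the
 * controller is only declared dead when *both* the interrupt count and
 * the firmware heartbeat counter have stalled since the previous timer
 * tick. The hb_state struct and the call sequence are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct hb_state {
	unsigned int prev_interrupts;
	unsigned int prev_heartbeat;
};

/* Returns true if the controller should be taken offline. */
static bool heartbeat_stalled(struct hb_state *s, unsigned int interrupts,
	unsigned int heartbeat)
{
	bool dead = interrupts == s->prev_interrupts &&
		heartbeat == s->prev_heartbeat;

	if (interrupts != s->prev_interrupts)
		s->prev_interrupts = interrupts;
	s->prev_heartbeat = heartbeat;
	return dead;
}

int main(void)
{
	struct hb_state s = { 10, 5 };

	printf("%d\n", heartbeat_stalled(&s, 10, 6));	/* 0: HB advanced */
	printf("%d\n", heartbeat_stalled(&s, 10, 6));	/* 1: both stalled */
	return 0;
}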
3434 | static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) | |
3435 | { | |
98f87667 KB |
3436 | if (!ctrl_info->heartbeat_counter) |
3437 | return; | |
3438 | ||
6c223761 KB |
3439 | ctrl_info->previous_num_interrupts = |
3440 | atomic_read(&ctrl_info->num_interrupts); | |
98f87667 KB |
3441 | ctrl_info->previous_heartbeat_count = |
3442 | pqi_read_heartbeat_counter(ctrl_info); | |
6c223761 | 3443 | |
6c223761 KB |
3444 | ctrl_info->heartbeat_timer.expires = |
3445 | jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; | |
061ef06a | 3446 | add_timer(&ctrl_info->heartbeat_timer); |
6c223761 KB |
3447 | } |
3448 | ||
3449 | static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) | |
3450 | { | |
98f87667 | 3451 | del_timer_sync(&ctrl_info->heartbeat_timer); |
6c223761 KB |
3452 | } |
3453 | ||
6a50d6ad | 3454 | static inline int pqi_event_type_to_event_index(unsigned int event_type) |
6c223761 KB |
3455 | { |
3456 | int index; | |
3457 | ||
6a50d6ad KB |
3458 | for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) |
3459 | if (event_type == pqi_supported_event_types[index]) | |
3460 | return index; | |
6c223761 | 3461 | |
6a50d6ad KB |
3462 | return -1; |
3463 | } | |
3464 | ||
3465 | static inline bool pqi_is_supported_event(unsigned int event_type) | |
3466 | { | |
3467 | return pqi_event_type_to_event_index(event_type) != -1; | |
6c223761 KB |
3468 | } |
3469 | ||
4fd22c13 MR |
3470 | static void pqi_ofa_capture_event_payload(struct pqi_event *event, |
3471 | struct pqi_event_response *response) | |
3472 | { | |
3473 | u16 event_id; | |
3474 | ||
3475 | event_id = get_unaligned_le16(&event->event_id); | |
3476 | ||
3477 | if (event->event_type == PQI_EVENT_TYPE_OFA) { | |
3478 | if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) { | |
3479 | event->ofa_bytes_requested = | |
3480 | response->data.ofa_memory_allocation.bytes_requested; | |
583891c9 | 3481 | } else if (event_id == PQI_EVENT_OFA_CANCELED) { |
4fd22c13 MR |
3482 | event->ofa_cancel_reason = |
3483 | response->data.ofa_cancelled.reason; | |
3484 | } | |
3485 | } | |
3486 | } | |
3487 | ||
9e68cccc | 3488 | static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) |
6c223761 | 3489 | { |
9e68cccc | 3490 | int num_events; |
6c223761 KB |
3491 | pqi_index_t oq_pi; |
3492 | pqi_index_t oq_ci; | |
3493 | struct pqi_event_queue *event_queue; | |
3494 | struct pqi_event_response *response; | |
6a50d6ad | 3495 | struct pqi_event *event; |
6c223761 KB |
3496 | int event_index; |
3497 | ||
3498 | event_queue = &ctrl_info->event_queue; | |
3499 | num_events = 0; | |
6c223761 KB |
3500 | oq_ci = event_queue->oq_ci_copy; |
3501 | ||
3502 | while (1) { | |
dac12fbc | 3503 | oq_pi = readl(event_queue->oq_pi); |
9e68cccc KB |
3504 | if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) { |
3505 | pqi_invalid_response(ctrl_info); | |
3506 | dev_err(&ctrl_info->pci_dev->dev, | |
3507 | "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", | |
3508 | oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci); | |
3509 | return -1; | |
3510 | } | |
3511 | ||
6c223761 KB |
3512 | if (oq_pi == oq_ci) |
3513 | break; | |
3514 | ||
3515 | num_events++; | |
9e68cccc | 3516 | response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); |
6c223761 | 3517 | |
583891c9 | 3518 | event_index = pqi_event_type_to_event_index(response->event_type); |
6c223761 | 3519 | |
9e68cccc KB |
3520 | if (event_index >= 0 && response->request_acknowledge) { |
3521 | event = &ctrl_info->events[event_index]; | |
3522 | event->pending = true; | |
3523 | event->event_type = response->event_type; | |
3524 | event->event_id = response->event_id; | |
3525 | event->additional_event_id = response->additional_event_id; | |
3526 | if (event->event_type == PQI_EVENT_TYPE_OFA) | |
4fd22c13 | 3527 | pqi_ofa_capture_event_payload(event, response); |
6c223761 KB |
3528 | } |
3529 | ||
3530 | oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; | |
3531 | } | |
3532 | ||
3533 | if (num_events) { | |
3534 | event_queue->oq_ci_copy = oq_ci; | |
3535 | writel(oq_ci, event_queue->oq_ci); | |
98f87667 | 3536 | schedule_work(&ctrl_info->event_work); |
6c223761 KB |
3537 | } |
3538 | ||
3539 | return num_events; | |
3540 | } | |
3541 | ||
061ef06a KB |
3542 | #define PQI_LEGACY_INTX_MASK 0x1 |
3543 | ||
583891c9 | 3544 | static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx) |
061ef06a KB |
3545 | { |
3546 | u32 intx_mask; | |
3547 | struct pqi_device_registers __iomem *pqi_registers; | |
3548 | volatile void __iomem *register_addr; | |
3549 | ||
3550 | pqi_registers = ctrl_info->pqi_registers; | |
3551 | ||
3552 | if (enable_intx) | |
3553 | register_addr = &pqi_registers->legacy_intx_mask_clear; | |
3554 | else | |
3555 | register_addr = &pqi_registers->legacy_intx_mask_set; | |
3556 | ||
3557 | intx_mask = readl(register_addr); | |
3558 | intx_mask |= PQI_LEGACY_INTX_MASK; | |
3559 | writel(intx_mask, register_addr); | |
3560 | } | |
3561 | ||
3562 | static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, | |
3563 | enum pqi_irq_mode new_mode) | |
3564 | { | |
3565 | switch (ctrl_info->irq_mode) { | |
3566 | case IRQ_MODE_MSIX: | |
3567 | switch (new_mode) { | |
3568 | case IRQ_MODE_MSIX: | |
3569 | break; | |
3570 | case IRQ_MODE_INTX: | |
3571 | pqi_configure_legacy_intx(ctrl_info, true); | |
061ef06a KB |
3572 | sis_enable_intx(ctrl_info); |
3573 | break; | |
3574 | case IRQ_MODE_NONE: | |
061ef06a KB |
3575 | break; |
3576 | } | |
3577 | break; | |
3578 | case IRQ_MODE_INTX: | |
3579 | switch (new_mode) { | |
3580 | case IRQ_MODE_MSIX: | |
3581 | pqi_configure_legacy_intx(ctrl_info, false); | |
061ef06a KB |
3582 | sis_enable_msix(ctrl_info); |
3583 | break; | |
3584 | case IRQ_MODE_INTX: | |
3585 | break; | |
3586 | case IRQ_MODE_NONE: | |
3587 | pqi_configure_legacy_intx(ctrl_info, false); | |
061ef06a KB |
3588 | break; |
3589 | } | |
3590 | break; | |
3591 | case IRQ_MODE_NONE: | |
3592 | switch (new_mode) { | |
3593 | case IRQ_MODE_MSIX: | |
3594 | sis_enable_msix(ctrl_info); | |
3595 | break; | |
3596 | case IRQ_MODE_INTX: | |
3597 | pqi_configure_legacy_intx(ctrl_info, true); | |
3598 | sis_enable_intx(ctrl_info); | |
3599 | break; | |
3600 | case IRQ_MODE_NONE: | |
3601 | break; | |
3602 | } | |
3603 | break; | |
3604 | } | |
3605 | ||
3606 | ctrl_info->irq_mode = new_mode; | |
3607 | } | |
3608 | ||
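/*
 * pqi_change_irq_mode() above is a 3x3 state machine. A compact
 * user-space rendering of the same transitions as a table; the action
 * strings are shorthand for the pqi_configure_legacy_intx() /
 * sis_enable_msix() / sis_enable_intx() call sequences and are
 * illustrative only.
 */
#include <stdio.h>

enum irq_mode { MODE_MSIX, MODE_INTX, MODE_NONE };

static const char *const transition[3][3] = {
	/* to:       MSIX                  INTX                NONE */
	/* MSIX */ { "none",               "intx-on,sis-intx", "none" },
	/* INTX */ { "intx-off,sis-msix",  "none",             "intx-off" },
	/* NONE */ { "sis-msix",           "intx-on,sis-intx", "none" },
};

int main(void)
{
	enum irq_mode cur = MODE_NONE;
	enum irq_mode new = MODE_MSIX;

	printf("actions: %s\n", transition[cur][new]);
	return 0;
}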
3609 | #define PQI_LEGACY_INTX_PENDING 0x1 | |
3610 | ||
3611 | static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) | |
3612 | { | |
3613 | bool valid_irq; | |
3614 | u32 intx_status; | |
3615 | ||
3616 | switch (ctrl_info->irq_mode) { | |
3617 | case IRQ_MODE_MSIX: | |
3618 | valid_irq = true; | |
3619 | break; | |
3620 | case IRQ_MODE_INTX: | |
583891c9 | 3621 | intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status); |
061ef06a KB |
3622 | if (intx_status & PQI_LEGACY_INTX_PENDING) |
3623 | valid_irq = true; | |
3624 | else | |
3625 | valid_irq = false; | |
3626 | break; | |
3627 | case IRQ_MODE_NONE: | |
3628 | default: | |
3629 | valid_irq = false; | |
3630 | break; | |
3631 | } | |
3632 | ||
3633 | return valid_irq; | |
3634 | } | |
3635 | ||
6c223761 KB |
3636 | static irqreturn_t pqi_irq_handler(int irq, void *data) |
3637 | { | |
3638 | struct pqi_ctrl_info *ctrl_info; | |
3639 | struct pqi_queue_group *queue_group; | |
9e68cccc KB |
3640 | int num_io_responses_handled; |
3641 | int num_events_handled; | |
6c223761 KB |
3642 | |
3643 | queue_group = data; | |
3644 | ctrl_info = queue_group->ctrl_info; | |
3645 | ||
061ef06a | 3646 | if (!pqi_is_valid_irq(ctrl_info)) |
6c223761 KB |
3647 | return IRQ_NONE; |
3648 | ||
9e68cccc KB |
3649 | num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); |
3650 | if (num_io_responses_handled < 0) | |
3651 | goto out; | |
6c223761 | 3652 | |
9e68cccc KB |
3653 | if (irq == ctrl_info->event_irq) { |
3654 | num_events_handled = pqi_process_event_intr(ctrl_info); | |
3655 | if (num_events_handled < 0) | |
3656 | goto out; | |
3657 | } else { | |
3658 | num_events_handled = 0; | |
3659 | } | |
6c223761 | 3660 | |
9e68cccc | 3661 | if (num_io_responses_handled + num_events_handled > 0) |
6c223761 KB |
3662 | atomic_inc(&ctrl_info->num_interrupts); |
3663 | ||
3664 | pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); | |
3665 | pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); | |
3666 | ||
9e68cccc | 3667 | out: |
6c223761 KB |
3668 | return IRQ_HANDLED; |
3669 | } | |
3670 | ||
3671 | static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) | |
3672 | { | |
d91d7820 | 3673 | struct pci_dev *pci_dev = ctrl_info->pci_dev; |
6c223761 KB |
3674 | int i; |
3675 | int rc; | |
3676 | ||
d91d7820 | 3677 | ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); |
6c223761 KB |
3678 | |
3679 | for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { | |
d91d7820 | 3680 | rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, |
52198226 | 3681 | DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); |
6c223761 | 3682 | if (rc) { |
d91d7820 | 3683 | dev_err(&pci_dev->dev, |
6c223761 | 3684 | "irq %u init failed with error %d\n", |
d91d7820 | 3685 | pci_irq_vector(pci_dev, i), rc); |
6c223761 KB |
3686 | return rc; |
3687 | } | |
3688 | ctrl_info->num_msix_vectors_initialized++; | |
3689 | } | |
3690 | ||
3691 | return 0; | |
3692 | } | |
3693 | ||
98bf061b KB |
3694 | static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) |
3695 | { | |
3696 | int i; | |
3697 | ||
3698 | for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) | |
3699 | free_irq(pci_irq_vector(ctrl_info->pci_dev, i), | |
3700 | &ctrl_info->queue_groups[i]); | |
3701 | ||
3702 | ctrl_info->num_msix_vectors_initialized = 0; | |
3703 | } | |
3704 | ||
6c223761 KB |
3705 | static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) |
3706 | { | |
98bf061b | 3707 | int num_vectors_enabled; |
6c223761 | 3708 | |
98bf061b | 3709 | num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, |
52198226 CH |
3710 | PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, |
3711 | PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); | |
98bf061b | 3712 | if (num_vectors_enabled < 0) { |
6c223761 | 3713 | dev_err(&ctrl_info->pci_dev->dev, |
98bf061b KB |
3714 | "MSI-X init failed with error %d\n", |
3715 | num_vectors_enabled); | |
3716 | return num_vectors_enabled; | |
6c223761 KB |
3717 | } |
3718 | ||
98bf061b | 3719 | ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; |
061ef06a | 3720 | ctrl_info->irq_mode = IRQ_MODE_MSIX; |
6c223761 KB |
3721 | return 0; |
3722 | } | |
3723 | ||
98bf061b KB |
3724 | static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) |
3725 | { | |
3726 | if (ctrl_info->num_msix_vectors_enabled) { | |
3727 | pci_free_irq_vectors(ctrl_info->pci_dev); | |
3728 | ctrl_info->num_msix_vectors_enabled = 0; | |
3729 | } | |
3730 | } | |
3731 | ||
6c223761 KB |
3732 | static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) |
3733 | { | |
3734 | unsigned int i; | |
3735 | size_t alloc_length; | |
3736 | size_t element_array_length_per_iq; | |
3737 | size_t element_array_length_per_oq; | |
3738 | void *element_array; | |
dac12fbc | 3739 | void __iomem *next_queue_index; |
6c223761 KB |
3740 | void *aligned_pointer; |
3741 | unsigned int num_inbound_queues; | |
3742 | unsigned int num_outbound_queues; | |
3743 | unsigned int num_queue_indexes; | |
3744 | struct pqi_queue_group *queue_group; | |
3745 | ||
3746 | element_array_length_per_iq = | |
3747 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * | |
3748 | ctrl_info->num_elements_per_iq; | |
3749 | element_array_length_per_oq = | |
3750 | PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * | |
3751 | ctrl_info->num_elements_per_oq; | |
3752 | num_inbound_queues = ctrl_info->num_queue_groups * 2; | |
3753 | num_outbound_queues = ctrl_info->num_queue_groups; | |
3754 | num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; | |
3755 | ||
3756 | aligned_pointer = NULL; | |
3757 | ||
3758 | for (i = 0; i < num_inbound_queues; i++) { | |
3759 | aligned_pointer = PTR_ALIGN(aligned_pointer, | |
3760 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); | |
3761 | aligned_pointer += element_array_length_per_iq; | |
3762 | } | |
3763 | ||
3764 | for (i = 0; i < num_outbound_queues; i++) { | |
3765 | aligned_pointer = PTR_ALIGN(aligned_pointer, | |
3766 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); | |
3767 | aligned_pointer += element_array_length_per_oq; | |
3768 | } | |
3769 | ||
3770 | aligned_pointer = PTR_ALIGN(aligned_pointer, | |
3771 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); | |
3772 | aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * | |
3773 | PQI_EVENT_OQ_ELEMENT_LENGTH; | |
3774 | ||
3775 | for (i = 0; i < num_queue_indexes; i++) { | |
3776 | aligned_pointer = PTR_ALIGN(aligned_pointer, | |
3777 | PQI_OPERATIONAL_INDEX_ALIGNMENT); | |
3778 | aligned_pointer += sizeof(pqi_index_t); | |
3779 | } | |
3780 | ||
3781 | alloc_length = (size_t)aligned_pointer + | |
3782 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; | |
3783 | ||
e1d213bd KB |
3784 | alloc_length += PQI_EXTRA_SGL_MEMORY; |
3785 | ||
6c223761 | 3786 | ctrl_info->queue_memory_base = |
750afb08 LC |
3787 | dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, |
3788 | &ctrl_info->queue_memory_base_dma_handle, | |
3789 | GFP_KERNEL); | |
6c223761 | 3790 | |
d87d5474 | 3791 | if (!ctrl_info->queue_memory_base) |
6c223761 | 3792 | return -ENOMEM; |
6c223761 KB |
3793 | |
3794 | ctrl_info->queue_memory_length = alloc_length; | |
3795 | ||
3796 | element_array = PTR_ALIGN(ctrl_info->queue_memory_base, | |
3797 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); | |
3798 | ||
3799 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
3800 | queue_group = &ctrl_info->queue_groups[i]; | |
3801 | queue_group->iq_element_array[RAID_PATH] = element_array; | |
3802 | queue_group->iq_element_array_bus_addr[RAID_PATH] = | |
3803 | ctrl_info->queue_memory_base_dma_handle + | |
3804 | (element_array - ctrl_info->queue_memory_base); | |
3805 | element_array += element_array_length_per_iq; | |
3806 | element_array = PTR_ALIGN(element_array, | |
3807 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); | |
3808 | queue_group->iq_element_array[AIO_PATH] = element_array; | |
3809 | queue_group->iq_element_array_bus_addr[AIO_PATH] = | |
3810 | ctrl_info->queue_memory_base_dma_handle + | |
3811 | (element_array - ctrl_info->queue_memory_base); | |
3812 | element_array += element_array_length_per_iq; | |
3813 | element_array = PTR_ALIGN(element_array, | |
3814 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); | |
3815 | } | |
3816 | ||
3817 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
3818 | queue_group = &ctrl_info->queue_groups[i]; | |
3819 | queue_group->oq_element_array = element_array; | |
3820 | queue_group->oq_element_array_bus_addr = | |
3821 | ctrl_info->queue_memory_base_dma_handle + | |
3822 | (element_array - ctrl_info->queue_memory_base); | |
3823 | element_array += element_array_length_per_oq; | |
3824 | element_array = PTR_ALIGN(element_array, | |
3825 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); | |
3826 | } | |
3827 | ||
3828 | ctrl_info->event_queue.oq_element_array = element_array; | |
3829 | ctrl_info->event_queue.oq_element_array_bus_addr = | |
3830 | ctrl_info->queue_memory_base_dma_handle + | |
3831 | (element_array - ctrl_info->queue_memory_base); | |
3832 | element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * | |
3833 | PQI_EVENT_OQ_ELEMENT_LENGTH; | |
3834 | ||
dac12fbc | 3835 | next_queue_index = (void __iomem *)PTR_ALIGN(element_array, |
6c223761 KB |
3836 | PQI_OPERATIONAL_INDEX_ALIGNMENT); |
3837 | ||
3838 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
3839 | queue_group = &ctrl_info->queue_groups[i]; | |
3840 | queue_group->iq_ci[RAID_PATH] = next_queue_index; | |
3841 | queue_group->iq_ci_bus_addr[RAID_PATH] = | |
3842 | ctrl_info->queue_memory_base_dma_handle + | |
dac12fbc KB |
3843 | (next_queue_index - |
3844 | (void __iomem *)ctrl_info->queue_memory_base); | |
6c223761 KB |
3845 | next_queue_index += sizeof(pqi_index_t); |
3846 | next_queue_index = PTR_ALIGN(next_queue_index, | |
3847 | PQI_OPERATIONAL_INDEX_ALIGNMENT); | |
3848 | queue_group->iq_ci[AIO_PATH] = next_queue_index; | |
3849 | queue_group->iq_ci_bus_addr[AIO_PATH] = | |
3850 | ctrl_info->queue_memory_base_dma_handle + | |
dac12fbc KB |
3851 | (next_queue_index - |
3852 | (void __iomem *)ctrl_info->queue_memory_base); | |
6c223761 KB |
3853 | next_queue_index += sizeof(pqi_index_t); |
3854 | next_queue_index = PTR_ALIGN(next_queue_index, | |
3855 | PQI_OPERATIONAL_INDEX_ALIGNMENT); | |
3856 | queue_group->oq_pi = next_queue_index; | |
3857 | queue_group->oq_pi_bus_addr = | |
3858 | ctrl_info->queue_memory_base_dma_handle + | |
dac12fbc KB |
3859 | (next_queue_index - |
3860 | (void __iomem *)ctrl_info->queue_memory_base); | |
6c223761 KB |
3861 | next_queue_index += sizeof(pqi_index_t); |
3862 | next_queue_index = PTR_ALIGN(next_queue_index, | |
3863 | PQI_OPERATIONAL_INDEX_ALIGNMENT); | |
3864 | } | |
3865 | ||
3866 | ctrl_info->event_queue.oq_pi = next_queue_index; | |
3867 | ctrl_info->event_queue.oq_pi_bus_addr = | |
3868 | ctrl_info->queue_memory_base_dma_handle + | |
dac12fbc KB |
3869 | (next_queue_index - |
3870 | (void __iomem *)ctrl_info->queue_memory_base); | |
6c223761 KB |
3871 | |
3872 | return 0; | |
3873 | } | |
3874 | ||
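/*
 * pqi_alloc_operational_queues() sizes one big DMA allocation by walking
 * a NULL base pointer through the same PTR_ALIGN() steps that will later
 * lay out the real buffer. A user-space sketch of that two-pass sizing
 * idiom (the alignment and element sizes are illustrative, not the PQI
 * constants):
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(p, a) (((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
	uintptr_t cursor = 0;	/* pass 1: start from a NULL base */
	size_t element_bytes = 100;
	int i;

	for (i = 0; i < 4; i++) {
		cursor = ALIGN_UP(cursor, 64);	/* per-queue alignment */
		cursor += element_bytes;
	}
	/* final padding so the real base can be aligned after allocation */
	printf("total allocation: %zu bytes\n", (size_t)(cursor + 64));
	return 0;
}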
3875 | static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) | |
3876 | { | |
3877 | unsigned int i; | |
3878 | u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; | |
3879 | u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; | |
3880 | ||
3881 | /* | |
3882 | * Initialize the backpointers to the controller structure in | |
3883 | * each operational queue group structure. | |
3884 | */ | |
3885 | for (i = 0; i < ctrl_info->num_queue_groups; i++) | |
3886 | ctrl_info->queue_groups[i].ctrl_info = ctrl_info; | |
3887 | ||
3888 | /* | |
3889 | * Assign IDs to all operational queues. Note that the IDs | |
3890 | * assigned to operational IQs are independent of the IDs | |
3891 | * assigned to operational OQs. | |
3892 | */ | |
3893 | ctrl_info->event_queue.oq_id = next_oq_id++; | |
3894 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
3895 | ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; | |
3896 | ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; | |
3897 | ctrl_info->queue_groups[i].oq_id = next_oq_id++; | |
3898 | } | |
3899 | ||
3900 | /* | |
3901 | * Assign MSI-X table entry indexes to all queues. Note that the | |
3902 | * interrupt for the event queue is shared with the first queue group. | |
3903 | */ | |
3904 | ctrl_info->event_queue.int_msg_num = 0; | |
3905 | for (i = 0; i < ctrl_info->num_queue_groups; i++) | |
3906 | ctrl_info->queue_groups[i].int_msg_num = i; | |
3907 | ||
3908 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
3909 | spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); | |
3910 | spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); | |
3911 | INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); | |
3912 | INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); | |
3913 | } | |
3914 | } | |
3915 | ||
3916 | static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) | |
3917 | { | |
3918 | size_t alloc_length; | |
3919 | struct pqi_admin_queues_aligned *admin_queues_aligned; | |
3920 | struct pqi_admin_queues *admin_queues; | |
3921 | ||
3922 | alloc_length = sizeof(struct pqi_admin_queues_aligned) + | |
3923 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; | |
3924 | ||
3925 | ctrl_info->admin_queue_memory_base = | |
750afb08 LC |
3926 | dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, |
3927 | &ctrl_info->admin_queue_memory_base_dma_handle, | |
3928 | GFP_KERNEL); | |
6c223761 KB |
3929 | |
3930 | if (!ctrl_info->admin_queue_memory_base) | |
3931 | return -ENOMEM; | |
3932 | ||
3933 | ctrl_info->admin_queue_memory_length = alloc_length; | |
3934 | ||
3935 | admin_queues = &ctrl_info->admin_queues; | |
3936 | admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, | |
3937 | PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); | |
3938 | admin_queues->iq_element_array = | |
3939 | &admin_queues_aligned->iq_element_array; | |
3940 | admin_queues->oq_element_array = | |
3941 | &admin_queues_aligned->oq_element_array; | |
583891c9 KB |
3942 | admin_queues->iq_ci = |
3943 | (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci; | |
dac12fbc KB |
3944 | admin_queues->oq_pi = |
3945 | (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; | |
6c223761 KB |
3946 | |
3947 | admin_queues->iq_element_array_bus_addr = | |
3948 | ctrl_info->admin_queue_memory_base_dma_handle + | |
3949 | (admin_queues->iq_element_array - | |
3950 | ctrl_info->admin_queue_memory_base); | |
3951 | admin_queues->oq_element_array_bus_addr = | |
3952 | ctrl_info->admin_queue_memory_base_dma_handle + | |
3953 | (admin_queues->oq_element_array - | |
3954 | ctrl_info->admin_queue_memory_base); | |
3955 | admin_queues->iq_ci_bus_addr = | |
3956 | ctrl_info->admin_queue_memory_base_dma_handle + | |
583891c9 KB |
3957 | ((void __iomem *)admin_queues->iq_ci - |
3958 | (void __iomem *)ctrl_info->admin_queue_memory_base); | |
6c223761 KB |
3959 | admin_queues->oq_pi_bus_addr = |
3960 | ctrl_info->admin_queue_memory_base_dma_handle + | |
dac12fbc KB |
3961 | ((void __iomem *)admin_queues->oq_pi - |
3962 | (void __iomem *)ctrl_info->admin_queue_memory_base); | |
6c223761 KB |
3963 | |
3964 | return 0; | |
3965 | } | |
3966 | ||
4fd22c13 | 3967 | #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ |
6c223761 KB |
3968 | #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 |
3969 | ||
3970 | static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) | |
3971 | { | |
3972 | struct pqi_device_registers __iomem *pqi_registers; | |
3973 | struct pqi_admin_queues *admin_queues; | |
3974 | unsigned long timeout; | |
3975 | u8 status; | |
3976 | u32 reg; | |
3977 | ||
3978 | pqi_registers = ctrl_info->pqi_registers; | |
3979 | admin_queues = &ctrl_info->admin_queues; | |
3980 | ||
3981 | writeq((u64)admin_queues->iq_element_array_bus_addr, | |
3982 | &pqi_registers->admin_iq_element_array_addr); | |
3983 | writeq((u64)admin_queues->oq_element_array_bus_addr, | |
3984 | &pqi_registers->admin_oq_element_array_addr); | |
3985 | writeq((u64)admin_queues->iq_ci_bus_addr, | |
3986 | &pqi_registers->admin_iq_ci_addr); | |
3987 | writeq((u64)admin_queues->oq_pi_bus_addr, | |
3988 | &pqi_registers->admin_oq_pi_addr); | |
3989 | ||
3990 | reg = PQI_ADMIN_IQ_NUM_ELEMENTS | | |
e655d469 | 3991 | (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) | |
6c223761 KB |
3992 | (admin_queues->int_msg_num << 16); |
3993 | writel(reg, &pqi_registers->admin_iq_num_elements); | |
583891c9 | 3994 | |
6c223761 KB |
3995 | writel(PQI_CREATE_ADMIN_QUEUE_PAIR, |
3996 | &pqi_registers->function_and_status_code); | |
3997 | ||
3998 | timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; | |
3999 | while (1) { | |
4000 | status = readb(&pqi_registers->function_and_status_code); | |
4001 | if (status == PQI_STATUS_IDLE) | |
4002 | break; | |
4003 | if (time_after(jiffies, timeout)) | |
4004 | return -ETIMEDOUT; | |
4005 | msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); | |
4006 | } | |
4007 | ||
4008 | /* | |
4009 | * The offset registers are not initialized to the correct | |
4010 | * offsets until *after* the create admin queue pair command | |
4011 | * completes successfully. | |
4012 | */ | |
4013 | admin_queues->iq_pi = ctrl_info->iomem_base + | |
4014 | PQI_DEVICE_REGISTERS_OFFSET + | |
4015 | readq(&pqi_registers->admin_iq_pi_offset); | |
4016 | admin_queues->oq_ci = ctrl_info->iomem_base + | |
4017 | PQI_DEVICE_REGISTERS_OFFSET + | |
4018 | readq(&pqi_registers->admin_oq_ci_offset); | |
4019 | ||
4020 | return 0; | |
4021 | } | |
4022 | ||
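/*
 * The admin-queue setup above packs three fields into a single 32-bit
 * register write: IQ element count in bits 0-7, OQ element count in
 * bits 8-15, and the MSI-X message number starting at bit 16. A sketch
 * of that packing and the matching unpack (values made up):
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_admin_queue_params(uint32_t iq_num, uint32_t oq_num,
	uint32_t int_msg_num)
{
	return iq_num | (oq_num << 8) | (int_msg_num << 16);
}

int main(void)
{
	uint32_t reg = pack_admin_queue_params(8, 20, 0);

	printf("reg=0x%08x iq=%u oq=%u msg=%u\n", reg,
		reg & 0xff, (reg >> 8) & 0xff, reg >> 16);
	return 0;
}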
4023 | static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, | |
4024 | struct pqi_general_admin_request *request) | |
4025 | { | |
4026 | struct pqi_admin_queues *admin_queues; | |
4027 | void *next_element; | |
4028 | pqi_index_t iq_pi; | |
4029 | ||
4030 | admin_queues = &ctrl_info->admin_queues; | |
4031 | iq_pi = admin_queues->iq_pi_copy; | |
4032 | ||
4033 | next_element = admin_queues->iq_element_array + | |
4034 | (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); | |
4035 | ||
4036 | memcpy(next_element, request, sizeof(*request)); | |
4037 | ||
4038 | iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; | |
4039 | admin_queues->iq_pi_copy = iq_pi; | |
4040 | ||
4041 | /* | |
4042 | * This write notifies the controller that an IU is available to be | |
4043 | * processed. | |
4044 | */ | |
4045 | writel(iq_pi, admin_queues->iq_pi); | |
4046 | } | |
4047 | ||
13bede67 KB |
4048 | #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 |
4049 | ||
6c223761 KB |
4050 | static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, |
4051 | struct pqi_general_admin_response *response) | |
4052 | { | |
4053 | struct pqi_admin_queues *admin_queues; | |
4054 | pqi_index_t oq_pi; | |
4055 | pqi_index_t oq_ci; | |
4056 | unsigned long timeout; | |
4057 | ||
4058 | admin_queues = &ctrl_info->admin_queues; | |
4059 | oq_ci = admin_queues->oq_ci_copy; | |
4060 | ||
4fd22c13 | 4061 | timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies; |
6c223761 KB |
4062 | |
4063 | while (1) { | |
dac12fbc | 4064 | oq_pi = readl(admin_queues->oq_pi); |
6c223761 KB |
4065 | if (oq_pi != oq_ci) |
4066 | break; | |
4067 | if (time_after(jiffies, timeout)) { | |
4068 | dev_err(&ctrl_info->pci_dev->dev, | |
4069 | "timed out waiting for admin response\n"); | |
4070 | return -ETIMEDOUT; | |
4071 | } | |
13bede67 KB |
4072 | if (!sis_is_firmware_running(ctrl_info)) |
4073 | return -ENXIO; | |
6c223761 KB |
4074 | usleep_range(1000, 2000); |
4075 | } | |
4076 | ||
4077 | memcpy(response, admin_queues->oq_element_array + | |
4078 | (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); | |
4079 | ||
4080 | oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; | |
4081 | admin_queues->oq_ci_copy = oq_ci; | |
4082 | writel(oq_ci, admin_queues->oq_ci); | |
4083 | ||
4084 | return 0; | |
4085 | } | |
4086 | ||
4087 | static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, | |
4088 | struct pqi_queue_group *queue_group, enum pqi_io_path path, | |
4089 | struct pqi_io_request *io_request) | |
4090 | { | |
4091 | struct pqi_io_request *next; | |
4092 | void *next_element; | |
4093 | pqi_index_t iq_pi; | |
4094 | pqi_index_t iq_ci; | |
4095 | size_t iu_length; | |
4096 | unsigned long flags; | |
4097 | unsigned int num_elements_needed; | |
4098 | unsigned int num_elements_to_end_of_queue; | |
4099 | size_t copy_count; | |
4100 | struct pqi_iu_header *request; | |
4101 | ||
4102 | spin_lock_irqsave(&queue_group->submit_lock[path], flags); | |
4103 | ||
376fb880 KB |
4104 | if (io_request) { |
4105 | io_request->queue_group = queue_group; | |
6c223761 KB |
4106 | list_add_tail(&io_request->request_list_entry, |
4107 | &queue_group->request_list[path]); | |
376fb880 | 4108 | } |
6c223761 KB |
4109 | |
4110 | iq_pi = queue_group->iq_pi_copy[path]; | |
4111 | ||
4112 | list_for_each_entry_safe(io_request, next, | |
4113 | &queue_group->request_list[path], request_list_entry) { | |
4114 | ||
4115 | request = io_request->iu; | |
4116 | ||
4117 | iu_length = get_unaligned_le16(&request->iu_length) + | |
4118 | PQI_REQUEST_HEADER_LENGTH; | |
4119 | num_elements_needed = | |
4120 | DIV_ROUND_UP(iu_length, | |
4121 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | |
4122 | ||
dac12fbc | 4123 | iq_ci = readl(queue_group->iq_ci[path]); |
6c223761 KB |
4124 | |
4125 | if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, | |
4126 | ctrl_info->num_elements_per_iq)) | |
4127 | break; | |
4128 | ||
4129 | put_unaligned_le16(queue_group->oq_id, | |
4130 | &request->response_queue_id); | |
4131 | ||
4132 | next_element = queue_group->iq_element_array[path] + | |
4133 | (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | |
4134 | ||
4135 | num_elements_to_end_of_queue = | |
4136 | ctrl_info->num_elements_per_iq - iq_pi; | |
4137 | ||
4138 | if (num_elements_needed <= num_elements_to_end_of_queue) { | |
4139 | memcpy(next_element, request, iu_length); | |
4140 | } else { | |
4141 | copy_count = num_elements_to_end_of_queue * | |
4142 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; | |
4143 | memcpy(next_element, request, copy_count); | |
4144 | memcpy(queue_group->iq_element_array[path], | |
4145 | (u8 *)request + copy_count, | |
4146 | iu_length - copy_count); | |
4147 | } | |
4148 | ||
4149 | iq_pi = (iq_pi + num_elements_needed) % | |
4150 | ctrl_info->num_elements_per_iq; | |
4151 | ||
4152 | list_del(&io_request->request_list_entry); | |
4153 | } | |
4154 | ||
4155 | if (iq_pi != queue_group->iq_pi_copy[path]) { | |
4156 | queue_group->iq_pi_copy[path] = iq_pi; | |
4157 | /* | |
4158 | * This write notifies the controller that one or more IUs are | |
4159 | * available to be processed. | |
4160 | */ | |
4161 | writel(iq_pi, queue_group->iq_pi[path]); | |
4162 | } | |
4163 | ||
4164 | spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); | |
4165 | } | |
4166 | ||
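/*
 * When an IU spans the end of the inbound ring, pqi_start_io() splits
 * the copy in two: the tail of the ring first, then the remainder
 * wrapped back to element zero. A user-space sketch of that split copy
 * (element size and count are illustrative):
 */
#include <stdio.h>
#include <string.h>

#define ELEM_SIZE 4
#define NUM_ELEMS 8

static char ring[NUM_ELEMS * ELEM_SIZE];

static void ring_copy(unsigned int pi, const char *iu, size_t iu_len)
{
	size_t to_end = (size_t)(NUM_ELEMS - pi) * ELEM_SIZE;

	if (iu_len <= to_end) {
		memcpy(&ring[pi * ELEM_SIZE], iu, iu_len);
	} else {
		memcpy(&ring[pi * ELEM_SIZE], iu, to_end);
		memcpy(ring, iu + to_end, iu_len - to_end);
	}
}

int main(void)
{
	ring_copy(7, "ABCDEFGH", 8);	/* 8 bytes at element 7: wraps */
	printf("%.4s|%.4s\n", &ring[7 * ELEM_SIZE], ring);	/* ABCD|EFGH */
	return 0;
}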
1f37e992 KB |
4167 | #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 |
4168 | ||
4169 | static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, | |
4170 | struct completion *wait) | |
4171 | { | |
4172 | int rc; | |
1f37e992 KB |
4173 | |
4174 | while (1) { | |
4175 | if (wait_for_completion_io_timeout(wait, | |
4fd22c13 | 4176 | PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) { |
1f37e992 KB |
4177 | rc = 0; |
4178 | break; | |
4179 | } | |
4180 | ||
4181 | pqi_check_ctrl_health(ctrl_info); | |
4182 | if (pqi_ctrl_offline(ctrl_info)) { | |
4183 | rc = -ENXIO; | |
4184 | break; | |
4185 | } | |
1f37e992 KB |
4186 | } |
4187 | ||
4188 | return rc; | |
4189 | } | |
4190 | ||
6c223761 KB |
4191 | static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, |
4192 | void *context) | |
4193 | { | |
4194 | struct completion *waiting = context; | |
4195 | ||
4196 | complete(waiting); | |
4197 | } | |
4198 | ||
694c5d5b KB |
4199 | static int pqi_process_raid_io_error_synchronous( |
4200 | struct pqi_raid_error_info *error_info) | |
26b390ab KB |
4201 | { |
4202 | int rc = -EIO; | |
4203 | ||
4204 | switch (error_info->data_out_result) { | |
4205 | case PQI_DATA_IN_OUT_GOOD: | |
4206 | if (error_info->status == SAM_STAT_GOOD) | |
4207 | rc = 0; | |
4208 | break; | |
4209 | case PQI_DATA_IN_OUT_UNDERFLOW: | |
4210 | if (error_info->status == SAM_STAT_GOOD || | |
4211 | error_info->status == SAM_STAT_CHECK_CONDITION) | |
4212 | rc = 0; | |
4213 | break; | |
4214 | case PQI_DATA_IN_OUT_ABORTED: | |
4215 | rc = PQI_CMD_STATUS_ABORTED; | |
4216 | break; | |
4217 | } | |
4218 | ||
4219 | return rc; | |
4220 | } | |
4221 | ||
6c223761 KB |
4222 | static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, |
4223 | struct pqi_iu_header *request, unsigned int flags, | |
4224 | struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) | |
4225 | { | |
957c5ab1 | 4226 | int rc = 0; |
6c223761 KB |
4227 | struct pqi_io_request *io_request; |
4228 | unsigned long start_jiffies; | |
4229 | unsigned long msecs_blocked; | |
4230 | size_t iu_length; | |
957c5ab1 | 4231 | DECLARE_COMPLETION_ONSTACK(wait); |
6c223761 KB |
4232 | |
4233 | /* | |
4234 | * Note that PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value are | 
4235 | * mutually exclusive. | 
4236 | */ | |
4237 | ||
4238 | if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { | |
4239 | if (down_interruptible(&ctrl_info->sync_request_sem)) | |
4240 | return -ERESTARTSYS; | |
4241 | } else { | |
4242 | if (timeout_msecs == NO_TIMEOUT) { | |
4243 | down(&ctrl_info->sync_request_sem); | |
4244 | } else { | |
4245 | start_jiffies = jiffies; | |
4246 | if (down_timeout(&ctrl_info->sync_request_sem, | |
4247 | msecs_to_jiffies(timeout_msecs))) | |
4248 | return -ETIMEDOUT; | |
4249 | msecs_blocked = | |
4250 | jiffies_to_msecs(jiffies - start_jiffies); | |
cc8f5260 DC |
4251 | if (msecs_blocked >= timeout_msecs) { |
4252 | rc = -ETIMEDOUT; | |
4253 | goto out; | |
4254 | } | |
6c223761 KB |
4255 | timeout_msecs -= msecs_blocked; |
4256 | } | |
4257 | } | |
4258 | ||
7561a7e4 KB |
4259 | pqi_ctrl_busy(ctrl_info); |
4260 | timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs); | |
4261 | if (timeout_msecs == 0) { | |
957c5ab1 | 4262 | pqi_ctrl_unbusy(ctrl_info); |
7561a7e4 KB |
4263 | rc = -ETIMEDOUT; |
4264 | goto out; | |
4265 | } | |
4266 | ||
376fb880 | 4267 | if (pqi_ctrl_offline(ctrl_info)) { |
957c5ab1 | 4268 | pqi_ctrl_unbusy(ctrl_info); |
376fb880 KB |
4269 | rc = -ENXIO; |
4270 | goto out; | |
4271 | } | |
4272 | ||
0530736e KB |
4273 | atomic_inc(&ctrl_info->sync_cmds_outstanding); |
4274 | ||
6c223761 KB |
4275 | io_request = pqi_alloc_io_request(ctrl_info); |
4276 | ||
4277 | put_unaligned_le16(io_request->index, | |
4278 | &(((struct pqi_raid_path_request *)request)->request_id)); | |
4279 | ||
4280 | if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) | |
4281 | ((struct pqi_raid_path_request *)request)->error_index = | |
4282 | ((struct pqi_raid_path_request *)request)->request_id; | |
4283 | ||
4284 | iu_length = get_unaligned_le16(&request->iu_length) + | |
4285 | PQI_REQUEST_HEADER_LENGTH; | |
4286 | memcpy(io_request->iu, request, iu_length); | |
4287 | ||
957c5ab1 KB |
4288 | io_request->io_complete_callback = pqi_raid_synchronous_complete; |
4289 | io_request->context = &wait; | |
4290 | ||
583891c9 | 4291 | pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, |
957c5ab1 KB |
4292 | io_request); |
4293 | ||
4294 | pqi_ctrl_unbusy(ctrl_info); | |
4295 | ||
4296 | if (timeout_msecs == NO_TIMEOUT) { | |
4297 | pqi_wait_for_completion_io(ctrl_info, &wait); | |
4298 | } else { | |
4299 | if (!wait_for_completion_io_timeout(&wait, | |
4300 | msecs_to_jiffies(timeout_msecs))) { | |
4301 | dev_warn(&ctrl_info->pci_dev->dev, | |
4302 | "command timed out\n"); | |
4303 | rc = -ETIMEDOUT; | |
4304 | } | |
4305 | } | |
6c223761 KB |
4306 | |
4307 | if (error_info) { | |
4308 | if (io_request->error_info) | |
583891c9 | 4309 | memcpy(error_info, io_request->error_info, sizeof(*error_info)); |
6c223761 KB |
4310 | else |
4311 | memset(error_info, 0, sizeof(*error_info)); | |
4312 | } else if (rc == 0 && io_request->error_info) { | |
583891c9 | 4313 | rc = pqi_process_raid_io_error_synchronous(io_request->error_info); |
6c223761 KB |
4314 | } |
4315 | ||
4316 | pqi_free_io_request(io_request); | |
4317 | ||
0530736e | 4318 | atomic_dec(&ctrl_info->sync_cmds_outstanding); |
7561a7e4 | 4319 | out: |
6c223761 KB |
4320 | up(&ctrl_info->sync_request_sem); |
4321 | ||
4322 | return rc; | |
4323 | } | |
4324 | ||
4325 | static int pqi_validate_admin_response( | |
4326 | struct pqi_general_admin_response *response, u8 expected_function_code) | |
4327 | { | |
4328 | if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) | |
4329 | return -EINVAL; | |
4330 | ||
4331 | if (get_unaligned_le16(&response->header.iu_length) != | |
4332 | PQI_GENERAL_ADMIN_IU_LENGTH) | |
4333 | return -EINVAL; | |
4334 | ||
4335 | if (response->function_code != expected_function_code) | |
4336 | return -EINVAL; | |
4337 | ||
4338 | if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) | |
4339 | return -EINVAL; | |
4340 | ||
4341 | return 0; | |
4342 | } | |
4343 | ||
4344 | static int pqi_submit_admin_request_synchronous( | |
4345 | struct pqi_ctrl_info *ctrl_info, | |
4346 | struct pqi_general_admin_request *request, | |
4347 | struct pqi_general_admin_response *response) | |
4348 | { | |
4349 | int rc; | |
4350 | ||
4351 | pqi_submit_admin_request(ctrl_info, request); | |
4352 | ||
4353 | rc = pqi_poll_for_admin_response(ctrl_info, response); | |
4354 | ||
4355 | if (rc == 0) | |
4356 | rc = pqi_validate_admin_response(response, | |
4357 | request->function_code); | |
4358 | ||
4359 | return rc; | |
4360 | } | |
4361 | ||
4362 | static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) | |
4363 | { | |
4364 | int rc; | |
4365 | struct pqi_general_admin_request request; | |
4366 | struct pqi_general_admin_response response; | |
4367 | struct pqi_device_capability *capability; | |
4368 | struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor; | |
4369 | ||
4370 | capability = kmalloc(sizeof(*capability), GFP_KERNEL); | |
4371 | if (!capability) | |
4372 | return -ENOMEM; | |
4373 | ||
4374 | memset(&request, 0, sizeof(request)); | |
4375 | ||
4376 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; | |
4377 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, | |
4378 | &request.header.iu_length); | |
4379 | request.function_code = | |
4380 | PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; | |
4381 | put_unaligned_le32(sizeof(*capability), | |
4382 | &request.data.report_device_capability.buffer_length); | |
4383 | ||
4384 | rc = pqi_map_single(ctrl_info->pci_dev, | |
4385 | &request.data.report_device_capability.sg_descriptor, | |
4386 | capability, sizeof(*capability), | |
6917a9cc | 4387 | DMA_FROM_DEVICE); |
6c223761 KB |
4388 | if (rc) |
4389 | goto out; | |
4390 | ||
583891c9 | 4391 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response); |
6c223761 KB |
4392 | |
4393 | pqi_pci_unmap(ctrl_info->pci_dev, | |
4394 | &request.data.report_device_capability.sg_descriptor, 1, | |
6917a9cc | 4395 | DMA_FROM_DEVICE); |
6c223761 KB |
4396 | |
4397 | if (rc) | |
4398 | goto out; | |
4399 | ||
4400 | if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { | |
4401 | rc = -EIO; | |
4402 | goto out; | |
4403 | } | |
4404 | ||
4405 | ctrl_info->max_inbound_queues = | |
4406 | get_unaligned_le16(&capability->max_inbound_queues); | |
4407 | ctrl_info->max_elements_per_iq = | |
4408 | get_unaligned_le16(&capability->max_elements_per_iq); | |
4409 | ctrl_info->max_iq_element_length = | |
4410 | get_unaligned_le16(&capability->max_iq_element_length) | |
4411 | * 16; | |
4412 | ctrl_info->max_outbound_queues = | |
4413 | get_unaligned_le16(&capability->max_outbound_queues); | |
4414 | ctrl_info->max_elements_per_oq = | |
4415 | get_unaligned_le16(&capability->max_elements_per_oq); | |
4416 | ctrl_info->max_oq_element_length = | |
4417 | get_unaligned_le16(&capability->max_oq_element_length) | |
4418 | * 16; | |
4419 | ||
4420 | sop_iu_layer_descriptor = | |
4421 | &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; | |
4422 | ||
4423 | ctrl_info->max_inbound_iu_length_per_firmware = | |
4424 | get_unaligned_le16( | |
4425 | &sop_iu_layer_descriptor->max_inbound_iu_length); | |
4426 | ctrl_info->inbound_spanning_supported = | |
4427 | sop_iu_layer_descriptor->inbound_spanning_supported; | |
4428 | ctrl_info->outbound_spanning_supported = | |
4429 | sop_iu_layer_descriptor->outbound_spanning_supported; | |
4430 | ||
4431 | out: | |
4432 | kfree(capability); | |
4433 | ||
4434 | return rc; | |
4435 | } | |
4436 | ||
4437 | static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) | |
4438 | { | |
4439 | if (ctrl_info->max_iq_element_length < | |
4440 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { | |
4441 | dev_err(&ctrl_info->pci_dev->dev, | |
4442 | "max. inbound queue element length of %d is less than the required length of %d\n", | |
4443 | ctrl_info->max_iq_element_length, | |
4444 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | |
4445 | return -EINVAL; | |
4446 | } | |
4447 | ||
4448 | if (ctrl_info->max_oq_element_length < | |
4449 | PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { | |
4450 | dev_err(&ctrl_info->pci_dev->dev, | |
4451 | "max. outbound queue element length of %d is less than the required length of %d\n", | |
4452 | ctrl_info->max_oq_element_length, | |
4453 | PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); | |
4454 | return -EINVAL; | |
4455 | } | |
4456 | ||
4457 | if (ctrl_info->max_inbound_iu_length_per_firmware < | |
4458 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { | |
4459 | dev_err(&ctrl_info->pci_dev->dev, | |
4460 | "max. inbound IU length of %u is less than the min. required length of %d\n", | |
4461 | ctrl_info->max_inbound_iu_length_per_firmware, | |
4462 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | |
4463 | return -EINVAL; | |
4464 | } | |
4465 | ||
77668f41 KB |
4466 | if (!ctrl_info->inbound_spanning_supported) { |
4467 | dev_err(&ctrl_info->pci_dev->dev, | |
4468 | "the controller does not support inbound spanning\n"); | |
4469 | return -EINVAL; | |
4470 | } | |
4471 | ||
4472 | if (ctrl_info->outbound_spanning_supported) { | |
4473 | dev_err(&ctrl_info->pci_dev->dev, | |
4474 | "the controller supports outbound spanning but this driver does not\n"); | |
4475 | return -EINVAL; | |
4476 | } | |
4477 | ||
6c223761 KB |
4478 | return 0; |
4479 | } | |
4480 | ||
6c223761 KB |
4481 | static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) |
4482 | { | |
4483 | int rc; | |
4484 | struct pqi_event_queue *event_queue; | |
4485 | struct pqi_general_admin_request request; | |
4486 | struct pqi_general_admin_response response; | |
4487 | ||
4488 | event_queue = &ctrl_info->event_queue; | |
4489 | ||
4490 | /* | |
4491 | * Create the OQ (Outbound Queue - device to host queue) dedicated | 
4492 | * to events. | 
4493 | */ | |
4494 | memset(&request, 0, sizeof(request)); | |
4495 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; | |
4496 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, | |
4497 | &request.header.iu_length); | |
4498 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; | |
4499 | put_unaligned_le16(event_queue->oq_id, | |
4500 | &request.data.create_operational_oq.queue_id); | |
4501 | put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, | |
4502 | &request.data.create_operational_oq.element_array_addr); | |
4503 | put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, | |
4504 | &request.data.create_operational_oq.pi_addr); | |
4505 | put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, | |
4506 | &request.data.create_operational_oq.num_elements); | |
4507 | put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, | |
4508 | &request.data.create_operational_oq.element_length); | |
4509 | request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; | |
4510 | put_unaligned_le16(event_queue->int_msg_num, | |
4511 | &request.data.create_operational_oq.int_msg_num); | |
4512 | ||
4513 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, | |
4514 | &response); | |
4515 | if (rc) | |
4516 | return rc; | |
4517 | ||
4518 | event_queue->oq_ci = ctrl_info->iomem_base + | |
4519 | PQI_DEVICE_REGISTERS_OFFSET + | |
4520 | get_unaligned_le64( | |
4521 | &response.data.create_operational_oq.oq_ci_offset); | |
4522 | ||
4523 | return 0; | |
4524 | } | |
4525 | ||
061ef06a KB |
4526 | static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, |
4527 | unsigned int group_number) | |
6c223761 | 4528 | { |
6c223761 KB |
4529 | int rc; |
4530 | struct pqi_queue_group *queue_group; | |
4531 | struct pqi_general_admin_request request; | |
4532 | struct pqi_general_admin_response response; | |
4533 | ||
061ef06a | 4534 | queue_group = &ctrl_info->queue_groups[group_number]; |
6c223761 KB |
4535 | |
4536 | /* | |
4537 | * Create IQ (Inbound Queue - host to device queue) for | |
4538 | * RAID path. | |
4539 | */ | |
4540 | memset(&request, 0, sizeof(request)); | |
4541 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; | |
4542 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, | |
4543 | &request.header.iu_length); | |
4544 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; | |
4545 | put_unaligned_le16(queue_group->iq_id[RAID_PATH], | |
4546 | &request.data.create_operational_iq.queue_id); | |
4547 | put_unaligned_le64( | |
4548 | (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], | |
4549 | &request.data.create_operational_iq.element_array_addr); | |
4550 | put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], | |
4551 | &request.data.create_operational_iq.ci_addr); | |
4552 | put_unaligned_le16(ctrl_info->num_elements_per_iq, | |
4553 | &request.data.create_operational_iq.num_elements); | |
4554 | put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, | |
4555 | &request.data.create_operational_iq.element_length); | |
4556 | request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; | |
4557 | ||
4558 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, | |
4559 | &response); | |
4560 | if (rc) { | |
4561 | dev_err(&ctrl_info->pci_dev->dev, | |
4562 | "error creating inbound RAID queue\n"); | |
4563 | return rc; | |
4564 | } | |
4565 | ||
4566 | queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + | |
4567 | PQI_DEVICE_REGISTERS_OFFSET + | |
4568 | get_unaligned_le64( | |
4569 | &response.data.create_operational_iq.iq_pi_offset); | |
4570 | ||
4571 | /* | |
4572 | * Create IQ (Inbound Queue - host to device queue) for | |
4573 | * Advanced I/O (AIO) path. | |
4574 | */ | |
4575 | memset(&request, 0, sizeof(request)); | |
4576 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; | |
4577 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, | |
4578 | &request.header.iu_length); | |
4579 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; | |
4580 | put_unaligned_le16(queue_group->iq_id[AIO_PATH], | |
4581 | &request.data.create_operational_iq.queue_id); | |
4582 | put_unaligned_le64( | 
4583 | (u64)queue_group->iq_element_array_bus_addr[AIO_PATH], | 
4584 | &request.data.create_operational_iq.element_array_addr); | |
4585 | put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], | |
4586 | &request.data.create_operational_iq.ci_addr); | |
4587 | put_unaligned_le16(ctrl_info->num_elements_per_iq, | |
4588 | &request.data.create_operational_iq.num_elements); | |
4589 | put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, | |
4590 | &request.data.create_operational_iq.element_length); | |
4591 | request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; | |
4592 | ||
4593 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, | |
4594 | &response); | |
4595 | if (rc) { | |
4596 | dev_err(&ctrl_info->pci_dev->dev, | |
4597 | "error creating inbound AIO queue\n"); | |
339faa81 | 4598 | return rc; |
6c223761 KB |
4599 | } |
4600 | ||
4601 | queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + | |
4602 | PQI_DEVICE_REGISTERS_OFFSET + | |
4603 | get_unaligned_le64( | |
4604 | &response.data.create_operational_iq.iq_pi_offset); | |
4605 | ||
4606 | /* | |
4607 | * Designate the 2nd IQ as the AIO path. By default, all IQs are | |
4608 | * assumed to be for RAID path I/O unless we change the queue's | |
4609 | * property. | |
4610 | */ | |
4611 | memset(&request, 0, sizeof(request)); | |
4612 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; | |
4613 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, | |
4614 | &request.header.iu_length); | |
4615 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; | |
4616 | put_unaligned_le16(queue_group->iq_id[AIO_PATH], | |
4617 | &request.data.change_operational_iq_properties.queue_id); | |
4618 | put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, | |
4619 | &request.data.change_operational_iq_properties.vendor_specific); | |
4620 | ||
4621 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, | |
4622 | &response); | |
4623 | if (rc) { | |
4624 | dev_err(&ctrl_info->pci_dev->dev, | |
4625 | "error changing queue property\n"); | |
339faa81 | 4626 | return rc; |
6c223761 KB |
4627 | } |
4628 | ||
4629 | /* | |
4630 | * Create OQ (Outbound Queue - device to host queue). | |
4631 | */ | |
4632 | memset(&request, 0, sizeof(request)); | |
4633 | request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; | |
4634 | put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, | |
4635 | &request.header.iu_length); | |
4636 | request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; | |
4637 | put_unaligned_le16(queue_group->oq_id, | |
4638 | &request.data.create_operational_oq.queue_id); | |
4639 | put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, | |
4640 | &request.data.create_operational_oq.element_array_addr); | |
4641 | put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, | |
4642 | &request.data.create_operational_oq.pi_addr); | |
4643 | put_unaligned_le16(ctrl_info->num_elements_per_oq, | |
4644 | &request.data.create_operational_oq.num_elements); | |
4645 | put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, | |
4646 | &request.data.create_operational_oq.element_length); | |
4647 | request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; | |
4648 | put_unaligned_le16(queue_group->int_msg_num, | |
4649 | &request.data.create_operational_oq.int_msg_num); | |
4650 | ||
4651 | rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, | |
4652 | &response); | |
4653 | if (rc) { | |
4654 | dev_err(&ctrl_info->pci_dev->dev, | |
4655 | "error creating outbound queue\n"); | |
339faa81 | 4656 | return rc; |
6c223761 KB |
4657 | } |
4658 | ||
4659 | queue_group->oq_ci = ctrl_info->iomem_base + | |
4660 | PQI_DEVICE_REGISTERS_OFFSET + | |
4661 | get_unaligned_le64( | |
4662 | &response.data.create_operational_oq.oq_ci_offset); | |
4663 | ||
6c223761 | 4664 | return 0; |
6c223761 KB |
4665 | } |
4666 | ||
4667 | static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) | |
4668 | { | |
4669 | int rc; | |
4670 | unsigned int i; | |
4671 | ||
4672 | rc = pqi_create_event_queue(ctrl_info); | |
4673 | if (rc) { | |
4674 | dev_err(&ctrl_info->pci_dev->dev, | |
4675 | "error creating event queue\n"); | |
4676 | return rc; | |
4677 | } | |
4678 | ||
4679 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
061ef06a | 4680 | rc = pqi_create_queue_group(ctrl_info, i); |
6c223761 KB |
4681 | if (rc) { |
4682 | dev_err(&ctrl_info->pci_dev->dev, | |
4683 | "error creating queue group number %u/%u\n", | |
4684 | i, ctrl_info->num_queue_groups); | |
4685 | return rc; | |
4686 | } | |
4687 | } | |
4688 | ||
4689 | return 0; | |
4690 | } | |
4691 | ||
4692 | #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ | |
4693 | (offsetof(struct pqi_event_config, descriptors) + \ | |
4694 | (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor))) | |
4695 | ||
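/*
 * Editorial note: PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH above uses the
 * usual flexible-array sizing pattern - the bytes up to the start of the
 * 'descriptors' array plus room for PQI_MAX_EVENT_DESCRIPTORS entries.
 * A minimal sketch of the same pattern, with a hypothetical layout:
 *
 *	struct example_config {
 *		u8 num_event_descriptors;
 *		struct pqi_event_descriptor descriptors[];
 *	};
 *	len = offsetof(struct example_config, descriptors) +
 *		n * sizeof(struct pqi_event_descriptor);
 *
 * pqi_configure_events() below then runs a two-phase exchange: it DMAs
 * the current event configuration from the controller, rewrites each
 * descriptor's oq_id (the event queue's OQ ID to enable delivery, 0 to
 * disable it), and DMAs the modified buffer back to the controller.
 */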
6a50d6ad KB |
4696 | static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, |
4697 | bool enable_events) | |
6c223761 KB |
4698 | { |
4699 | int rc; | |
4700 | unsigned int i; | |
4701 | struct pqi_event_config *event_config; | |
6a50d6ad | 4702 | struct pqi_event_descriptor *event_descriptor; |
6c223761 KB |
4703 | struct pqi_general_management_request request; |
4704 | ||
4705 | event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, | |
4706 | GFP_KERNEL); | |
4707 | if (!event_config) | |
4708 | return -ENOMEM; | |
4709 | ||
4710 | memset(&request, 0, sizeof(request)); | |
4711 | ||
4712 | request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; | |
4713 | put_unaligned_le16(offsetof(struct pqi_general_management_request, | |
4714 | data.report_event_configuration.sg_descriptors[1]) - | |
4715 | PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); | |
4716 | put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, | |
4717 | &request.data.report_event_configuration.buffer_length); | |
4718 | ||
4719 | rc = pqi_map_single(ctrl_info->pci_dev, | |
4720 | request.data.report_event_configuration.sg_descriptors, | |
4721 | event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, | |
6917a9cc | 4722 | DMA_FROM_DEVICE); |
6c223761 KB |
4723 | if (rc) |
4724 | goto out; | |
4725 | ||
4726 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, | |
4727 | 0, NULL, NO_TIMEOUT); | |
4728 | ||
4729 | pqi_pci_unmap(ctrl_info->pci_dev, | |
4730 | request.data.report_event_configuration.sg_descriptors, 1, | |
6917a9cc | 4731 | DMA_FROM_DEVICE); |
6c223761 KB |
4732 | |
4733 | if (rc) | |
4734 | goto out; | |
4735 | ||
6a50d6ad KB |
4736 | for (i = 0; i < event_config->num_event_descriptors; i++) { |
4737 | event_descriptor = &event_config->descriptors[i]; | |
4738 | if (enable_events && | |
4739 | pqi_is_supported_event(event_descriptor->event_type)) | |
583891c9 | 4740 | put_unaligned_le16(ctrl_info->event_queue.oq_id, |
6a50d6ad KB |
4741 | &event_descriptor->oq_id); |
4742 | else | |
4743 | put_unaligned_le16(0, &event_descriptor->oq_id); | |
4744 | } | |
6c223761 KB |
4745 | |
4746 | memset(&request, 0, sizeof(request)); | |
4747 | ||
4748 | request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; | |
4749 | put_unaligned_le16(offsetof(struct pqi_general_management_request, | |
4750 | data.report_event_configuration.sg_descriptors[1]) - | |
4751 | PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); | |
4752 | put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, | |
4753 | &request.data.report_event_configuration.buffer_length); | |
4754 | ||
4755 | rc = pqi_map_single(ctrl_info->pci_dev, | |
4756 | request.data.report_event_configuration.sg_descriptors, | |
4757 | event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, | |
6917a9cc | 4758 | DMA_TO_DEVICE); |
6c223761 KB |
4759 | if (rc) |
4760 | goto out; | |
4761 | ||
4762 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, | |
4763 | NULL, NO_TIMEOUT); | |
4764 | ||
4765 | pqi_pci_unmap(ctrl_info->pci_dev, | |
4766 | request.data.report_event_configuration.sg_descriptors, 1, | |
6917a9cc | 4767 | DMA_TO_DEVICE); |
6c223761 KB |
4768 | |
4769 | out: | |
4770 | kfree(event_config); | |
4771 | ||
4772 | return rc; | |
4773 | } | |
4774 | ||
6a50d6ad KB |
4775 | static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) |
4776 | { | |
4777 | return pqi_configure_events(ctrl_info, true); | |
4778 | } | |
4779 | ||
4780 | static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info) | |
4781 | { | |
4782 | return pqi_configure_events(ctrl_info, false); | |
4783 | } | |
4784 | ||
6c223761 KB |
4785 | static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) |
4786 | { | |
4787 | unsigned int i; | |
4788 | struct device *dev; | |
4789 | size_t sg_chain_buffer_length; | |
4790 | struct pqi_io_request *io_request; | |
4791 | ||
4792 | if (!ctrl_info->io_request_pool) | |
4793 | return; | |
4794 | ||
4795 | dev = &ctrl_info->pci_dev->dev; | |
4796 | sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; | |
4797 | io_request = ctrl_info->io_request_pool; | |
4798 | ||
4799 | for (i = 0; i < ctrl_info->max_io_slots; i++) { | |
4800 | kfree(io_request->iu); | |
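/*
 * A NULL chain buffer marks the first slot that pqi_alloc_io_resources()
 * never finished setting up; slots are filled in order, so nothing past
 * this point was allocated either.
 */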
4801 | if (!io_request->sg_chain_buffer) | |
4802 | break; | |
4803 | dma_free_coherent(dev, sg_chain_buffer_length, | |
4804 | io_request->sg_chain_buffer, | |
4805 | io_request->sg_chain_buffer_dma_handle); | |
4806 | io_request++; | |
4807 | } | |
4808 | ||
4809 | kfree(ctrl_info->io_request_pool); | |
4810 | ctrl_info->io_request_pool = NULL; | |
4811 | } | |
4812 | ||
4813 | static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) | |
4814 | { | |
694c5d5b KB |
4815 | ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, |
4816 | ctrl_info->error_buffer_length, | |
4817 | &ctrl_info->error_buffer_dma_handle, | |
4818 | GFP_KERNEL); | |
6c223761 KB |
4819 | if (!ctrl_info->error_buffer) |
4820 | return -ENOMEM; | |
4821 | ||
4822 | return 0; | |
4823 | } | |
4824 | ||
4825 | static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) | |
4826 | { | |
4827 | unsigned int i; | |
4828 | void *sg_chain_buffer; | |
4829 | size_t sg_chain_buffer_length; | |
4830 | dma_addr_t sg_chain_buffer_dma_handle; | |
4831 | struct device *dev; | |
4832 | struct pqi_io_request *io_request; | |
4833 | ||
583891c9 KB |
4834 | ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots, |
4835 | sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); | |
6c223761 KB |
4836 | |
4837 | if (!ctrl_info->io_request_pool) { | |
4838 | dev_err(&ctrl_info->pci_dev->dev, | |
4839 | "failed to allocate I/O request pool\n"); | |
4840 | goto error; | |
4841 | } | |
4842 | ||
4843 | dev = &ctrl_info->pci_dev->dev; | |
4844 | sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; | |
4845 | io_request = ctrl_info->io_request_pool; | |
4846 | ||
4847 | for (i = 0; i < ctrl_info->max_io_slots; i++) { | |
583891c9 | 4848 | io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); |
6c223761 KB |
4849 | |
4850 | if (!io_request->iu) { | |
4851 | dev_err(&ctrl_info->pci_dev->dev, | |
4852 | "failed to allocate IU buffers\n"); | |
4853 | goto error; | |
4854 | } | |
4855 | ||
4856 | sg_chain_buffer = dma_alloc_coherent(dev, | |
4857 | sg_chain_buffer_length, &sg_chain_buffer_dma_handle, | |
4858 | GFP_KERNEL); | |
4859 | ||
4860 | if (!sg_chain_buffer) { | |
4861 | dev_err(&ctrl_info->pci_dev->dev, | |
4862 | "failed to allocate PQI scatter-gather chain buffers\n"); | |
4863 | goto error; | |
4864 | } | |
4865 | ||
4866 | io_request->index = i; | |
4867 | io_request->sg_chain_buffer = sg_chain_buffer; | |
583891c9 | 4868 | io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle; |
6c223761 KB |
4869 | io_request++; |
4870 | } | |
4871 | ||
4872 | return 0; | |
4873 | ||
4874 | error: | |
4875 | pqi_free_all_io_requests(ctrl_info); | |
4876 | ||
4877 | return -ENOMEM; | |
4878 | } | |
4879 | ||
4880 | /* | |
4881 | * Calculate required resources that are sized based on max. outstanding | |
4882 | * requests and max. transfer size. | |
4883 | */ | |
4884 | ||
4885 | static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) | |
4886 | { | |
4887 | u32 max_transfer_size; | |
4888 | u32 max_sg_entries; | |
4889 | ||
4890 | ctrl_info->scsi_ml_can_queue = | |
4891 | ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; | |
4892 | ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; | |
4893 | ||
4894 | ctrl_info->error_buffer_length = | |
4895 | ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; | |
4896 | ||
d727a776 KB |
4897 | if (reset_devices) |
4898 | max_transfer_size = min(ctrl_info->max_transfer_size, | |
4899 | PQI_MAX_TRANSFER_SIZE_KDUMP); | |
4900 | else | |
4901 | max_transfer_size = min(ctrl_info->max_transfer_size, | |
4902 | PQI_MAX_TRANSFER_SIZE); | |
6c223761 KB |
4903 | |
4904 | max_sg_entries = max_transfer_size / PAGE_SIZE; | |
4905 | ||
4906 | /* +1 to cover when the buffer is not page-aligned. */ | |
4907 | max_sg_entries++; | |
4908 | ||
4909 | max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); | |
4910 | ||
4911 | max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; | |
4912 | ||
4913 | ctrl_info->sg_chain_buffer_length = | |
e1d213bd KB |
4914 | (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + |
4915 | PQI_EXTRA_SGL_MEMORY; | |
6c223761 KB |
4916 | ctrl_info->sg_tablesize = max_sg_entries; |
4917 | ctrl_info->max_sectors = max_transfer_size / 512; | |
4918 | } | |
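/*
 * Worked example for the sizing above (editorial; the numbers are
 * illustrative, not necessarily the driver's actual limits): with
 * PAGE_SIZE = 4096 and max_transfer_size clamped to 1 MiB,
 * max_sg_entries starts at 1048576 / 4096 = 256 and becomes 257 after
 * the +1 for a misaligned buffer. If the controller allows that many
 * entries, the transfer size is re-derived as (257 - 1) * 4096 = 1 MiB
 * and max_sectors = 1048576 / 512 = 2048.
 */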
4919 | ||
4920 | static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) | |
4921 | { | |
6c223761 KB |
4922 | int num_queue_groups; |
4923 | u16 num_elements_per_iq; | |
4924 | u16 num_elements_per_oq; | |
4925 | ||
d727a776 KB |
4926 | if (reset_devices) { |
4927 | num_queue_groups = 1; | |
4928 | } else { | |
4929 | int num_cpus; | |
4930 | int max_queue_groups; | |
4931 | ||
4932 | max_queue_groups = min(ctrl_info->max_inbound_queues / 2, | |
4933 | ctrl_info->max_outbound_queues - 1); | |
4934 | max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); | |
6c223761 | 4935 | |
d727a776 KB |
4936 | num_cpus = num_online_cpus(); |
4937 | num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); | |
4938 | num_queue_groups = min(num_queue_groups, max_queue_groups); | |
4939 | } | |
6c223761 KB |
4940 | |
4941 | ctrl_info->num_queue_groups = num_queue_groups; | |
061ef06a | 4942 | ctrl_info->max_hw_queue_index = num_queue_groups - 1; |
6c223761 | 4943 | |
77668f41 KB |
4944 | /* |
4945 | * Make sure that the max. inbound IU length is an even multiple | |
4946 | * of our inbound element length. | |
4947 | */ | |
4948 | ctrl_info->max_inbound_iu_length = | |
4949 | (ctrl_info->max_inbound_iu_length_per_firmware / | |
4950 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * | |
4951 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; | |
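/*
 * Worked example (editorial, assuming an element length of 128 bytes):
 * if the firmware reports a max. inbound IU length of 1064, integer
 * division rounds it down to (1064 / 128) * 128 = 1024, i.e. exactly
 * 8 whole elements.
 */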
6c223761 KB |
4952 | |
4953 | num_elements_per_iq = | |
4954 | (ctrl_info->max_inbound_iu_length / | |
4955 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | |
4956 | ||
4957 | /* Add one because one element in each circular queue is always unusable; it is what distinguishes a full queue from an empty one. */ | 
4958 | num_elements_per_iq++; | |
4959 | ||
4960 | num_elements_per_iq = min(num_elements_per_iq, | |
4961 | ctrl_info->max_elements_per_iq); | |
4962 | ||
4963 | num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; | |
4964 | num_elements_per_oq = min(num_elements_per_oq, | |
4965 | ctrl_info->max_elements_per_oq); | |
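/*
 * Editorial note: the ((num_elements_per_iq - 1) * 2) + 1 above appears
 * to size the outbound queue for the worst case in which both of the
 * queue group's inbound queues (RAID and AIO path) are full of usable
 * elements and every request completes, since the two IQs share a
 * single OQ; the -1 and +1 account for the one unusable element in
 * each circular queue.
 */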
4966 | ||
4967 | ctrl_info->num_elements_per_iq = num_elements_per_iq; | |
4968 | ctrl_info->num_elements_per_oq = num_elements_per_oq; | |
4969 | ||
4970 | ctrl_info->max_sg_per_iu = | |
4971 | ((ctrl_info->max_inbound_iu_length - | |
4972 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / | |
4973 | sizeof(struct pqi_sg_descriptor)) + | |
4974 | PQI_MAX_EMBEDDED_SG_DESCRIPTORS; | |
6702d2c4 DB |
4975 | |
4976 | ctrl_info->max_sg_per_r56_iu = | |
4977 | ((ctrl_info->max_inbound_iu_length - | |
4978 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / | |
4979 | sizeof(struct pqi_sg_descriptor)) + | |
4980 | PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS; | |
6c223761 KB |
4981 | } |
4982 | ||
583891c9 KB |
4983 | static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor, |
4984 | struct scatterlist *sg) | |
6c223761 KB |
4985 | { |
4986 | u64 address = (u64)sg_dma_address(sg); | |
4987 | unsigned int length = sg_dma_len(sg); | |
4988 | ||
4989 | put_unaligned_le64(address, &sg_descriptor->address); | |
4990 | put_unaligned_le32(length, &sg_descriptor->length); | |
4991 | put_unaligned_le32(0, &sg_descriptor->flags); | |
4992 | } | |
4993 | ||
1a22bc4b DB |
4994 | static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor, |
4995 | struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request, | |
4996 | int max_sg_per_iu, bool *chained) | |
4997 | { | |
4998 | int i; | |
4999 | unsigned int num_sg_in_iu; | |
5000 | ||
5001 | *chained = false; | |
5002 | i = 0; | |
5003 | num_sg_in_iu = 0; | |
5004 | max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */ | |
5005 | ||
5006 | while (1) { | |
5007 | pqi_set_sg_descriptor(sg_descriptor, sg); | |
5008 | if (!*chained) | |
5009 | num_sg_in_iu++; | |
5010 | i++; | |
5011 | if (i == sg_count) | |
5012 | break; | |
5013 | sg_descriptor++; | |
5014 | if (i == max_sg_per_iu) { | |
5015 | put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle, | |
5016 | &sg_descriptor->address); | |
5017 | put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor), | |
5018 | &sg_descriptor->length); | |
5019 | put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags); | |
5020 | *chained = true; | |
5021 | num_sg_in_iu++; | |
5022 | sg_descriptor = io_request->sg_chain_buffer; | |
5023 | } | |
5024 | sg = sg_next(sg); | |
5025 | } | |
5026 | ||
5027 | put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); | |
5028 | ||
5029 | return num_sg_in_iu; | |
5030 | } | |
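/*
 * Worked example for pqi_build_sg_list() above (editorial; the counts
 * are illustrative): with max_sg_per_iu = 4, one slot is reserved for
 * the chain marker, leaving 3 embedded data descriptors. For
 * sg_count = 10, entries 0-2 land in the IU, the 4th embedded slot
 * becomes a CISS_SG_CHAIN descriptor pointing at the 7 remaining
 * entries in io_request->sg_chain_buffer, CISS_SG_LAST is set on the
 * final chain entry, and the function returns num_sg_in_iu = 4
 * (3 data descriptors plus the chain marker).
 */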
5031 | ||
6c223761 KB |
5032 | static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, |
5033 | struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, | |
5034 | struct pqi_io_request *io_request) | |
5035 | { | |
6c223761 KB |
5036 | u16 iu_length; |
5037 | int sg_count; | |
5038 | bool chained; | |
5039 | unsigned int num_sg_in_iu; | |
6c223761 KB |
5040 | struct scatterlist *sg; |
5041 | struct pqi_sg_descriptor *sg_descriptor; | |
5042 | ||
5043 | sg_count = scsi_dma_map(scmd); | |
5044 | if (sg_count < 0) | |
5045 | return sg_count; | |
5046 | ||
5047 | iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - | |
5048 | PQI_REQUEST_HEADER_LENGTH; | |
5049 | ||
5050 | if (sg_count == 0) | |
5051 | goto out; | |
5052 | ||
5053 | sg = scsi_sglist(scmd); | |
5054 | sg_descriptor = request->sg_descriptors; | |
6c223761 | 5055 | |
1a22bc4b DB |
5056 | num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, |
5057 | ctrl_info->max_sg_per_iu, &chained); | |
6c223761 | 5058 | |
6c223761 KB |
5059 | request->partial = chained; |
5060 | iu_length += num_sg_in_iu * sizeof(*sg_descriptor); | |
5061 | ||
5062 | out: | |
5063 | put_unaligned_le16(iu_length, &request->header.iu_length); | |
5064 | ||
5065 | return 0; | |
5066 | } | |
5067 | ||
7a012c23 DB |
5068 | static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info, |
5069 | struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd, | |
5070 | struct pqi_io_request *io_request) | |
5071 | { | |
5072 | u16 iu_length; | |
5073 | int sg_count; | |
5074 | bool chained; | |
5075 | unsigned int num_sg_in_iu; | |
5076 | struct scatterlist *sg; | |
5077 | struct pqi_sg_descriptor *sg_descriptor; | |
5078 | ||
5079 | sg_count = scsi_dma_map(scmd); | |
5080 | if (sg_count < 0) | |
5081 | return sg_count; | |
5082 | ||
5083 | iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) - | |
5084 | PQI_REQUEST_HEADER_LENGTH; | |
5085 | num_sg_in_iu = 0; | |
5086 | ||
5087 | if (sg_count == 0) | |
5088 | goto out; | |
5089 | ||
5090 | sg = scsi_sglist(scmd); | |
5091 | sg_descriptor = request->sg_descriptors; | |
5092 | ||
5093 | num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, | |
5094 | ctrl_info->max_sg_per_iu, &chained); | |
5095 | ||
5096 | request->partial = chained; | |
5097 | iu_length += num_sg_in_iu * sizeof(*sg_descriptor); | |
5098 | ||
5099 | out: | |
5100 | put_unaligned_le16(iu_length, &request->header.iu_length); | |
5101 | request->num_sg_descriptors = num_sg_in_iu; | |
5102 | ||
5103 | return 0; | |
5104 | } | |
5105 | ||
6702d2c4 DB |
5106 | static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info, |
5107 | struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd, | |
5108 | struct pqi_io_request *io_request) | |
5109 | { | |
5110 | u16 iu_length; | |
5111 | int sg_count; | |
5112 | bool chained; | |
5113 | unsigned int num_sg_in_iu; | |
5114 | struct scatterlist *sg; | |
5115 | struct pqi_sg_descriptor *sg_descriptor; | |
5116 | ||
5117 | sg_count = scsi_dma_map(scmd); | |
5118 | if (sg_count < 0) | |
5119 | return sg_count; | |
5120 | ||
5121 | iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) - | |
5122 | PQI_REQUEST_HEADER_LENGTH; | |
5123 | num_sg_in_iu = 0; | |
5124 | ||
5125 | if (sg_count != 0) { | |
5126 | sg = scsi_sglist(scmd); | |
5127 | sg_descriptor = request->sg_descriptors; | |
5128 | ||
5129 | num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, | |
5130 | ctrl_info->max_sg_per_r56_iu, &chained); | |
5131 | ||
5132 | request->partial = chained; | |
5133 | iu_length += num_sg_in_iu * sizeof(*sg_descriptor); | |
5134 | } | |
5135 | ||
5136 | put_unaligned_le16(iu_length, &request->header.iu_length); | |
5137 | request->num_sg_descriptors = num_sg_in_iu; | |
5138 | ||
5139 | return 0; | |
5140 | } | |
5141 | ||
6c223761 KB |
5142 | static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, |
5143 | struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, | |
5144 | struct pqi_io_request *io_request) | |
5145 | { | |
6c223761 KB |
5146 | u16 iu_length; |
5147 | int sg_count; | |
a60eec02 KB |
5148 | bool chained; |
5149 | unsigned int num_sg_in_iu; | |
6c223761 KB |
5150 | struct scatterlist *sg; |
5151 | struct pqi_sg_descriptor *sg_descriptor; | |
5152 | ||
5153 | sg_count = scsi_dma_map(scmd); | |
5154 | if (sg_count < 0) | |
5155 | return sg_count; | |
a60eec02 KB |
5156 | |
5157 | iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - | |
5158 | PQI_REQUEST_HEADER_LENGTH; | |
5159 | num_sg_in_iu = 0; | |
5160 | ||
6c223761 KB |
5161 | if (sg_count == 0) |
5162 | goto out; | |
5163 | ||
a60eec02 KB |
5164 | sg = scsi_sglist(scmd); |
5165 | sg_descriptor = request->sg_descriptors; | |
a60eec02 | 5166 | |
1a22bc4b DB |
5167 | num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, |
5168 | ctrl_info->max_sg_per_iu, &chained); | |
6c223761 | 5169 | |
a60eec02 | 5170 | request->partial = chained; |
6c223761 | 5171 | iu_length += num_sg_in_iu * sizeof(*sg_descriptor); |
a60eec02 KB |
5172 | |
5173 | out: | |
6c223761 KB |
5174 | put_unaligned_le16(iu_length, &request->header.iu_length); |
5175 | request->num_sg_descriptors = num_sg_in_iu; | |
5176 | ||
5177 | return 0; | |
5178 | } | |
5179 | ||
5180 | static void pqi_raid_io_complete(struct pqi_io_request *io_request, | |
5181 | void *context) | |
5182 | { | |
5183 | struct scsi_cmnd *scmd; | |
5184 | ||
5185 | scmd = io_request->scmd; | |
5186 | pqi_free_io_request(io_request); | |
5187 | scsi_dma_unmap(scmd); | |
5188 | pqi_scsi_done(scmd); | |
5189 | } | |
5190 | ||
376fb880 KB |
5191 | static int pqi_raid_submit_scsi_cmd_with_io_request( |
5192 | struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, | |
6c223761 KB |
5193 | struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, |
5194 | struct pqi_queue_group *queue_group) | |
5195 | { | |
5196 | int rc; | |
5197 | size_t cdb_length; | |
6c223761 KB |
5198 | struct pqi_raid_path_request *request; |
5199 | ||
6c223761 KB |
5200 | io_request->io_complete_callback = pqi_raid_io_complete; |
5201 | io_request->scmd = scmd; | |
5202 | ||
6c223761 | 5203 | request = io_request->iu; |
583891c9 | 5204 | memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors)); |
6c223761 KB |
5205 | |
5206 | request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; | |
5207 | put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); | |
5208 | request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; | |
5209 | put_unaligned_le16(io_request->index, &request->request_id); | |
5210 | request->error_index = request->request_id; | |
583891c9 | 5211 | memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); |
6c223761 KB |
5212 | |
5213 | cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); | |
5214 | memcpy(request->cdb, scmd->cmnd, cdb_length); | |
5215 | ||
5216 | switch (cdb_length) { | |
5217 | case 6: | |
5218 | case 10: | |
5219 | case 12: | |
5220 | case 16: | |
583891c9 | 5221 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; |
6c223761 KB |
5222 | break; |
5223 | case 20: | |
583891c9 | 5224 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4; |
6c223761 KB |
5225 | break; |
5226 | case 24: | |
583891c9 | 5227 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8; |
6c223761 KB |
5228 | break; |
5229 | case 28: | |
583891c9 | 5230 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12; |
6c223761 KB |
5231 | break; |
5232 | case 32: | |
5233 | default: | |
583891c9 | 5234 | request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16; |
6c223761 KB |
5235 | break; |
5236 | } | |
5237 | ||
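/*
 * Editorial note: the SOP direction flags are named from the
 * controller's point of view, so a host-to-device transfer
 * (DMA_TO_DEVICE) is a controller *read* of host memory and a
 * device-to-host transfer (DMA_FROM_DEVICE) is a controller write.
 * The same mapping appears in pqi_aio_submit_io() below and is why the
 * RAID 1/5/6 write paths set SOP_READ_FLAG.
 */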
5238 | switch (scmd->sc_data_direction) { | |
5239 | case DMA_TO_DEVICE: | |
5240 | request->data_direction = SOP_READ_FLAG; | |
5241 | break; | |
5242 | case DMA_FROM_DEVICE: | |
5243 | request->data_direction = SOP_WRITE_FLAG; | |
5244 | break; | |
5245 | case DMA_NONE: | |
5246 | request->data_direction = SOP_NO_DIRECTION_FLAG; | |
5247 | break; | |
5248 | case DMA_BIDIRECTIONAL: | |
5249 | request->data_direction = SOP_BIDIRECTIONAL; | |
5250 | break; | |
5251 | default: | |
5252 | dev_err(&ctrl_info->pci_dev->dev, | |
5253 | "unknown data direction: %d\n", | |
5254 | scmd->sc_data_direction); | |
6c223761 KB |
5255 | break; |
5256 | } | |
5257 | ||
5258 | rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); | |
5259 | if (rc) { | |
5260 | pqi_free_io_request(io_request); | |
5261 | return SCSI_MLQUEUE_HOST_BUSY; | |
5262 | } | |
5263 | ||
5264 | pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); | |
5265 | ||
5266 | return 0; | |
5267 | } | |
5268 | ||
376fb880 KB |
5269 | static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, |
5270 | struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, | |
5271 | struct pqi_queue_group *queue_group) | |
5272 | { | |
5273 | struct pqi_io_request *io_request; | |
5274 | ||
5275 | io_request = pqi_alloc_io_request(ctrl_info); | |
5276 | ||
5277 | return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, | |
5278 | device, scmd, queue_group); | |
5279 | } | |
5280 | ||
5281 | static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info) | |
5282 | { | |
5283 | if (!pqi_ctrl_blocked(ctrl_info)) | |
5284 | schedule_work(&ctrl_info->raid_bypass_retry_work); | |
5285 | } | |
5286 | ||
5287 | static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) | |
5288 | { | |
5289 | struct scsi_cmnd *scmd; | |
03b288cf | 5290 | struct pqi_scsi_dev *device; |
376fb880 KB |
5291 | struct pqi_ctrl_info *ctrl_info; |
5292 | ||
5293 | if (!io_request->raid_bypass) | |
5294 | return false; | |
5295 | ||
5296 | scmd = io_request->scmd; | |
5297 | if ((scmd->result & 0xff) == SAM_STAT_GOOD) | |
5298 | return false; | |
5299 | if (host_byte(scmd->result) == DID_NO_CONNECT) | |
5300 | return false; | |
5301 | ||
03b288cf KB |
5302 | device = scmd->device->hostdata; |
5303 | if (pqi_device_offline(device)) | |
5304 | return false; | |
5305 | ||
376fb880 KB |
5306 | ctrl_info = shost_to_hba(scmd->device->host); |
5307 | if (pqi_ctrl_offline(ctrl_info)) | |
5308 | return false; | |
5309 | ||
5310 | return true; | |
5311 | } | |
5312 | ||
5313 | static inline void pqi_add_to_raid_bypass_retry_list( | |
5314 | struct pqi_ctrl_info *ctrl_info, | |
5315 | struct pqi_io_request *io_request, bool at_head) | |
5316 | { | |
5317 | unsigned long flags; | |
5318 | ||
5319 | spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); | |
5320 | if (at_head) | |
5321 | list_add(&io_request->request_list_entry, | |
5322 | &ctrl_info->raid_bypass_retry_list); | |
5323 | else | |
5324 | list_add_tail(&io_request->request_list_entry, | |
5325 | &ctrl_info->raid_bypass_retry_list); | |
5326 | spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); | |
5327 | } | |
5328 | ||
5329 | static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request, | |
5330 | void *context) | |
5331 | { | |
5332 | struct scsi_cmnd *scmd; | |
5333 | ||
5334 | scmd = io_request->scmd; | |
5335 | pqi_free_io_request(io_request); | |
5336 | pqi_scsi_done(scmd); | |
5337 | } | |
5338 | ||
5339 | static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request) | |
5340 | { | |
5341 | struct scsi_cmnd *scmd; | |
5342 | struct pqi_ctrl_info *ctrl_info; | |
5343 | ||
5344 | io_request->io_complete_callback = pqi_queued_raid_bypass_complete; | |
5345 | scmd = io_request->scmd; | |
5346 | scmd->result = 0; | |
5347 | ctrl_info = shost_to_hba(scmd->device->host); | |
5348 | ||
5349 | pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false); | |
5350 | pqi_schedule_bypass_retry(ctrl_info); | |
5351 | } | |
5352 | ||
5353 | static int pqi_retry_raid_bypass(struct pqi_io_request *io_request) | |
5354 | { | |
5355 | struct scsi_cmnd *scmd; | |
5356 | struct pqi_scsi_dev *device; | |
5357 | struct pqi_ctrl_info *ctrl_info; | |
5358 | struct pqi_queue_group *queue_group; | |
5359 | ||
5360 | scmd = io_request->scmd; | |
5361 | device = scmd->device->hostdata; | |
5362 | if (pqi_device_in_reset(device)) { | |
5363 | pqi_free_io_request(io_request); | |
5364 | set_host_byte(scmd, DID_RESET); | |
5365 | pqi_scsi_done(scmd); | |
5366 | return 0; | |
5367 | } | |
5368 | ||
5369 | ctrl_info = shost_to_hba(scmd->device->host); | |
5370 | queue_group = io_request->queue_group; | |
5371 | ||
5372 | pqi_reinit_io_request(io_request); | |
5373 | ||
5374 | return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, | |
5375 | device, scmd, queue_group); | |
5376 | } | |
5377 | ||
5378 | static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request( | |
5379 | struct pqi_ctrl_info *ctrl_info) | |
5380 | { | |
5381 | unsigned long flags; | |
5382 | struct pqi_io_request *io_request; | |
5383 | ||
5384 | spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); | |
5385 | io_request = list_first_entry_or_null( | |
5386 | &ctrl_info->raid_bypass_retry_list, | |
5387 | struct pqi_io_request, request_list_entry); | |
5388 | if (io_request) | |
5389 | list_del(&io_request->request_list_entry); | |
5390 | spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); | |
5391 | ||
5392 | return io_request; | |
5393 | } | |
5394 | ||
5395 | static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info) | |
5396 | { | |
5397 | int rc; | |
5398 | struct pqi_io_request *io_request; | |
5399 | ||
5400 | pqi_ctrl_busy(ctrl_info); | |
5401 | ||
5402 | while (1) { | |
5403 | if (pqi_ctrl_blocked(ctrl_info)) | |
5404 | break; | |
5405 | io_request = pqi_next_queued_raid_bypass_request(ctrl_info); | |
5406 | if (!io_request) | |
5407 | break; | |
5408 | rc = pqi_retry_raid_bypass(io_request); | |
5409 | if (rc) { | |
5410 | pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, | |
5411 | true); | |
5412 | pqi_schedule_bypass_retry(ctrl_info); | |
5413 | break; | |
5414 | } | |
5415 | } | |
5416 | ||
5417 | pqi_ctrl_unbusy(ctrl_info); | |
5418 | } | |
5419 | ||
5420 | static void pqi_raid_bypass_retry_worker(struct work_struct *work) | |
5421 | { | |
5422 | struct pqi_ctrl_info *ctrl_info; | |
5423 | ||
5424 | ctrl_info = container_of(work, struct pqi_ctrl_info, | |
5425 | raid_bypass_retry_work); | |
5426 | pqi_retry_raid_bypass_requests(ctrl_info); | |
5427 | } | |
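/*
 * Editorial summary of the bypass retry machinery: when an AIO
 * RAID-bypass request completes with a retryable error (see
 * pqi_raid_bypass_retry_needed() above), pqi_aio_io_complete() below
 * parks it on raid_bypass_retry_list via pqi_queue_raid_bypass_retry()
 * instead of completing the SCSI command; this worker then re-drives
 * each parked request down the RAID path through
 * pqi_raid_submit_scsi_cmd_with_io_request(), re-queuing a request at
 * the head of the list and rescheduling itself if a resubmit fails,
 * and pausing whenever the controller is blocked.
 */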
5428 | ||
5f310425 KB |
5429 | static void pqi_clear_all_queued_raid_bypass_retries( |
5430 | struct pqi_ctrl_info *ctrl_info) | |
376fb880 KB |
5431 | { |
5432 | unsigned long flags; | |
376fb880 KB |
5433 | |
5434 | spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); | |
5f310425 | 5435 | INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); |
376fb880 KB |
5436 | spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); |
5437 | } | |
5438 | ||
6c223761 KB |
5439 | static void pqi_aio_io_complete(struct pqi_io_request *io_request, |
5440 | void *context) | |
5441 | { | |
5442 | struct scsi_cmnd *scmd; | |
5443 | ||
5444 | scmd = io_request->scmd; | |
5445 | scsi_dma_unmap(scmd); | |
5446 | if (io_request->status == -EAGAIN) | |
5447 | set_host_byte(scmd, DID_IMM_RETRY); | |
376fb880 KB |
5448 | else if (pqi_raid_bypass_retry_needed(io_request)) { |
5449 | pqi_queue_raid_bypass_retry(io_request); | |
5450 | return; | |
5451 | } | |
6c223761 KB |
5452 | pqi_free_io_request(io_request); |
5453 | pqi_scsi_done(scmd); | |
5454 | } | |
5455 | ||
5456 | static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, | |
5457 | struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, | |
5458 | struct pqi_queue_group *queue_group) | |
5459 | { | |
5460 | return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, | |
376fb880 | 5461 | scmd->cmnd, scmd->cmd_len, queue_group, NULL, false); |
6c223761 KB |
5462 | } |
5463 | ||
5464 | static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, | |
5465 | struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, | |
5466 | unsigned int cdb_length, struct pqi_queue_group *queue_group, | |
376fb880 | 5467 | struct pqi_encryption_info *encryption_info, bool raid_bypass) |
6c223761 KB |
5468 | { |
5469 | int rc; | |
5470 | struct pqi_io_request *io_request; | |
5471 | struct pqi_aio_path_request *request; | |
5472 | ||
5473 | io_request = pqi_alloc_io_request(ctrl_info); | |
5474 | io_request->io_complete_callback = pqi_aio_io_complete; | |
5475 | io_request->scmd = scmd; | |
376fb880 | 5476 | io_request->raid_bypass = raid_bypass; |
6c223761 KB |
5477 | |
5478 | request = io_request->iu; | |
583891c9 | 5479 | memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors)); |
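/*
 * Editorial note: the memset above reuses the RAID-path offsetof; this
 * appears to assume that sg_descriptors sits at the same offset in
 * struct pqi_aio_path_request as in struct pqi_raid_path_request, so
 * the same number of leading bytes is cleared.
 */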
6c223761 KB |
5480 | |
5481 | request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; | |
5482 | put_unaligned_le32(aio_handle, &request->nexus_id); | |
5483 | put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); | |
5484 | request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; | |
5485 | put_unaligned_le16(io_request->index, &request->request_id); | |
5486 | request->error_index = request->request_id; | |
5487 | if (cdb_length > sizeof(request->cdb)) | |
5488 | cdb_length = sizeof(request->cdb); | |
5489 | request->cdb_length = cdb_length; | |
5490 | memcpy(request->cdb, cdb, cdb_length); | |
5491 | ||
5492 | switch (scmd->sc_data_direction) { | |
5493 | case DMA_TO_DEVICE: | |
5494 | request->data_direction = SOP_READ_FLAG; | |
5495 | break; | |
5496 | case DMA_FROM_DEVICE: | |
5497 | request->data_direction = SOP_WRITE_FLAG; | |
5498 | break; | |
5499 | case DMA_NONE: | |
5500 | request->data_direction = SOP_NO_DIRECTION_FLAG; | |
5501 | break; | |
5502 | case DMA_BIDIRECTIONAL: | |
5503 | request->data_direction = SOP_BIDIRECTIONAL; | |
5504 | break; | |
5505 | default: | |
5506 | dev_err(&ctrl_info->pci_dev->dev, | |
5507 | "unknown data direction: %d\n", | |
5508 | scmd->sc_data_direction); | |
6c223761 KB |
5509 | break; |
5510 | } | |
5511 | ||
5512 | if (encryption_info) { | |
5513 | request->encryption_enable = true; | |
5514 | put_unaligned_le16(encryption_info->data_encryption_key_index, | |
5515 | &request->data_encryption_key_index); | |
5516 | put_unaligned_le32(encryption_info->encrypt_tweak_lower, | |
5517 | &request->encrypt_tweak_lower); | |
5518 | put_unaligned_le32(encryption_info->encrypt_tweak_upper, | |
5519 | &request->encrypt_tweak_upper); | |
5520 | } | |
5521 | ||
5522 | rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); | |
5523 | if (rc) { | |
5524 | pqi_free_io_request(io_request); | |
5525 | return SCSI_MLQUEUE_HOST_BUSY; | |
5526 | } | |
5527 | ||
5528 | pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); | |
5529 | ||
5530 | return 0; | |
5531 | } | |
5532 | ||
7a012c23 DB |
5533 | static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, |
5534 | struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, | |
5535 | struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, | |
5536 | struct pqi_scsi_dev_raid_map_data *rmd) | |
7a012c23 DB |
5537 | { |
5538 | int rc; | |
5539 | struct pqi_io_request *io_request; | |
5540 | struct pqi_aio_r1_path_request *r1_request; | |
5541 | ||
5542 | io_request = pqi_alloc_io_request(ctrl_info); | |
5543 | io_request->io_complete_callback = pqi_aio_io_complete; | |
5544 | io_request->scmd = scmd; | |
5545 | io_request->raid_bypass = true; | |
5546 | ||
5547 | r1_request = io_request->iu; | |
5548 | memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors)); | |
5549 | ||
5550 | r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO; | |
7a012c23 DB |
5551 | put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id); |
5552 | r1_request->num_drives = rmd->num_it_nexus_entries; | |
5553 | put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1); | |
5554 | put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2); | |
5555 | if (rmd->num_it_nexus_entries == 3) | |
5556 | put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3); | |
5557 | ||
5558 | put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length); | |
5559 | r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; | |
5560 | put_unaligned_le16(io_request->index, &r1_request->request_id); | |
5561 | r1_request->error_index = r1_request->request_id; | |
5562 | if (rmd->cdb_length > sizeof(r1_request->cdb)) | |
5563 | rmd->cdb_length = sizeof(r1_request->cdb); | |
5564 | r1_request->cdb_length = rmd->cdb_length; | |
5565 | memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length); | |
5566 | ||
5567 | /* The direction is always write; SOP_READ_FLAG means the controller reads the data from host memory. */ | 
5568 | r1_request->data_direction = SOP_READ_FLAG; | |
5569 | ||
5570 | if (encryption_info) { | |
5571 | r1_request->encryption_enable = true; | |
5572 | put_unaligned_le16(encryption_info->data_encryption_key_index, | |
5573 | &r1_request->data_encryption_key_index); | |
5574 | put_unaligned_le32(encryption_info->encrypt_tweak_lower, | |
5575 | &r1_request->encrypt_tweak_lower); | |
5576 | put_unaligned_le32(encryption_info->encrypt_tweak_upper, | |
5577 | &r1_request->encrypt_tweak_upper); | |
5578 | } | |
5579 | ||
5580 | rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request); | |
5581 | if (rc) { | |
5582 | pqi_free_io_request(io_request); | |
5583 | return SCSI_MLQUEUE_HOST_BUSY; | |
5584 | } | |
5585 | ||
5586 | pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); | |
5587 | ||
5588 | return 0; | |
5589 | } | |
5590 | ||
6702d2c4 DB |
5591 | static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, |
5592 | struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, | |
5593 | struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, | |
5594 | struct pqi_scsi_dev_raid_map_data *rmd) | |
5595 | { | |
5596 | int rc; | |
5597 | struct pqi_io_request *io_request; | |
5598 | struct pqi_aio_r56_path_request *r56_request; | |
5599 | ||
5600 | io_request = pqi_alloc_io_request(ctrl_info); | |
5601 | io_request->io_complete_callback = pqi_aio_io_complete; | |
5602 | io_request->scmd = scmd; | |
5603 | io_request->raid_bypass = true; | |
5604 | ||
5605 | r56_request = io_request->iu; | |
5606 | memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors)); | |
5607 | ||
5608 | if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51) | |
5609 | r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO; | |
5610 | else | |
5611 | r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO; | |
5612 | ||
5613 | put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id); | |
5614 | put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus); | |
5615 | put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus); | |
5616 | if (rmd->raid_level == SA_RAID_6) { | |
5617 | put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus); | |
5618 | r56_request->xor_multiplier = rmd->xor_mult; | |
5619 | } | |
5620 | put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length); | |
5621 | r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; | |
5622 | put_unaligned_le64(rmd->row, &r56_request->row); | |
5623 | ||
5624 | put_unaligned_le16(io_request->index, &r56_request->request_id); | |
5625 | r56_request->error_index = r56_request->request_id; | |
5626 | ||
5627 | if (rmd->cdb_length > sizeof(r56_request->cdb)) | |
5628 | rmd->cdb_length = sizeof(r56_request->cdb); | |
5629 | r56_request->cdb_length = rmd->cdb_length; | |
5630 | memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length); | |
5631 | ||
5632 | /* The direction is always write; SOP_READ_FLAG means the controller reads the data from host memory. */ | 
5633 | r56_request->data_direction = SOP_READ_FLAG; | |
5634 | ||
5635 | if (encryption_info) { | |
5636 | r56_request->encryption_enable = true; | |
5637 | put_unaligned_le16(encryption_info->data_encryption_key_index, | |
5638 | &r56_request->data_encryption_key_index); | |
5639 | put_unaligned_le32(encryption_info->encrypt_tweak_lower, | |
5640 | &r56_request->encrypt_tweak_lower); | |
5641 | put_unaligned_le32(encryption_info->encrypt_tweak_upper, | |
5642 | &r56_request->encrypt_tweak_upper); | |
5643 | } | |
5644 | ||
5645 | rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request); | |
5646 | if (rc) { | |
5647 | pqi_free_io_request(io_request); | |
5648 | return SCSI_MLQUEUE_HOST_BUSY; | |
5649 | } | |
5650 | ||
5651 | pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); | |
5652 | ||
5653 | return 0; | |
5654 | } | |
5655 | ||
061ef06a KB |
5656 | static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, |
5657 | struct scsi_cmnd *scmd) | |
5658 | { | |
5659 | u16 hw_queue; | |
5660 | ||
5661 | hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); | |
5662 | if (hw_queue > ctrl_info->max_hw_queue_index) | |
5663 | hw_queue = 0; | |
5664 | ||
5665 | return hw_queue; | |
5666 | } | |
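/*
 * Editorial note: blk_mq_unique_tag() packs the hardware queue index
 * into the upper 16 bits of the tag and blk_mq_unique_tag_to_hwq()
 * extracts it. The result normally maps 1:1 onto a queue group, since
 * num_queue_groups is derived from num_online_cpus() in
 * pqi_calculate_queue_resources(); anything out of range falls back to
 * queue group 0.
 */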
5667 | ||
7561a7e4 KB |
5668 | /* |
5669 | * This function gets called just before we hand the completed SCSI request | |
5670 | * back to the SML (the SCSI midlayer). | 
5671 | */ | |
5672 | ||
5673 | void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) | |
5674 | { | |
5675 | struct pqi_scsi_dev *device; | |
5676 | ||
1e46731e MR |
5677 | if (!scmd->device) { |
5678 | set_host_byte(scmd, DID_NO_CONNECT); | |
5679 | return; | |
5680 | } | |
5681 | ||
7561a7e4 | 5682 | device = scmd->device->hostdata; |
1e46731e MR |
5683 | if (!device) { |
5684 | set_host_byte(scmd, DID_NO_CONNECT); | |
5685 | return; | |
5686 | } | |
5687 | ||
7561a7e4 KB |
5688 | atomic_dec(&device->scsi_cmds_outstanding); |
5689 | } | |
5690 | ||
c7ffedb3 | 5691 | static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, |
7d81d2b8 | 5692 | struct scsi_cmnd *scmd) |
c7ffedb3 DB |
5693 | { |
5694 | u32 oldest_jiffies; | |
5695 | u8 lru_index; | |
5696 | int i; | |
5697 | int rc; | |
5698 | struct pqi_scsi_dev *device; | |
5699 | struct pqi_stream_data *pqi_stream_data; | |
5700 | struct pqi_scsi_dev_raid_map_data rmd; | |
5701 | ||
5702 | if (!ctrl_info->enable_stream_detection) | |
5703 | return false; | |
5704 | ||
5705 | rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); | |
5706 | if (rc) | |
5707 | return false; | |
5708 | ||
5709 | /* Check writes only. */ | |
5710 | if (!rmd.is_write) | |
5711 | return false; | |
5712 | ||
5713 | device = scmd->device->hostdata; | |
5714 | ||
5715 | /* Check for RAID 5/6 streams. */ | |
5716 | if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6) | |
5717 | return false; | |
5718 | ||
5719 | /* | |
5720 | * If the controller does not support AIO RAID{5,6} writes, the | 
5721 | * request must be sent down the non-AIO path. | 
5722 | */ | |
5723 | if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) || | |
5724 | (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes)) | |
5725 | return true; | |
5726 | ||
5727 | lru_index = 0; | |
5728 | oldest_jiffies = INT_MAX; | |
5729 | for (i = 0; i < NUM_STREAMS_PER_LUN; i++) { | |
5730 | pqi_stream_data = &device->stream_data[i]; | |
5731 | /* | |
5732 | * Check whether this request is adjacent to, or falls within, | 
5733 | * the window opened by the previous request. | 
5734 | */ | |
5735 | if ((pqi_stream_data->next_lba && | |
5736 | rmd.first_block >= pqi_stream_data->next_lba) && | |
5737 | rmd.first_block <= pqi_stream_data->next_lba + | |
5738 | rmd.block_cnt) { | |
5739 | pqi_stream_data->next_lba = rmd.first_block + | |
5740 | rmd.block_cnt; | |
5741 | pqi_stream_data->last_accessed = jiffies; | |
5742 | return true; | |
5743 | } | |
5744 | ||
5745 | /* Unused entry. */ | 
5746 | if (pqi_stream_data->last_accessed == 0) { | |
5747 | lru_index = i; | |
5748 | break; | |
5749 | } | |
5750 | ||
5751 | /* Find entry with oldest last accessed time. */ | |
5752 | if (pqi_stream_data->last_accessed <= oldest_jiffies) { | |
5753 | oldest_jiffies = pqi_stream_data->last_accessed; | |
5754 | lru_index = i; | |
5755 | } | |
5756 | } | |
5757 | ||
5758 | /* Set LRU entry. */ | |
5759 | pqi_stream_data = &device->stream_data[lru_index]; | |
5760 | pqi_stream_data->last_accessed = jiffies; | |
5761 | pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; | |
5762 | ||
5763 | return false; | |
5764 | } | |
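/*
 * Worked example for the stream detection above (editorial): consider
 * two back-to-back 256-block writes to LBA 0 and LBA 256 on a RAID 5
 * volume with R5 AIO writes enabled. The first write matches no stream
 * slot and records next_lba = 256 in the LRU entry; the second starts
 * exactly at next_lba, is classified as part of a sequential stream,
 * and the function returns true, so the request takes the RAID path,
 * where the controller firmware can coalesce full-stripe writes.
 */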
5765 | ||
5766 | static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) | |
6c223761 KB |
5767 | { |
5768 | int rc; | |
5769 | struct pqi_ctrl_info *ctrl_info; | |
5770 | struct pqi_scsi_dev *device; | |
061ef06a | 5771 | u16 hw_queue; |
6c223761 KB |
5772 | struct pqi_queue_group *queue_group; |
5773 | bool raid_bypassed; | |
5774 | ||
5775 | device = scmd->device->hostdata; | |
6c223761 | 5776 | |
1e46731e MR |
5777 | if (!device) { |
5778 | set_host_byte(scmd, DID_NO_CONNECT); | |
5779 | pqi_scsi_done(scmd); | |
5780 | return 0; | |
5781 | } | |
5782 | ||
7561a7e4 KB |
5783 | atomic_inc(&device->scsi_cmds_outstanding); |
5784 | ||
583891c9 KB |
5785 | ctrl_info = shost_to_hba(shost); |
5786 | ||
1bdf6e93 | 5787 | if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) { |
6c223761 KB |
5788 | set_host_byte(scmd, DID_NO_CONNECT); |
5789 | pqi_scsi_done(scmd); | |
5790 | return 0; | |
5791 | } | |
5792 | ||
7561a7e4 | 5793 | pqi_ctrl_busy(ctrl_info); |
4fd22c13 | 5794 | if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) || |
0530736e | 5795 | pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) { |
7561a7e4 KB |
5796 | rc = SCSI_MLQUEUE_HOST_BUSY; |
5797 | goto out; | |
5798 | } | |
5799 | ||
7d81d2b8 KB |
5800 | /* |
5801 | * This is necessary because the SCSI midlayer (SML) doesn't zero out |
5802 | * this field during error recovery. |
5803 | */ | |
5804 | scmd->result = 0; | |
5805 | ||
061ef06a KB |
5806 | hw_queue = pqi_get_hw_queue(ctrl_info, scmd); |
5807 | queue_group = &ctrl_info->queue_groups[hw_queue]; | |
6c223761 KB |
5808 | |
5809 | if (pqi_is_logical_device(device)) { | |
5810 | raid_bypassed = false; | |
588a63fe | 5811 | if (device->raid_bypass_enabled && |
694c5d5b | 5812 | !blk_rq_is_passthrough(scmd->request)) { |
c7ffedb3 DB |
5813 | if (!pqi_is_parity_write_stream(ctrl_info, scmd)) { |
5814 | rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); | |
5815 | if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { | |
5816 | raid_bypassed = true; | |
5817 | atomic_inc(&device->raid_bypass_cnt); | |
5818 | } | |
8b664fef | 5819 | } |
6c223761 KB |
5820 | } |
5821 | if (!raid_bypassed) | |
8b664fef | 5822 | rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); |
6c223761 KB |
5823 | } else { |
5824 | if (device->aio_enabled) | |
8b664fef | 5825 | rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); |
6c223761 | 5826 | else |
8b664fef | 5827 | rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); |
6c223761 KB |
5828 | } |
5829 | ||
7561a7e4 KB |
5830 | out: |
5831 | pqi_ctrl_unbusy(ctrl_info); | |
5832 | if (rc) | |
5833 | atomic_dec(&device->scsi_cmds_outstanding); | |
5834 | ||
6c223761 KB |
5835 | return rc; |
5836 | } | |
5837 | ||
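One detail of `pqi_scsi_queue_command()` that is easy to miss: `scsi_cmds_outstanding` is incremented before any of the early-out checks, so every failure return has to undo it at the `out:` label. A small userspace sketch of that pattern, using C11 atomics and hypothetical names (`cmds_outstanding`, `HOST_BUSY`) in place of the driver's:

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int cmds_outstanding;	/* stand-in for device->scsi_cmds_outstanding */

#define HOST_BUSY 1	/* stand-in for SCSI_MLQUEUE_HOST_BUSY */

/* Count the command before any early exit; every error return must
 * undo the increment, since a failed command never reaches the
 * completion path that normally does the decrement. */
static int queue_command(int make_busy)
{
	int rc = 0;

	atomic_fetch_add(&cmds_outstanding, 1);

	if (make_busy) {
		rc = HOST_BUSY;
		goto out;
	}

	/* ...submit the command; the completion handler decrements... */

out:
	if (rc)
		atomic_fetch_sub(&cmds_outstanding, 1);
	return rc;
}

int main(void)
{
	queue_command(1);	/* rejected as busy */
	printf("outstanding = %d\n", atomic_load(&cmds_outstanding));	/* 0 */
	return 0;
}
```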
7561a7e4 KB |
5838 | static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, |
5839 | struct pqi_queue_group *queue_group) | |
5840 | { | |
5841 | unsigned int path; | |
5842 | unsigned long flags; | |
5843 | bool list_is_empty; | |
5844 | ||
5845 | for (path = 0; path < 2; path++) { | |
5846 | while (1) { | |
5847 | spin_lock_irqsave( | |
5848 | &queue_group->submit_lock[path], flags); | |
5849 | list_is_empty = | |
5850 | list_empty(&queue_group->request_list[path]); | |
5851 | spin_unlock_irqrestore( | |
5852 | &queue_group->submit_lock[path], flags); | |
5853 | if (list_is_empty) | |
5854 | break; | |
5855 | pqi_check_ctrl_health(ctrl_info); | |
5856 | if (pqi_ctrl_offline(ctrl_info)) | |
5857 | return -ENXIO; | |
5858 | usleep_range(1000, 2000); | |
5859 | } | |
5860 | } | |
5861 | ||
5862 | return 0; | |
5863 | } | |
5864 | ||
5865 | static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) | |
5866 | { | |
5867 | int rc; | |
5868 | unsigned int i; | |
5869 | unsigned int path; | |
5870 | struct pqi_queue_group *queue_group; | |
5871 | pqi_index_t iq_pi; | |
5872 | pqi_index_t iq_ci; | |
5873 | ||
5874 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
5875 | queue_group = &ctrl_info->queue_groups[i]; | |
5876 | ||
5877 | rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); | |
5878 | if (rc) | |
5879 | return rc; | |
5880 | ||
5881 | for (path = 0; path < 2; path++) { | |
5882 | iq_pi = queue_group->iq_pi_copy[path]; | |
5883 | ||
5884 | while (1) { | |
dac12fbc | 5885 | iq_ci = readl(queue_group->iq_ci[path]); |
7561a7e4 KB |
5886 | if (iq_ci == iq_pi) |
5887 | break; | |
5888 | pqi_check_ctrl_health(ctrl_info); | |
5889 | if (pqi_ctrl_offline(ctrl_info)) | |
5890 | return -ENXIO; | |
5891 | usleep_range(1000, 2000); | |
5892 | } | |
5893 | } | |
5894 | } | |
5895 | ||
5896 | return 0; | |
5897 | } | |
5898 | ||
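`pqi_wait_until_inbound_queues_empty()` treats a queue as drained when the consumer index read back from the controller (`iq_ci`) catches up with the driver's cached producer index (`iq_pi_copy`). A toy illustration of that emptiness test, with a plain struct standing in for the hardware registers:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t pi;	/* producer index: the driver's cached iq_pi_copy */
	uint32_t ci;	/* consumer index: normally readl() from the device */
};

/* The queue is drained once the device's consumer index has caught
 * up with everything the driver produced. */
static bool ring_drained(const struct ring *r)
{
	return r->ci == r->pi;
}

int main(void)
{
	struct ring r = { .pi = 5, .ci = 3 };

	printf("%d\n", ring_drained(&r));	/* 0: two entries in flight */
	r.ci = 5;
	printf("%d\n", ring_drained(&r));	/* 1: drained */
	return 0;
}
```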
5899 | static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, | |
5900 | struct pqi_scsi_dev *device) | |
5901 | { | |
5902 | unsigned int i; | |
5903 | unsigned int path; | |
5904 | struct pqi_queue_group *queue_group; | |
5905 | unsigned long flags; | |
5906 | struct pqi_io_request *io_request; | |
5907 | struct pqi_io_request *next; | |
5908 | struct scsi_cmnd *scmd; | |
5909 | struct pqi_scsi_dev *scsi_device; | |
5910 | ||
5911 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
5912 | queue_group = &ctrl_info->queue_groups[i]; | |
5913 | ||
5914 | for (path = 0; path < 2; path++) { | |
5915 | spin_lock_irqsave( | |
5916 | &queue_group->submit_lock[path], flags); | |
5917 | ||
5918 | list_for_each_entry_safe(io_request, next, | |
5919 | &queue_group->request_list[path], | |
5920 | request_list_entry) { | |
583891c9 | 5921 | |
7561a7e4 KB |
5922 | scmd = io_request->scmd; |
5923 | if (!scmd) | |
5924 | continue; | |
5925 | ||
5926 | scsi_device = scmd->device->hostdata; | |
5927 | if (scsi_device != device) | |
5928 | continue; | |
5929 | ||
5930 | list_del(&io_request->request_list_entry); | |
5931 | set_host_byte(scmd, DID_RESET); | |
b622a601 MB |
5932 | pqi_free_io_request(io_request); |
5933 | scsi_dma_unmap(scmd); | |
7561a7e4 KB |
5934 | pqi_scsi_done(scmd); |
5935 | } | |
5936 | ||
5937 | spin_unlock_irqrestore( | |
5938 | &queue_group->submit_lock[path], flags); | |
5939 | } | |
5940 | } | |
5941 | } | |
5942 | ||
4fd22c13 MR |
5943 | static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info) |
5944 | { | |
5945 | unsigned int i; | |
5946 | unsigned int path; | |
5947 | struct pqi_queue_group *queue_group; | |
5948 | unsigned long flags; | |
5949 | struct pqi_io_request *io_request; | |
5950 | struct pqi_io_request *next; | |
5951 | struct scsi_cmnd *scmd; | |
5952 | ||
5953 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
5954 | queue_group = &ctrl_info->queue_groups[i]; | |
5955 | ||
5956 | for (path = 0; path < 2; path++) { | |
5957 | spin_lock_irqsave(&queue_group->submit_lock[path], | |
5958 | flags); | |
5959 | ||
5960 | list_for_each_entry_safe(io_request, next, | |
5961 | &queue_group->request_list[path], | |
5962 | request_list_entry) { | |
5963 | ||
5964 | scmd = io_request->scmd; | |
5965 | if (!scmd) | |
5966 | continue; | |
5967 | ||
5968 | list_del(&io_request->request_list_entry); | |
5969 | set_host_byte(scmd, DID_RESET); | |
b622a601 MB |
5970 | pqi_free_io_request(io_request); |
5971 | scsi_dma_unmap(scmd); | |
4fd22c13 MR |
5972 | pqi_scsi_done(scmd); |
5973 | } | |
5974 | ||
5975 | spin_unlock_irqrestore( | |
5976 | &queue_group->submit_lock[path], flags); | |
5977 | } | |
5978 | } | |
5979 | } | |
5980 | ||
061ef06a | 5981 | static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, |
1e46731e | 5982 | struct pqi_scsi_dev *device, unsigned long timeout_secs) |
061ef06a | 5983 | { |
1e46731e MR |
5984 | unsigned long timeout; |
5985 | ||
4fd22c13 | 5986 | timeout = (timeout_secs * PQI_HZ) + jiffies; |
1e46731e | 5987 | |
061ef06a KB |
5988 | while (atomic_read(&device->scsi_cmds_outstanding)) { |
5989 | pqi_check_ctrl_health(ctrl_info); | |
5990 | if (pqi_ctrl_offline(ctrl_info)) | |
5991 | return -ENXIO; | |
1e46731e MR |
5992 | if (timeout_secs != NO_TIMEOUT) { |
5993 | if (time_after(jiffies, timeout)) { | |
5994 | dev_err(&ctrl_info->pci_dev->dev, | |
5995 | "timed out waiting for pending IO\n"); | |
5996 | return -ETIMEDOUT; | |
5997 | } | |
5998 | } | |
061ef06a KB |
5999 | usleep_range(1000, 2000); |
6000 | } | |
6001 | ||
6002 | return 0; | |
6003 | } | |
6004 | ||
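Both wait helpers here use the same jiffies idiom: compute the deadline once (`timeout = (timeout_secs * PQI_HZ) + jiffies`) and test it with `time_after()` on each poll. A userspace analogue of the pattern, sketched with `CLOCK_MONOTONIC` rather than jiffies; the callback and names are illustrative only:

```c
#include <errno.h>
#include <stdio.h>
#include <time.h>

/* Compute a monotonic deadline once, then poll a condition until it
 * holds or the deadline passes -- the userspace shape of
 * "timeout = (timeout_secs * PQI_HZ) + jiffies" plus time_after(). */
static int wait_for_condition(int (*cond)(void *), void *arg,
			      unsigned int timeout_secs)
{
	struct timespec now, deadline;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += timeout_secs;

	while (!cond(arg)) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > deadline.tv_sec ||
		    (now.tv_sec == deadline.tv_sec &&
		     now.tv_nsec >= deadline.tv_nsec))
			return -ETIMEDOUT;
		nanosleep(&(struct timespec){ .tv_nsec = 1000 * 1000 }, NULL);
	}

	return 0;
}

/* Pretend outstanding I/O that drains a little on each poll. */
static int io_drained(void *arg)
{
	return --*(int *)arg <= 0;
}

int main(void)
{
	int pending = 5;

	printf("%d\n", wait_for_condition(io_drained, &pending, 2));	/* 0 */
	return 0;
}
```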
4fd22c13 MR |
6005 | static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, |
6006 | unsigned long timeout_secs) | |
061ef06a KB |
6007 | { |
6008 | bool io_pending; | |
6009 | unsigned long flags; | |
4fd22c13 | 6010 | unsigned long timeout; |
061ef06a KB |
6011 | struct pqi_scsi_dev *device; |
6012 | ||
4fd22c13 | 6013 | timeout = (timeout_secs * PQI_HZ) + jiffies; |
061ef06a KB |
6014 | while (1) { |
6015 | io_pending = false; | |
6016 | ||
6017 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
6018 | list_for_each_entry(device, &ctrl_info->scsi_device_list, | |
6019 | scsi_device_list_entry) { | |
6020 | if (atomic_read(&device->scsi_cmds_outstanding)) { | |
6021 | io_pending = true; | |
6022 | break; | |
6023 | } | |
6024 | } | |
6025 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, | |
6026 | flags); | |
6027 | ||
6028 | if (!io_pending) | |
6029 | break; | |
6030 | ||
6031 | pqi_check_ctrl_health(ctrl_info); | |
6032 | if (pqi_ctrl_offline(ctrl_info)) | |
6033 | return -ENXIO; | |
6034 | ||
4fd22c13 MR |
6035 | if (timeout_secs != NO_TIMEOUT) { |
6036 | if (time_after(jiffies, timeout)) { | |
6037 | dev_err(&ctrl_info->pci_dev->dev, | |
6038 | "timed out waiting for pending IO\n"); | |
6039 | return -ETIMEDOUT; | |
6040 | } | |
6041 | } | |
061ef06a KB |
6042 | usleep_range(1000, 2000); |
6043 | } | |
6044 | ||
6045 | return 0; | |
6046 | } | |
6047 | ||
0530736e KB |
6048 | static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info) |
6049 | { | |
6050 | while (atomic_read(&ctrl_info->sync_cmds_outstanding)) { | |
6051 | pqi_check_ctrl_health(ctrl_info); | |
6052 | if (pqi_ctrl_offline(ctrl_info)) | |
6053 | return -ENXIO; | |
6054 | usleep_range(1000, 2000); | |
6055 | } | |
6056 | ||
6057 | return 0; | |
6058 | } | |
6059 | ||
14bb215d KB |
6060 | static void pqi_lun_reset_complete(struct pqi_io_request *io_request, |
6061 | void *context) | |
6c223761 | 6062 | { |
14bb215d | 6063 | struct completion *waiting = context; |
6c223761 | 6064 | |
14bb215d KB |
6065 | complete(waiting); |
6066 | } | |
6c223761 | 6067 | |
bb9af08c | 6068 | #define PQI_LUN_RESET_TIMEOUT_SECS 30 |
c2922f17 | 6069 | #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10 |
14bb215d KB |
6070 | |
6071 | static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, | |
6072 | struct pqi_scsi_dev *device, struct completion *wait) | |
6073 | { | |
6074 | int rc; | |
14bb215d KB |
6075 | |
6076 | while (1) { | |
6077 | if (wait_for_completion_io_timeout(wait, | |
c2922f17 | 6078 | PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) { |
14bb215d KB |
6079 | rc = 0; |
6080 | break; | |
6c223761 KB |
6081 | } |
6082 | ||
14bb215d KB |
6083 | pqi_check_ctrl_health(ctrl_info); |
6084 | if (pqi_ctrl_offline(ctrl_info)) { | |
4e8415e3 | 6085 | rc = -ENXIO; |
14bb215d KB |
6086 | break; |
6087 | } | |
6c223761 | 6088 | } |
6c223761 | 6089 | |
14bb215d | 6090 | return rc; |
6c223761 KB |
6091 | } |
6092 | ||
14bb215d | 6093 | static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, |
6c223761 KB |
6094 | struct pqi_scsi_dev *device) |
6095 | { | |
6096 | int rc; | |
6097 | struct pqi_io_request *io_request; | |
6098 | DECLARE_COMPLETION_ONSTACK(wait); | |
6099 | struct pqi_task_management_request *request; | |
6100 | ||
6c223761 | 6101 | io_request = pqi_alloc_io_request(ctrl_info); |
14bb215d | 6102 | io_request->io_complete_callback = pqi_lun_reset_complete; |
6c223761 KB |
6103 | io_request->context = &wait; |
6104 | ||
6105 | request = io_request->iu; | |
6106 | memset(request, 0, sizeof(*request)); | |
6107 | ||
6108 | request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; | |
6109 | put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, | |
6110 | &request->header.iu_length); | |
6111 | put_unaligned_le16(io_request->index, &request->request_id); | |
6112 | memcpy(request->lun_number, device->scsi3addr, | |
6113 | sizeof(request->lun_number)); | |
6114 | request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; | |
c2922f17 MB |
6115 | if (ctrl_info->tmf_iu_timeout_supported) |
6116 | put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS, | |
6117 | &request->timeout); | |
6c223761 | 6118 | |
583891c9 | 6119 | pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, |
6c223761 KB |
6120 | io_request); |
6121 | ||
14bb215d KB |
6122 | rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); |
6123 | if (rc == 0) | |
6c223761 | 6124 | rc = io_request->status; |
6c223761 KB |
6125 | |
6126 | pqi_free_io_request(io_request); | |
6c223761 KB |
6127 | |
6128 | return rc; | |
6129 | } | |
6130 | ||
429fab70 KB |
6131 | /* Performs a reset at the LUN level. */ |
6132 | ||
3406384b MR |
6133 | #define PQI_LUN_RESET_RETRIES 3 |
6134 | #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000 | |
429fab70 | 6135 | #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120 |
6c223761 | 6136 | |
4fd22c13 | 6137 | static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info, |
6c223761 KB |
6138 | struct pqi_scsi_dev *device) |
6139 | { | |
6140 | int rc; | |
3406384b | 6141 | unsigned int retries; |
4fd22c13 | 6142 | unsigned long timeout_secs; |
6c223761 | 6143 | |
3406384b MR |
6144 | for (retries = 0;;) { |
6145 | rc = pqi_lun_reset(ctrl_info, device); | |
c2922f17 | 6146 | if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES) |
3406384b MR |
6147 | break; |
6148 | msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); | |
6149 | } | |
429fab70 KB |
6150 | |
6151 | timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT; | |
4fd22c13 MR |
6152 | |
6153 | rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs); | |
6c223761 | 6154 | |
14bb215d | 6155 | return rc == 0 ? SUCCESS : FAILED; |
6c223761 KB |
6156 | } |
6157 | ||
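The retry loop in `_pqi_device_reset()` is a compact budget-based idiom: it stops on the first success, or once `++retries` exceeds `PQI_LUN_RESET_RETRIES`. The same shape in a standalone userspace sketch; the flaky callback and the shortened 10 ms interval are illustrative (the driver sleeps 10 s between attempts):

```c
#include <stdio.h>
#include <unistd.h>

#define RESET_RETRIES 3
#define RETRY_INTERVAL_MSECS 10	/* driver waits 10000 ms between attempts */

static int attempts;

static int flaky_reset(void)
{
	return ++attempts < 3 ? -1 : 0;	/* fails twice, then succeeds */
}

/* Same shape as the driver's loop: stop on success, or once the
 * retry budget is exhausted ("rc == 0 || ++retries > RESET_RETRIES"). */
static int reset_with_retries(int (*do_reset)(void))
{
	int rc;
	unsigned int retries;

	for (retries = 0;;) {
		rc = do_reset();
		if (rc == 0 || ++retries > RESET_RETRIES)
			break;
		usleep(RETRY_INTERVAL_MSECS * 1000);
	}

	return rc;
}

int main(void)
{
	printf("rc=%d after %d attempts\n",
	       reset_with_retries(flaky_reset), attempts);
	return 0;
}
```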
4fd22c13 MR |
6158 | static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, |
6159 | struct pqi_scsi_dev *device) | |
6160 | { | |
6161 | int rc; | |
6162 | ||
6163 | mutex_lock(&ctrl_info->lun_reset_mutex); | |
6164 | ||
6165 | pqi_ctrl_block_requests(ctrl_info); | |
6166 | pqi_ctrl_wait_until_quiesced(ctrl_info); | |
6167 | pqi_fail_io_queued_for_device(ctrl_info, device); | |
6168 | rc = pqi_wait_until_inbound_queues_empty(ctrl_info); | |
6169 | pqi_device_reset_start(device); | |
6170 | pqi_ctrl_unblock_requests(ctrl_info); | |
6171 | ||
6172 | if (rc) | |
6173 | rc = FAILED; | |
6174 | else | |
6175 | rc = _pqi_device_reset(ctrl_info, device); | |
6176 | ||
6177 | pqi_device_reset_done(device); | |
6178 | ||
6179 | mutex_unlock(&ctrl_info->lun_reset_mutex); | |
429fab70 | 6180 | |
4fd22c13 MR |
6181 | return rc; |
6182 | } | |
6183 | ||
6c223761 KB |
6184 | static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) |
6185 | { | |
6186 | int rc; | |
7561a7e4 | 6187 | struct Scsi_Host *shost; |
6c223761 KB |
6188 | struct pqi_ctrl_info *ctrl_info; |
6189 | struct pqi_scsi_dev *device; | |
6190 | ||
7561a7e4 KB |
6191 | shost = scmd->device->host; |
6192 | ctrl_info = shost_to_hba(shost); | |
6c223761 KB |
6193 | device = scmd->device->hostdata; |
6194 | ||
6195 | dev_err(&ctrl_info->pci_dev->dev, | |
6196 | "resetting scsi %d:%d:%d:%d\n", | |
7561a7e4 | 6197 | shost->host_no, device->bus, device->target, device->lun); |
6c223761 | 6198 | |
7561a7e4 | 6199 | pqi_check_ctrl_health(ctrl_info); |
0530736e KB |
6200 | if (pqi_ctrl_offline(ctrl_info) || |
6201 | pqi_device_reset_blocked(ctrl_info)) { | |
7561a7e4 KB |
6202 | rc = FAILED; |
6203 | goto out; | |
6204 | } | |
6c223761 | 6205 | |
4fd22c13 | 6206 | pqi_wait_until_ofa_finished(ctrl_info); |
7561a7e4 | 6207 | |
0530736e | 6208 | atomic_inc(&ctrl_info->sync_cmds_outstanding); |
4fd22c13 | 6209 | rc = pqi_device_reset(ctrl_info, device); |
0530736e | 6210 | atomic_dec(&ctrl_info->sync_cmds_outstanding); |
429fab70 | 6211 | |
7561a7e4 | 6212 | out: |
6c223761 KB |
6213 | dev_err(&ctrl_info->pci_dev->dev, |
6214 | "reset of scsi %d:%d:%d:%d: %s\n", | |
7561a7e4 | 6215 | shost->host_no, device->bus, device->target, device->lun, |
6c223761 KB |
6216 | rc == SUCCESS ? "SUCCESS" : "FAILED"); |
6217 | ||
6218 | return rc; | |
6219 | } | |
6220 | ||
6221 | static int pqi_slave_alloc(struct scsi_device *sdev) | |
6222 | { | |
6223 | struct pqi_scsi_dev *device; | |
6224 | unsigned long flags; | |
6225 | struct pqi_ctrl_info *ctrl_info; | |
6226 | struct scsi_target *starget; | |
6227 | struct sas_rphy *rphy; | |
6228 | ||
6229 | ctrl_info = shost_to_hba(sdev->host); | |
6230 | ||
6231 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
6232 | ||
6233 | if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { | |
6234 | starget = scsi_target(sdev); | |
6235 | rphy = target_to_rphy(starget); | |
6236 | device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); | |
6237 | if (device) { | |
6238 | device->target = sdev_id(sdev); | |
6239 | device->lun = sdev->lun; | |
6240 | device->target_lun_valid = true; | |
6241 | } | |
6242 | } else { | |
6243 | device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), | |
6244 | sdev_id(sdev), sdev->lun); | |
6245 | } | |
6246 | ||
94086f5b | 6247 | if (device) { |
6c223761 KB |
6248 | sdev->hostdata = device; |
6249 | device->sdev = sdev; | |
6250 | if (device->queue_depth) { | |
6251 | device->advertised_queue_depth = device->queue_depth; | |
6252 | scsi_change_queue_depth(sdev, | |
6253 | device->advertised_queue_depth); | |
6254 | } | |
99a12b48 | 6255 | if (pqi_is_logical_device(device)) { |
b6e2ef67 | 6256 | pqi_disable_write_same(sdev); |
99a12b48 | 6257 | } else { |
2b447f81 | 6258 | sdev->allow_restart = 1; |
99a12b48 KB |
6259 | if (device->device_type == SA_DEVICE_TYPE_NVME) |
6260 | pqi_disable_write_same(sdev); | |
6261 | } | |
6c223761 KB |
6262 | } |
6263 | ||
6264 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
6265 | ||
6266 | return 0; | |
6267 | } | |
6268 | ||
52198226 CH |
6269 | static int pqi_map_queues(struct Scsi_Host *shost) |
6270 | { | |
6271 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); | |
6272 | ||
79d3fa9e | 6273 | return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], |
ed76e329 | 6274 | ctrl_info->pci_dev, 0); |
52198226 CH |
6275 | } |
6276 | ||
ce143793 KB |
6277 | static int pqi_slave_configure(struct scsi_device *sdev) |
6278 | { | |
6279 | struct pqi_scsi_dev *device; | |
6280 | ||
6281 | device = sdev->hostdata; | |
6282 | device->devtype = sdev->type; | |
6283 | ||
6284 | return 0; | |
6285 | } | |
6286 | ||
4d15ad38 KB |
6287 | static void pqi_slave_destroy(struct scsi_device *sdev) |
6288 | { | |
6289 | unsigned long flags; | |
6290 | struct pqi_scsi_dev *device; | |
6291 | struct pqi_ctrl_info *ctrl_info; | |
6292 | ||
6293 | ctrl_info = shost_to_hba(sdev->host); | |
6294 | ||
6295 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
6296 | ||
6297 | device = sdev->hostdata; | |
6298 | if (device) { | |
6299 | sdev->hostdata = NULL; | |
6300 | if (!list_empty(&device->scsi_device_list_entry)) | |
6301 | list_del(&device->scsi_device_list_entry); | |
6302 | } | |
6303 | ||
6304 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
6305 | ||
6306 | if (device) { | |
6307 | pqi_dev_info(ctrl_info, "removed", device); | |
6308 | pqi_free_device(device); | |
6309 | } | |
6310 | } | |
6311 | ||
8b664fef | 6312 | static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) |
6c223761 KB |
6313 | { |
6314 | struct pci_dev *pci_dev; | |
6315 | u32 subsystem_vendor; | |
6316 | u32 subsystem_device; | |
6317 | cciss_pci_info_struct pciinfo; | |
6318 | ||
6319 | if (!arg) | |
6320 | return -EINVAL; | |
6321 | ||
6322 | pci_dev = ctrl_info->pci_dev; | |
6323 | ||
6324 | pciinfo.domain = pci_domain_nr(pci_dev->bus); | |
6325 | pciinfo.bus = pci_dev->bus->number; | |
6326 | pciinfo.dev_fn = pci_dev->devfn; | |
6327 | subsystem_vendor = pci_dev->subsystem_vendor; | |
6328 | subsystem_device = pci_dev->subsystem_device; | |
8b664fef | 6329 | pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor; |
6c223761 KB |
6330 | |
6331 | if (copy_to_user(arg, &pciinfo, sizeof(pciinfo))) | |
6332 | return -EFAULT; | |
6333 | ||
6334 | return 0; | |
6335 | } | |
6336 | ||
6337 | static int pqi_getdrivver_ioctl(void __user *arg) | |
6338 | { | |
6339 | u32 version; | |
6340 | ||
6341 | if (!arg) | |
6342 | return -EINVAL; | |
6343 | ||
6344 | version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | | |
6345 | (DRIVER_RELEASE << 16) | DRIVER_REVISION; | |
6346 | ||
6347 | if (copy_to_user(arg, &version, sizeof(version))) | |
6348 | return -EFAULT; | |
6349 | ||
6350 | return 0; | |
6351 | } | |
6352 | ||
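The packed version word built in `pqi_getdrivver_ioctl()` places the major number in bits 31:28, the minor in 27:24, the release in 23:16, and the revision in 15:0. A quick pack/unpack round trip; the version numbers passed in are example values:

```c
#include <stdint.h>
#include <stdio.h>

/* Field layout from the expression above: major 31:28, minor 27:24,
 * release 23:16, revision 15:0. */
static uint32_t pack_version(uint32_t major, uint32_t minor,
			     uint32_t release, uint32_t revision)
{
	return (major << 28) | (minor << 24) | (release << 16) | revision;
}

int main(void)
{
	uint32_t v = pack_version(1, 2, 16, 12);	/* example values */

	printf("%u.%u.%u-%u\n",
	       v >> 28, (v >> 24) & 0xf, (v >> 16) & 0xff, v & 0xffff);
	return 0;
}
```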
6353 | struct ciss_error_info { | |
6354 | u8 scsi_status; | |
6355 | int command_status; | |
6356 | size_t sense_data_length; | |
6357 | }; | |
6358 | ||
6359 | static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, | |
6360 | struct ciss_error_info *ciss_error_info) | |
6361 | { | |
6362 | int ciss_cmd_status; | |
6363 | size_t sense_data_length; | |
6364 | ||
6365 | switch (pqi_error_info->data_out_result) { | |
6366 | case PQI_DATA_IN_OUT_GOOD: | |
6367 | ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; | |
6368 | break; | |
6369 | case PQI_DATA_IN_OUT_UNDERFLOW: | |
6370 | ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; | |
6371 | break; | |
6372 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: | |
6373 | ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN; | |
6374 | break; | |
6375 | case PQI_DATA_IN_OUT_PROTOCOL_ERROR: | |
6376 | case PQI_DATA_IN_OUT_BUFFER_ERROR: | |
6377 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: | |
6378 | case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: | |
6379 | case PQI_DATA_IN_OUT_ERROR: | |
6380 | ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; | |
6381 | break; | |
6382 | case PQI_DATA_IN_OUT_HARDWARE_ERROR: | |
6383 | case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: | |
6384 | case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: | |
6385 | case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: | |
6386 | case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: | |
6387 | case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: | |
6388 | case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: | |
6389 | case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: | |
6390 | case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: | |
6391 | case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: | |
6392 | ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; | |
6393 | break; | |
6394 | case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: | |
6395 | ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; | |
6396 | break; | |
6397 | case PQI_DATA_IN_OUT_ABORTED: | |
6398 | ciss_cmd_status = CISS_CMD_STATUS_ABORTED; | |
6399 | break; | |
6400 | case PQI_DATA_IN_OUT_TIMEOUT: | |
6401 | ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; | |
6402 | break; | |
6403 | default: | |
6404 | ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; | |
6405 | break; | |
6406 | } | |
6407 | ||
6408 | sense_data_length = | |
6409 | get_unaligned_le16(&pqi_error_info->sense_data_length); | |
6410 | if (sense_data_length == 0) | |
6411 | sense_data_length = | |
6412 | get_unaligned_le16(&pqi_error_info->response_data_length); | |
6413 | if (sense_data_length) | |
6414 | if (sense_data_length > sizeof(pqi_error_info->data)) | |
6415 | sense_data_length = sizeof(pqi_error_info->data); | |
6416 | ||
6417 | ciss_error_info->scsi_status = pqi_error_info->status; | |
6418 | ciss_error_info->command_status = ciss_cmd_status; | |
6419 | ciss_error_info->sense_data_length = sense_data_length; | |
6420 | } | |
6421 | ||
6422 | static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) | |
6423 | { | |
6424 | int rc; | |
6425 | char *kernel_buffer = NULL; | |
6426 | u16 iu_length; | |
6427 | size_t sense_data_length; | |
6428 | IOCTL_Command_struct iocommand; | |
6429 | struct pqi_raid_path_request request; | |
6430 | struct pqi_raid_error_info pqi_error_info; | |
6431 | struct ciss_error_info ciss_error_info; | |
6432 | ||
6433 | if (pqi_ctrl_offline(ctrl_info)) | |
6434 | return -ENXIO; | |
6435 | if (!arg) | |
6436 | return -EINVAL; | |
6437 | if (!capable(CAP_SYS_RAWIO)) | |
6438 | return -EPERM; | |
6439 | if (copy_from_user(&iocommand, arg, sizeof(iocommand))) | |
6440 | return -EFAULT; | |
6441 | if (iocommand.buf_size < 1 && | |
6442 | iocommand.Request.Type.Direction != XFER_NONE) | |
6443 | return -EINVAL; | |
6444 | if (iocommand.Request.CDBLen > sizeof(request.cdb)) | |
6445 | return -EINVAL; | |
6446 | if (iocommand.Request.Type.Type != TYPE_CMD) | |
6447 | return -EINVAL; | |
6448 | ||
6449 | switch (iocommand.Request.Type.Direction) { | |
6450 | case XFER_NONE: | |
6451 | case XFER_WRITE: | |
6452 | case XFER_READ: | |
41555d54 | 6453 | case XFER_READ | XFER_WRITE: |
6c223761 KB |
6454 | break; |
6455 | default: | |
6456 | return -EINVAL; | |
6457 | } | |
6458 | ||
6459 | if (iocommand.buf_size > 0) { | |
6460 | kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); | |
6461 | if (!kernel_buffer) | |
6462 | return -ENOMEM; | |
6463 | if (iocommand.Request.Type.Direction & XFER_WRITE) { | |
6464 | if (copy_from_user(kernel_buffer, iocommand.buf, | |
6465 | iocommand.buf_size)) { | |
6466 | rc = -EFAULT; | |
6467 | goto out; | |
6468 | } | |
6469 | } else { | |
6470 | memset(kernel_buffer, 0, iocommand.buf_size); | |
6471 | } | |
6472 | } | |
6473 | ||
6474 | memset(&request, 0, sizeof(request)); | |
6475 | ||
6476 | request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; | |
6477 | iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - | |
6478 | PQI_REQUEST_HEADER_LENGTH; | |
6479 | memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, | |
6480 | sizeof(request.lun_number)); | |
6481 | memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); | |
6482 | request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; | |
6483 | ||
6484 | switch (iocommand.Request.Type.Direction) { | |
6485 | case XFER_NONE: | |
6486 | request.data_direction = SOP_NO_DIRECTION_FLAG; | |
6487 | break; | |
6488 | case XFER_WRITE: | |
6489 | request.data_direction = SOP_WRITE_FLAG; | |
6490 | break; | |
6491 | case XFER_READ: | |
6492 | request.data_direction = SOP_READ_FLAG; | |
6493 | break; | |
41555d54 KB |
6494 | case XFER_READ | XFER_WRITE: |
6495 | request.data_direction = SOP_BIDIRECTIONAL; | |
6496 | break; | |
6c223761 KB |
6497 | } |
6498 | ||
6499 | request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; | |
6500 | ||
6501 | if (iocommand.buf_size > 0) { | |
6502 | put_unaligned_le32(iocommand.buf_size, &request.buffer_length); | |
6503 | ||
6504 | rc = pqi_map_single(ctrl_info->pci_dev, | |
6505 | &request.sg_descriptors[0], kernel_buffer, | |
6917a9cc | 6506 | iocommand.buf_size, DMA_BIDIRECTIONAL); |
6c223761 KB |
6507 | if (rc) |
6508 | goto out; | |
6509 | ||
6510 | iu_length += sizeof(request.sg_descriptors[0]); | |
6511 | } | |
6512 | ||
6513 | put_unaligned_le16(iu_length, &request.header.iu_length); | |
6514 | ||
21432010 | 6515 | if (ctrl_info->raid_iu_timeout_supported) |
6516 | put_unaligned_le32(iocommand.Request.Timeout, &request.timeout); | |
6517 | ||
6c223761 KB |
6518 | rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, |
6519 | PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT); | |
6520 | ||
6521 | if (iocommand.buf_size > 0) | |
6522 | pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, | |
6917a9cc | 6523 | DMA_BIDIRECTIONAL); |
6c223761 KB |
6524 | |
6525 | memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); | |
6526 | ||
6527 | if (rc == 0) { | |
6528 | pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); | |
6529 | iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; | |
6530 | iocommand.error_info.CommandStatus = | |
6531 | ciss_error_info.command_status; | |
6532 | sense_data_length = ciss_error_info.sense_data_length; | |
6533 | if (sense_data_length) { | |
6534 | if (sense_data_length > | |
6535 | sizeof(iocommand.error_info.SenseInfo)) | |
6536 | sense_data_length = | |
6537 | sizeof(iocommand.error_info.SenseInfo); | |
6538 | memcpy(iocommand.error_info.SenseInfo, | |
6539 | pqi_error_info.data, sense_data_length); | |
6540 | iocommand.error_info.SenseLen = sense_data_length; | |
6541 | } | |
6542 | } | |
6543 | ||
6544 | if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { | |
6545 | rc = -EFAULT; | |
6546 | goto out; | |
6547 | } | |
6548 | ||
6549 | if (rc == 0 && iocommand.buf_size > 0 && | |
6550 | (iocommand.Request.Type.Direction & XFER_READ)) { | |
6551 | if (copy_to_user(iocommand.buf, kernel_buffer, | |
6552 | iocommand.buf_size)) { | |
6553 | rc = -EFAULT; | |
6554 | } | |
6555 | } | |
6556 | ||
6557 | out: | |
6558 | kfree(kernel_buffer); | |
6559 | ||
6560 | return rc; | |
6561 | } | |
6562 | ||
6f4e626f NC |
6563 | static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, |
6564 | void __user *arg) | |
6c223761 KB |
6565 | { |
6566 | int rc; | |
6567 | struct pqi_ctrl_info *ctrl_info; | |
6568 | ||
6569 | ctrl_info = shost_to_hba(sdev->host); | |
6570 | ||
694c5d5b | 6571 | if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) |
4fd22c13 MR |
6572 | return -EBUSY; |
6573 | ||
6c223761 KB |
6574 | switch (cmd) { |
6575 | case CCISS_DEREGDISK: | |
6576 | case CCISS_REGNEWDISK: | |
6577 | case CCISS_REGNEWD: | |
6578 | rc = pqi_scan_scsi_devices(ctrl_info); | |
6579 | break; | |
6580 | case CCISS_GETPCIINFO: | |
6581 | rc = pqi_getpciinfo_ioctl(ctrl_info, arg); | |
6582 | break; | |
6583 | case CCISS_GETDRIVVER: | |
6584 | rc = pqi_getdrivver_ioctl(arg); | |
6585 | break; | |
6586 | case CCISS_PASSTHRU: | |
6587 | rc = pqi_passthru_ioctl(ctrl_info, arg); | |
6588 | break; | |
6589 | default: | |
6590 | rc = -EINVAL; | |
6591 | break; | |
6592 | } | |
6593 | ||
6594 | return rc; | |
6595 | } | |
6596 | ||
6d90615f | 6597 | static ssize_t pqi_firmware_version_show(struct device *dev, |
6c223761 KB |
6598 | struct device_attribute *attr, char *buffer) |
6599 | { | |
6c223761 KB |
6600 | struct Scsi_Host *shost; |
6601 | struct pqi_ctrl_info *ctrl_info; | |
6602 | ||
6603 | shost = class_to_shost(dev); | |
6604 | ctrl_info = shost_to_hba(shost); | |
6605 | ||
6d90615f MB |
6606 | return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); |
6607 | } | |
6608 | ||
6609 | static ssize_t pqi_driver_version_show(struct device *dev, | |
6610 | struct device_attribute *attr, char *buffer) | |
6611 | { | |
694c5d5b KB |
6612 | return snprintf(buffer, PAGE_SIZE, "%s\n", |
6613 | DRIVER_VERSION BUILD_TIMESTAMP); | |
6d90615f | 6614 | } |
6c223761 | 6615 | |
6d90615f MB |
6616 | static ssize_t pqi_serial_number_show(struct device *dev, |
6617 | struct device_attribute *attr, char *buffer) | |
6618 | { | |
6619 | struct Scsi_Host *shost; | |
6620 | struct pqi_ctrl_info *ctrl_info; | |
6621 | ||
6622 | shost = class_to_shost(dev); | |
6623 | ctrl_info = shost_to_hba(shost); | |
6624 | ||
6625 | return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); | |
6626 | } | |
6627 | ||
6628 | static ssize_t pqi_model_show(struct device *dev, | |
6629 | struct device_attribute *attr, char *buffer) | |
6630 | { | |
6631 | struct Scsi_Host *shost; | |
6632 | struct pqi_ctrl_info *ctrl_info; | |
6633 | ||
6634 | shost = class_to_shost(dev); | |
6635 | ctrl_info = shost_to_hba(shost); | |
6636 | ||
6637 | return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); | |
6638 | } | |
6639 | ||
6640 | static ssize_t pqi_vendor_show(struct device *dev, | |
6641 | struct device_attribute *attr, char *buffer) | |
6642 | { | |
6643 | struct Scsi_Host *shost; | |
6644 | struct pqi_ctrl_info *ctrl_info; | |
6645 | ||
6646 | shost = class_to_shost(dev); | |
6647 | ctrl_info = shost_to_hba(shost); | |
6648 | ||
6649 | return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); | |
6c223761 KB |
6650 | } |
6651 | ||
6652 | static ssize_t pqi_host_rescan_store(struct device *dev, | |
6653 | struct device_attribute *attr, const char *buffer, size_t count) | |
6654 | { | |
6655 | struct Scsi_Host *shost = class_to_shost(dev); | |
6656 | ||
6657 | pqi_scan_start(shost); | |
6658 | ||
6659 | return count; | |
6660 | } | |
6661 | ||
3c50976f KB |
6662 | static ssize_t pqi_lockup_action_show(struct device *dev, |
6663 | struct device_attribute *attr, char *buffer) | |
6664 | { | |
6665 | int count = 0; | |
6666 | unsigned int i; | |
6667 | ||
6668 | for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { | |
6669 | if (pqi_lockup_actions[i].action == pqi_lockup_action) | |
181aea89 | 6670 | count += scnprintf(buffer + count, PAGE_SIZE - count, |
3c50976f KB |
6671 | "[%s] ", pqi_lockup_actions[i].name); |
6672 | else | |
181aea89 | 6673 | count += scnprintf(buffer + count, PAGE_SIZE - count, |
3c50976f KB |
6674 | "%s ", pqi_lockup_actions[i].name); |
6675 | } | |
6676 | ||
181aea89 | 6677 | count += scnprintf(buffer + count, PAGE_SIZE - count, "\n"); |
3c50976f KB |
6678 | |
6679 | return count; | |
6680 | } | |
6681 | ||
6682 | static ssize_t pqi_lockup_action_store(struct device *dev, | |
6683 | struct device_attribute *attr, const char *buffer, size_t count) | |
6684 | { | |
6685 | unsigned int i; | |
6686 | char *action_name; | |
6687 | char action_name_buffer[32]; | |
6688 | ||
6689 | strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer)); | |
6690 | action_name = strstrip(action_name_buffer); | |
6691 | ||
6692 | for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { | |
6693 | if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { | |
6694 | pqi_lockup_action = pqi_lockup_actions[i].action; | |
6695 | return count; | |
6696 | } | |
6697 | } | |
6698 | ||
6699 | return -EINVAL; | |
6700 | } | |
6701 | ||
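`pqi_lockup_action_store()` follows the usual sysfs store recipe: bound-copy the user buffer, strip surrounding whitespace, then match the token against a fixed name table, returning `-EINVAL` when nothing matches. A userspace sketch of the same recipe; the action names and values here are illustrative, and `trim()` stands in for the kernel's `strstrip()`:

```c
#include <stdio.h>
#include <string.h>

struct action { const char *name; int value; };

static const struct action actions[] = {
	{ "none", 0 }, { "reboot", 1 }, { "panic", 2 },	/* example table */
};

/* Strip leading and trailing whitespace in place, like strstrip(). */
static char *trim(char *s)
{
	char *end;

	while (*s == ' ' || *s == '\n' || *s == '\t')
		s++;
	end = s + strlen(s);
	while (end > s && (end[-1] == ' ' || end[-1] == '\n' || end[-1] == '\t'))
		*--end = '\0';
	return s;
}

static int parse_action(const char *buffer)
{
	char buf[32];
	char *name;

	/* Bound-copy, then trim, then table lookup. */
	snprintf(buf, sizeof(buf), "%s", buffer);
	name = trim(buf);

	for (size_t i = 0; i < sizeof(actions) / sizeof(actions[0]); i++)
		if (strcmp(name, actions[i].name) == 0)
			return actions[i].value;

	return -1;	/* mirrors the -EINVAL fallthrough */
}

int main(void)
{
	printf("%d\n", parse_action("reboot\n"));	/* 1 */
	return 0;
}
```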
5be746d7 DB |
6702 | static ssize_t pqi_host_enable_stream_detection_show(struct device *dev, |
6703 | struct device_attribute *attr, char *buffer) | |
6704 | { | |
6705 | struct Scsi_Host *shost = class_to_shost(dev); | |
6706 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); | |
6707 | ||
6708 | return scnprintf(buffer, 10, "%x\n", | |
6709 | ctrl_info->enable_stream_detection); | |
6710 | } | |
6711 | ||
6712 | static ssize_t pqi_host_enable_stream_detection_store(struct device *dev, | |
6713 | struct device_attribute *attr, const char *buffer, size_t count) | |
6714 | { | |
6715 | struct Scsi_Host *shost = class_to_shost(dev); | |
6716 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); | |
6717 | u8 set_stream_detection = 0; | |
6718 | ||
6719 | if (kstrtou8(buffer, 0, &set_stream_detection)) | |
6720 | return -EINVAL; | |
6721 | ||
6722 | if (set_stream_detection > 0) | |
6723 | set_stream_detection = 1; | |
6724 | ||
6725 | ctrl_info->enable_stream_detection = set_stream_detection; | |
6726 | ||
6727 | return count; | |
6728 | } | |
6729 | ||
6702d2c4 DB |
6730 | static ssize_t pqi_host_enable_r5_writes_show(struct device *dev, |
6731 | struct device_attribute *attr, char *buffer) | |
6732 | { | |
6733 | struct Scsi_Host *shost = class_to_shost(dev); | |
6734 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); | |
6735 | ||
6736 | return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes); | |
6737 | } | |
6738 | ||
6739 | static ssize_t pqi_host_enable_r5_writes_store(struct device *dev, | |
6740 | struct device_attribute *attr, const char *buffer, size_t count) | |
6741 | { | |
6742 | struct Scsi_Host *shost = class_to_shost(dev); | |
6743 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); | |
6744 | u8 set_r5_writes = 0; | |
6745 | ||
6746 | if (kstrtou8(buffer, 0, &set_r5_writes)) | |
6747 | return -EINVAL; | |
6748 | ||
6749 | if (set_r5_writes > 0) | |
6750 | set_r5_writes = 1; | |
6751 | ||
6752 | ctrl_info->enable_r5_writes = set_r5_writes; | |
6753 | ||
6754 | return count; | |
6755 | } | |
6756 | ||
6757 | static ssize_t pqi_host_enable_r6_writes_show(struct device *dev, | |
6758 | struct device_attribute *attr, char *buffer) | |
6759 | { | |
6760 | struct Scsi_Host *shost = class_to_shost(dev); | |
6761 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); | |
6762 | ||
6763 | return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes); | |
6764 | } | |
6765 | ||
6766 | static ssize_t pqi_host_enable_r6_writes_store(struct device *dev, | |
6767 | struct device_attribute *attr, const char *buffer, size_t count) | |
6768 | { | |
6769 | struct Scsi_Host *shost = class_to_shost(dev); | |
6770 | struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); | |
6771 | u8 set_r6_writes = 0; | |
6772 | ||
6773 | if (kstrtou8(buffer, 0, &set_r6_writes)) | |
6774 | return -EINVAL; | |
6775 | ||
6776 | if (set_r6_writes > 0) | |
6777 | set_r6_writes = 1; | |
6778 | ||
6779 | ctrl_info->enable_r6_writes = set_r6_writes; | |
6780 | ||
6781 | return count; | |
6782 | } | |
6783 | ||
6d90615f MB |
6784 | static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL); |
6785 | static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL); | |
6786 | static DEVICE_ATTR(model, 0444, pqi_model_show, NULL); | |
6787 | static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL); | |
6788 | static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL); | |
cbe0c7b1 | 6789 | static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); |
583891c9 KB |
6790 | static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show, |
6791 | pqi_lockup_action_store); | |
5be746d7 DB |
6792 | static DEVICE_ATTR(enable_stream_detection, 0644, |
6793 | pqi_host_enable_stream_detection_show, | |
6794 | pqi_host_enable_stream_detection_store); | |
6702d2c4 DB |
6795 | static DEVICE_ATTR(enable_r5_writes, 0644, |
6796 | pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store); | |
6797 | static DEVICE_ATTR(enable_r6_writes, 0644, | |
6798 | pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store); | |
6c223761 KB |
6799 | |
6800 | static struct device_attribute *pqi_shost_attrs[] = { | |
6d90615f MB |
6801 | &dev_attr_driver_version, |
6802 | &dev_attr_firmware_version, | |
6803 | &dev_attr_model, | |
6804 | &dev_attr_serial_number, | |
6805 | &dev_attr_vendor, | |
6c223761 | 6806 | &dev_attr_rescan, |
3c50976f | 6807 | &dev_attr_lockup_action, |
5be746d7 | 6808 | &dev_attr_enable_stream_detection, |
6702d2c4 DB |
6809 | &dev_attr_enable_r5_writes, |
6810 | &dev_attr_enable_r6_writes, | |
6c223761 KB |
6811 | NULL |
6812 | }; | |
6813 | ||
cd128244 DC |
6814 | static ssize_t pqi_unique_id_show(struct device *dev, |
6815 | struct device_attribute *attr, char *buffer) | |
6816 | { | |
6817 | struct pqi_ctrl_info *ctrl_info; | |
6818 | struct scsi_device *sdev; | |
6819 | struct pqi_scsi_dev *device; | |
6820 | unsigned long flags; | |
5b083b30 | 6821 | u8 unique_id[16]; |
cd128244 DC |
6822 | |
6823 | sdev = to_scsi_device(dev); | |
6824 | ctrl_info = shost_to_hba(sdev->host); | |
6825 | ||
6826 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
6827 | ||
6828 | device = sdev->hostdata; | |
6829 | if (!device) { | |
8b664fef | 6830 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
cd128244 DC |
6831 | return -ENODEV; |
6832 | } | |
5b083b30 KB |
6833 | |
6834 | if (device->is_physical_device) { | |
6835 | memset(unique_id, 0, 8); | |
6836 | memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid)); | |
6837 | } else { | |
6838 | memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); | |
6839 | } | |
cd128244 DC |
6840 | |
6841 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
6842 | ||
5995b236 | 6843 | return snprintf(buffer, PAGE_SIZE, |
583891c9 KB |
6844 | "%02X%02X%02X%02X%02X%02X%02X%02X" |
6845 | "%02X%02X%02X%02X%02X%02X%02X%02X\n", | |
5b083b30 KB |
6846 | unique_id[0], unique_id[1], unique_id[2], unique_id[3], |
6847 | unique_id[4], unique_id[5], unique_id[6], unique_id[7], | |
6848 | unique_id[8], unique_id[9], unique_id[10], unique_id[11], | |
6849 | unique_id[12], unique_id[13], unique_id[14], unique_id[15]); | |
cd128244 DC |
6850 | } |
6851 | ||
6852 | static ssize_t pqi_lunid_show(struct device *dev, | |
6853 | struct device_attribute *attr, char *buffer) | |
6854 | { | |
6855 | struct pqi_ctrl_info *ctrl_info; | |
6856 | struct scsi_device *sdev; | |
6857 | struct pqi_scsi_dev *device; | |
6858 | unsigned long flags; | |
6859 | u8 lunid[8]; | |
6860 | ||
6861 | sdev = to_scsi_device(dev); | |
6862 | ctrl_info = shost_to_hba(sdev->host); | |
6863 | ||
6864 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
6865 | ||
6866 | device = sdev->hostdata; | |
6867 | if (!device) { | |
8b664fef | 6868 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
cd128244 DC |
6869 | return -ENODEV; |
6870 | } | |
694c5d5b | 6871 | |
cd128244 DC |
6872 | memcpy(lunid, device->scsi3addr, sizeof(lunid)); |
6873 | ||
6874 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
6875 | ||
6876 | return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid); | |
6877 | } | |
6878 | ||
694c5d5b KB |
6879 | #define MAX_PATHS 8 |
6880 | ||
cd128244 DC |
6881 | static ssize_t pqi_path_info_show(struct device *dev, |
6882 | struct device_attribute *attr, char *buf) | |
6883 | { | |
6884 | struct pqi_ctrl_info *ctrl_info; | |
6885 | struct scsi_device *sdev; | |
6886 | struct pqi_scsi_dev *device; | |
6887 | unsigned long flags; | |
6888 | int i; | |
6889 | int output_len = 0; | |
6890 | u8 box; | |
6891 | u8 bay; | |
694c5d5b | 6892 | u8 path_map_index; |
cd128244 | 6893 | char *active; |
694c5d5b | 6894 | u8 phys_connector[2]; |
cd128244 DC |
6895 | |
6896 | sdev = to_scsi_device(dev); | |
6897 | ctrl_info = shost_to_hba(sdev->host); | |
6898 | ||
6899 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
6900 | ||
6901 | device = sdev->hostdata; | |
6902 | if (!device) { | |
8b664fef | 6903 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); |
cd128244 DC |
6904 | return -ENODEV; |
6905 | } | |
6906 | ||
6907 | bay = device->bay; | |
6908 | for (i = 0; i < MAX_PATHS; i++) { | |
694c5d5b | 6909 | path_map_index = 1 << i; |
cd128244 DC |
6910 | if (i == device->active_path_index) |
6911 | active = "Active"; | |
6912 | else if (device->path_map & path_map_index) | |
6913 | active = "Inactive"; | |
6914 | else | |
6915 | continue; | |
6916 | ||
6917 | output_len += scnprintf(buf + output_len, | |
6918 | PAGE_SIZE - output_len, | |
6919 | "[%d:%d:%d:%d] %20.20s ", | |
6920 | ctrl_info->scsi_host->host_no, | |
6921 | device->bus, device->target, | |
6922 | device->lun, | |
6923 | scsi_device_type(device->devtype)); | |
6924 | ||
6925 | if (device->devtype == TYPE_RAID || | |
6926 | pqi_is_logical_device(device)) | |
6927 | goto end_buffer; | |
6928 | ||
6929 | memcpy(&phys_connector, &device->phys_connector[i], | |
6930 | sizeof(phys_connector)); | |
6931 | if (phys_connector[0] < '0') | |
6932 | phys_connector[0] = '0'; | |
6933 | if (phys_connector[1] < '0') | |
6934 | phys_connector[1] = '0'; | |
6935 | ||
6936 | output_len += scnprintf(buf + output_len, | |
6937 | PAGE_SIZE - output_len, | |
6938 | "PORT: %.2s ", phys_connector); | |
6939 | ||
6940 | box = device->box[i]; | |
6941 | if (box != 0 && box != 0xFF) | |
6942 | output_len += scnprintf(buf + output_len, | |
6943 | PAGE_SIZE - output_len, | |
6944 | "BOX: %hhu ", box); | |
6945 | ||
6946 | if ((device->devtype == TYPE_DISK || | |
6947 | device->devtype == TYPE_ZBC) && | |
6948 | pqi_expose_device(device)) | |
6949 | output_len += scnprintf(buf + output_len, | |
6950 | PAGE_SIZE - output_len, | |
6951 | "BAY: %hhu ", bay); | |
6952 | ||
6953 | end_buffer: | |
6954 | output_len += scnprintf(buf + output_len, | |
6955 | PAGE_SIZE - output_len, | |
6956 | "%s\n", active); | |
6957 | } | |
6958 | ||
6959 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
694c5d5b | 6960 | |
cd128244 DC |
6961 | return output_len; |
6962 | } | |
6963 | ||
6c223761 KB |
6964 | static ssize_t pqi_sas_address_show(struct device *dev, |
6965 | struct device_attribute *attr, char *buffer) | |
6966 | { | |
6967 | struct pqi_ctrl_info *ctrl_info; | |
6968 | struct scsi_device *sdev; | |
6969 | struct pqi_scsi_dev *device; | |
6970 | unsigned long flags; | |
6971 | u64 sas_address; | |
6972 | ||
6973 | sdev = to_scsi_device(dev); | |
6974 | ctrl_info = shost_to_hba(sdev->host); | |
6975 | ||
6976 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
6977 | ||
6978 | device = sdev->hostdata; | |
8b664fef KB |
6979 | if (!device || !pqi_is_device_with_sas_address(device)) { |
6980 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
6c223761 KB |
6981 | return -ENODEV; |
6982 | } | |
694c5d5b | 6983 | |
6c223761 KB |
6984 | sas_address = device->sas_address; |
6985 | ||
6986 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
6987 | ||
6988 | return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); | |
6989 | } | |
6990 | ||
6991 | static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, | |
6992 | struct device_attribute *attr, char *buffer) | |
6993 | { | |
6994 | struct pqi_ctrl_info *ctrl_info; | |
6995 | struct scsi_device *sdev; | |
6996 | struct pqi_scsi_dev *device; | |
6997 | unsigned long flags; | |
6998 | ||
6999 | sdev = to_scsi_device(dev); | |
7000 | ctrl_info = shost_to_hba(sdev->host); | |
7001 | ||
7002 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
7003 | ||
7004 | device = sdev->hostdata; | |
8b664fef KB |
7005 | if (!device) { |
7006 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
7007 | return -ENODEV; | |
7008 | } | |
7009 | ||
588a63fe | 7010 | buffer[0] = device->raid_bypass_enabled ? '1' : '0'; |
6c223761 KB |
7011 | buffer[1] = '\n'; |
7012 | buffer[2] = '\0'; | |
7013 | ||
7014 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
7015 | ||
7016 | return 2; | |
7017 | } | |
7018 | ||
a9f93392 KB |
7019 | static ssize_t pqi_raid_level_show(struct device *dev, |
7020 | struct device_attribute *attr, char *buffer) | |
7021 | { | |
7022 | struct pqi_ctrl_info *ctrl_info; | |
7023 | struct scsi_device *sdev; | |
7024 | struct pqi_scsi_dev *device; | |
7025 | unsigned long flags; | |
7026 | char *raid_level; | |
7027 | ||
7028 | sdev = to_scsi_device(dev); | |
7029 | ctrl_info = shost_to_hba(sdev->host); | |
7030 | ||
7031 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
7032 | ||
7033 | device = sdev->hostdata; | |
8b664fef KB |
7034 | if (!device) { |
7035 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
7036 | return -ENODEV; | |
7037 | } | |
a9f93392 KB |
7038 | |
7039 | if (pqi_is_logical_device(device)) | |
7040 | raid_level = pqi_raid_level_to_string(device->raid_level); | |
7041 | else | |
7042 | raid_level = "N/A"; | |
7043 | ||
7044 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
7045 | ||
7046 | return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level); | |
7047 | } | |
7048 | ||
8b664fef KB |
7049 | static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, |
7050 | struct device_attribute *attr, char *buffer) | |
7051 | { | |
7052 | struct pqi_ctrl_info *ctrl_info; | |
7053 | struct scsi_device *sdev; | |
7054 | struct pqi_scsi_dev *device; | |
7055 | unsigned long flags; | |
7056 | int raid_bypass_cnt; | |
7057 | ||
7058 | sdev = to_scsi_device(dev); | |
7059 | ctrl_info = shost_to_hba(sdev->host); | |
7060 | ||
7061 | spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); | |
7062 | ||
7063 | device = sdev->hostdata; | |
7064 | if (!device) { | |
7065 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
7066 | return -ENODEV; | |
7067 | } | |
7068 | ||
7069 | raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt); | |
7070 | ||
7071 | spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); | |
7072 | ||
7073 | return snprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt); | |
7074 | } | |
7075 | ||
cd128244 DC |
7076 | static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); |
7077 | static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); | |
7078 | static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); | |
cbe0c7b1 | 7079 | static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); |
8b664fef | 7080 | static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL); |
a9f93392 | 7081 | static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); |
8b664fef | 7082 | static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL); |
6c223761 KB |
7083 | |
7084 | static struct device_attribute *pqi_sdev_attrs[] = { | |
cd128244 DC |
7085 | &dev_attr_lunid, |
7086 | &dev_attr_unique_id, | |
7087 | &dev_attr_path_info, | |
6c223761 KB |
7088 | &dev_attr_sas_address, |
7089 | &dev_attr_ssd_smart_path_enabled, | |
a9f93392 | 7090 | &dev_attr_raid_level, |
8b664fef | 7091 | &dev_attr_raid_bypass_cnt, |
6c223761 KB |
7092 | NULL |
7093 | }; | |
7094 | ||
7095 | static struct scsi_host_template pqi_driver_template = { | |
7096 | .module = THIS_MODULE, | |
7097 | .name = DRIVER_NAME_SHORT, | |
7098 | .proc_name = DRIVER_NAME_SHORT, | |
7099 | .queuecommand = pqi_scsi_queue_command, | |
7100 | .scan_start = pqi_scan_start, | |
7101 | .scan_finished = pqi_scan_finished, | |
7102 | .this_id = -1, | |
6c223761 KB |
7103 | .eh_device_reset_handler = pqi_eh_device_reset_handler, |
7104 | .ioctl = pqi_ioctl, | |
7105 | .slave_alloc = pqi_slave_alloc, | |
ce143793 | 7106 | .slave_configure = pqi_slave_configure, |
4d15ad38 | 7107 | .slave_destroy = pqi_slave_destroy, |
52198226 | 7108 | .map_queues = pqi_map_queues, |
6c223761 KB |
7109 | .sdev_attrs = pqi_sdev_attrs, |
7110 | .shost_attrs = pqi_shost_attrs, | |
7111 | }; | |
7112 | ||
7113 | static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) | |
7114 | { | |
7115 | int rc; | |
7116 | struct Scsi_Host *shost; | |
7117 | ||
7118 | shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); | |
7119 | if (!shost) { | |
583891c9 | 7120 | dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n"); |
6c223761 KB |
7121 | return -ENOMEM; |
7122 | } | |
7123 | ||
7124 | shost->io_port = 0; | |
7125 | shost->n_io_port = 0; | |
7126 | shost->this_id = -1; | |
7127 | shost->max_channel = PQI_MAX_BUS; | |
7128 | shost->max_cmd_len = MAX_COMMAND_SIZE; | |
7129 | shost->max_lun = ~0; | |
7130 | shost->max_id = ~0; | |
7131 | shost->max_sectors = ctrl_info->max_sectors; | |
7132 | shost->can_queue = ctrl_info->scsi_ml_can_queue; | |
7133 | shost->cmd_per_lun = shost->can_queue; | |
7134 | shost->sg_tablesize = ctrl_info->sg_tablesize; | |
7135 | shost->transportt = pqi_sas_transport_template; | |
52198226 | 7136 | shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); |
6c223761 KB |
7137 | shost->unique_id = shost->irq; |
7138 | shost->nr_hw_queues = ctrl_info->num_queue_groups; | |
c6d3ee20 | 7139 | shost->host_tagset = 1; |
6c223761 KB |
7140 | shost->hostdata[0] = (unsigned long)ctrl_info; |
7141 | ||
7142 | rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); | |
7143 | if (rc) { | |
583891c9 | 7144 | dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n"); |
6c223761 KB |
7145 | goto free_host; |
7146 | } | |
7147 | ||
7148 | rc = pqi_add_sas_host(shost, ctrl_info); | |
7149 | if (rc) { | |
583891c9 | 7150 | dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n"); |
6c223761 KB |
7151 | goto remove_host; |
7152 | } | |
7153 | ||
7154 | ctrl_info->scsi_host = shost; | |
7155 | ||
7156 | return 0; | |
7157 | ||
7158 | remove_host: | |
7159 | scsi_remove_host(shost); | |
7160 | free_host: | |
7161 | scsi_host_put(shost); | |
7162 | ||
7163 | return rc; | |
7164 | } | |
7165 | ||
7166 | static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) | |
7167 | { | |
7168 | struct Scsi_Host *shost; | |
7169 | ||
7170 | pqi_delete_sas_host(ctrl_info); | |
7171 | ||
7172 | shost = ctrl_info->scsi_host; | |
7173 | if (!shost) | |
7174 | return; | |
7175 | ||
7176 | scsi_remove_host(shost); | |
7177 | scsi_host_put(shost); | |
7178 | } | |
7179 | ||
336b6819 KB |
7180 | static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) |
7181 | { | |
7182 | int rc = 0; | |
7183 | struct pqi_device_registers __iomem *pqi_registers; | |
7184 | unsigned long timeout; | |
7185 | unsigned int timeout_msecs; | |
7186 | union pqi_reset_register reset_reg; | |
6c223761 | 7187 | |
336b6819 KB |
7188 | pqi_registers = ctrl_info->pqi_registers; |
7189 | timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; | |
7190 | timeout = msecs_to_jiffies(timeout_msecs) + jiffies; | |
7191 | ||
7192 | while (1) { | |
7193 | msleep(PQI_RESET_POLL_INTERVAL_MSECS); | |
7194 | reset_reg.all_bits = readl(&pqi_registers->device_reset); | |
7195 | if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) | |
7196 | break; | |
7197 | pqi_check_ctrl_health(ctrl_info); | |
7198 | if (pqi_ctrl_offline(ctrl_info)) { | |
7199 | rc = -ENXIO; | |
7200 | break; | |
7201 | } | |
7202 | if (time_after(jiffies, timeout)) { | |
7203 | rc = -ETIMEDOUT; | |
7204 | break; | |
7205 | } | |
7206 | } | |
7207 | ||
7208 | return rc; | |
7209 | } | |
6c223761 KB |
7210 | |
7211 | static int pqi_reset(struct pqi_ctrl_info *ctrl_info) | |
7212 | { | |
7213 | int rc; | |
336b6819 KB |
7214 | union pqi_reset_register reset_reg; |
7215 | ||
7216 | if (ctrl_info->pqi_reset_quiesce_supported) { | |
7217 | rc = sis_pqi_reset_quiesce(ctrl_info); | |
7218 | if (rc) { | |
7219 | dev_err(&ctrl_info->pci_dev->dev, | |
583891c9 | 7220 | "PQI reset failed during quiesce with error %d\n", rc); |
336b6819 KB |
7221 | return rc; |
7222 | } | |
7223 | } | |
6c223761 | 7224 | |
336b6819 KB |
7225 | reset_reg.all_bits = 0; |
7226 | reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; | |
7227 | reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; | |
6c223761 | 7228 | |
336b6819 | 7229 | writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); |
6c223761 | 7230 | |
336b6819 | 7231 | rc = pqi_wait_for_pqi_reset_completion(ctrl_info); |
6c223761 KB |
7232 | if (rc) |
7233 | dev_err(&ctrl_info->pci_dev->dev, | |
336b6819 | 7234 | "PQI reset failed with error %d\n", rc); |
6c223761 KB |
7235 | |
7236 | return rc; | |
7237 | } | |
7238 | ||
6d90615f MB |
7239 | static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) |
7240 | { | |
7241 | int rc; | |
7242 | struct bmic_sense_subsystem_info *sense_info; | |
7243 | ||
7244 | sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL); | |
7245 | if (!sense_info) | |
7246 | return -ENOMEM; | |
7247 | ||
7248 | rc = pqi_sense_subsystem_info(ctrl_info, sense_info); | |
7249 | if (rc) | |
7250 | goto out; | |
7251 | ||
7252 | memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, | |
7253 | sizeof(sense_info->ctrl_serial_number)); | |
7254 | ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; | |
7255 | ||
7256 | out: | |
7257 | kfree(sense_info); | |
7258 | ||
7259 | return rc; | |
7260 | } | |
7261 | ||
7262 | static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) | |
6c223761 KB |
7263 | { |
7264 | int rc; | |
7265 | struct bmic_identify_controller *identify; | |
7266 | ||
7267 | identify = kmalloc(sizeof(*identify), GFP_KERNEL); | |
7268 | if (!identify) | |
7269 | return -ENOMEM; | |
7270 | ||
7271 | rc = pqi_identify_controller(ctrl_info, identify); | |
7272 | if (rc) | |
7273 | goto out; | |
7274 | ||
598bef8d KB |
7275 | if (get_unaligned_le32(&identify->extra_controller_flags) & |
7276 | BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) { | |
7277 | memcpy(ctrl_info->firmware_version, | |
7278 | identify->firmware_version_long, | |
7279 | sizeof(identify->firmware_version_long)); | |
7280 | } else { | |
7281 | memcpy(ctrl_info->firmware_version, | |
7282 | identify->firmware_version_short, | |
7283 | sizeof(identify->firmware_version_short)); | |
7284 | ctrl_info->firmware_version | |
7285 | [sizeof(identify->firmware_version_short)] = '\0'; | |
7286 | snprintf(ctrl_info->firmware_version + | |
7287 | strlen(ctrl_info->firmware_version), | |
7288 | sizeof(ctrl_info->firmware_version) - | |
7289 | sizeof(identify->firmware_version_short), | |
7290 | "-%u", | |
7291 | get_unaligned_le16(&identify->firmware_build_number)); | |
7292 | } | |
6c223761 | 7293 | |
6d90615f MB |
7294 | memcpy(ctrl_info->model, identify->product_id, |
7295 | sizeof(identify->product_id)); | |
7296 | ctrl_info->model[sizeof(identify->product_id)] = '\0'; | |
7297 | ||
7298 | memcpy(ctrl_info->vendor, identify->vendor_id, | |
7299 | sizeof(identify->vendor_id)); | |
7300 | ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; | |
7301 | ||
6c223761 KB |
7302 | out: |
7303 | kfree(identify); | |
7304 | ||
7305 | return rc; | |
7306 | } | |
7307 | ||
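For controllers without the long firmware version field, `pqi_get_ctrl_product_details()` assembles the version string in two steps: copy the fixed-width short field and terminate it, then append `-<build number>` into the remaining buffer space. A standalone sketch of that string assembly; the field widths and values are examples:

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	char fw_version[32];
	const char short_version[8] = "2.40";	/* fixed-width field, example value */
	unsigned int build_number = 1234;

	/* Copy the fixed-width field and terminate it... */
	memcpy(fw_version, short_version, sizeof(short_version));
	fw_version[sizeof(short_version)] = '\0';

	/* ...then append "-<build>" into the space that remains. */
	snprintf(fw_version + strlen(fw_version),
		 sizeof(fw_version) - sizeof(short_version),
		 "-%u", build_number);

	printf("%s\n", fw_version);	/* 2.40-1234 */
	return 0;
}
```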
b212c251 KB |
7308 | struct pqi_config_table_section_info { |
7309 | struct pqi_ctrl_info *ctrl_info; | |
7310 | void *section; | |
7311 | u32 section_offset; | |
7312 | void __iomem *section_iomem_addr; | |
7313 | }; | |
7314 | ||
7315 | static inline bool pqi_is_firmware_feature_supported( | |
7316 | struct pqi_config_table_firmware_features *firmware_features, | |
7317 | unsigned int bit_position) | |
98f87667 | 7318 | { |
b212c251 | 7319 | unsigned int byte_index; |
98f87667 | 7320 | |
b212c251 | 7321 | byte_index = bit_position / BITS_PER_BYTE; |
98f87667 | 7322 | |
b212c251 KB |
7323 | if (byte_index >= le16_to_cpu(firmware_features->num_elements)) |
7324 | return false; | |
98f87667 | 7325 | |
b212c251 KB |
7326 | return firmware_features->features_supported[byte_index] & |
7327 | (1 << (bit_position % BITS_PER_BYTE)) ? true : false; | |
7328 | } | |
7329 | ||
7330 | static inline bool pqi_is_firmware_feature_enabled( | |
7331 | struct pqi_config_table_firmware_features *firmware_features, | |
7332 | void __iomem *firmware_features_iomem_addr, | |
7333 | unsigned int bit_position) | |
7334 | { | |
7335 | unsigned int byte_index; | |
7336 | u8 __iomem *features_enabled_iomem_addr; | |
7337 | ||
7338 | byte_index = (bit_position / BITS_PER_BYTE) + | |
7339 | (le16_to_cpu(firmware_features->num_elements) * 2); | |
7340 | ||
7341 | features_enabled_iomem_addr = firmware_features_iomem_addr + | |
7342 | offsetof(struct pqi_config_table_firmware_features, | |
7343 | features_supported) + byte_index; | |
7344 | ||
7345 | return *((__force u8 *)features_enabled_iomem_addr) & | |
7346 | (1 << (bit_position % BITS_PER_BYTE)) ? true : false; | |
7347 | } | |
7348 | ||
7349 | static inline void pqi_request_firmware_feature( | |
7350 | struct pqi_config_table_firmware_features *firmware_features, | |
7351 | unsigned int bit_position) | |
7352 | { | |
7353 | unsigned int byte_index; | |
7354 | ||
7355 | byte_index = (bit_position / BITS_PER_BYTE) + | |
7356 | le16_to_cpu(firmware_features->num_elements); | |
7357 | ||
7358 | firmware_features->features_supported[byte_index] |= | |
7359 | (1 << (bit_position % BITS_PER_BYTE)); | |
7360 | } | |
7361 | ||
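/*
 * Illustrative sketch, not part of the driver: taken together, the three
 * helpers above imply that the firmware features section carries three
 * byte arrays of num_elements bytes each, laid out back to back after
 * the section header:
 *
 *	features_supported[num_elements]	(byte offset 0)
 *	features_requested[num_elements]	(byte offset num_elements)
 *	features_enabled[num_elements]		(byte offset num_elements * 2)
 *
 * The hypothetical helper below just names the byte index that
 * pqi_is_firmware_feature_enabled() computes for the "enabled" array.
 */
static __maybe_unused unsigned int pqi_feature_enabled_byte_index(
	struct pqi_config_table_firmware_features *firmware_features,
	unsigned int bit_position)
{
	return (bit_position / BITS_PER_BYTE) +
		(le16_to_cpu(firmware_features->num_elements) * 2);
}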
7362 | static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, | |
7363 | u16 first_section, u16 last_section) | |
7364 | { | |
7365 | struct pqi_vendor_general_request request; | |
7366 | ||
7367 | memset(&request, 0, sizeof(request)); | |
7368 | ||
7369 | request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; | |
7370 | put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, | |
7371 | &request.header.iu_length); | |
7372 | put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE, | |
7373 | &request.function_code); | |
7374 | put_unaligned_le16(first_section, | |
7375 | &request.data.config_table_update.first_section); | |
7376 | put_unaligned_le16(last_section, | |
7377 | &request.data.config_table_update.last_section); | |
7378 | ||
7379 | return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, | |
7380 | 0, NULL, NO_TIMEOUT); | |
7381 | } | |
7382 | ||
7383 | static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, | |
7384 | struct pqi_config_table_firmware_features *firmware_features, | |
7385 | void __iomem *firmware_features_iomem_addr) | |
7386 | { | |
7387 | void *features_requested; | |
7388 | void __iomem *features_requested_iomem_addr; | |
f6cc2a77 | 7389 | void __iomem *host_max_known_feature_iomem_addr; |
b212c251 KB |
7390 | |
7391 | features_requested = firmware_features->features_supported + | |
7392 | le16_to_cpu(firmware_features->num_elements); | |
7393 | ||
7394 | features_requested_iomem_addr = firmware_features_iomem_addr + | |
7395 | (features_requested - (void *)firmware_features); | |
7396 | ||
7397 | memcpy_toio(features_requested_iomem_addr, features_requested, | |
7398 | le16_to_cpu(firmware_features->num_elements)); | |
7399 | ||
f6cc2a77 KB |
7400 | if (pqi_is_firmware_feature_supported(firmware_features, |
7401 | PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) { | |
7402 | host_max_known_feature_iomem_addr = | |
7403 | features_requested_iomem_addr + | |
7404 | (le16_to_cpu(firmware_features->num_elements) * 2) + | |
7405 | sizeof(__le16); | |
7406 | writew(PQI_FIRMWARE_FEATURE_MAXIMUM, | |
7407 | host_max_known_feature_iomem_addr); | |
7408 | } | |
7409 | ||
b212c251 KB |
7410 | return pqi_config_table_update(ctrl_info, |
7411 | PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES, | |
7412 | PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES); | |
7413 | } | |
7414 | ||
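/*
 * Illustrative sketch, not part of the driver: the offset arithmetic in
 * pqi_enable_firmware_features() above implies that two __le16 fields
 * follow the three feature byte arrays: the firmware's maximum known
 * feature and then the host's, which is the one written with
 * PQI_FIRMWARE_FEATURE_MAXIMUM. The hypothetical helper below names
 * that address computation.
 */
static __maybe_unused void __iomem *pqi_host_max_known_feature_addr(
	struct pqi_config_table_firmware_features *firmware_features,
	void __iomem *features_requested_iomem_addr)
{
	/* skip the requested and enabled arrays, then firmware's __le16 */
	return features_requested_iomem_addr +
		(le16_to_cpu(firmware_features->num_elements) * 2) +
		sizeof(__le16);
}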
7415 | struct pqi_firmware_feature { | |
7416 | char *feature_name; | |
7417 | unsigned int feature_bit; | |
7418 | bool supported; | |
7419 | bool enabled; | |
7420 | void (*feature_status)(struct pqi_ctrl_info *ctrl_info, | |
7421 | struct pqi_firmware_feature *firmware_feature); | |
7422 | }; | |
7423 | ||
7424 | static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, | |
7425 | struct pqi_firmware_feature *firmware_feature) | |
7426 | { | |
7427 | if (!firmware_feature->supported) { | |
7428 | dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", | |
7429 | firmware_feature->feature_name); | |
7430 | return; | |
7431 | } | |
7432 | ||
7433 | if (firmware_feature->enabled) { | |
7434 | dev_info(&ctrl_info->pci_dev->dev, | |
7435 | "%s enabled\n", firmware_feature->feature_name); | |
7436 | return; | |
7437 | } | |
7438 | ||
7439 | dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", | |
7440 | firmware_feature->feature_name); | |
7441 | } | |
7442 | ||
21432010 | 7443 | static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, |
7444 | struct pqi_firmware_feature *firmware_feature) | |
7445 | { | |
7446 | switch (firmware_feature->feature_bit) { | |
f6cc2a77 KB |
7447 | case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS: |
7448 | ctrl_info->enable_r1_writes = firmware_feature->enabled; | |
7449 | break; | |
7450 | case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS: | |
7451 | ctrl_info->enable_r5_writes = firmware_feature->enabled; | |
7452 | break; | |
7453 | case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS: | |
7454 | ctrl_info->enable_r6_writes = firmware_feature->enabled; | |
7455 | break; | |
21432010 | 7456 | case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE: |
7457 | ctrl_info->soft_reset_handshake_supported = | |
7458 | firmware_feature->enabled; | |
7459 | break; | |
7460 | case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT: | |
583891c9 | 7461 | ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled; |
21432010 | 7462 | break; |
c2922f17 | 7463 | case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT: |
583891c9 | 7464 | ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled; |
c2922f17 | 7465 | break; |
21432010 | 7466 | } |
7467 | ||
7468 | pqi_firmware_feature_status(ctrl_info, firmware_feature); | |
7469 | } | |
7470 | ||
b212c251 KB |
7471 | static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, |
7472 | struct pqi_firmware_feature *firmware_feature) | |
7473 | { | |
7474 | if (firmware_feature->feature_status) | |
7475 | firmware_feature->feature_status(ctrl_info, firmware_feature); | |
7476 | } | |
7477 | ||
7478 | static DEFINE_MUTEX(pqi_firmware_features_mutex); | |
7479 | ||
7480 | static struct pqi_firmware_feature pqi_firmware_features[] = { | |
7481 | { | |
7482 | .feature_name = "Online Firmware Activation", | |
7483 | .feature_bit = PQI_FIRMWARE_FEATURE_OFA, | |
7484 | .feature_status = pqi_firmware_feature_status, | |
7485 | }, | |
7486 | { | |
7487 | .feature_name = "Serial Management Protocol", | |
7488 | .feature_bit = PQI_FIRMWARE_FEATURE_SMP, | |
7489 | .feature_status = pqi_firmware_feature_status, | |
7490 | }, | |
f6cc2a77 KB |
7491 | { |
7492 | .feature_name = "Maximum Known Feature", | |
7493 | .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE, | |
7494 | .feature_status = pqi_firmware_feature_status, | |
7495 | }, | |
7496 | { | |
7497 | .feature_name = "RAID 0 Read Bypass", | |
7498 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS, | |
7499 | .feature_status = pqi_firmware_feature_status, | |
7500 | }, | |
7501 | { | |
7502 | .feature_name = "RAID 1 Read Bypass", | |
7503 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS, | |
7504 | .feature_status = pqi_firmware_feature_status, | |
7505 | }, | |
7506 | { | |
7507 | .feature_name = "RAID 5 Read Bypass", | |
7508 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS, | |
7509 | .feature_status = pqi_firmware_feature_status, | |
7510 | }, | |
7511 | { | |
7512 | .feature_name = "RAID 6 Read Bypass", | |
7513 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS, | |
7514 | .feature_status = pqi_firmware_feature_status, | |
7515 | }, | |
7516 | { | |
7517 | .feature_name = "RAID 0 Write Bypass", | |
7518 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS, | |
7519 | .feature_status = pqi_firmware_feature_status, | |
7520 | }, | |
7521 | { | |
7522 | .feature_name = "RAID 1 Write Bypass", | |
7523 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS, | |
7524 | .feature_status = pqi_ctrl_update_feature_flags, | |
7525 | }, | |
7526 | { | |
7527 | .feature_name = "RAID 5 Write Bypass", | |
7528 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS, | |
7529 | .feature_status = pqi_ctrl_update_feature_flags, | |
7530 | }, | |
7531 | { | |
7532 | .feature_name = "RAID 6 Write Bypass", | |
7533 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS, | |
7534 | .feature_status = pqi_ctrl_update_feature_flags, | |
7535 | }, | |
4fd22c13 MR |
7536 | { |
7537 | .feature_name = "New Soft Reset Handshake", | |
7538 | .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, | |
21432010 | 7539 | .feature_status = pqi_ctrl_update_feature_flags, |
7540 | }, | |
7541 | { | |
7542 | .feature_name = "RAID IU Timeout", | |
7543 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT, | |
7544 | .feature_status = pqi_ctrl_update_feature_flags, | |
4fd22c13 | 7545 | }, |
c2922f17 MB |
7546 | { |
7547 | .feature_name = "TMF IU Timeout", | |
7548 | .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT, | |
7549 | .feature_status = pqi_ctrl_update_feature_flags, | |
7550 | }, | |
f6cc2a77 KB |
7551 | { |
7552 | .feature_name = "RAID Bypass on encrypted logical volumes on NVMe", | |
7553 | .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME, | |
7554 | .feature_status = pqi_firmware_feature_status, | |
7555 | }, | |
b212c251 KB |
7556 | }; |
7557 | ||
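/*
 * Illustrative sketch, not part of the driver: supporting a new feature
 * bit is a matter of adding an entry to the table above (the feature
 * name and bit constant below are invented for the example):
 *
 *	{
 *		.feature_name = "Example Feature",
 *		.feature_bit = PQI_FIRMWARE_FEATURE_EXAMPLE,
 *		.feature_status = pqi_firmware_feature_status,
 *	},
 *
 * Entries that must also update driver state use
 * pqi_ctrl_update_feature_flags as their feature_status callback.
 */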
7558 | static void pqi_process_firmware_features( | |
7559 | struct pqi_config_table_section_info *section_info) | |
7560 | { | |
7561 | int rc; | |
7562 | struct pqi_ctrl_info *ctrl_info; | |
7563 | struct pqi_config_table_firmware_features *firmware_features; | |
7564 | void __iomem *firmware_features_iomem_addr; | |
7565 | unsigned int i; | |
7566 | unsigned int num_features_supported; | |
7567 | ||
7568 | ctrl_info = section_info->ctrl_info; | |
7569 | firmware_features = section_info->section; | |
7570 | firmware_features_iomem_addr = section_info->section_iomem_addr; | |
7571 | ||
7572 | for (i = 0, num_features_supported = 0; | |
7573 | i < ARRAY_SIZE(pqi_firmware_features); i++) { | |
7574 | if (pqi_is_firmware_feature_supported(firmware_features, | |
7575 | pqi_firmware_features[i].feature_bit)) { | |
7576 | pqi_firmware_features[i].supported = true; | |
7577 | num_features_supported++; | |
7578 | } else { | |
7579 | pqi_firmware_feature_update(ctrl_info, | |
7580 | &pqi_firmware_features[i]); | |
7581 | } | |
7582 | } | |
7583 | ||
7584 | if (num_features_supported == 0) | |
7585 | return; | |
7586 | ||
7587 | for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { | |
7588 | if (!pqi_firmware_features[i].supported) | |
7589 | continue; | |
7590 | pqi_request_firmware_feature(firmware_features, | |
7591 | pqi_firmware_features[i].feature_bit); | |
7592 | } | |
7593 | ||
7594 | rc = pqi_enable_firmware_features(ctrl_info, firmware_features, | |
7595 | firmware_features_iomem_addr); | |
7596 | if (rc) { | |
7597 | dev_err(&ctrl_info->pci_dev->dev, | |
7598 | "failed to enable firmware features in PQI configuration table\n"); | |
7599 | for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { | |
7600 | if (!pqi_firmware_features[i].supported) | |
7601 | continue; | |
7602 | pqi_firmware_feature_update(ctrl_info, | |
7603 | &pqi_firmware_features[i]); | |
7604 | } | |
7605 | return; | |
7606 | } | |
7607 | ||
7608 | for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { | |
7609 | if (!pqi_firmware_features[i].supported) | |
7610 | continue; | |
7611 | if (pqi_is_firmware_feature_enabled(firmware_features, | |
7612 | firmware_features_iomem_addr, | |
4fd22c13 | 7613 | pqi_firmware_features[i].feature_bit)) { |
583891c9 | 7614 | pqi_firmware_features[i].enabled = true; |
4fd22c13 | 7615 | } |
b212c251 KB |
7616 | pqi_firmware_feature_update(ctrl_info, |
7617 | &pqi_firmware_features[i]); | |
7618 | } | |
7619 | } | |
7620 | ||
7621 | static void pqi_init_firmware_features(void) | |
7622 | { | |
7623 | unsigned int i; | |
7624 | ||
7625 | for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { | |
7626 | pqi_firmware_features[i].supported = false; | |
7627 | pqi_firmware_features[i].enabled = false; | |
7628 | } | |
7629 | } | |
7630 | ||
7631 | static void pqi_process_firmware_features_section( | |
7632 | struct pqi_config_table_section_info *section_info) | |
7633 | { | |
7634 | mutex_lock(&pqi_firmware_features_mutex); | |
7635 | pqi_init_firmware_features(); | |
7636 | pqi_process_firmware_features(section_info); | |
7637 | mutex_unlock(&pqi_firmware_features_mutex); | |
7638 | } | |
7639 | ||
f6cc2a77 KB |
7640 | /* |
7641 | * Process the PQI Configuration Table: locate each optional section and |
7642 | * initialize the controller settings that the section describes. | |
7643 | */ | |
7644 | ||
98f87667 KB |
7645 | static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) |
7646 | { | |
7647 | u32 table_length; | |
7648 | u32 section_offset; | |
f6cc2a77 | 7649 | bool firmware_feature_section_present; |
98f87667 KB |
7650 | void __iomem *table_iomem_addr; |
7651 | struct pqi_config_table *config_table; | |
7652 | struct pqi_config_table_section_header *section; | |
b212c251 | 7653 | struct pqi_config_table_section_info section_info; |
f6cc2a77 | 7654 | struct pqi_config_table_section_info feature_section_info; |
98f87667 KB |
7655 | |
7656 | table_length = ctrl_info->config_table_length; | |
b212c251 KB |
7657 | if (table_length == 0) |
7658 | return 0; | |
98f87667 KB |
7659 | |
7660 | config_table = kmalloc(table_length, GFP_KERNEL); | |
7661 | if (!config_table) { | |
7662 | dev_err(&ctrl_info->pci_dev->dev, | |
d87d5474 | 7663 | "failed to allocate memory for PQI configuration table\n"); |
98f87667 KB |
7664 | return -ENOMEM; |
7665 | } | |
7666 | ||
7667 | /* | |
7668 | * Copy the config table contents from I/O memory space into the | |
7669 | * temporary buffer. | |
7670 | */ | |
583891c9 | 7671 | table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset; |
98f87667 KB |
7672 | memcpy_fromio(config_table, table_iomem_addr, table_length); |
7673 | ||
f6cc2a77 | 7674 | firmware_feature_section_present = false; |
b212c251 | 7675 | section_info.ctrl_info = ctrl_info; |
583891c9 | 7676 | section_offset = get_unaligned_le32(&config_table->first_section_offset); |
98f87667 KB |
7677 | |
7678 | while (section_offset) { | |
7679 | section = (void *)config_table + section_offset; | |
7680 | ||
b212c251 KB |
7681 | section_info.section = section; |
7682 | section_info.section_offset = section_offset; | |
583891c9 | 7683 | section_info.section_iomem_addr = table_iomem_addr + section_offset; |
b212c251 | 7684 | |
98f87667 | 7685 | switch (get_unaligned_le16(§ion->section_id)) { |
b212c251 | 7686 | case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES: |
f6cc2a77 KB |
7687 | firmware_feature_section_present = true; |
7688 | feature_section_info = section_info; | |
b212c251 | 7689 | break; |
98f87667 | 7690 | case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: |
5a259e32 KB |
7691 | if (pqi_disable_heartbeat) |
7692 | dev_warn(&ctrl_info->pci_dev->dev, | |
7693 | "heartbeat disabled by module parameter\n"); | |
7694 | else | |
7695 | ctrl_info->heartbeat_counter = | |
7696 | table_iomem_addr + | |
7697 | section_offset + | |
583891c9 | 7698 | offsetof(struct pqi_config_table_heartbeat, |
5a259e32 | 7699 | heartbeat_counter); |
98f87667 | 7700 | break; |
4fd22c13 MR |
7701 | case PQI_CONFIG_TABLE_SECTION_SOFT_RESET: |
7702 | ctrl_info->soft_reset_status = | |
7703 | table_iomem_addr + | |
7704 | section_offset + | |
7705 | offsetof(struct pqi_config_table_soft_reset, | |
583891c9 | 7706 | soft_reset_status); |
4fd22c13 | 7707 | break; |
98f87667 KB |
7708 | } |
7709 | ||
583891c9 | 7710 | section_offset = get_unaligned_le16(§ion->next_section_offset); |
98f87667 KB |
7711 | } |
7712 | ||
f6cc2a77 KB |
7713 | /* |
7714 | * Process the firmware feature section last so that the feature bit | |
7715 | * callbacks can take into account the settings configured by the | |
7716 | * other sections. | |
7717 | */ | |
7718 | if (firmware_feature_section_present) | |
7719 | pqi_process_firmware_features_section(&feature_section_info); | |
7720 | ||
98f87667 KB |
7721 | kfree(config_table); |
7722 | ||
7723 | return 0; | |
7724 | } | |
7725 | ||
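/*
 * Illustrative sketch, not part of the driver: as the loop above shows,
 * the configuration table is a chain of sections linked by offsets
 * relative to the start of the table, terminated by a zero
 * next_section_offset. A minimal walker over a host copy of the table:
 */
static __maybe_unused void pqi_for_each_config_table_section(
	struct pqi_config_table *config_table,
	void (*visit)(struct pqi_config_table_section_header *section))
{
	u32 section_offset;
	struct pqi_config_table_section_header *section;

	section_offset = get_unaligned_le32(&config_table->first_section_offset);

	while (section_offset) {
		section = (void *)config_table + section_offset;
		visit(section);
		section_offset = get_unaligned_le16(&section->next_section_offset);
	}
}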
162d7753 KB |
7726 | /* Switches the controller from PQI mode back into SIS mode. */ |
7727 | ||
7728 | static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) | |
7729 | { | |
7730 | int rc; | |
7731 | ||
061ef06a | 7732 | pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); |
162d7753 KB |
7733 | rc = pqi_reset(ctrl_info); |
7734 | if (rc) | |
7735 | return rc; | |
4f078e24 KB |
7736 | rc = sis_reenable_sis_mode(ctrl_info); |
7737 | if (rc) { | |
7738 | dev_err(&ctrl_info->pci_dev->dev, | |
7739 | "re-enabling SIS mode failed with error %d\n", rc); | |
7740 | return rc; | |
7741 | } | |
162d7753 KB |
7742 | pqi_save_ctrl_mode(ctrl_info, SIS_MODE); |
7743 | ||
7744 | return 0; | |
7745 | } | |
7746 | ||
7747 | /* | |
7748 | * If the controller isn't already in SIS mode, this function forces it into | |
7749 | * SIS mode. | |
7750 | */ | |
7751 | ||
7752 | static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) | |
ff6abb73 KB |
7753 | { |
7754 | if (!sis_is_firmware_running(ctrl_info)) | |
7755 | return -ENXIO; | |
7756 | ||
162d7753 KB |
7757 | if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) |
7758 | return 0; | |
7759 | ||
7760 | if (sis_is_kernel_up(ctrl_info)) { | |
7761 | pqi_save_ctrl_mode(ctrl_info, SIS_MODE); | |
7762 | return 0; | |
ff6abb73 KB |
7763 | } |
7764 | ||
162d7753 | 7765 | return pqi_revert_to_sis_mode(ctrl_info); |
ff6abb73 KB |
7766 | } |
7767 | ||
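/*
 * Summary of the decision above (descriptive only, no new behavior):
 *
 *	firmware not running		-> -ENXIO
 *	already in SIS mode		-> record SIS_MODE and return
 *	SIS "kernel" already up		-> record SIS_MODE and return
 *	otherwise			-> full pqi_revert_to_sis_mode()
 */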
0530736e KB |
7768 | #define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000 |
7769 | ||
6c223761 KB |
7770 | static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) |
7771 | { | |
7772 | int rc; | |
2708a256 | 7773 | u32 product_id; |
6c223761 | 7774 | |
0530736e KB |
7775 | if (reset_devices) { |
7776 | sis_soft_reset(ctrl_info); | |
7777 | msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY); | |
7778 | } else { | |
7779 | rc = pqi_force_sis_mode(ctrl_info); | |
7780 | if (rc) | |
7781 | return rc; | |
7782 | } | |
6c223761 KB |
7783 | |
7784 | /* | |
7785 | * Wait until the controller is ready to start accepting SIS | |
7786 | * commands. | |
7787 | */ | |
7788 | rc = sis_wait_for_ctrl_ready(ctrl_info); | |
8845fdfa | 7789 | if (rc) |
6c223761 | 7790 | return rc; |
6c223761 KB |
7791 | |
7792 | /* | |
7793 | * Get the controller properties. This allows us to determine | |
7794 | * whether or not it supports PQI mode. | |
7795 | */ | |
7796 | rc = sis_get_ctrl_properties(ctrl_info); | |
7797 | if (rc) { | |
7798 | dev_err(&ctrl_info->pci_dev->dev, | |
7799 | "error obtaining controller properties\n"); | |
7800 | return rc; | |
7801 | } | |
7802 | ||
7803 | rc = sis_get_pqi_capabilities(ctrl_info); | |
7804 | if (rc) { | |
7805 | dev_err(&ctrl_info->pci_dev->dev, | |
7806 | "error obtaining controller capabilities\n"); | |
7807 | return rc; | |
7808 | } | |
7809 | ||
2708a256 KB |
7810 | product_id = sis_get_product_id(ctrl_info); |
7811 | ctrl_info->product_id = (u8)product_id; | |
7812 | ctrl_info->product_revision = (u8)(product_id >> 8); | |
7813 | ||
d727a776 KB |
7814 | if (reset_devices) { |
7815 | if (ctrl_info->max_outstanding_requests > | |
7816 | PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) | |
583891c9 | 7817 | ctrl_info->max_outstanding_requests = |
d727a776 KB |
7818 | PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; |
7819 | } else { | |
7820 | if (ctrl_info->max_outstanding_requests > | |
7821 | PQI_MAX_OUTSTANDING_REQUESTS) | |
583891c9 | 7822 | ctrl_info->max_outstanding_requests = |
d727a776 KB |
7823 | PQI_MAX_OUTSTANDING_REQUESTS; |
7824 | } | |
6c223761 KB |
7825 | |
7826 | pqi_calculate_io_resources(ctrl_info); | |
7827 | ||
7828 | rc = pqi_alloc_error_buffer(ctrl_info); | |
7829 | if (rc) { | |
7830 | dev_err(&ctrl_info->pci_dev->dev, | |
7831 | "failed to allocate PQI error buffer\n"); | |
7832 | return rc; | |
7833 | } | |
7834 | ||
7835 | /* | |
7836 | * If the function we are about to call succeeds, the | |
7837 | * controller will transition from legacy SIS mode | |
7838 | * into PQI mode. | |
7839 | */ | |
7840 | rc = sis_init_base_struct_addr(ctrl_info); | |
7841 | if (rc) { | |
7842 | dev_err(&ctrl_info->pci_dev->dev, | |
7843 | "error initializing PQI mode\n"); | |
7844 | return rc; | |
7845 | } | |
7846 | ||
7847 | /* Wait for the controller to complete the SIS -> PQI transition. */ | |
7848 | rc = pqi_wait_for_pqi_mode_ready(ctrl_info); | |
7849 | if (rc) { | |
7850 | dev_err(&ctrl_info->pci_dev->dev, | |
7851 | "transition to PQI mode failed\n"); | |
7852 | return rc; | |
7853 | } | |
7854 | ||
7855 | /* From here on, we are running in PQI mode. */ | |
7856 | ctrl_info->pqi_mode_enabled = true; | |
ff6abb73 | 7857 | pqi_save_ctrl_mode(ctrl_info, PQI_MODE); |
6c223761 KB |
7858 | |
7859 | rc = pqi_alloc_admin_queues(ctrl_info); | |
7860 | if (rc) { | |
7861 | dev_err(&ctrl_info->pci_dev->dev, | |
d87d5474 | 7862 | "failed to allocate admin queues\n"); |
6c223761 KB |
7863 | return rc; |
7864 | } | |
7865 | ||
7866 | rc = pqi_create_admin_queues(ctrl_info); | |
7867 | if (rc) { | |
7868 | dev_err(&ctrl_info->pci_dev->dev, | |
7869 | "error creating admin queues\n"); | |
7870 | return rc; | |
7871 | } | |
7872 | ||
7873 | rc = pqi_report_device_capability(ctrl_info); | |
7874 | if (rc) { | |
7875 | dev_err(&ctrl_info->pci_dev->dev, | |
7876 | "obtaining device capability failed\n"); | |
7877 | return rc; | |
7878 | } | |
7879 | ||
7880 | rc = pqi_validate_device_capability(ctrl_info); | |
7881 | if (rc) | |
7882 | return rc; | |
7883 | ||
7884 | pqi_calculate_queue_resources(ctrl_info); | |
7885 | ||
7886 | rc = pqi_enable_msix_interrupts(ctrl_info); | |
7887 | if (rc) | |
7888 | return rc; | |
7889 | ||
7890 | if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { | |
7891 | ctrl_info->max_msix_vectors = | |
7892 | ctrl_info->num_msix_vectors_enabled; | |
7893 | pqi_calculate_queue_resources(ctrl_info); | |
7894 | } | |
7895 | ||
7896 | rc = pqi_alloc_io_resources(ctrl_info); | |
7897 | if (rc) | |
7898 | return rc; | |
7899 | ||
7900 | rc = pqi_alloc_operational_queues(ctrl_info); | |
d87d5474 KB |
7901 | if (rc) { |
7902 | dev_err(&ctrl_info->pci_dev->dev, | |
7903 | "failed to allocate operational queues\n"); | |
6c223761 | 7904 | return rc; |
d87d5474 | 7905 | } |
6c223761 KB |
7906 | |
7907 | pqi_init_operational_queues(ctrl_info); | |
7908 | ||
7909 | rc = pqi_request_irqs(ctrl_info); | |
7910 | if (rc) | |
7911 | return rc; | |
7912 | ||
6c223761 KB |
7913 | rc = pqi_create_queues(ctrl_info); |
7914 | if (rc) | |
7915 | return rc; | |
7916 | ||
061ef06a KB |
7917 | pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); |
7918 | ||
7919 | ctrl_info->controller_online = true; | |
b212c251 KB |
7920 | |
7921 | rc = pqi_process_config_table(ctrl_info); | |
7922 | if (rc) | |
7923 | return rc; | |
7924 | ||
061ef06a | 7925 | pqi_start_heartbeat_timer(ctrl_info); |
6c223761 | 7926 | |
f6cc2a77 KB |
7927 | if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { |
7928 | rc = pqi_get_advanced_raid_bypass_config(ctrl_info); | |
7929 | if (rc) { /* Supported features not returned correctly. */ | |
7930 | dev_err(&ctrl_info->pci_dev->dev, | |
7931 | "error obtaining advanced RAID bypass configuration\n"); | |
7932 | return rc; | |
7933 | } | |
7934 | ctrl_info->ciss_report_log_flags |= | |
7935 | CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; | |
7936 | } | |
7937 | ||
6a50d6ad | 7938 | rc = pqi_enable_events(ctrl_info); |
6c223761 KB |
7939 | if (rc) { |
7940 | dev_err(&ctrl_info->pci_dev->dev, | |
6a50d6ad | 7941 | "error enabling events\n"); |
6c223761 KB |
7942 | return rc; |
7943 | } | |
7944 | ||
6c223761 KB |
7945 | /* Register with the SCSI subsystem. */ |
7946 | rc = pqi_register_scsi(ctrl_info); | |
7947 | if (rc) | |
7948 | return rc; | |
7949 | ||
6d90615f MB |
7950 | rc = pqi_get_ctrl_product_details(ctrl_info); |
7951 | if (rc) { | |
7952 | dev_err(&ctrl_info->pci_dev->dev, | |
7953 | "error obtaining product details\n"); | |
7954 | return rc; | |
7955 | } | |
7956 | ||
7957 | rc = pqi_get_ctrl_serial_number(ctrl_info); | |
6c223761 KB |
7958 | if (rc) { |
7959 | dev_err(&ctrl_info->pci_dev->dev, | |
6d90615f | 7960 | "error obtaining ctrl serial number\n"); |
6c223761 KB |
7961 | return rc; |
7962 | } | |
7963 | ||
171c2865 DC |
7964 | rc = pqi_set_diag_rescan(ctrl_info); |
7965 | if (rc) { | |
7966 | dev_err(&ctrl_info->pci_dev->dev, | |
7967 | "error enabling multi-lun rescan\n"); | |
7968 | return rc; | |
7969 | } | |
7970 | ||
6c223761 KB |
7971 | rc = pqi_write_driver_version_to_host_wellness(ctrl_info); |
7972 | if (rc) { | |
7973 | dev_err(&ctrl_info->pci_dev->dev, | |
7974 | "error updating host wellness\n"); | |
7975 | return rc; | |
7976 | } | |
7977 | ||
7978 | pqi_schedule_update_time_worker(ctrl_info); | |
7979 | ||
7980 | pqi_scan_scsi_devices(ctrl_info); | |
7981 | ||
7982 | return 0; | |
7983 | } | |
7984 | ||
061ef06a KB |
7985 | static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) |
7986 | { | |
7987 | unsigned int i; | |
7988 | struct pqi_admin_queues *admin_queues; | |
7989 | struct pqi_event_queue *event_queue; | |
7990 | ||
7991 | admin_queues = &ctrl_info->admin_queues; | |
7992 | admin_queues->iq_pi_copy = 0; | |
7993 | admin_queues->oq_ci_copy = 0; | |
dac12fbc | 7994 | writel(0, admin_queues->oq_pi); |
061ef06a KB |
7995 | |
7996 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | |
7997 | ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; | |
7998 | ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; | |
7999 | ctrl_info->queue_groups[i].oq_ci_copy = 0; | |
8000 | ||
dac12fbc KB |
8001 | writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); |
8002 | writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); | |
8003 | writel(0, ctrl_info->queue_groups[i].oq_pi); | |
061ef06a KB |
8004 | } |
8005 | ||
8006 | event_queue = &ctrl_info->event_queue; | |
dac12fbc | 8007 | writel(0, event_queue->oq_pi); |
061ef06a KB |
8008 | event_queue->oq_ci_copy = 0; |
8009 | } | |
8010 | ||
8011 | static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) | |
8012 | { | |
8013 | int rc; | |
8014 | ||
8015 | rc = pqi_force_sis_mode(ctrl_info); | |
8016 | if (rc) | |
8017 | return rc; | |
8018 | ||
8019 | /* | |
8020 | * Wait until the controller is ready to start accepting SIS | |
8021 | * commands. | |
8022 | */ | |
8023 | rc = sis_wait_for_ctrl_ready_resume(ctrl_info); | |
8024 | if (rc) | |
8025 | return rc; | |
8026 | ||
4fd22c13 MR |
8027 | /* |
8028 | * Get the controller properties. This allows us to determine | |
8029 | * whether or not it supports PQI mode. | |
8030 | */ | |
8031 | rc = sis_get_ctrl_properties(ctrl_info); | |
8032 | if (rc) { | |
8033 | dev_err(&ctrl_info->pci_dev->dev, | |
8034 | "error obtaining controller properties\n"); | |
8035 | return rc; | |
8036 | } | |
8037 | ||
8038 | rc = sis_get_pqi_capabilities(ctrl_info); | |
8039 | if (rc) { | |
8040 | dev_err(&ctrl_info->pci_dev->dev, | |
8041 | "error obtaining controller capabilities\n"); | |
8042 | return rc; | |
8043 | } | |
8044 | ||
061ef06a KB |
8045 | /* |
8046 | * If the function we are about to call succeeds, the | |
8047 | * controller will transition from legacy SIS mode | |
8048 | * into PQI mode. | |
8049 | */ | |
8050 | rc = sis_init_base_struct_addr(ctrl_info); | |
8051 | if (rc) { | |
8052 | dev_err(&ctrl_info->pci_dev->dev, | |
8053 | "error initializing PQI mode\n"); | |
8054 | return rc; | |
8055 | } | |
8056 | ||
8057 | /* Wait for the controller to complete the SIS -> PQI transition. */ | |
8058 | rc = pqi_wait_for_pqi_mode_ready(ctrl_info); | |
8059 | if (rc) { | |
8060 | dev_err(&ctrl_info->pci_dev->dev, | |
8061 | "transition to PQI mode failed\n"); | |
8062 | return rc; | |
8063 | } | |
8064 | ||
8065 | /* From here on, we are running in PQI mode. */ | |
8066 | ctrl_info->pqi_mode_enabled = true; | |
8067 | pqi_save_ctrl_mode(ctrl_info, PQI_MODE); | |
8068 | ||
8069 | pqi_reinit_queues(ctrl_info); | |
8070 | ||
8071 | rc = pqi_create_admin_queues(ctrl_info); | |
8072 | if (rc) { | |
8073 | dev_err(&ctrl_info->pci_dev->dev, | |
8074 | "error creating admin queues\n"); | |
8075 | return rc; | |
8076 | } | |
8077 | ||
8078 | rc = pqi_create_queues(ctrl_info); | |
8079 | if (rc) | |
8080 | return rc; | |
8081 | ||
8082 | pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); | |
8083 | ||
8084 | ctrl_info->controller_online = true; | |
061ef06a KB |
8085 | pqi_ctrl_unblock_requests(ctrl_info); |
8086 | ||
4fd22c13 MR |
8087 | rc = pqi_process_config_table(ctrl_info); |
8088 | if (rc) | |
8089 | return rc; | |
8090 | ||
8091 | pqi_start_heartbeat_timer(ctrl_info); | |
8092 | ||
f6cc2a77 KB |
8093 | if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { |
8094 | rc = pqi_get_advanced_raid_bypass_config(ctrl_info); | |
8095 | if (rc) { | |
8096 | dev_err(&ctrl_info->pci_dev->dev, | |
8097 | "error obtaining advanced RAID bypass configuration\n"); | |
8098 | return rc; | |
8099 | } | |
8100 | ctrl_info->ciss_report_log_flags |= | |
8101 | CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; | |
8102 | } | |
8103 | ||
061ef06a KB |
8104 | rc = pqi_enable_events(ctrl_info); |
8105 | if (rc) { | |
8106 | dev_err(&ctrl_info->pci_dev->dev, | |
d87d5474 | 8107 | "error enabling events\n"); |
061ef06a KB |
8108 | return rc; |
8109 | } | |
8110 | ||
6d90615f | 8111 | rc = pqi_get_ctrl_product_details(ctrl_info); |
4fd22c13 MR |
8112 | if (rc) { |
8113 | dev_err(&ctrl_info->pci_dev->dev, | |
694c5d5b | 8114 | "error obtaining product details\n"); |
4fd22c13 MR |
8115 | return rc; |
8116 | } | |
8117 | ||
171c2865 DC |
8118 | rc = pqi_set_diag_rescan(ctrl_info); |
8119 | if (rc) { | |
8120 | dev_err(&ctrl_info->pci_dev->dev, | |
8121 | "error enabling multi-lun rescan\n"); | |
8122 | return rc; | |
8123 | } | |
8124 | ||
061ef06a KB |
8125 | rc = pqi_write_driver_version_to_host_wellness(ctrl_info); |
8126 | if (rc) { | |
8127 | dev_err(&ctrl_info->pci_dev->dev, | |
8128 | "error updating host wellness\n"); | |
8129 | return rc; | |
8130 | } | |
8131 | ||
8132 | pqi_schedule_update_time_worker(ctrl_info); | |
8133 | ||
8134 | pqi_scan_scsi_devices(ctrl_info); | |
8135 | ||
8136 | return 0; | |
8137 | } | |
8138 | ||
583891c9 | 8139 | static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout) |
a81ed5f3 | 8140 | { |
d20df83b BOS |
8141 | int rc; |
8142 | ||
8143 | rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, | |
a81ed5f3 | 8144 | PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); |
d20df83b BOS |
8145 | |
8146 | return pcibios_err_to_errno(rc); | |
a81ed5f3 KB |
8147 | } |
8148 | ||
6c223761 KB |
8149 | static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) |
8150 | { | |
8151 | int rc; | |
8152 | u64 mask; | |
8153 | ||
8154 | rc = pci_enable_device(ctrl_info->pci_dev); | |
8155 | if (rc) { | |
8156 | dev_err(&ctrl_info->pci_dev->dev, | |
8157 | "failed to enable PCI device\n"); | |
8158 | return rc; | |
8159 | } | |
8160 | ||
8161 | if (sizeof(dma_addr_t) > 4) | |
8162 | mask = DMA_BIT_MASK(64); | |
8163 | else | |
8164 | mask = DMA_BIT_MASK(32); | |
8165 | ||
1d94f06e | 8166 | rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); |
6c223761 KB |
8167 | if (rc) { |
8168 | dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); | |
8169 | goto disable_device; | |
8170 | } | |
8171 | ||
8172 | rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); | |
8173 | if (rc) { | |
8174 | dev_err(&ctrl_info->pci_dev->dev, | |
8175 | "failed to obtain PCI resources\n"); | |
8176 | goto disable_device; | |
8177 | } | |
8178 | ||
4bdc0d67 | 8179 | ctrl_info->iomem_base = ioremap(pci_resource_start( |
6c223761 KB |
8180 | ctrl_info->pci_dev, 0), |
8181 | sizeof(struct pqi_ctrl_registers)); | |
8182 | if (!ctrl_info->iomem_base) { | |
8183 | dev_err(&ctrl_info->pci_dev->dev, | |
8184 | "failed to map memory for controller registers\n"); | |
8185 | rc = -ENOMEM; | |
8186 | goto release_regions; | |
8187 | } | |
8188 | ||
a81ed5f3 KB |
8189 | #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 |
8190 | ||
8191 | /* Increase the PCIe completion timeout. */ | |
8192 | rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, | |
8193 | PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); | |
8194 | if (rc) { | |
8195 | dev_err(&ctrl_info->pci_dev->dev, | |
8196 | "failed to set PCIe completion timeout\n"); | |
8197 | goto release_regions; | |
8198 | } | |
8199 | ||
6c223761 KB |
8200 | /* Enable bus mastering. */ |
8201 | pci_set_master(ctrl_info->pci_dev); | |
8202 | ||
cbe0c7b1 KB |
8203 | ctrl_info->registers = ctrl_info->iomem_base; |
8204 | ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; | |
8205 | ||
6c223761 KB |
8206 | pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); |
8207 | ||
8208 | return 0; | |
8209 | ||
8210 | release_regions: | |
8211 | pci_release_regions(ctrl_info->pci_dev); | |
8212 | disable_device: | |
8213 | pci_disable_device(ctrl_info->pci_dev); | |
8214 | ||
8215 | return rc; | |
8216 | } | |
8217 | ||
8218 | static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) | |
8219 | { | |
8220 | iounmap(ctrl_info->iomem_base); | |
8221 | pci_release_regions(ctrl_info->pci_dev); | |
cbe0c7b1 KB |
8222 | if (pci_is_enabled(ctrl_info->pci_dev)) |
8223 | pci_disable_device(ctrl_info->pci_dev); | |
6c223761 KB |
8224 | pci_set_drvdata(ctrl_info->pci_dev, NULL); |
8225 | } | |
8226 | ||
8227 | static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) | |
8228 | { | |
8229 | struct pqi_ctrl_info *ctrl_info; | |
8230 | ||
8231 | ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), | |
8232 | GFP_KERNEL, numa_node); | |
8233 | if (!ctrl_info) | |
8234 | return NULL; | |
8235 | ||
8236 | mutex_init(&ctrl_info->scan_mutex); | |
7561a7e4 | 8237 | mutex_init(&ctrl_info->lun_reset_mutex); |
4fd22c13 | 8238 | mutex_init(&ctrl_info->ofa_mutex); |
6c223761 KB |
8239 | |
8240 | INIT_LIST_HEAD(&ctrl_info->scsi_device_list); | |
8241 | spin_lock_init(&ctrl_info->scsi_device_list_lock); | |
8242 | ||
8243 | INIT_WORK(&ctrl_info->event_work, pqi_event_worker); | |
8244 | atomic_set(&ctrl_info->num_interrupts, 0); | |
0530736e | 8245 | atomic_set(&ctrl_info->sync_cmds_outstanding, 0); |
6c223761 KB |
8246 | |
8247 | INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); | |
8248 | INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); | |
8249 | ||
74a0f573 | 8250 | timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); |
5f310425 | 8251 | INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); |
98f87667 | 8252 | |
6c223761 KB |
8253 | sema_init(&ctrl_info->sync_request_sem, |
8254 | PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); | |
7561a7e4 | 8255 | init_waitqueue_head(&ctrl_info->block_requests_wait); |
6c223761 | 8256 | |
376fb880 KB |
8257 | INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); |
8258 | spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock); | |
8259 | INIT_WORK(&ctrl_info->raid_bypass_retry_work, | |
8260 | pqi_raid_bypass_retry_worker); | |
8261 | ||
6c223761 | 8262 | ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; |
061ef06a | 8263 | ctrl_info->irq_mode = IRQ_MODE_NONE; |
6c223761 KB |
8264 | ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; |
8265 | ||
f6cc2a77 KB |
8266 | ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID; |
8267 | ctrl_info->max_transfer_encrypted_sas_sata = | |
8268 | PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA; | |
8269 | ctrl_info->max_transfer_encrypted_nvme = | |
8270 | PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME; | |
8271 | ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6; | |
8272 | ctrl_info->max_write_raid_1_10_2drive = ~0; | |
8273 | ctrl_info->max_write_raid_1_10_3drive = ~0; | |
8274 | ||
6c223761 KB |
8275 | return ctrl_info; |
8276 | } | |
8277 | ||
8278 | static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) | |
8279 | { | |
8280 | kfree(ctrl_info); | |
8281 | } | |
8282 | ||
8283 | static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) | |
8284 | { | |
98bf061b KB |
8285 | pqi_free_irqs(ctrl_info); |
8286 | pqi_disable_msix_interrupts(ctrl_info); | |
6c223761 KB |
8287 | } |
8288 | ||
8289 | static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) | |
8290 | { | |
8291 | pqi_stop_heartbeat_timer(ctrl_info); | |
8292 | pqi_free_interrupts(ctrl_info); | |
8293 | if (ctrl_info->queue_memory_base) | |
8294 | dma_free_coherent(&ctrl_info->pci_dev->dev, | |
8295 | ctrl_info->queue_memory_length, | |
8296 | ctrl_info->queue_memory_base, | |
8297 | ctrl_info->queue_memory_base_dma_handle); | |
8298 | if (ctrl_info->admin_queue_memory_base) | |
8299 | dma_free_coherent(&ctrl_info->pci_dev->dev, | |
8300 | ctrl_info->admin_queue_memory_length, | |
8301 | ctrl_info->admin_queue_memory_base, | |
8302 | ctrl_info->admin_queue_memory_base_dma_handle); | |
8303 | pqi_free_all_io_requests(ctrl_info); | |
8304 | if (ctrl_info->error_buffer) | |
8305 | dma_free_coherent(&ctrl_info->pci_dev->dev, | |
8306 | ctrl_info->error_buffer_length, | |
8307 | ctrl_info->error_buffer, | |
8308 | ctrl_info->error_buffer_dma_handle); | |
8309 | if (ctrl_info->iomem_base) | |
8310 | pqi_cleanup_pci_init(ctrl_info); | |
8311 | pqi_free_ctrl_info(ctrl_info); | |
8312 | } | |
8313 | ||
8314 | static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) | |
8315 | { | |
061ef06a KB |
8316 | pqi_cancel_rescan_worker(ctrl_info); |
8317 | pqi_cancel_update_time_worker(ctrl_info); | |
e57a1f9b | 8318 | pqi_unregister_scsi(ctrl_info); |
162d7753 KB |
8319 | if (ctrl_info->pqi_mode_enabled) |
8320 | pqi_revert_to_sis_mode(ctrl_info); | |
6c223761 KB |
8321 | pqi_free_ctrl_resources(ctrl_info); |
8322 | } | |
8323 | ||
4fd22c13 MR |
8324 | static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) |
8325 | { | |
8326 | pqi_cancel_update_time_worker(ctrl_info); | |
8327 | pqi_cancel_rescan_worker(ctrl_info); | |
8328 | pqi_wait_until_lun_reset_finished(ctrl_info); | |
8329 | pqi_wait_until_scan_finished(ctrl_info); | |
8330 | pqi_ctrl_ofa_start(ctrl_info); | |
8331 | pqi_ctrl_block_requests(ctrl_info); | |
8332 | pqi_ctrl_wait_until_quiesced(ctrl_info); | |
8333 | pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS); | |
8334 | pqi_fail_io_queued_for_all_devices(ctrl_info); | |
8335 | pqi_wait_until_inbound_queues_empty(ctrl_info); | |
8336 | pqi_stop_heartbeat_timer(ctrl_info); | |
8337 | ctrl_info->pqi_mode_enabled = false; | |
8338 | pqi_save_ctrl_mode(ctrl_info, SIS_MODE); | |
8339 | } | |
8340 | ||
8341 | static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) | |
8342 | { | |
8343 | pqi_ofa_free_host_buffer(ctrl_info); | |
8344 | ctrl_info->pqi_mode_enabled = true; | |
8345 | pqi_save_ctrl_mode(ctrl_info, PQI_MODE); | |
8346 | ctrl_info->controller_online = true; | |
8347 | pqi_ctrl_unblock_requests(ctrl_info); | |
8348 | pqi_start_heartbeat_timer(ctrl_info); | |
8349 | pqi_schedule_update_time_worker(ctrl_info); | |
8350 | pqi_clear_soft_reset_status(ctrl_info, | |
8351 | PQI_SOFT_RESET_ABORT); | |
8352 | pqi_scan_scsi_devices(ctrl_info); | |
8353 | } | |
8354 | ||
8355 | static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, | |
8356 | u32 total_size, u32 chunk_size) | |
8357 | { | |
8358 | u32 sg_count; | |
8359 | u32 size; | |
8360 | int i; | |
8361 | struct pqi_sg_descriptor *mem_descriptor = NULL; | |
8362 | struct device *dev; | |
8363 | struct pqi_ofa_memory *ofap; | |
8364 | ||
8365 | dev = &ctrl_info->pci_dev->dev; | |
8366 | ||
8367 | sg_count = DIV_ROUND_UP(total_size, chunk_size); | |
4fd22c13 MR |
8369 | |
8370 | ofap = ctrl_info->pqi_ofa_mem_virt_addr; | |
8371 | ||
8372 | if (sg_count * chunk_size < total_size) | |
8373 | goto out; | |
8374 | ||
8375 | ctrl_info->pqi_ofa_chunk_virt_addr = | |
8376 | kcalloc(sg_count, sizeof(void *), GFP_KERNEL); | |
8377 | if (!ctrl_info->pqi_ofa_chunk_virt_addr) | |
8378 | goto out; | |
8379 | ||
8380 | for (size = 0, i = 0; size < total_size; size += chunk_size, i++) { | |
8381 | dma_addr_t dma_handle; | |
8382 | ||
8383 | ctrl_info->pqi_ofa_chunk_virt_addr[i] = | |
750afb08 LC |
8384 | dma_alloc_coherent(dev, chunk_size, &dma_handle, |
8385 | GFP_KERNEL); | |
4fd22c13 MR |
8386 | |
8387 | if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) | |
8388 | break; | |
8389 | ||
8390 | mem_descriptor = &ofap->sg_descriptor[i]; | |
583891c9 KB |
8391 | put_unaligned_le64((u64)dma_handle, &mem_descriptor->address); |
8392 | put_unaligned_le32(chunk_size, &mem_descriptor->length); | |
4fd22c13 MR |
8393 | } |
8394 | ||
8395 | if (!size || size < total_size) | |
8396 | goto out_free_chunks; | |
8397 | ||
8398 | put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); | |
8399 | put_unaligned_le16(sg_count, &ofap->num_memory_descriptors); | |
8400 | put_unaligned_le32(size, &ofap->bytes_allocated); | |
8401 | ||
8402 | return 0; | |
8403 | ||
8404 | out_free_chunks: | |
8405 | while (--i >= 0) { | |
8406 | mem_descriptor = &ofap->sg_descriptor[i]; | |
8407 | dma_free_coherent(dev, chunk_size, | |
8408 | ctrl_info->pqi_ofa_chunk_virt_addr[i], | |
8409 | get_unaligned_le64(&mem_descriptor->address)); | |
8410 | } | |
8411 | kfree(ctrl_info->pqi_ofa_chunk_virt_addr); | |
8412 | ||
8413 | out: | |
8414 | put_unaligned_le32(0, &ofap->bytes_allocated); | |
8415 | return -ENOMEM; | |
8416 | } | |
8417 | ||
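/*
 * Illustrative note, not part of the driver: pqi_ofa_alloc_host_buffer()
 * below retries the allocator above with a halving chunk size, so the
 * descriptor count grows as DIV_ROUND_UP(total_size, chunk_size) until
 * either every chunk allocation succeeds or chunk_size falls below
 * total_size / PQI_OFA_MAX_SG_DESCRIPTORS. For example, a hypothetical
 * 256 MiB request that cannot be carved out as a single chunk is
 * retried as 2 x 128 MiB, then 4 x 64 MiB, and so on.
 */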
8418 | static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) | |
8419 | { | |
8420 | u32 total_size; | |
8421 | u32 min_chunk_size; | |
8422 | u32 chunk_sz; | |
8423 | ||
8424 | total_size = le32_to_cpu( | |
8425 | ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated); | |
8426 | min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS; | |
8427 | ||
8428 | for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2) | |
8429 | if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz)) | |
8430 | return 0; | |
8431 | ||
8432 | return -ENOMEM; | |
8433 | } | |
8434 | ||
8435 | static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info, | |
8436 | u32 bytes_requested) | |
8437 | { | |
8438 | struct pqi_ofa_memory *pqi_ofa_memory; | |
8439 | struct device *dev; | |
8440 | ||
8441 | dev = &ctrl_info->pci_dev->dev; | |
750afb08 LC |
8442 | pqi_ofa_memory = dma_alloc_coherent(dev, |
8443 | PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, | |
8444 | &ctrl_info->pqi_ofa_mem_dma_handle, | |
8445 | GFP_KERNEL); | |
4fd22c13 MR |
8446 | |
8447 | if (!pqi_ofa_memory) | |
8448 | return; | |
8449 | ||
8450 | put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version); | |
8451 | memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE, | |
8452 | sizeof(pqi_ofa_memory->signature)); | |
8453 | pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested); | |
8454 | ||
8455 | ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory; | |
8456 | ||
8457 | if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) { | |
8458 | dev_err(dev, "failed to allocate host buffer of size = %u\n", | |
8459 | bytes_requested); | |
8460 | } | |
4fd22c13 MR |
8463 | } |
8464 | ||
8465 | static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) | |
8466 | { | |
8467 | int i; | |
8468 | struct pqi_sg_descriptor *mem_descriptor; | |
8469 | struct pqi_ofa_memory *ofap; | |
8470 | ||
8471 | ofap = ctrl_info->pqi_ofa_mem_virt_addr; | |
8472 | ||
8473 | if (!ofap) | |
8474 | return; | |
8475 | ||
8476 | if (!ofap->bytes_allocated) | |
8477 | goto out; | |
8478 | ||
8479 | mem_descriptor = ofap->sg_descriptor; | |
8480 | ||
8481 | for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors); | |
8482 | i++) { | |
8483 | dma_free_coherent(&ctrl_info->pci_dev->dev, | |
8484 | get_unaligned_le32(&mem_descriptor[i].length), | |
8485 | ctrl_info->pqi_ofa_chunk_virt_addr[i], | |
8486 | get_unaligned_le64(&mem_descriptor[i].address)); | |
8487 | } | |
8488 | kfree(ctrl_info->pqi_ofa_chunk_virt_addr); | |
8489 | ||
8490 | out: | |
8491 | dma_free_coherent(&ctrl_info->pci_dev->dev, | |
8492 | PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap, | |
8493 | ctrl_info->pqi_ofa_mem_dma_handle); | |
8494 | ctrl_info->pqi_ofa_mem_virt_addr = NULL; | |
8495 | } | |
8496 | ||
8497 | static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) | |
8498 | { | |
8499 | struct pqi_vendor_general_request request; | |
8500 | size_t size; | |
8501 | struct pqi_ofa_memory *ofap; | |
8502 | ||
8503 | memset(&request, 0, sizeof(request)); | |
8504 | ||
8505 | ofap = ctrl_info->pqi_ofa_mem_virt_addr; | |
8506 | ||
8507 | request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; | |
8508 | put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, | |
8509 | &request.header.iu_length); | |
8510 | put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE, | |
8511 | &request.function_code); | |
8512 | ||
8513 | if (ofap) { | |
8514 | size = offsetof(struct pqi_ofa_memory, sg_descriptor) + | |
8515 | get_unaligned_le16(&ofap->num_memory_descriptors) * | |
8516 | sizeof(struct pqi_sg_descriptor); | |
8517 | ||
8518 | put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle, | |
8519 | &request.data.ofa_memory_allocation.buffer_address); | |
8520 | put_unaligned_le32(size, | |
8521 | &request.data.ofa_memory_allocation.buffer_length); | |
8522 | ||
8523 | } | |
8524 | ||
8525 | return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, | |
8526 | 0, NULL, NO_TIMEOUT); | |
8527 | } | |
8528 | ||
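/*
 * Illustrative sketch, not part of the driver: the buffer length
 * reported to the firmware above is the OFA memory header up to
 * sg_descriptor plus one descriptor per allocated chunk. The
 * hypothetical helper below names that computation.
 */
static __maybe_unused size_t pqi_ofa_memory_size(u16 num_memory_descriptors)
{
	return offsetof(struct pqi_ofa_memory, sg_descriptor) +
		num_memory_descriptors * sizeof(struct pqi_sg_descriptor);
}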
4fd22c13 MR |
8529 | static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info) |
8530 | { | |
8531 | msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY); | |
8532 | return pqi_ctrl_init_resume(ctrl_info); | |
8533 | } | |
8534 | ||
3c50976f KB |
8535 | static void pqi_perform_lockup_action(void) |
8536 | { | |
8537 | switch (pqi_lockup_action) { | |
8538 | case PANIC: | |
8539 | panic("FATAL: Smart Family Controller lockup detected"); | |
8540 | break; | |
8541 | case REBOOT: | |
8542 | emergency_restart(); | |
8543 | break; | |
8544 | case NONE: | |
8545 | default: | |
8546 | break; | |
8547 | } | |
8548 | } | |
8549 | ||
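/*
 * Usage note (assumption: the parameter name is inferred from
 * pqi_lockup_action_param, parsed by pqi_process_lockup_action_param()
 * below): the action is selected at load time, e.g.
 *
 *	modprobe smartpqi lockup_action=reboot
 *
 * with the accepted values being the ones listed in the warning there:
 * none, reboot, panic.
 */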
5f310425 KB |
8550 | static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { |
8551 | .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, | |
8552 | .status = SAM_STAT_CHECK_CONDITION, | |
8553 | }; | |
8554 | ||
8555 | static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) | |
376fb880 KB |
8556 | { |
8557 | unsigned int i; | |
376fb880 | 8558 | struct pqi_io_request *io_request; |
376fb880 KB |
8559 | struct scsi_cmnd *scmd; |
8560 | ||
5f310425 KB |
8561 | for (i = 0; i < ctrl_info->max_io_slots; i++) { |
8562 | io_request = &ctrl_info->io_request_pool[i]; | |
8563 | if (atomic_read(&io_request->refcount) == 0) | |
8564 | continue; | |
376fb880 | 8565 | |
5f310425 KB |
8566 | scmd = io_request->scmd; |
8567 | if (scmd) { | |
8568 | set_host_byte(scmd, DID_NO_CONNECT); | |
8569 | } else { | |
8570 | io_request->status = -ENXIO; | |
8571 | io_request->error_info = | |
8572 | &pqi_ctrl_offline_raid_error_info; | |
376fb880 | 8573 | } |
5f310425 KB |
8574 | |
8575 | io_request->io_complete_callback(io_request, | |
8576 | io_request->context); | |
376fb880 KB |
8577 | } |
8578 | } | |
8579 | ||
5f310425 | 8580 | static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) |
376fb880 | 8581 | { |
5f310425 KB |
8582 | pqi_perform_lockup_action(); |
8583 | pqi_stop_heartbeat_timer(ctrl_info); | |
8584 | pqi_free_interrupts(ctrl_info); | |
8585 | pqi_cancel_rescan_worker(ctrl_info); | |
8586 | pqi_cancel_update_time_worker(ctrl_info); | |
8587 | pqi_ctrl_wait_until_quiesced(ctrl_info); | |
8588 | pqi_fail_all_outstanding_requests(ctrl_info); | |
8589 | pqi_clear_all_queued_raid_bypass_retries(ctrl_info); | |
8590 | pqi_ctrl_unblock_requests(ctrl_info); | |
8591 | } | |
8592 | ||
8593 | static void pqi_ctrl_offline_worker(struct work_struct *work) | |
8594 | { | |
8595 | struct pqi_ctrl_info *ctrl_info; | |
8596 | ||
8597 | ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); | |
8598 | pqi_take_ctrl_offline_deferred(ctrl_info); | |
376fb880 KB |
8599 | } |
8600 | ||
8601 | static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) | |
8602 | { | |
5f310425 KB |
8603 | if (!ctrl_info->controller_online) |
8604 | return; | |
8605 | ||
376fb880 | 8606 | ctrl_info->controller_online = false; |
5f310425 KB |
8607 | ctrl_info->pqi_mode_enabled = false; |
8608 | pqi_ctrl_block_requests(ctrl_info); | |
5a259e32 KB |
8609 | if (!pqi_disable_ctrl_shutdown) |
8610 | sis_shutdown_ctrl(ctrl_info); | |
376fb880 KB |
8611 | pci_disable_device(ctrl_info->pci_dev); |
8612 | dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); | |
5f310425 | 8613 | schedule_work(&ctrl_info->ctrl_offline_work); |
376fb880 KB |
8614 | } |
8615 | ||
d91d7820 | 8616 | static void pqi_print_ctrl_info(struct pci_dev *pci_dev, |
6c223761 KB |
8617 | const struct pci_device_id *id) |
8618 | { | |
8619 | char *ctrl_description; | |
8620 | ||
37b36847 | 8621 | if (id->driver_data) |
6c223761 | 8622 | ctrl_description = (char *)id->driver_data; |
37b36847 KB |
8623 | else |
8624 | ctrl_description = "Microsemi Smart Family Controller"; | |
6c223761 | 8625 | |
d91d7820 | 8626 | dev_info(&pci_dev->dev, "%s found\n", ctrl_description); |
6c223761 KB |
8627 | } |
8628 | ||
d91d7820 KB |
8629 | static int pqi_pci_probe(struct pci_dev *pci_dev, |
8630 | const struct pci_device_id *id) | |
6c223761 KB |
8631 | { |
8632 | int rc; | |
62dc51fb | 8633 | int node, cp_node; |
6c223761 KB |
8634 | struct pqi_ctrl_info *ctrl_info; |
8635 | ||
d91d7820 | 8636 | pqi_print_ctrl_info(pci_dev, id); |
6c223761 KB |
8637 | |
8638 | if (pqi_disable_device_id_wildcards && | |
8639 | id->subvendor == PCI_ANY_ID && | |
8640 | id->subdevice == PCI_ANY_ID) { | |
d91d7820 | 8641 | dev_warn(&pci_dev->dev, |
6c223761 KB |
8642 | "controller not probed because device ID wildcards are disabled\n"); |
8643 | return -ENODEV; | |
8644 | } | |
8645 | ||
8646 | if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID) | |
d91d7820 | 8647 | dev_warn(&pci_dev->dev, |
6c223761 KB |
8648 | "controller device ID matched using wildcards\n"); |
8649 | ||
d91d7820 | 8650 | node = dev_to_node(&pci_dev->dev); |
62dc51fb SB |
8651 | if (node == NUMA_NO_NODE) { |
8652 | cp_node = cpu_to_node(0); | |
8653 | if (cp_node == NUMA_NO_NODE) | |
8654 | cp_node = 0; | |
8655 | set_dev_node(&pci_dev->dev, cp_node); | |
8656 | } | |
6c223761 KB |
8657 | |
8658 | ctrl_info = pqi_alloc_ctrl_info(node); | |
8659 | if (!ctrl_info) { | |
d91d7820 | 8660 | dev_err(&pci_dev->dev, |
6c223761 KB |
8661 | "failed to allocate controller info block\n"); |
8662 | return -ENOMEM; | |
8663 | } | |
8664 | ||
d91d7820 | 8665 | ctrl_info->pci_dev = pci_dev; |
6c223761 KB |
8666 | |
8667 | rc = pqi_pci_init(ctrl_info); | |
8668 | if (rc) | |
8669 | goto error; | |
8670 | ||
8671 | rc = pqi_ctrl_init(ctrl_info); | |
8672 | if (rc) | |
8673 | goto error; | |
8674 | ||
8675 | return 0; | |
8676 | ||
8677 | error: | |
8678 | pqi_remove_ctrl(ctrl_info); | |
8679 | ||
8680 | return rc; | |
8681 | } | |
8682 | ||
d91d7820 | 8683 | static void pqi_pci_remove(struct pci_dev *pci_dev) |
6c223761 KB |
8684 | { |
8685 | struct pqi_ctrl_info *ctrl_info; | |
8686 | ||
d91d7820 | 8687 | ctrl_info = pci_get_drvdata(pci_dev); |
6c223761 KB |
8688 | if (!ctrl_info) |
8689 | return; | |
8690 | ||
8691 | pqi_remove_ctrl(ctrl_info); | |
8692 | } | |
8693 | ||
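/*
 * Despite its name, the helper below does not itself bring the system
 * down: it emits a WARN for every request still in flight at shutdown,
 * with exactly one of the two WARN_ONs firing per request to identify
 * whether it was an I/O command from the SML or a driver-internal one.
 */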
0530736e KB |
8694 | static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info) |
8695 | { | |
8696 | unsigned int i; | |
8697 | struct pqi_io_request *io_request; | |
8698 | struct scsi_cmnd *scmd; | |
8699 | ||
8700 | for (i = 0; i < ctrl_info->max_io_slots; i++) { | |
8701 | io_request = &ctrl_info->io_request_pool[i]; | |
8702 | if (atomic_read(&io_request->refcount) == 0) | |
8703 | continue; | |
8704 | scmd = io_request->scmd; | |
8705 | WARN_ON(scmd != NULL); /* I/O command from SML still pending */ | |
8706 | WARN_ON(scmd == NULL); /* non-I/O or driver-initiated request still pending */ | |
8707 | } | |
8708 | } | |
8709 | ||
d91d7820 | 8710 | static void pqi_shutdown(struct pci_dev *pci_dev) |
6c223761 KB |
8711 | { |
8712 | int rc; | |
8713 | struct pqi_ctrl_info *ctrl_info; | |
8714 | ||
d91d7820 | 8715 | ctrl_info = pci_get_drvdata(pci_dev); |
0530736e KB |
8716 | if (!ctrl_info) { |
8717 | dev_err(&pci_dev->dev, | |
8718 | "cache could not be flushed\n"); | |
8719 | return; | |
8720 | } | |
8721 | ||
8722 | pqi_disable_events(ctrl_info); | |
8723 | pqi_wait_until_ofa_finished(ctrl_info); | |
8724 | pqi_cancel_update_time_worker(ctrl_info); | |
8725 | pqi_cancel_rescan_worker(ctrl_info); | |
8726 | pqi_cancel_event_worker(ctrl_info); | |
8727 | ||
8728 | pqi_ctrl_shutdown_start(ctrl_info); | |
8729 | pqi_ctrl_wait_until_quiesced(ctrl_info); | |
8730 | ||
8731 | rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT); | |
8732 | if (rc) { | |
8733 | dev_err(&pci_dev->dev, | |
8734 | "wait for pending I/O failed\n"); | |
8735 | return; | |
8736 | } | |
8737 | ||
8738 | pqi_ctrl_block_device_reset(ctrl_info); | |
8739 | pqi_wait_until_lun_reset_finished(ctrl_info); | |
6c223761 KB |
8740 | |
8741 | /* | |
8742 | * Write all data in the controller's battery-backed cache to | |
8743 | * storage. | |
8744 | */ | |
58322fe0 | 8745 | rc = pqi_flush_cache(ctrl_info, SHUTDOWN); |
0530736e KB |
8746 | if (rc) |
8747 | dev_err(&pci_dev->dev, | |
8748 | "unable to flush controller cache\n"); | |
8749 | ||
8750 | pqi_ctrl_block_requests(ctrl_info); | |
8751 | ||
8752 | rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info); | |
8753 | if (rc) { | |
8754 | dev_err(&pci_dev->dev, | |
8755 | "wait for pending sync cmds failed\n"); | |
6c223761 | 8756 | return; |
0530736e KB |
8757 | } |
8758 | ||
8759 | pqi_crash_if_pending_command(ctrl_info); | |
8760 | pqi_reset(ctrl_info); | |
6c223761 KB |
8761 | } |
8762 | ||
3c50976f KB |
8763 | static void pqi_process_lockup_action_param(void) |
8764 | { | |
8765 | unsigned int i; | |
8766 | ||
8767 | if (!pqi_lockup_action_param) | |
8768 | return; | |
8769 | ||
8770 | for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { | |
8771 | if (strcmp(pqi_lockup_action_param, | |
8772 | pqi_lockup_actions[i].name) == 0) { | |
8773 | pqi_lockup_action = pqi_lockup_actions[i].action; | |
8774 | return; | |
8775 | } | |
8776 | } | |
8777 | ||
8778 | pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", | |
8779 | DRIVER_NAME_SHORT, pqi_lockup_action_param); | |
8780 | } | |
8781 | ||
8782 | static void pqi_process_module_params(void) | |
8783 | { | |
8784 | pqi_process_lockup_action_param(); | |
8785 | } | |
8786 | ||
5c146686 | 8787 | static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state) |
061ef06a KB |
8788 | { |
8789 | struct pqi_ctrl_info *ctrl_info; | |
8790 | ||
8791 | ctrl_info = pci_get_drvdata(pci_dev); | |
8792 | ||
8793 | pqi_disable_events(ctrl_info); | |
8794 | pqi_cancel_update_time_worker(ctrl_info); | |
8795 | pqi_cancel_rescan_worker(ctrl_info); | |
8796 | pqi_wait_until_scan_finished(ctrl_info); | |
8797 | pqi_wait_until_lun_reset_finished(ctrl_info); | |
4fd22c13 | 8798 | pqi_wait_until_ofa_finished(ctrl_info); |
58322fe0 | 8799 | pqi_flush_cache(ctrl_info, SUSPEND); |
061ef06a KB |
8800 | pqi_ctrl_block_requests(ctrl_info); |
8801 | pqi_ctrl_wait_until_quiesced(ctrl_info); | |
8802 | pqi_wait_until_inbound_queues_empty(ctrl_info); | |
4fd22c13 | 8803 | pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT); |
061ef06a KB |
8804 | pqi_stop_heartbeat_timer(ctrl_info); |
8805 | ||
8806 | if (state.event == PM_EVENT_FREEZE) | |
8807 | return 0; | |
8808 | ||
8809 | pci_save_state(pci_dev); | |
8810 | pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); | |
8811 | ||
8812 | ctrl_info->controller_online = false; | |
8813 | ctrl_info->pqi_mode_enabled = false; | |
8814 | ||
8815 | return 0; | |
8816 | } | |
8817 | ||
5c146686 | 8818 | static __maybe_unused int pqi_resume(struct pci_dev *pci_dev) |
061ef06a KB |
8819 | { |
8820 | int rc; | |
8821 | struct pqi_ctrl_info *ctrl_info; | |
8822 | ||
8823 | ctrl_info = pci_get_drvdata(pci_dev); | |
8824 | ||
8825 | if (pci_dev->current_state != PCI_D0) { | |
8826 | ctrl_info->max_hw_queue_index = 0; | |
8827 | pqi_free_interrupts(ctrl_info); | |
8828 | pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX); | |
8829 | rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler, | |
8830 | IRQF_SHARED, DRIVER_NAME_SHORT, | |
8831 | &ctrl_info->queue_groups[0]); | |
8832 | if (rc) { | |
8833 | dev_err(&ctrl_info->pci_dev->dev, | |
8834 | "irq %u init failed with error %d\n", | |
8835 | pci_dev->irq, rc); | |
8836 | return rc; | |
8837 | } | |
8838 | pqi_start_heartbeat_timer(ctrl_info); | |
8839 | pqi_ctrl_unblock_requests(ctrl_info); | |
8840 | return 0; | |
8841 | } | |
8842 | ||
8843 | pci_set_power_state(pci_dev, PCI_D0); | |
8844 | pci_restore_state(pci_dev); | |
8845 | ||
8846 | return pqi_ctrl_init_resume(ctrl_info); | |
8847 | } | |
8848 | ||
6c223761 KB |
8849 | /* Define the PCI IDs for the controllers that we support. */ |
8850 | static const struct pci_device_id pqi_pci_id_table[] = { | |
b0f9408b KB |
8851 | { |
8852 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8853 | 0x105b, 0x1211) | |
8854 | }, | |
8855 | { | |
8856 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8857 | 0x105b, 0x1321) | |
8858 | }, | |
7eddabff KB |
8859 | { |
8860 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8861 | 0x152d, 0x8a22) | |
8862 | }, | |
8863 | { | |
8864 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8865 | 0x152d, 0x8a23) | |
8866 | }, | |
8867 | { | |
8868 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8869 | 0x152d, 0x8a24) | |
8870 | }, | |
8871 | { | |
8872 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8873 | 0x152d, 0x8a36) | |
8874 | }, | |
8875 | { | |
8876 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8877 | 0x152d, 0x8a37) | |
8878 | }, | |
0595a0b4 AK |
8879 | { |
8880 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8881 | 0x193d, 0x1104) | |
8882 | }, | |
8883 | { | |
8884 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8885 | 0x193d, 0x1105) | |
8886 | }, | |
8887 | { | |
8888 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8889 | 0x193d, 0x1106) | |
8890 | }, | |
8891 | { | |
8892 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8893 | 0x193d, 0x1107) | |
8894 | }, | |
b0f9408b KB |
8895 | { |
8896 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8897 | 0x193d, 0x8460) | |
8898 | }, | |
8899 | { | |
8900 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8901 | 0x193d, 0x8461) | |
8902 | }, | |
84a77fef MB |
8903 | { |
8904 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8905 | 0x193d, 0xc460) | |
8906 | }, | |
8907 | { | |
8908 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8909 | 0x193d, 0xc461) | |
8910 | }, | |
b0f9408b KB |
8911 | { |
8912 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8913 | 0x193d, 0xf460) | |
8914 | }, | |
8915 | { | |
8916 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8917 | 0x193d, 0xf461) | |
8918 | }, | |
8919 | { | |
8920 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8921 | 0x1bd4, 0x0045) | |
8922 | }, | |
8923 | { | |
8924 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8925 | 0x1bd4, 0x0046) | |
8926 | }, | |
8927 | { | |
8928 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8929 | 0x1bd4, 0x0047) | |
8930 | }, | |
8931 | { | |
8932 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8933 | 0x1bd4, 0x0048) | |
8934 | }, | |
9f8d05fa KB |
8935 | { |
8936 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8937 | 0x1bd4, 0x004a) | |
8938 | }, | |
8939 | { | |
8940 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8941 | 0x1bd4, 0x004b) | |
8942 | }, | |
8943 | { | |
8944 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8945 | 0x1bd4, 0x004c) | |
8946 | }, | |
63a7956a GW |
8947 | { |
8948 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8949 | 0x1bd4, 0x004f) | |
8950 | }, | |
c1b10475 AK |
8951 | { |
8952 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8953 | 0x19e5, 0xd227) | |
8954 | }, | |
8955 | { | |
8956 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8957 | 0x19e5, 0xd228) | |
8958 | }, | |
8959 | { | |
8960 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8961 | 0x19e5, 0xd229) | |
8962 | }, | |
8963 | { | |
8964 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8965 | 0x19e5, 0xd22a) | |
8966 | }, | |
8967 | { | |
8968 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8969 | 0x19e5, 0xd22b) | |
8970 | }, | |
8971 | { | |
8972 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8973 | 0x19e5, 0xd22c) | |
8974 | }, | |
6c223761 KB |
8975 | { |
8976 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
8977 | PCI_VENDOR_ID_ADAPTEC2, 0x0110) | |
8978 | }, | |
8979 | { | |
8980 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
55790064 | 8981 | PCI_VENDOR_ID_ADAPTEC2, 0x0608) |
6c223761 KB |
8982 | }, |
8983 | { | |
8984 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 8985 | PCI_VENDOR_ID_ADAPTEC2, 0x0800) |
6c223761 KB |
8986 | }, |
8987 | { | |
8988 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 8989 | PCI_VENDOR_ID_ADAPTEC2, 0x0801) |
6c223761 KB |
8990 | }, |
8991 | { | |
8992 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 8993 | PCI_VENDOR_ID_ADAPTEC2, 0x0802) |
6c223761 KB |
8994 | }, |
8995 | { | |
8996 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 8997 | PCI_VENDOR_ID_ADAPTEC2, 0x0803) |
6c223761 KB |
8998 | }, |
8999 | { | |
9000 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9001 | PCI_VENDOR_ID_ADAPTEC2, 0x0804) |
6c223761 KB |
9002 | }, |
9003 | { | |
9004 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9005 | PCI_VENDOR_ID_ADAPTEC2, 0x0805) |
6c223761 KB |
9006 | }, |
9007 | { | |
9008 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9009 | PCI_VENDOR_ID_ADAPTEC2, 0x0806) |
6c223761 | 9010 | }, |
55790064 KB |
9011 | { |
9012 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9013 | PCI_VENDOR_ID_ADAPTEC2, 0x0807) | |
9014 | }, | |
63a7956a GW |
9015 | { |
9016 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9017 | PCI_VENDOR_ID_ADAPTEC2, 0x0808) | |
9018 | }, | |
9019 | { | |
9020 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9021 | PCI_VENDOR_ID_ADAPTEC2, 0x0809) | |
9022 | }, | |
3af06083 MR |
9023 | { |
9024 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9025 | PCI_VENDOR_ID_ADAPTEC2, 0x080a) | |
9026 | }, | |
6c223761 KB |
9027 | { |
9028 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9029 | PCI_VENDOR_ID_ADAPTEC2, 0x0900) |
6c223761 KB |
9030 | }, |
9031 | { | |
9032 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9033 | PCI_VENDOR_ID_ADAPTEC2, 0x0901) |
6c223761 KB |
9034 | }, |
9035 | { | |
9036 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9037 | PCI_VENDOR_ID_ADAPTEC2, 0x0902) |
6c223761 KB |
9038 | }, |
9039 | { | |
9040 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9041 | PCI_VENDOR_ID_ADAPTEC2, 0x0903) |
6c223761 KB |
9042 | }, |
9043 | { | |
9044 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9045 | PCI_VENDOR_ID_ADAPTEC2, 0x0904) |
6c223761 KB |
9046 | }, |
9047 | { | |
9048 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9049 | PCI_VENDOR_ID_ADAPTEC2, 0x0905) |
6c223761 KB |
9050 | }, |
9051 | { | |
9052 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9053 | PCI_VENDOR_ID_ADAPTEC2, 0x0906) |
6c223761 KB |
9054 | }, |
9055 | { | |
9056 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9057 | PCI_VENDOR_ID_ADAPTEC2, 0x0907) |
6c223761 KB |
9058 | }, |
9059 | { | |
9060 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9061 | PCI_VENDOR_ID_ADAPTEC2, 0x0908) |
6c223761 | 9062 | }, |
55790064 KB |
9063 | { |
9064 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9065 | PCI_VENDOR_ID_ADAPTEC2, 0x090a) | |
9066 | }, | |
6c223761 KB |
9067 | { |
9068 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9069 | PCI_VENDOR_ID_ADAPTEC2, 0x1200) |
6c223761 KB |
9070 | }, |
9071 | { | |
9072 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9073 | PCI_VENDOR_ID_ADAPTEC2, 0x1201) |
6c223761 KB |
9074 | }, |
9075 | { | |
9076 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9077 | PCI_VENDOR_ID_ADAPTEC2, 0x1202) |
6c223761 KB |
9078 | }, |
9079 | { | |
9080 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9081 | PCI_VENDOR_ID_ADAPTEC2, 0x1280) |
6c223761 KB |
9082 | }, |
9083 | { | |
9084 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9085 | PCI_VENDOR_ID_ADAPTEC2, 0x1281) |
6c223761 | 9086 | }, |
b0f9408b KB |
9087 | { |
9088 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9089 | PCI_VENDOR_ID_ADAPTEC2, 0x1282) | |
9090 | }, | |
6c223761 KB |
9091 | { |
9092 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9093 | PCI_VENDOR_ID_ADAPTEC2, 0x1300) |
6c223761 KB |
9094 | }, |
9095 | { | |
9096 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff | 9097 | PCI_VENDOR_ID_ADAPTEC2, 0x1301) |
6c223761 | 9098 | }, |
bd809e8d KB |
9099 | { |
9100 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9101 | PCI_VENDOR_ID_ADAPTEC2, 0x1302) | |
9102 | }, | |
9103 | { | |
9104 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9105 | PCI_VENDOR_ID_ADAPTEC2, 0x1303) | |
9106 | }, | |
6c223761 KB |
9107 | { |
9108 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
7eddabff KB |
9109 | PCI_VENDOR_ID_ADAPTEC2, 0x1380) |
9110 | }, | |
9f8d05fa KB |
9111 | { |
9112 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9113 | PCI_VENDOR_ID_ADVANTECH, 0x8312) | |
9114 | }, | |
55790064 KB |
9115 | { |
9116 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9117 | PCI_VENDOR_ID_DELL, 0x1fe0) | |
9118 | }, | |
7eddabff KB |
9119 | { |
9120 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9121 | PCI_VENDOR_ID_HP, 0x0600) | |
9122 | }, | |
9123 | { | |
9124 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9125 | PCI_VENDOR_ID_HP, 0x0601) | |
9126 | }, | |
9127 | { | |
9128 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9129 | PCI_VENDOR_ID_HP, 0x0602) | |
9130 | }, | |
9131 | { | |
9132 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9133 | PCI_VENDOR_ID_HP, 0x0603) | |
9134 | }, | |
9135 | { | |
9136 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
55790064 | 9137 | PCI_VENDOR_ID_HP, 0x0609) |
7eddabff KB |
9138 | }, |
9139 | { | |
9140 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9141 | PCI_VENDOR_ID_HP, 0x0650) | |
9142 | }, | |
9143 | { | |
9144 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9145 | PCI_VENDOR_ID_HP, 0x0651) | |
9146 | }, | |
9147 | { | |
9148 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9149 | PCI_VENDOR_ID_HP, 0x0652) | |
9150 | }, | |
9151 | { | |
9152 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9153 | PCI_VENDOR_ID_HP, 0x0653) | |
9154 | }, | |
9155 | { | |
9156 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9157 | PCI_VENDOR_ID_HP, 0x0654) | |
9158 | }, | |
9159 | { | |
9160 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9161 | PCI_VENDOR_ID_HP, 0x0655) | |
9162 | }, | |
7eddabff KB |
9163 | { |
9164 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9165 | PCI_VENDOR_ID_HP, 0x0700) | |
9166 | }, | |
9167 | { | |
9168 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9169 | PCI_VENDOR_ID_HP, 0x0701) | |
6c223761 KB |
9170 | }, |
9171 | { | |
9172 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9173 | PCI_VENDOR_ID_HP, 0x1001) | |
9174 | }, | |
9175 | { | |
9176 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9177 | PCI_VENDOR_ID_HP, 0x1100) | |
9178 | }, | |
9179 | { | |
9180 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9181 | PCI_VENDOR_ID_HP, 0x1101) | |
9182 | }, | |
8bdb3b9c GW |
9183 | { |
9184 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9185 | 0x1d8d, 0x0800) | |
9186 | }, | |
9187 | { | |
9188 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9189 | 0x1d8d, 0x0908) | |
9190 | }, | |
9191 | { | |
9192 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9193 | 0x1d8d, 0x0806) | |
9194 | }, | |
9195 | { | |
9196 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9197 | 0x1d8d, 0x0916) | |
9198 | }, | |
71ecc60d GW |
9199 | { |
9200 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9201 | PCI_VENDOR_ID_GIGABYTE, 0x1000) | |
9202 | }, | |
6c223761 KB |
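/* Catch-all: claim any other 0x028f function, regardless of subsystem IDs. */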
9203 | { |
9204 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | |
9205 | PCI_ANY_ID, PCI_ANY_ID) | |
9206 | }, | |
9207 | { 0 } | |
9208 | }; | |
9209 | ||
9210 | MODULE_DEVICE_TABLE(pci, pqi_pci_id_table); | |
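
For reference, each PCI_DEVICE_SUB() initializer above matches on all four IDs (vendor, device, subvendor, subdevice), while the class fields stay zero and therefore match any class; MODULE_DEVICE_TABLE() then exports the table as module aliases so userspace can autoload the driver. The macro below is reproduced from <linux/pci.h>; the expanded entry is illustrative:

```c
/* From <linux/pci.h>: designated-initializer fragment for one table entry. */
#define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = (subvend), .subdevice = (subdev)

/* So an entry such as */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0600) }
/* expands to */
{ .vendor = PCI_VENDOR_ID_ADAPTEC2, .device = 0x028f,
  .subvendor = PCI_VENDOR_ID_HP, .subdevice = 0x0600 }
```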
9211 | ||
9212 | static struct pci_driver pqi_pci_driver = { | |
9213 | .name = DRIVER_NAME_SHORT, | |
9214 | .id_table = pqi_pci_id_table, | |
9215 | .probe = pqi_pci_probe, | |
9216 | .remove = pqi_pci_remove, | |
9217 | .shutdown = pqi_shutdown, | |
061ef06a KB |
9218 | #if defined(CONFIG_PM) |
9219 | .suspend = pqi_suspend, | |
9220 | .resume = pqi_resume, | |
9221 | #endif | |
6c223761 KB |
9222 | }; |
9223 | ||
9224 | static int __init pqi_init(void) | |
9225 | { | |
9226 | int rc; | |
9227 | ||
9228 | pr_info(DRIVER_NAME "\n"); | |
9229 | ||
8b664fef | 9230 | pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions); |
6c223761 KB |
9231 | if (!pqi_sas_transport_template) |
9232 | return -ENODEV; | |
9233 | ||
3c50976f KB |
9234 | pqi_process_module_params(); |
9235 | ||
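/* If PCI registration fails, release the transport template attached above. */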
6c223761 KB |
9236 | rc = pci_register_driver(&pqi_pci_driver); |
9237 | if (rc) | |
9238 | sas_release_transport(pqi_sas_transport_template); | |
9239 | ||
9240 | return rc; | |
9241 | } | |
9242 | ||
9243 | static void __exit pqi_cleanup(void) | |
9244 | { | |
9245 | pci_unregister_driver(&pqi_pci_driver); | |
9246 | sas_release_transport(pqi_sas_transport_template); | |
9247 | } | |
9248 | ||
9249 | module_init(pqi_init); | |
9250 | module_exit(pqi_cleanup); | |
9251 | ||
9252 | static void __maybe_unused verify_structures(void) | |
9253 | { | |
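/* Compile-time layout checks: every structure shared with the controller must keep the expected on-the-wire offsets and sizes. */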
9254 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, | |
9255 | sis_host_to_ctrl_doorbell) != 0x20); | |
9256 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, | |
9257 | sis_interrupt_mask) != 0x34); | |
9258 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, | |
9259 | sis_ctrl_to_host_doorbell) != 0x9c); | |
9260 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, | |
9261 | sis_ctrl_to_host_doorbell_clear) != 0xa0); | |
ff6abb73 KB |
9262 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
9263 | sis_driver_scratch) != 0xb0); | |
2708a256 KB |
9264 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
9265 | sis_product_identifier) != 0xb4); | |
6c223761 KB |
9266 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, |
9267 | sis_firmware_status) != 0xbc); | |
9268 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, | |
9269 | sis_mailbox) != 0x1000); | |
9270 | BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, | |
9271 | pqi_registers) != 0x4000); | |
9272 | ||
9273 | BUILD_BUG_ON(offsetof(struct pqi_iu_header, | |
9274 | iu_type) != 0x0); | |
9275 | BUILD_BUG_ON(offsetof(struct pqi_iu_header, | |
9276 | iu_length) != 0x2); | |
9277 | BUILD_BUG_ON(offsetof(struct pqi_iu_header, | |
9278 | response_queue_id) != 0x4); | |
9279 | BUILD_BUG_ON(offsetof(struct pqi_iu_header, | |
9280 | work_area) != 0x6); | |
9281 | BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); | |
9282 | ||
9283 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, | |
9284 | status) != 0x0); | |
9285 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, | |
9286 | service_response) != 0x1); | |
9287 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, | |
9288 | data_present) != 0x2); | |
9289 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, | |
9290 | reserved) != 0x3); | |
9291 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, | |
9292 | residual_count) != 0x4); | |
9293 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, | |
9294 | data_length) != 0x8); | |
9295 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, | |
9296 | reserved1) != 0xa); | |
9297 | BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, | |
9298 | data) != 0xc); | |
9299 | BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c); | |
9300 | ||
9301 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
9302 | data_in_result) != 0x0); | |
9303 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
9304 | data_out_result) != 0x1); | |
9305 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
9306 | reserved) != 0x2); | |
9307 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
9308 | status) != 0x5); | |
9309 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
9310 | status_qualifier) != 0x6); | |
9311 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
9312 | sense_data_length) != 0x8); | |
9313 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
9314 | response_data_length) != 0xa); | |
9315 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
9316 | data_in_transferred) != 0xc); | |
9317 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
9318 | data_out_transferred) != 0x10); | |
9319 | BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, | |
9320 | data) != 0x14); | |
9321 | BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114); | |
9322 | ||
9323 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9324 | signature) != 0x0); | |
9325 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9326 | function_and_status_code) != 0x8); | |
9327 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9328 | max_admin_iq_elements) != 0x10); | |
9329 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9330 | max_admin_oq_elements) != 0x11); | |
9331 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9332 | admin_iq_element_length) != 0x12); | |
9333 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9334 | admin_oq_element_length) != 0x13); | |
9335 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9336 | max_reset_timeout) != 0x14); | |
9337 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9338 | legacy_intx_status) != 0x18); | |
9339 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9340 | legacy_intx_mask_set) != 0x1c); | |
9341 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9342 | legacy_intx_mask_clear) != 0x20); | |
9343 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9344 | device_status) != 0x40); | |
9345 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9346 | admin_iq_pi_offset) != 0x48); | |
9347 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9348 | admin_oq_ci_offset) != 0x50); | |
9349 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9350 | admin_iq_element_array_addr) != 0x58); | |
9351 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9352 | admin_oq_element_array_addr) != 0x60); | |
9353 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9354 | admin_iq_ci_addr) != 0x68); | |
9355 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9356 | admin_oq_pi_addr) != 0x70); | |
9357 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9358 | admin_iq_num_elements) != 0x78); | |
9359 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9360 | admin_oq_num_elements) != 0x79); | |
9361 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9362 | admin_queue_int_msg_num) != 0x7a); | |
9363 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9364 | device_error) != 0x80); | |
9365 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9366 | error_details) != 0x88); | |
9367 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9368 | device_reset) != 0x90); | |
9369 | BUILD_BUG_ON(offsetof(struct pqi_device_registers, | |
9370 | power_action) != 0x94); | |
9371 | BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100); | |
9372 | ||
9373 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9374 | header.iu_type) != 0); | |
9375 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9376 | header.iu_length) != 2); | |
9377 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9378 | header.work_area) != 6); | |
9379 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9380 | request_id) != 8); | |
9381 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9382 | function_code) != 10); | |
9383 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9384 | data.report_device_capability.buffer_length) != 44); | |
9385 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9386 | data.report_device_capability.sg_descriptor) != 48); | |
9387 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9388 | data.create_operational_iq.queue_id) != 12); | |
9389 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9390 | data.create_operational_iq.element_array_addr) != 16); | |
9391 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9392 | data.create_operational_iq.ci_addr) != 24); | |
9393 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9394 | data.create_operational_iq.num_elements) != 32); | |
9395 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9396 | data.create_operational_iq.element_length) != 34); | |
9397 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9398 | data.create_operational_iq.queue_protocol) != 36); | |
9399 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9400 | data.create_operational_oq.queue_id) != 12); | |
9401 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9402 | data.create_operational_oq.element_array_addr) != 16); | |
9403 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9404 | data.create_operational_oq.pi_addr) != 24); | |
9405 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9406 | data.create_operational_oq.num_elements) != 32); | |
9407 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9408 | data.create_operational_oq.element_length) != 34); | |
9409 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9410 | data.create_operational_oq.queue_protocol) != 36); | |
9411 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9412 | data.create_operational_oq.int_msg_num) != 40); | |
9413 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9414 | data.create_operational_oq.coalescing_count) != 42); | |
9415 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9416 | data.create_operational_oq.min_coalescing_time) != 44); | |
9417 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9418 | data.create_operational_oq.max_coalescing_time) != 48); | |
9419 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, | |
9420 | data.delete_operational_queue.queue_id) != 12); | |
9421 | BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); | |
c593642c | 9422 | BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, |
6c223761 | 9423 | data.create_operational_iq) != 64 - 11); |
c593642c | 9424 | BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, |
6c223761 | 9425 | data.create_operational_oq) != 64 - 11); |
c593642c | 9426 | BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, |
6c223761 KB |
9427 | data.delete_operational_queue) != 64 - 11); |
9428 | ||
9429 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
9430 | header.iu_type) != 0); | |
9431 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
9432 | header.iu_length) != 2); | |
9433 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
9434 | header.work_area) != 6); | |
9435 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
9436 | request_id) != 8); | |
9437 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
9438 | function_code) != 10); | |
9439 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
9440 | status) != 11); | |
9441 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
9442 | data.create_operational_iq.status_descriptor) != 12); | |
9443 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
9444 | data.create_operational_iq.iq_pi_offset) != 16); | |
9445 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
9446 | data.create_operational_oq.status_descriptor) != 12); | |
9447 | BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, | |
9448 | data.create_operational_oq.oq_ci_offset) != 16); | |
9449 | BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64); | |
9450 | ||
9451 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
9452 | header.iu_type) != 0); | |
9453 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
9454 | header.iu_length) != 2); | |
9455 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
9456 | header.response_queue_id) != 4); | |
9457 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
9458 | header.work_area) != 6); | |
9459 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
9460 | request_id) != 8); | |
9461 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
9462 | nexus_id) != 10); | |
9463 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
9464 | buffer_length) != 12); | |
9465 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
9466 | lun_number) != 16); | |
9467 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
9468 | protocol_specific) != 24); | |
9469 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
9470 | error_index) != 27); | |
9471 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, | |
9472 | cdb) != 32); | |
21432010 | 9473 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
9474 | timeout) != 60); | |
6c223761 KB |
9475 | BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, |
9476 | sg_descriptors) != 64); | |
9477 | BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != | |
9478 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | |
9479 | ||
9480 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
9481 | header.iu_type) != 0); | |
9482 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
9483 | header.iu_length) != 2); | |
9484 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
9485 | header.response_queue_id) != 4); | |
9486 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
9487 | header.work_area) != 6); | |
9488 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
9489 | request_id) != 8); | |
9490 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
9491 | nexus_id) != 12); | |
9492 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
9493 | buffer_length) != 16); | |
9494 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
9495 | data_encryption_key_index) != 22); | |
9496 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
9497 | encrypt_tweak_lower) != 24); | |
9498 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
9499 | encrypt_tweak_upper) != 28); | |
9500 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
9501 | cdb) != 32); | |
9502 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
9503 | error_index) != 48); | |
9504 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
9505 | num_sg_descriptors) != 50); | |
9506 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
9507 | cdb_length) != 51); | |
9508 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
9509 | lun_number) != 52); | |
9510 | BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, | |
9511 | sg_descriptors) != 64); | |
9512 | BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != | |
9513 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | |
9514 | ||
9515 | BUILD_BUG_ON(offsetof(struct pqi_io_response, | |
9516 | header.iu_type) != 0); | |
9517 | BUILD_BUG_ON(offsetof(struct pqi_io_response, | |
9518 | header.iu_length) != 2); | |
9519 | BUILD_BUG_ON(offsetof(struct pqi_io_response, | |
9520 | request_id) != 8); | |
9521 | BUILD_BUG_ON(offsetof(struct pqi_io_response, | |
9522 | error_index) != 10); | |
9523 | ||
9524 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
9525 | header.iu_type) != 0); | |
9526 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
9527 | header.iu_length) != 2); | |
9528 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
9529 | header.response_queue_id) != 4); | |
9530 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
9531 | request_id) != 8); | |
9532 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
9533 | data.report_event_configuration.buffer_length) != 12); | |
9534 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
9535 | data.report_event_configuration.sg_descriptors) != 16); | |
9536 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
9537 | data.set_event_configuration.global_event_oq_id) != 10); | |
9538 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
9539 | data.set_event_configuration.buffer_length) != 12); | |
9540 | BUILD_BUG_ON(offsetof(struct pqi_general_management_request, | |
9541 | data.set_event_configuration.sg_descriptors) != 16); | |
9542 | ||
9543 | BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, | |
9544 | max_inbound_iu_length) != 6); | |
9545 | BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, | |
9546 | max_outbound_iu_length) != 14); | |
9547 | BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16); | |
9548 | ||
9549 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
9550 | data_length) != 0); | |
9551 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
9552 | iq_arbitration_priority_support_bitmask) != 8); | |
9553 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
9554 | maximum_aw_a) != 9); | |
9555 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
9556 | maximum_aw_b) != 10); | |
9557 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
9558 | maximum_aw_c) != 11); | |
9559 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
9560 | max_inbound_queues) != 16); | |
9561 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
9562 | max_elements_per_iq) != 18); | |
9563 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
9564 | max_iq_element_length) != 24); | |
9565 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
9566 | min_iq_element_length) != 26); | |
9567 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
9568 | max_outbound_queues) != 30); | |
9569 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
9570 | max_elements_per_oq) != 32); | |
9571 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
9572 | intr_coalescing_time_granularity) != 34); | |
9573 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
9574 | max_oq_element_length) != 36); | |
9575 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
9576 | min_oq_element_length) != 38); | |
9577 | BUILD_BUG_ON(offsetof(struct pqi_device_capability, | |
9578 | iu_layer_descriptors) != 64); | |
9579 | BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576); | |
9580 | ||
9581 | BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, | |
9582 | event_type) != 0); | |
9583 | BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, | |
9584 | oq_id) != 2); | |
9585 | BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4); | |
9586 | ||
9587 | BUILD_BUG_ON(offsetof(struct pqi_event_config, | |
9588 | num_event_descriptors) != 2); | |
9589 | BUILD_BUG_ON(offsetof(struct pqi_event_config, | |
9590 | descriptors) != 4); | |
9591 | ||
061ef06a KB |
9592 | BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != |
9593 | ARRAY_SIZE(pqi_supported_event_types)); | |
9594 | ||
6c223761 KB |
9595 | BUILD_BUG_ON(offsetof(struct pqi_event_response, |
9596 | header.iu_type) != 0); | |
9597 | BUILD_BUG_ON(offsetof(struct pqi_event_response, | |
9598 | header.iu_length) != 2); | |
9599 | BUILD_BUG_ON(offsetof(struct pqi_event_response, | |
9600 | event_type) != 8); | |
9601 | BUILD_BUG_ON(offsetof(struct pqi_event_response, | |
9602 | event_id) != 10); | |
9603 | BUILD_BUG_ON(offsetof(struct pqi_event_response, | |
9604 | additional_event_id) != 12); | |
9605 | BUILD_BUG_ON(offsetof(struct pqi_event_response, | |
9606 | data) != 16); | |
9607 | BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32); | |
9608 | ||
9609 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, | |
9610 | header.iu_type) != 0); | |
9611 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, | |
9612 | header.iu_length) != 2); | |
9613 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, | |
9614 | event_type) != 8); | |
9615 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, | |
9616 | event_id) != 10); | |
9617 | BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, | |
9618 | additional_event_id) != 12); | |
9619 | BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16); | |
9620 | ||
9621 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
9622 | header.iu_type) != 0); | |
9623 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
9624 | header.iu_length) != 2); | |
9625 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
9626 | request_id) != 8); | |
9627 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
9628 | nexus_id) != 10); | |
9629 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
c2922f17 MB |
9630 | timeout) != 14); |
9631 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
6c223761 KB |
9632 | lun_number) != 16); |
9633 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
9634 | protocol_specific) != 24); | |
9635 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
9636 | outbound_queue_id_to_manage) != 26); | |
9637 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
9638 | request_id_to_manage) != 28); | |
9639 | BUILD_BUG_ON(offsetof(struct pqi_task_management_request, | |
9640 | task_management_function) != 30); | |
9641 | BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32); | |
9642 | ||
9643 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, | |
9644 | header.iu_type) != 0); | |
9645 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, | |
9646 | header.iu_length) != 2); | |
9647 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, | |
9648 | request_id) != 8); | |
9649 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, | |
9650 | nexus_id) != 10); | |
9651 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, | |
9652 | additional_response_info) != 12); | |
9653 | BUILD_BUG_ON(offsetof(struct pqi_task_management_response, | |
9654 | response_code) != 15); | |
9655 | BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16); | |
9656 | ||
9657 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, | |
9658 | configured_logical_drive_count) != 0); | |
9659 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, | |
9660 | configuration_signature) != 1); | |
9661 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, | |
598bef8d | 9662 | firmware_version_short) != 5); |
6c223761 KB |
9663 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
9664 | extended_logical_unit_count) != 154); | |
9665 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, | |
9666 | firmware_build_number) != 190); | |
598bef8d KB |
9667 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
9668 | vendor_id) != 200); | |
9669 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, | |
9670 | product_id) != 208); | |
9671 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, | |
9672 | extra_controller_flags) != 286); | |
6c223761 KB |
9673 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
9674 | controller_mode) != 292); | |
598bef8d KB |
9675 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, |
9676 | spare_part_number) != 293); | |
9677 | BUILD_BUG_ON(offsetof(struct bmic_identify_controller, | |
9678 | firmware_version_long) != 325); | |
6c223761 | 9679 | |
1be42f46 KB |
9680 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, |
9681 | phys_bay_in_box) != 115); | |
9682 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, | |
9683 | device_type) != 120); | |
9684 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, | |
9685 | redundant_path_present_map) != 1736); | |
9686 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, | |
9687 | active_path_number) != 1738); | |
9688 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, | |
9689 | alternate_paths_phys_connector) != 1739); | |
9690 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, | |
9691 | alternate_paths_phys_box_on_port) != 1755); | |
9692 | BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, | |
9693 | current_queue_depth_limit) != 1796); | |
9694 | BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560); | |
9695 | ||
f6cc2a77 KB |
9696 | BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4); |
9697 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, | |
9698 | page_code) != 0); | |
9699 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, | |
9700 | subpage_code) != 1); | |
9701 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, | |
9702 | buffer_length) != 2); | |
9703 | ||
9704 | BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4); | |
9705 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, | |
9706 | page_code) != 0); | |
9707 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, | |
9708 | subpage_code) != 1); | |
9709 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, | |
9710 | page_length) != 2); | |
9711 | ||
9712 | BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage) | |
9713 | != 18); | |
9714 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, | |
9715 | header) != 0); | |
9716 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, | |
9717 | firmware_read_support) != 4); | |
9718 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, | |
9719 | driver_read_support) != 5); | |
9720 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, | |
9721 | firmware_write_support) != 6); | |
9722 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, | |
9723 | driver_write_support) != 7); | |
9724 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, | |
9725 | max_transfer_encrypted_sas_sata) != 8); | |
9726 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, | |
9727 | max_transfer_encrypted_nvme) != 10); | |
9728 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, | |
9729 | max_write_raid_5_6) != 12); | |
9730 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, | |
9731 | max_write_raid_1_10_2drive) != 14); | |
9732 | BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, | |
9733 | max_write_raid_1_10_3drive) != 16); | |
9734 | ||
6c223761 KB |
9735 | BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255); |
9736 | BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255); | |
9737 | BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % | |
9738 | PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); | |
9739 | BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH % | |
9740 | PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); | |
9741 | BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560); | |
9742 | BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH % | |
9743 | PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); | |
9744 | BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560); | |
9745 | BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH % | |
9746 | PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); | |
9747 | ||
9748 | BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS); | |
d727a776 KB |
9749 | BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= |
9750 | PQI_MAX_OUTSTANDING_REQUESTS_KDUMP); | |
6c223761 | 9751 | } |
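
The BUILD_BUG_ON()/offsetof() pattern above turns any layout drift into a build failure rather than a runtime surprise. Below is a minimal user-space sketch of the same technique, using C11 static_assert in place of the kernel macro; the struct is a stand-in modeled on pqi_iu_header, not the driver's actual definition:

```c
#include <assert.h>   /* static_assert (C11) */
#include <stddef.h>   /* offsetof */
#include <stdint.h>

/* Stand-in for a fixed-layout wire structure such as pqi_iu_header. */
struct iu_header_sketch {
	uint8_t  iu_type;            /* expected at offset 0x0 */
	uint8_t  reserved;
	uint16_t iu_length;          /* expected at offset 0x2 */
	uint16_t response_queue_id;  /* expected at offset 0x4 */
	uint16_t work_area;          /* expected at offset 0x6 */
};

/* A stray field or padding change breaks the build immediately. */
static_assert(offsetof(struct iu_header_sketch, iu_type) == 0x0, "layout");
static_assert(offsetof(struct iu_header_sketch, iu_length) == 0x2, "layout");
static_assert(offsetof(struct iu_header_sketch, response_queue_id) == 0x4, "layout");
static_assert(offsetof(struct iu_header_sketch, work_area) == 0x6, "layout");
static_assert(sizeof(struct iu_header_sketch) == 0x8, "layout");

int main(void) { return 0; }
```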