scsi: smartpqi: change return value for LUN reset operations
[linux-2.6-block.git] / drivers / scsi / smartpqi / smartpqi_init.c
CommitLineData
6c223761
KB
1/*
2 * driver for Microsemi PQI-based storage controllers
b805dbfe 3 * Copyright (c) 2016-2017 Microsemi Corporation
6c223761
KB
4 * Copyright (c) 2016 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/rtc.h>
26#include <linux/bcd.h>
27#include <linux/cciss_ioctl.h>
52198226 28#include <linux/blk-mq-pci.h>
6c223761
KB
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_cmnd.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_eh.h>
33#include <scsi/scsi_transport_sas.h>
34#include <asm/unaligned.h>
35#include "smartpqi.h"
36#include "smartpqi_sis.h"
37
38#if !defined(BUILD_TIMESTAMP)
39#define BUILD_TIMESTAMP
40#endif
41
699bed75 42#define DRIVER_VERSION "0.9.13-370"
6c223761
KB
43#define DRIVER_MAJOR 0
44#define DRIVER_MINOR 9
699bed75
KB
45#define DRIVER_RELEASE 13
46#define DRIVER_REVISION 370
6c223761
KB
47
48#define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")"
49#define DRIVER_NAME_SHORT "smartpqi"
50
e1d213bd
KB
51#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))
52
6c223761
KB
53MODULE_AUTHOR("Microsemi");
54MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
55 DRIVER_VERSION);
56MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
57MODULE_VERSION(DRIVER_VERSION);
58MODULE_LICENSE("GPL");
59
6c223761
KB
60static char *hpe_branded_controller = "HPE Smart Array Controller";
61static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
62
63static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
64static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
65static void pqi_scan_start(struct Scsi_Host *shost);
66static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
67 struct pqi_queue_group *queue_group, enum pqi_io_path path,
68 struct pqi_io_request *io_request);
69static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
70 struct pqi_iu_header *request, unsigned int flags,
71 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
72static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
73 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
74 unsigned int cdb_length, struct pqi_queue_group *queue_group,
75 struct pqi_encryption_info *encryption_info);
76
77/* for flags argument to pqi_submit_raid_request_synchronous() */
78#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
79
80static struct scsi_transport_template *pqi_sas_transport_template;
81
82static atomic_t pqi_controller_count = ATOMIC_INIT(0);
83
6a50d6ad
KB
84static unsigned int pqi_supported_event_types[] = {
85 PQI_EVENT_TYPE_HOTPLUG,
86 PQI_EVENT_TYPE_HARDWARE,
87 PQI_EVENT_TYPE_PHYSICAL_DEVICE,
88 PQI_EVENT_TYPE_LOGICAL_DEVICE,
89 PQI_EVENT_TYPE_AIO_STATE_CHANGE,
90 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
91};
92
6c223761
KB
93static int pqi_disable_device_id_wildcards;
94module_param_named(disable_device_id_wildcards,
cbe0c7b1 95 pqi_disable_device_id_wildcards, int, 0644);
6c223761
KB
96MODULE_PARM_DESC(disable_device_id_wildcards,
97 "Disable device ID wildcards.");
98
99static char *raid_levels[] = {
100 "RAID-0",
101 "RAID-4",
102 "RAID-1(1+0)",
103 "RAID-5",
104 "RAID-5+1",
105 "RAID-ADG",
106 "RAID-1(ADM)",
107};
108
109static char *pqi_raid_level_to_string(u8 raid_level)
110{
111 if (raid_level < ARRAY_SIZE(raid_levels))
112 return raid_levels[raid_level];
113
114 return "";
115}
116
117#define SA_RAID_0 0
118#define SA_RAID_4 1
119#define SA_RAID_1 2 /* also used for RAID 10 */
120#define SA_RAID_5 3 /* also used for RAID 50 */
121#define SA_RAID_51 4
122#define SA_RAID_6 5 /* also used for RAID 60 */
123#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
124#define SA_RAID_MAX SA_RAID_ADM
125#define SA_RAID_UNKNOWN 0xff
126
127static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
128{
7561a7e4 129 pqi_prep_for_scsi_done(scmd);
6c223761
KB
130 scmd->scsi_done(scmd);
131}
132
133static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
134{
135 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
136}
137
138static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
139{
140 void *hostdata = shost_priv(shost);
141
142 return *((struct pqi_ctrl_info **)hostdata);
143}
144
145static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
146{
147 return !device->is_physical_device;
148}
149
bd10cf0b
KB
150static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
151{
152 return scsi3addr[2] != 0;
153}
154
6c223761
KB
155static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
156{
157 return !ctrl_info->controller_online;
158}
159
160static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
161{
162 if (ctrl_info->controller_online)
163 if (!sis_is_firmware_running(ctrl_info))
164 pqi_take_ctrl_offline(ctrl_info);
165}
166
167static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
168{
169 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
170}
171
ff6abb73
KB
172static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
173 struct pqi_ctrl_info *ctrl_info)
174{
175 return sis_read_driver_scratch(ctrl_info);
176}
177
178static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
179 enum pqi_ctrl_mode mode)
180{
181 sis_write_driver_scratch(ctrl_info, mode);
182}
183
7561a7e4
KB
184#define PQI_RESCAN_WORK_INTERVAL (10 * HZ)
185static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
186{
187 ctrl_info->block_requests = true;
188 scsi_block_requests(ctrl_info->scsi_host);
189}
190
191static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
192{
193 ctrl_info->block_requests = false;
194 wake_up_all(&ctrl_info->block_requests_wait);
195 scsi_unblock_requests(ctrl_info->scsi_host);
196}
197
198static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
199{
200 return ctrl_info->block_requests;
201}
202
203static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
204 unsigned long timeout_msecs)
205{
206 unsigned long remaining_msecs;
207
208 if (!pqi_ctrl_blocked(ctrl_info))
209 return timeout_msecs;
210
211 atomic_inc(&ctrl_info->num_blocked_threads);
212
213 if (timeout_msecs == NO_TIMEOUT) {
214 wait_event(ctrl_info->block_requests_wait,
215 !pqi_ctrl_blocked(ctrl_info));
216 remaining_msecs = timeout_msecs;
217 } else {
218 unsigned long remaining_jiffies;
219
220 remaining_jiffies =
221 wait_event_timeout(ctrl_info->block_requests_wait,
222 !pqi_ctrl_blocked(ctrl_info),
223 msecs_to_jiffies(timeout_msecs));
224 remaining_msecs = jiffies_to_msecs(remaining_jiffies);
225 }
226
227 atomic_dec(&ctrl_info->num_blocked_threads);
228
229 return remaining_msecs;
230}
231
232static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
233{
234 atomic_inc(&ctrl_info->num_busy_threads);
235}
236
237static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
238{
239 atomic_dec(&ctrl_info->num_busy_threads);
240}
241
242static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
243{
244 while (atomic_read(&ctrl_info->num_busy_threads) >
245 atomic_read(&ctrl_info->num_blocked_threads))
246 usleep_range(1000, 2000);
247}
248
249static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
250{
251 device->in_reset = true;
252}
253
254static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
255{
256 device->in_reset = false;
257}
258
259static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
260{
261 return device->in_reset;
262}
6c223761
KB
263
264static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
265{
266 schedule_delayed_work(&ctrl_info->rescan_work,
267 PQI_RESCAN_WORK_INTERVAL);
268}
269
061ef06a
KB
270static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
271{
272 cancel_delayed_work_sync(&ctrl_info->rescan_work);
273}
274
98f87667
KB
275static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
276{
277 if (!ctrl_info->heartbeat_counter)
278 return 0;
279
280 return readl(ctrl_info->heartbeat_counter);
281}
282
6c223761
KB
283static int pqi_map_single(struct pci_dev *pci_dev,
284 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
285 size_t buffer_length, int data_direction)
286{
287 dma_addr_t bus_address;
288
289 if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
290 return 0;
291
292 bus_address = pci_map_single(pci_dev, buffer, buffer_length,
293 data_direction);
294 if (pci_dma_mapping_error(pci_dev, bus_address))
295 return -ENOMEM;
296
297 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
298 put_unaligned_le32(buffer_length, &sg_descriptor->length);
299 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
300
301 return 0;
302}
303
304static void pqi_pci_unmap(struct pci_dev *pci_dev,
305 struct pqi_sg_descriptor *descriptors, int num_descriptors,
306 int data_direction)
307{
308 int i;
309
310 if (data_direction == PCI_DMA_NONE)
311 return;
312
313 for (i = 0; i < num_descriptors; i++)
314 pci_unmap_single(pci_dev,
315 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
316 get_unaligned_le32(&descriptors[i].length),
317 data_direction);
318}
319
320static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
321 struct pqi_raid_path_request *request, u8 cmd,
322 u8 *scsi3addr, void *buffer, size_t buffer_length,
323 u16 vpd_page, int *pci_direction)
324{
325 u8 *cdb;
326 int pci_dir;
327
328 memset(request, 0, sizeof(*request));
329
330 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
331 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
332 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
333 &request->header.iu_length);
334 put_unaligned_le32(buffer_length, &request->buffer_length);
335 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
336 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
337 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
338
339 cdb = request->cdb;
340
341 switch (cmd) {
342 case INQUIRY:
343 request->data_direction = SOP_READ_FLAG;
344 cdb[0] = INQUIRY;
345 if (vpd_page & VPD_PAGE) {
346 cdb[1] = 0x1;
347 cdb[2] = (u8)vpd_page;
348 }
349 cdb[4] = (u8)buffer_length;
350 break;
351 case CISS_REPORT_LOG:
352 case CISS_REPORT_PHYS:
353 request->data_direction = SOP_READ_FLAG;
354 cdb[0] = cmd;
355 if (cmd == CISS_REPORT_PHYS)
356 cdb[1] = CISS_REPORT_PHYS_EXTENDED;
357 else
358 cdb[1] = CISS_REPORT_LOG_EXTENDED;
359 put_unaligned_be32(buffer_length, &cdb[6]);
360 break;
361 case CISS_GET_RAID_MAP:
362 request->data_direction = SOP_READ_FLAG;
363 cdb[0] = CISS_READ;
364 cdb[1] = CISS_GET_RAID_MAP;
365 put_unaligned_be32(buffer_length, &cdb[6]);
366 break;
367 case SA_CACHE_FLUSH:
368 request->data_direction = SOP_WRITE_FLAG;
369 cdb[0] = BMIC_WRITE;
370 cdb[6] = BMIC_CACHE_FLUSH;
371 put_unaligned_be16(buffer_length, &cdb[7]);
372 break;
373 case BMIC_IDENTIFY_CONTROLLER:
374 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
375 request->data_direction = SOP_READ_FLAG;
376 cdb[0] = BMIC_READ;
377 cdb[6] = cmd;
378 put_unaligned_be16(buffer_length, &cdb[7]);
379 break;
380 case BMIC_WRITE_HOST_WELLNESS:
381 request->data_direction = SOP_WRITE_FLAG;
382 cdb[0] = BMIC_WRITE;
383 cdb[6] = cmd;
384 put_unaligned_be16(buffer_length, &cdb[7]);
385 break;
386 default:
387 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
388 cmd);
6c223761
KB
389 break;
390 }
391
392 switch (request->data_direction) {
393 case SOP_READ_FLAG:
394 pci_dir = PCI_DMA_FROMDEVICE;
395 break;
396 case SOP_WRITE_FLAG:
397 pci_dir = PCI_DMA_TODEVICE;
398 break;
399 case SOP_NO_DIRECTION_FLAG:
400 pci_dir = PCI_DMA_NONE;
401 break;
402 default:
403 pci_dir = PCI_DMA_BIDIRECTIONAL;
404 break;
405 }
406
407 *pci_direction = pci_dir;
408
409 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
410 buffer, buffer_length, pci_dir);
411}
412
413static struct pqi_io_request *pqi_alloc_io_request(
414 struct pqi_ctrl_info *ctrl_info)
415{
416 struct pqi_io_request *io_request;
417 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
418
419 while (1) {
420 io_request = &ctrl_info->io_request_pool[i];
421 if (atomic_inc_return(&io_request->refcount) == 1)
422 break;
423 atomic_dec(&io_request->refcount);
424 i = (i + 1) % ctrl_info->max_io_slots;
425 }
426
427 /* benignly racy */
428 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
429
430 io_request->scmd = NULL;
431 io_request->status = 0;
432 io_request->error_info = NULL;
433
434 return io_request;
435}
436
437static void pqi_free_io_request(struct pqi_io_request *io_request)
438{
439 atomic_dec(&io_request->refcount);
440}
441
442static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
443 struct bmic_identify_controller *buffer)
444{
445 int rc;
446 int pci_direction;
447 struct pqi_raid_path_request request;
448
449 rc = pqi_build_raid_path_request(ctrl_info, &request,
450 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
451 sizeof(*buffer), 0, &pci_direction);
452 if (rc)
453 return rc;
454
455 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
456 NULL, NO_TIMEOUT);
457
458 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
459 pci_direction);
460
461 return rc;
462}
463
464static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
465 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
466{
467 int rc;
468 int pci_direction;
469 struct pqi_raid_path_request request;
470
471 rc = pqi_build_raid_path_request(ctrl_info, &request,
472 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
473 &pci_direction);
474 if (rc)
475 return rc;
476
477 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
478 NULL, NO_TIMEOUT);
479
480 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
481 pci_direction);
482
483 return rc;
484}
485
486static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
487 struct pqi_scsi_dev *device,
488 struct bmic_identify_physical_device *buffer,
489 size_t buffer_length)
490{
491 int rc;
492 int pci_direction;
493 u16 bmic_device_index;
494 struct pqi_raid_path_request request;
495
496 rc = pqi_build_raid_path_request(ctrl_info, &request,
497 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
498 buffer_length, 0, &pci_direction);
499 if (rc)
500 return rc;
501
502 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
503 request.cdb[2] = (u8)bmic_device_index;
504 request.cdb[9] = (u8)(bmic_device_index >> 8);
505
506 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
507 0, NULL, NO_TIMEOUT);
508
509 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
510 pci_direction);
511
512 return rc;
513}
514
515#define SA_CACHE_FLUSH_BUFFER_LENGTH 4
6c223761
KB
516
517static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
518{
519 int rc;
520 struct pqi_raid_path_request request;
521 int pci_direction;
522 u8 *buffer;
523
524 /*
525 * Don't bother trying to flush the cache if the controller is
526 * locked up.
527 */
528 if (pqi_ctrl_offline(ctrl_info))
529 return -ENXIO;
530
531 buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
532 if (!buffer)
533 return -ENOMEM;
534
535 rc = pqi_build_raid_path_request(ctrl_info, &request,
536 SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
537 SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
538 if (rc)
539 goto out;
540
541 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
d48f8fad 542 0, NULL, NO_TIMEOUT);
6c223761
KB
543
544 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
545 pci_direction);
546
547out:
548 kfree(buffer);
549
550 return rc;
551}
552
553static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
554 void *buffer, size_t buffer_length)
555{
556 int rc;
557 struct pqi_raid_path_request request;
558 int pci_direction;
559
560 rc = pqi_build_raid_path_request(ctrl_info, &request,
561 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
562 buffer_length, 0, &pci_direction);
563 if (rc)
564 return rc;
565
566 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
567 0, NULL, NO_TIMEOUT);
568
569 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
570 pci_direction);
571
572 return rc;
573}
574
575#pragma pack(1)
576
577struct bmic_host_wellness_driver_version {
578 u8 start_tag[4];
579 u8 driver_version_tag[2];
580 __le16 driver_version_length;
581 char driver_version[32];
582 u8 end_tag[2];
583};
584
585#pragma pack()
586
587static int pqi_write_driver_version_to_host_wellness(
588 struct pqi_ctrl_info *ctrl_info)
589{
590 int rc;
591 struct bmic_host_wellness_driver_version *buffer;
592 size_t buffer_length;
593
594 buffer_length = sizeof(*buffer);
595
596 buffer = kmalloc(buffer_length, GFP_KERNEL);
597 if (!buffer)
598 return -ENOMEM;
599
600 buffer->start_tag[0] = '<';
601 buffer->start_tag[1] = 'H';
602 buffer->start_tag[2] = 'W';
603 buffer->start_tag[3] = '>';
604 buffer->driver_version_tag[0] = 'D';
605 buffer->driver_version_tag[1] = 'V';
606 put_unaligned_le16(sizeof(buffer->driver_version),
607 &buffer->driver_version_length);
061ef06a 608 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
6c223761
KB
609 sizeof(buffer->driver_version) - 1);
610 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
611 buffer->end_tag[0] = 'Z';
612 buffer->end_tag[1] = 'Z';
613
614 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
615
616 kfree(buffer);
617
618 return rc;
619}
620
621#pragma pack(1)
622
623struct bmic_host_wellness_time {
624 u8 start_tag[4];
625 u8 time_tag[2];
626 __le16 time_length;
627 u8 time[8];
628 u8 dont_write_tag[2];
629 u8 end_tag[2];
630};
631
632#pragma pack()
633
634static int pqi_write_current_time_to_host_wellness(
635 struct pqi_ctrl_info *ctrl_info)
636{
637 int rc;
638 struct bmic_host_wellness_time *buffer;
639 size_t buffer_length;
640 time64_t local_time;
641 unsigned int year;
ed10858e 642 struct tm tm;
6c223761
KB
643
644 buffer_length = sizeof(*buffer);
645
646 buffer = kmalloc(buffer_length, GFP_KERNEL);
647 if (!buffer)
648 return -ENOMEM;
649
650 buffer->start_tag[0] = '<';
651 buffer->start_tag[1] = 'H';
652 buffer->start_tag[2] = 'W';
653 buffer->start_tag[3] = '>';
654 buffer->time_tag[0] = 'T';
655 buffer->time_tag[1] = 'D';
656 put_unaligned_le16(sizeof(buffer->time),
657 &buffer->time_length);
658
ed10858e
AB
659 local_time = ktime_get_real_seconds();
660 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
6c223761
KB
661 year = tm.tm_year + 1900;
662
663 buffer->time[0] = bin2bcd(tm.tm_hour);
664 buffer->time[1] = bin2bcd(tm.tm_min);
665 buffer->time[2] = bin2bcd(tm.tm_sec);
666 buffer->time[3] = 0;
667 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
668 buffer->time[5] = bin2bcd(tm.tm_mday);
669 buffer->time[6] = bin2bcd(year / 100);
670 buffer->time[7] = bin2bcd(year % 100);
671
672 buffer->dont_write_tag[0] = 'D';
673 buffer->dont_write_tag[1] = 'W';
674 buffer->end_tag[0] = 'Z';
675 buffer->end_tag[1] = 'Z';
676
677 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
678
679 kfree(buffer);
680
681 return rc;
682}
683
684#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
685
686static void pqi_update_time_worker(struct work_struct *work)
687{
688 int rc;
689 struct pqi_ctrl_info *ctrl_info;
690
691 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
692 update_time_work);
693
6c223761
KB
694 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
695 if (rc)
696 dev_warn(&ctrl_info->pci_dev->dev,
697 "error updating time on controller\n");
698
699 schedule_delayed_work(&ctrl_info->update_time_work,
700 PQI_UPDATE_TIME_WORK_INTERVAL);
701}
702
703static inline void pqi_schedule_update_time_worker(
4fbebf1a 704 struct pqi_ctrl_info *ctrl_info)
6c223761 705{
061ef06a
KB
706 if (ctrl_info->update_time_worker_scheduled)
707 return;
708
4fbebf1a 709 schedule_delayed_work(&ctrl_info->update_time_work, 0);
061ef06a
KB
710 ctrl_info->update_time_worker_scheduled = true;
711}
712
713static inline void pqi_cancel_update_time_worker(
714 struct pqi_ctrl_info *ctrl_info)
715{
716 if (!ctrl_info->update_time_worker_scheduled)
717 return;
718
719 cancel_delayed_work_sync(&ctrl_info->update_time_work);
720 ctrl_info->update_time_worker_scheduled = false;
6c223761
KB
721}
722
723static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
724 void *buffer, size_t buffer_length)
725{
726 int rc;
727 int pci_direction;
728 struct pqi_raid_path_request request;
729
730 rc = pqi_build_raid_path_request(ctrl_info, &request,
731 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
732 if (rc)
733 return rc;
734
735 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
736 NULL, NO_TIMEOUT);
737
738 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
739 pci_direction);
740
741 return rc;
742}
743
744static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
745 void **buffer)
746{
747 int rc;
748 size_t lun_list_length;
749 size_t lun_data_length;
750 size_t new_lun_list_length;
751 void *lun_data = NULL;
752 struct report_lun_header *report_lun_header;
753
754 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
755 if (!report_lun_header) {
756 rc = -ENOMEM;
757 goto out;
758 }
759
760 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
761 sizeof(*report_lun_header));
762 if (rc)
763 goto out;
764
765 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
766
767again:
768 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
769
770 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
771 if (!lun_data) {
772 rc = -ENOMEM;
773 goto out;
774 }
775
776 if (lun_list_length == 0) {
777 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
778 goto out;
779 }
780
781 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
782 if (rc)
783 goto out;
784
785 new_lun_list_length = get_unaligned_be32(
786 &((struct report_lun_header *)lun_data)->list_length);
787
788 if (new_lun_list_length > lun_list_length) {
789 lun_list_length = new_lun_list_length;
790 kfree(lun_data);
791 goto again;
792 }
793
794out:
795 kfree(report_lun_header);
796
797 if (rc) {
798 kfree(lun_data);
799 lun_data = NULL;
800 }
801
802 *buffer = lun_data;
803
804 return rc;
805}
806
807static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
808 void **buffer)
809{
810 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
811 buffer);
812}
813
814static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
815 void **buffer)
816{
817 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
818}
819
820static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
821 struct report_phys_lun_extended **physdev_list,
822 struct report_log_lun_extended **logdev_list)
823{
824 int rc;
825 size_t logdev_list_length;
826 size_t logdev_data_length;
827 struct report_log_lun_extended *internal_logdev_list;
828 struct report_log_lun_extended *logdev_data;
829 struct report_lun_header report_lun_header;
830
831 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
832 if (rc)
833 dev_err(&ctrl_info->pci_dev->dev,
834 "report physical LUNs failed\n");
835
836 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
837 if (rc)
838 dev_err(&ctrl_info->pci_dev->dev,
839 "report logical LUNs failed\n");
840
841 /*
842 * Tack the controller itself onto the end of the logical device list.
843 */
844
845 logdev_data = *logdev_list;
846
847 if (logdev_data) {
848 logdev_list_length =
849 get_unaligned_be32(&logdev_data->header.list_length);
850 } else {
851 memset(&report_lun_header, 0, sizeof(report_lun_header));
852 logdev_data =
853 (struct report_log_lun_extended *)&report_lun_header;
854 logdev_list_length = 0;
855 }
856
857 logdev_data_length = sizeof(struct report_lun_header) +
858 logdev_list_length;
859
860 internal_logdev_list = kmalloc(logdev_data_length +
861 sizeof(struct report_log_lun_extended), GFP_KERNEL);
862 if (!internal_logdev_list) {
863 kfree(*logdev_list);
864 *logdev_list = NULL;
865 return -ENOMEM;
866 }
867
868 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
869 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
870 sizeof(struct report_log_lun_extended_entry));
871 put_unaligned_be32(logdev_list_length +
872 sizeof(struct report_log_lun_extended_entry),
873 &internal_logdev_list->header.list_length);
874
875 kfree(*logdev_list);
876 *logdev_list = internal_logdev_list;
877
878 return 0;
879}
880
881static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
882 int bus, int target, int lun)
883{
884 device->bus = bus;
885 device->target = target;
886 device->lun = lun;
887}
888
889static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
890{
891 u8 *scsi3addr;
892 u32 lunid;
bd10cf0b
KB
893 int bus;
894 int target;
895 int lun;
6c223761
KB
896
897 scsi3addr = device->scsi3addr;
898 lunid = get_unaligned_le32(scsi3addr);
899
900 if (pqi_is_hba_lunid(scsi3addr)) {
901 /* The specified device is the controller. */
902 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
903 device->target_lun_valid = true;
904 return;
905 }
906
907 if (pqi_is_logical_device(device)) {
bd10cf0b
KB
908 if (device->is_external_raid_device) {
909 bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
910 target = (lunid >> 16) & 0x3fff;
911 lun = lunid & 0xff;
912 } else {
913 bus = PQI_RAID_VOLUME_BUS;
914 target = 0;
915 lun = lunid & 0x3fff;
916 }
917 pqi_set_bus_target_lun(device, bus, target, lun);
6c223761
KB
918 device->target_lun_valid = true;
919 return;
920 }
921
922 /*
923 * Defer target and LUN assignment for non-controller physical devices
924 * because the SAS transport layer will make these assignments later.
925 */
926 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
927}
928
929static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
930 struct pqi_scsi_dev *device)
931{
932 int rc;
933 u8 raid_level;
934 u8 *buffer;
935
936 raid_level = SA_RAID_UNKNOWN;
937
938 buffer = kmalloc(64, GFP_KERNEL);
939 if (buffer) {
940 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
941 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
942 if (rc == 0) {
943 raid_level = buffer[8];
944 if (raid_level > SA_RAID_MAX)
945 raid_level = SA_RAID_UNKNOWN;
946 }
947 kfree(buffer);
948 }
949
950 device->raid_level = raid_level;
951}
952
953static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
954 struct pqi_scsi_dev *device, struct raid_map *raid_map)
955{
956 char *err_msg;
957 u32 raid_map_size;
958 u32 r5or6_blocks_per_row;
959 unsigned int num_phys_disks;
960 unsigned int num_raid_map_entries;
961
962 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
963
964 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
965 err_msg = "RAID map too small";
966 goto bad_raid_map;
967 }
968
969 if (raid_map_size > sizeof(*raid_map)) {
970 err_msg = "RAID map too large";
971 goto bad_raid_map;
972 }
973
974 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
975 (get_unaligned_le16(&raid_map->data_disks_per_row) +
976 get_unaligned_le16(&raid_map->metadata_disks_per_row));
977 num_raid_map_entries = num_phys_disks *
978 get_unaligned_le16(&raid_map->row_cnt);
979
980 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
981 err_msg = "invalid number of map entries in RAID map";
982 goto bad_raid_map;
983 }
984
985 if (device->raid_level == SA_RAID_1) {
986 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
987 err_msg = "invalid RAID-1 map";
988 goto bad_raid_map;
989 }
990 } else if (device->raid_level == SA_RAID_ADM) {
991 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
992 err_msg = "invalid RAID-1(ADM) map";
993 goto bad_raid_map;
994 }
995 } else if ((device->raid_level == SA_RAID_5 ||
996 device->raid_level == SA_RAID_6) &&
997 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
998 /* RAID 50/60 */
999 r5or6_blocks_per_row =
1000 get_unaligned_le16(&raid_map->strip_size) *
1001 get_unaligned_le16(&raid_map->data_disks_per_row);
1002 if (r5or6_blocks_per_row == 0) {
1003 err_msg = "invalid RAID-5 or RAID-6 map";
1004 goto bad_raid_map;
1005 }
1006 }
1007
1008 return 0;
1009
1010bad_raid_map:
d87d5474
KB
1011 dev_warn(&ctrl_info->pci_dev->dev,
1012 "scsi %d:%d:%d:%d %s\n",
1013 ctrl_info->scsi_host->host_no,
1014 device->bus, device->target, device->lun, err_msg);
6c223761
KB
1015
1016 return -EINVAL;
1017}
1018
1019static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1020 struct pqi_scsi_dev *device)
1021{
1022 int rc;
1023 int pci_direction;
1024 struct pqi_raid_path_request request;
1025 struct raid_map *raid_map;
1026
1027 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1028 if (!raid_map)
1029 return -ENOMEM;
1030
1031 rc = pqi_build_raid_path_request(ctrl_info, &request,
1032 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
1033 sizeof(*raid_map), 0, &pci_direction);
1034 if (rc)
1035 goto error;
1036
1037 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
1038 NULL, NO_TIMEOUT);
1039
1040 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
1041 pci_direction);
1042
1043 if (rc)
1044 goto error;
1045
1046 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1047 if (rc)
1048 goto error;
1049
1050 device->raid_map = raid_map;
1051
1052 return 0;
1053
1054error:
1055 kfree(raid_map);
1056
1057 return rc;
1058}
1059
1060static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
1061 struct pqi_scsi_dev *device)
1062{
1063 int rc;
1064 u8 *buffer;
1065 u8 offload_status;
1066
1067 buffer = kmalloc(64, GFP_KERNEL);
1068 if (!buffer)
1069 return;
1070
1071 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1072 VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
1073 if (rc)
1074 goto out;
1075
1076#define OFFLOAD_STATUS_BYTE 4
1077#define OFFLOAD_CONFIGURED_BIT 0x1
1078#define OFFLOAD_ENABLED_BIT 0x2
1079
1080 offload_status = buffer[OFFLOAD_STATUS_BYTE];
1081 device->offload_configured =
1082 !!(offload_status & OFFLOAD_CONFIGURED_BIT);
1083 if (device->offload_configured) {
1084 device->offload_enabled_pending =
1085 !!(offload_status & OFFLOAD_ENABLED_BIT);
1086 if (pqi_get_raid_map(ctrl_info, device))
1087 device->offload_enabled_pending = false;
1088 }
1089
1090out:
1091 kfree(buffer);
1092}
1093
1094/*
1095 * Use vendor-specific VPD to determine online/offline status of a volume.
1096 */
1097
1098static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1099 struct pqi_scsi_dev *device)
1100{
1101 int rc;
1102 size_t page_length;
1103 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1104 bool volume_offline = true;
1105 u32 volume_flags;
1106 struct ciss_vpd_logical_volume_status *vpd;
1107
1108 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1109 if (!vpd)
1110 goto no_buffer;
1111
1112 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1113 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1114 if (rc)
1115 goto out;
1116
1117 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1118 volume_status) + vpd->page_length;
1119 if (page_length < sizeof(*vpd))
1120 goto out;
1121
1122 volume_status = vpd->volume_status;
1123 volume_flags = get_unaligned_be32(&vpd->flags);
1124 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1125
1126out:
1127 kfree(vpd);
1128no_buffer:
1129 device->volume_status = volume_status;
1130 device->volume_offline = volume_offline;
1131}
1132
1133static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1134 struct pqi_scsi_dev *device)
1135{
1136 int rc;
1137 u8 *buffer;
1138
1139 buffer = kmalloc(64, GFP_KERNEL);
1140 if (!buffer)
1141 return -ENOMEM;
1142
1143 /* Send an inquiry to the device to see what it is. */
1144 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1145 if (rc)
1146 goto out;
1147
1148 scsi_sanitize_inquiry_string(&buffer[8], 8);
1149 scsi_sanitize_inquiry_string(&buffer[16], 16);
1150
1151 device->devtype = buffer[0] & 0x1f;
cbe0c7b1
KB
1152 memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1153 memcpy(device->model, &buffer[16], sizeof(device->model));
6c223761
KB
1154
1155 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
bd10cf0b
KB
1156 if (device->is_external_raid_device) {
1157 device->raid_level = SA_RAID_UNKNOWN;
1158 device->volume_status = CISS_LV_OK;
1159 device->volume_offline = false;
1160 } else {
1161 pqi_get_raid_level(ctrl_info, device);
1162 pqi_get_offload_status(ctrl_info, device);
1163 pqi_get_volume_status(ctrl_info, device);
1164 }
6c223761
KB
1165 }
1166
1167out:
1168 kfree(buffer);
1169
1170 return rc;
1171}
1172
1173static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1174 struct pqi_scsi_dev *device,
1175 struct bmic_identify_physical_device *id_phys)
1176{
1177 int rc;
1178
1179 memset(id_phys, 0, sizeof(*id_phys));
1180
1181 rc = pqi_identify_physical_device(ctrl_info, device,
1182 id_phys, sizeof(*id_phys));
1183 if (rc) {
1184 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1185 return;
1186 }
1187
1188 device->queue_depth =
1189 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1190 device->device_type = id_phys->device_type;
1191 device->active_path_index = id_phys->active_path_number;
1192 device->path_map = id_phys->redundant_path_present_map;
1193 memcpy(&device->box,
1194 &id_phys->alternate_paths_phys_box_on_port,
1195 sizeof(device->box));
1196 memcpy(&device->phys_connector,
1197 &id_phys->alternate_paths_phys_connector,
1198 sizeof(device->phys_connector));
1199 device->bay = id_phys->phys_bay_in_box;
1200}
1201
1202static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1203 struct pqi_scsi_dev *device)
1204{
1205 char *status;
1206 static const char unknown_state_str[] =
1207 "Volume is in an unknown state (%u)";
1208 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1209
1210 switch (device->volume_status) {
1211 case CISS_LV_OK:
1212 status = "Volume online";
1213 break;
1214 case CISS_LV_FAILED:
1215 status = "Volume failed";
1216 break;
1217 case CISS_LV_NOT_CONFIGURED:
1218 status = "Volume not configured";
1219 break;
1220 case CISS_LV_DEGRADED:
1221 status = "Volume degraded";
1222 break;
1223 case CISS_LV_READY_FOR_RECOVERY:
1224 status = "Volume ready for recovery operation";
1225 break;
1226 case CISS_LV_UNDERGOING_RECOVERY:
1227 status = "Volume undergoing recovery";
1228 break;
1229 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1230 status = "Wrong physical drive was replaced";
1231 break;
1232 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1233 status = "A physical drive not properly connected";
1234 break;
1235 case CISS_LV_HARDWARE_OVERHEATING:
1236 status = "Hardware is overheating";
1237 break;
1238 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1239 status = "Hardware has overheated";
1240 break;
1241 case CISS_LV_UNDERGOING_EXPANSION:
1242 status = "Volume undergoing expansion";
1243 break;
1244 case CISS_LV_NOT_AVAILABLE:
1245 status = "Volume waiting for transforming volume";
1246 break;
1247 case CISS_LV_QUEUED_FOR_EXPANSION:
1248 status = "Volume queued for expansion";
1249 break;
1250 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1251 status = "Volume disabled due to SCSI ID conflict";
1252 break;
1253 case CISS_LV_EJECTED:
1254 status = "Volume has been ejected";
1255 break;
1256 case CISS_LV_UNDERGOING_ERASE:
1257 status = "Volume undergoing background erase";
1258 break;
1259 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1260 status = "Volume ready for predictive spare rebuild";
1261 break;
1262 case CISS_LV_UNDERGOING_RPI:
1263 status = "Volume undergoing rapid parity initialization";
1264 break;
1265 case CISS_LV_PENDING_RPI:
1266 status = "Volume queued for rapid parity initialization";
1267 break;
1268 case CISS_LV_ENCRYPTED_NO_KEY:
1269 status = "Encrypted volume inaccessible - key not present";
1270 break;
1271 case CISS_LV_UNDERGOING_ENCRYPTION:
1272 status = "Volume undergoing encryption process";
1273 break;
1274 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1275 status = "Volume undergoing encryption re-keying process";
1276 break;
1277 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
d87d5474 1278 status = "Volume encrypted but encryption is disabled";
6c223761
KB
1279 break;
1280 case CISS_LV_PENDING_ENCRYPTION:
1281 status = "Volume pending migration to encrypted state";
1282 break;
1283 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1284 status = "Volume pending encryption rekeying";
1285 break;
1286 case CISS_LV_NOT_SUPPORTED:
1287 status = "Volume not supported on this controller";
1288 break;
1289 case CISS_LV_STATUS_UNAVAILABLE:
1290 status = "Volume status not available";
1291 break;
1292 default:
1293 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1294 unknown_state_str, device->volume_status);
1295 status = unknown_state_buffer;
1296 break;
1297 }
1298
1299 dev_info(&ctrl_info->pci_dev->dev,
1300 "scsi %d:%d:%d:%d %s\n",
1301 ctrl_info->scsi_host->host_no,
1302 device->bus, device->target, device->lun, status);
1303}
1304
1305static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
1306 struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
1307{
1308 struct pqi_scsi_dev *device;
1309
1310 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1311 scsi_device_list_entry) {
1312 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1313 continue;
1314 if (pqi_is_logical_device(device))
1315 continue;
1316 if (device->aio_handle == aio_handle)
1317 return device;
1318 }
1319
1320 return NULL;
1321}
1322
1323static void pqi_update_logical_drive_queue_depth(
1324 struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
1325{
1326 unsigned int i;
1327 struct raid_map *raid_map;
1328 struct raid_map_disk_data *disk_data;
1329 struct pqi_scsi_dev *phys_disk;
1330 unsigned int num_phys_disks;
1331 unsigned int num_raid_map_entries;
1332 unsigned int queue_depth;
1333
1334 logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
1335
1336 raid_map = logical_drive->raid_map;
1337 if (!raid_map)
1338 return;
1339
1340 disk_data = raid_map->disk_data;
1341 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1342 (get_unaligned_le16(&raid_map->data_disks_per_row) +
1343 get_unaligned_le16(&raid_map->metadata_disks_per_row));
1344 num_raid_map_entries = num_phys_disks *
1345 get_unaligned_le16(&raid_map->row_cnt);
1346
1347 queue_depth = 0;
1348 for (i = 0; i < num_raid_map_entries; i++) {
1349 phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
1350 disk_data[i].aio_handle);
1351
1352 if (!phys_disk) {
1353 dev_warn(&ctrl_info->pci_dev->dev,
1354 "failed to find physical disk for logical drive %016llx\n",
1355 get_unaligned_be64(logical_drive->scsi3addr));
1356 logical_drive->offload_enabled = false;
1357 logical_drive->offload_enabled_pending = false;
1358 kfree(raid_map);
1359 logical_drive->raid_map = NULL;
1360 return;
1361 }
1362
1363 queue_depth += phys_disk->queue_depth;
1364 }
1365
1366 logical_drive->queue_depth = queue_depth;
1367}
1368
1369static void pqi_update_all_logical_drive_queue_depths(
1370 struct pqi_ctrl_info *ctrl_info)
1371{
1372 struct pqi_scsi_dev *device;
1373
1374 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1375 scsi_device_list_entry) {
1376 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1377 continue;
1378 if (!pqi_is_logical_device(device))
1379 continue;
bd10cf0b
KB
1380 if (device->is_external_raid_device)
1381 continue;
6c223761
KB
1382 pqi_update_logical_drive_queue_depth(ctrl_info, device);
1383 }
1384}
1385
1386static void pqi_rescan_worker(struct work_struct *work)
1387{
1388 struct pqi_ctrl_info *ctrl_info;
1389
1390 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1391 rescan_work);
1392
1393 pqi_scan_scsi_devices(ctrl_info);
1394}
1395
1396static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1397 struct pqi_scsi_dev *device)
1398{
1399 int rc;
1400
1401 if (pqi_is_logical_device(device))
1402 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1403 device->target, device->lun);
1404 else
1405 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1406
1407 return rc;
1408}
1409
1410static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1411 struct pqi_scsi_dev *device)
1412{
1413 if (pqi_is_logical_device(device))
1414 scsi_remove_device(device->sdev);
1415 else
1416 pqi_remove_sas_device(device);
1417}
1418
1419/* Assumes the SCSI device list lock is held. */
1420
1421static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1422 int bus, int target, int lun)
1423{
1424 struct pqi_scsi_dev *device;
1425
1426 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1427 scsi_device_list_entry)
1428 if (device->bus == bus && device->target == target &&
1429 device->lun == lun)
1430 return device;
1431
1432 return NULL;
1433}
1434
1435static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1436 struct pqi_scsi_dev *dev2)
1437{
1438 if (dev1->is_physical_device != dev2->is_physical_device)
1439 return false;
1440
1441 if (dev1->is_physical_device)
1442 return dev1->wwid == dev2->wwid;
1443
1444 return memcmp(dev1->volume_id, dev2->volume_id,
1445 sizeof(dev1->volume_id)) == 0;
1446}
1447
1448enum pqi_find_result {
1449 DEVICE_NOT_FOUND,
1450 DEVICE_CHANGED,
1451 DEVICE_SAME,
1452};
1453
1454static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1455 struct pqi_scsi_dev *device_to_find,
1456 struct pqi_scsi_dev **matching_device)
1457{
1458 struct pqi_scsi_dev *device;
1459
1460 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1461 scsi_device_list_entry) {
1462 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1463 device->scsi3addr)) {
1464 *matching_device = device;
1465 if (pqi_device_equal(device_to_find, device)) {
1466 if (device_to_find->volume_offline)
1467 return DEVICE_CHANGED;
1468 return DEVICE_SAME;
1469 }
1470 return DEVICE_CHANGED;
1471 }
1472 }
1473
1474 return DEVICE_NOT_FOUND;
1475}
1476
1477static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1478 char *action, struct pqi_scsi_dev *device)
1479{
1480 dev_info(&ctrl_info->pci_dev->dev,
1481 "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
1482 action,
1483 ctrl_info->scsi_host->host_no,
1484 device->bus,
1485 device->target,
1486 device->lun,
1487 scsi_device_type(device->devtype),
1488 device->vendor,
1489 device->model,
bd10cf0b
KB
1490 pqi_is_logical_device(device) ?
1491 pqi_raid_level_to_string(device->raid_level) : "",
6c223761
KB
1492 device->offload_configured ? '+' : '-',
1493 device->offload_enabled_pending ? '+' : '-',
1494 device->expose_device ? '+' : '-',
1495 device->queue_depth);
1496}
1497
1498/* Assumes the SCSI device list lock is held. */
1499
1500static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1501 struct pqi_scsi_dev *new_device)
1502{
1503 existing_device->devtype = new_device->devtype;
1504 existing_device->device_type = new_device->device_type;
1505 existing_device->bus = new_device->bus;
1506 if (new_device->target_lun_valid) {
1507 existing_device->target = new_device->target;
1508 existing_device->lun = new_device->lun;
1509 existing_device->target_lun_valid = true;
1510 }
1511
1512 /* By definition, the scsi3addr and wwid fields are already the same. */
1513
1514 existing_device->is_physical_device = new_device->is_physical_device;
bd10cf0b
KB
1515 existing_device->is_external_raid_device =
1516 new_device->is_external_raid_device;
6c223761
KB
1517 existing_device->expose_device = new_device->expose_device;
1518 existing_device->no_uld_attach = new_device->no_uld_attach;
1519 existing_device->aio_enabled = new_device->aio_enabled;
1520 memcpy(existing_device->vendor, new_device->vendor,
1521 sizeof(existing_device->vendor));
1522 memcpy(existing_device->model, new_device->model,
1523 sizeof(existing_device->model));
1524 existing_device->sas_address = new_device->sas_address;
1525 existing_device->raid_level = new_device->raid_level;
1526 existing_device->queue_depth = new_device->queue_depth;
1527 existing_device->aio_handle = new_device->aio_handle;
1528 existing_device->volume_status = new_device->volume_status;
1529 existing_device->active_path_index = new_device->active_path_index;
1530 existing_device->path_map = new_device->path_map;
1531 existing_device->bay = new_device->bay;
1532 memcpy(existing_device->box, new_device->box,
1533 sizeof(existing_device->box));
1534 memcpy(existing_device->phys_connector, new_device->phys_connector,
1535 sizeof(existing_device->phys_connector));
1536 existing_device->offload_configured = new_device->offload_configured;
1537 existing_device->offload_enabled = false;
1538 existing_device->offload_enabled_pending =
1539 new_device->offload_enabled_pending;
1540 existing_device->offload_to_mirror = 0;
1541 kfree(existing_device->raid_map);
1542 existing_device->raid_map = new_device->raid_map;
1543
1544 /* To prevent this from being freed later. */
1545 new_device->raid_map = NULL;
1546}
1547
1548static inline void pqi_free_device(struct pqi_scsi_dev *device)
1549{
1550 if (device) {
1551 kfree(device->raid_map);
1552 kfree(device);
1553 }
1554}
1555
1556/*
1557 * Called when exposing a new device to the OS fails in order to re-adjust
1558 * our internal SCSI device list to match the SCSI ML's view.
1559 */
1560
1561static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1562 struct pqi_scsi_dev *device)
1563{
1564 unsigned long flags;
1565
1566 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1567 list_del(&device->scsi_device_list_entry);
1568 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1569
1570 /* Allow the device structure to be freed later. */
1571 device->keep_device = false;
1572}
1573
1574static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1575 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1576{
1577 int rc;
1578 unsigned int i;
1579 unsigned long flags;
1580 enum pqi_find_result find_result;
1581 struct pqi_scsi_dev *device;
1582 struct pqi_scsi_dev *next;
1583 struct pqi_scsi_dev *matching_device;
1584 struct list_head add_list;
1585 struct list_head delete_list;
1586
1587 INIT_LIST_HEAD(&add_list);
1588 INIT_LIST_HEAD(&delete_list);
1589
1590 /*
1591 * The idea here is to do as little work as possible while holding the
1592 * spinlock. That's why we go to great pains to defer anything other
1593 * than updating the internal device list until after we release the
1594 * spinlock.
1595 */
1596
1597 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1598
1599 /* Assume that all devices in the existing list have gone away. */
1600 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1601 scsi_device_list_entry)
1602 device->device_gone = true;
1603
1604 for (i = 0; i < num_new_devices; i++) {
1605 device = new_device_list[i];
1606
1607 find_result = pqi_scsi_find_entry(ctrl_info, device,
1608 &matching_device);
1609
1610 switch (find_result) {
1611 case DEVICE_SAME:
1612 /*
1613 * The newly found device is already in the existing
1614 * device list.
1615 */
1616 device->new_device = false;
1617 matching_device->device_gone = false;
1618 pqi_scsi_update_device(matching_device, device);
1619 break;
1620 case DEVICE_NOT_FOUND:
1621 /*
1622 * The newly found device is NOT in the existing device
1623 * list.
1624 */
1625 device->new_device = true;
1626 break;
1627 case DEVICE_CHANGED:
1628 /*
1629 * The original device has gone away and we need to add
1630 * the new device.
1631 */
1632 device->new_device = true;
1633 break;
6c223761
KB
1634 }
1635 }
1636
1637 /* Process all devices that have gone away. */
1638 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1639 scsi_device_list_entry) {
1640 if (device->device_gone) {
1641 list_del(&device->scsi_device_list_entry);
1642 list_add_tail(&device->delete_list_entry, &delete_list);
1643 }
1644 }
1645
1646 /* Process all new devices. */
1647 for (i = 0; i < num_new_devices; i++) {
1648 device = new_device_list[i];
1649 if (!device->new_device)
1650 continue;
1651 if (device->volume_offline)
1652 continue;
1653 list_add_tail(&device->scsi_device_list_entry,
1654 &ctrl_info->scsi_device_list);
1655 list_add_tail(&device->add_list_entry, &add_list);
1656 /* To prevent this device structure from being freed later. */
1657 device->keep_device = true;
1658 }
1659
1660 pqi_update_all_logical_drive_queue_depths(ctrl_info);
1661
1662 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1663 scsi_device_list_entry)
1664 device->offload_enabled =
1665 device->offload_enabled_pending;
1666
1667 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1668
1669 /* Remove all devices that have gone away. */
1670 list_for_each_entry_safe(device, next, &delete_list,
1671 delete_list_entry) {
1672 if (device->sdev)
1673 pqi_remove_device(ctrl_info, device);
1674 if (device->volume_offline) {
1675 pqi_dev_info(ctrl_info, "offline", device);
1676 pqi_show_volume_status(ctrl_info, device);
1677 } else {
1678 pqi_dev_info(ctrl_info, "removed", device);
1679 }
1680 list_del(&device->delete_list_entry);
1681 pqi_free_device(device);
1682 }
1683
1684 /*
1685 * Notify the SCSI ML if the queue depth of any existing device has
1686 * changed.
1687 */
1688 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1689 scsi_device_list_entry) {
1690 if (device->sdev && device->queue_depth !=
1691 device->advertised_queue_depth) {
1692 device->advertised_queue_depth = device->queue_depth;
1693 scsi_change_queue_depth(device->sdev,
1694 device->advertised_queue_depth);
1695 }
1696 }
1697
1698 /* Expose any new devices. */
1699 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
1700 if (device->expose_device && !device->sdev) {
1701 rc = pqi_add_device(ctrl_info, device);
1702 if (rc) {
1703 dev_warn(&ctrl_info->pci_dev->dev,
1704 "scsi %d:%d:%d:%d addition failed, device not added\n",
1705 ctrl_info->scsi_host->host_no,
1706 device->bus, device->target,
1707 device->lun);
1708 pqi_fixup_botched_add(ctrl_info, device);
1709 continue;
1710 }
1711 }
1712 pqi_dev_info(ctrl_info, "added", device);
1713 }
1714}
1715
1716static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1717{
1718 bool is_supported = false;
1719
1720 switch (device->devtype) {
1721 case TYPE_DISK:
1722 case TYPE_ZBC:
1723 case TYPE_TAPE:
1724 case TYPE_MEDIUM_CHANGER:
1725 case TYPE_ENCLOSURE:
1726 is_supported = true;
1727 break;
1728 case TYPE_RAID:
1729 /*
1730 * Only support the HBA controller itself as a RAID
1731 * controller. If it's a RAID controller other than
1732 * the HBA itself (an external RAID controller, MSA500
1733 * or similar), we don't support it.
1734 */
1735 if (pqi_is_hba_lunid(device->scsi3addr))
1736 is_supported = true;
1737 break;
1738 }
1739
1740 return is_supported;
1741}
1742
1743static inline bool pqi_skip_device(u8 *scsi3addr,
1744 struct report_phys_lun_extended_entry *phys_lun_ext_entry)
1745{
1746 u8 device_flags;
1747
1748 if (!MASKED_DEVICE(scsi3addr))
1749 return false;
1750
1751 /* The device is masked. */
1752
1753 device_flags = phys_lun_ext_entry->device_flags;
1754
1755 if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
1756 /*
1757 * It's a non-disk device. We ignore all devices of this type
1758 * when they're masked.
1759 */
1760 return true;
1761 }
1762
1763 return false;
1764}
1765
cbe0c7b1 1766static inline bool pqi_ok_to_expose_device(struct pqi_scsi_dev *device)
6c223761
KB
1767{
1768 /* Expose all devices except for physical devices that are masked. */
1769 if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
1770 return false;
1771
1772 return true;
1773}
1774
1775static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1776{
1777 int i;
1778 int rc;
1779 struct list_head new_device_list_head;
1780 struct report_phys_lun_extended *physdev_list = NULL;
1781 struct report_log_lun_extended *logdev_list = NULL;
1782 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1783 struct report_log_lun_extended_entry *log_lun_ext_entry;
1784 struct bmic_identify_physical_device *id_phys = NULL;
1785 u32 num_physicals;
1786 u32 num_logicals;
1787 struct pqi_scsi_dev **new_device_list = NULL;
1788 struct pqi_scsi_dev *device;
1789 struct pqi_scsi_dev *next;
1790 unsigned int num_new_devices;
1791 unsigned int num_valid_devices;
1792 bool is_physical_device;
1793 u8 *scsi3addr;
1794 static char *out_of_memory_msg =
1795 "out of memory, device discovery stopped";
1796
1797 INIT_LIST_HEAD(&new_device_list_head);
1798
1799 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1800 if (rc)
1801 goto out;
1802
1803 if (physdev_list)
1804 num_physicals =
1805 get_unaligned_be32(&physdev_list->header.list_length)
1806 / sizeof(physdev_list->lun_entries[0]);
1807 else
1808 num_physicals = 0;
1809
1810 if (logdev_list)
1811 num_logicals =
1812 get_unaligned_be32(&logdev_list->header.list_length)
1813 / sizeof(logdev_list->lun_entries[0]);
1814 else
1815 num_logicals = 0;
1816
1817 if (num_physicals) {
1818 /*
1819 * We need this buffer for calls to pqi_get_physical_disk_info()
1820 * below. We allocate it here instead of inside
1821 * pqi_get_physical_disk_info() because it's a fairly large
1822 * buffer.
1823 */
1824 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1825 if (!id_phys) {
1826 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1827 out_of_memory_msg);
1828 rc = -ENOMEM;
1829 goto out;
1830 }
1831 }
1832
1833 num_new_devices = num_physicals + num_logicals;
1834
1835 new_device_list = kmalloc(sizeof(*new_device_list) *
1836 num_new_devices, GFP_KERNEL);
1837 if (!new_device_list) {
1838 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1839 rc = -ENOMEM;
1840 goto out;
1841 }
1842
1843 for (i = 0; i < num_new_devices; i++) {
1844 device = kzalloc(sizeof(*device), GFP_KERNEL);
1845 if (!device) {
1846 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1847 out_of_memory_msg);
1848 rc = -ENOMEM;
1849 goto out;
1850 }
1851 list_add_tail(&device->new_device_list_entry,
1852 &new_device_list_head);
1853 }
1854
1855 device = NULL;
1856 num_valid_devices = 0;
1857
1858 for (i = 0; i < num_new_devices; i++) {
1859
1860 if (i < num_physicals) {
1861 is_physical_device = true;
1862 phys_lun_ext_entry = &physdev_list->lun_entries[i];
1863 log_lun_ext_entry = NULL;
1864 scsi3addr = phys_lun_ext_entry->lunid;
1865 } else {
1866 is_physical_device = false;
1867 phys_lun_ext_entry = NULL;
1868 log_lun_ext_entry =
1869 &logdev_list->lun_entries[i - num_physicals];
1870 scsi3addr = log_lun_ext_entry->lunid;
1871 }
1872
1873 if (is_physical_device &&
1874 pqi_skip_device(scsi3addr, phys_lun_ext_entry))
1875 continue;
1876
1877 if (device)
1878 device = list_next_entry(device, new_device_list_entry);
1879 else
1880 device = list_first_entry(&new_device_list_head,
1881 struct pqi_scsi_dev, new_device_list_entry);
1882
1883 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1884 device->is_physical_device = is_physical_device;
bd10cf0b
KB
1885 if (!is_physical_device)
1886 device->is_external_raid_device =
1887 pqi_is_external_raid_addr(scsi3addr);
6c223761
KB
1888
1889 /* Gather information about the device. */
1890 rc = pqi_get_device_info(ctrl_info, device);
1891 if (rc == -ENOMEM) {
1892 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1893 out_of_memory_msg);
1894 goto out;
1895 }
1896 if (rc) {
1897 dev_warn(&ctrl_info->pci_dev->dev,
1898 "obtaining device info failed, skipping device %016llx\n",
1899 get_unaligned_be64(device->scsi3addr));
1900 rc = 0;
1901 continue;
1902 }
1903
1904 if (!pqi_is_supported_device(device))
1905 continue;
1906
1907 pqi_assign_bus_target_lun(device);
1908
cbe0c7b1 1909 device->expose_device = pqi_ok_to_expose_device(device);
6c223761
KB
1910
1911 if (device->is_physical_device) {
1912 device->wwid = phys_lun_ext_entry->wwid;
1913 if ((phys_lun_ext_entry->device_flags &
1914 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1915 phys_lun_ext_entry->aio_handle)
1916 device->aio_enabled = true;
1917 } else {
1918 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1919 sizeof(device->volume_id));
1920 }
1921
1922 switch (device->devtype) {
1923 case TYPE_DISK:
1924 case TYPE_ZBC:
1925 case TYPE_ENCLOSURE:
1926 if (device->is_physical_device) {
1927 device->sas_address =
1928 get_unaligned_be64(&device->wwid);
1929 if (device->devtype == TYPE_DISK ||
1930 device->devtype == TYPE_ZBC) {
1931 device->aio_handle =
1932 phys_lun_ext_entry->aio_handle;
1933 pqi_get_physical_disk_info(ctrl_info,
1934 device, id_phys);
1935 }
1936 }
1937 break;
1938 }
1939
1940 new_device_list[num_valid_devices++] = device;
1941 }
1942
1943 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1944
1945out:
1946 list_for_each_entry_safe(device, next, &new_device_list_head,
1947 new_device_list_entry) {
1948 if (device->keep_device)
1949 continue;
1950 list_del(&device->new_device_list_entry);
1951 pqi_free_device(device);
1952 }
1953
1954 kfree(new_device_list);
1955 kfree(physdev_list);
1956 kfree(logdev_list);
1957 kfree(id_phys);
1958
1959 return rc;
1960}
1961
1962static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1963{
1964 unsigned long flags;
1965 struct pqi_scsi_dev *device;
6c223761 1966
a37ef745
KB
1967 while (1) {
1968 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1969
1970 device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
1971 struct pqi_scsi_dev, scsi_device_list_entry);
1972 if (device)
1973 list_del(&device->scsi_device_list_entry);
1974
1975 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
1976 flags);
1977
1978 if (!device)
1979 break;
6c223761 1980
6c223761
KB
1981 if (device->sdev)
1982 pqi_remove_device(ctrl_info, device);
6c223761
KB
1983 pqi_free_device(device);
1984 }
6c223761
KB
1985}
1986
1987static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1988{
1989 int rc;
1990
1991 if (pqi_ctrl_offline(ctrl_info))
1992 return -ENXIO;
1993
1994 mutex_lock(&ctrl_info->scan_mutex);
1995
1996 rc = pqi_update_scsi_devices(ctrl_info);
1997 if (rc)
1998 pqi_schedule_rescan_worker(ctrl_info);
1999
2000 mutex_unlock(&ctrl_info->scan_mutex);
2001
2002 return rc;
2003}
2004
2005static void pqi_scan_start(struct Scsi_Host *shost)
2006{
2007 pqi_scan_scsi_devices(shost_to_hba(shost));
2008}
2009
2010/* Returns TRUE if scan is finished. */
2011
2012static int pqi_scan_finished(struct Scsi_Host *shost,
2013 unsigned long elapsed_time)
2014{
2015 struct pqi_ctrl_info *ctrl_info;
2016
2017 ctrl_info = shost_priv(shost);
2018
2019 return !mutex_is_locked(&ctrl_info->scan_mutex);
2020}
2021
061ef06a
KB
2022static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
2023{
2024 mutex_lock(&ctrl_info->scan_mutex);
2025 mutex_unlock(&ctrl_info->scan_mutex);
2026}
2027
2028static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
2029{
2030 mutex_lock(&ctrl_info->lun_reset_mutex);
2031 mutex_unlock(&ctrl_info->lun_reset_mutex);
2032}
2033
6c223761
KB
2034static inline void pqi_set_encryption_info(
2035 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
2036 u64 first_block)
2037{
2038 u32 volume_blk_size;
2039
2040 /*
2041 * Set the encryption tweak values based on logical block address.
2042 * If the block size is 512, the tweak value is equal to the LBA.
2043 * For other block sizes, tweak value is (LBA * block size) / 512.
2044 */
2045 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2046 if (volume_blk_size != 512)
2047 first_block = (first_block * volume_blk_size) / 512;
2048
2049 encryption_info->data_encryption_key_index =
2050 get_unaligned_le16(&raid_map->data_encryption_key_index);
2051 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2052 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2053}
2054
2055/*
2056 * Attempt to perform offload RAID mapping for a logical volume I/O.
2057 */
2058
2059#define PQI_RAID_BYPASS_INELIGIBLE 1
2060
2061static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2062 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2063 struct pqi_queue_group *queue_group)
2064{
2065 struct raid_map *raid_map;
2066 bool is_write = false;
2067 u32 map_index;
2068 u64 first_block;
2069 u64 last_block;
2070 u32 block_cnt;
2071 u32 blocks_per_row;
2072 u64 first_row;
2073 u64 last_row;
2074 u32 first_row_offset;
2075 u32 last_row_offset;
2076 u32 first_column;
2077 u32 last_column;
2078 u64 r0_first_row;
2079 u64 r0_last_row;
2080 u32 r5or6_blocks_per_row;
2081 u64 r5or6_first_row;
2082 u64 r5or6_last_row;
2083 u32 r5or6_first_row_offset;
2084 u32 r5or6_last_row_offset;
2085 u32 r5or6_first_column;
2086 u32 r5or6_last_column;
2087 u16 data_disks_per_row;
2088 u32 total_disks_per_row;
2089 u16 layout_map_count;
2090 u32 stripesize;
2091 u16 strip_size;
2092 u32 first_group;
2093 u32 last_group;
2094 u32 current_group;
2095 u32 map_row;
2096 u32 aio_handle;
2097 u64 disk_block;
2098 u32 disk_block_cnt;
2099 u8 cdb[16];
2100 u8 cdb_length;
2101 int offload_to_mirror;
2102 struct pqi_encryption_info *encryption_info_ptr;
2103 struct pqi_encryption_info encryption_info;
2104#if BITS_PER_LONG == 32
2105 u64 tmpdiv;
2106#endif
2107
2108 /* Check for valid opcode, get LBA and block count. */
2109 switch (scmd->cmnd[0]) {
2110 case WRITE_6:
2111 is_write = true;
2112 /* fall through */
2113 case READ_6:
e018ef57
B
2114 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2115 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
6c223761
KB
2116 block_cnt = (u32)scmd->cmnd[4];
2117 if (block_cnt == 0)
2118 block_cnt = 256;
2119 break;
2120 case WRITE_10:
2121 is_write = true;
2122 /* fall through */
2123 case READ_10:
2124 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2125 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2126 break;
2127 case WRITE_12:
2128 is_write = true;
2129 /* fall through */
2130 case READ_12:
2131 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2132 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2133 break;
2134 case WRITE_16:
2135 is_write = true;
2136 /* fall through */
2137 case READ_16:
2138 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2139 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2140 break;
2141 default:
2142 /* Process via normal I/O path. */
2143 return PQI_RAID_BYPASS_INELIGIBLE;
2144 }
2145
2146 /* Check for write to non-RAID-0. */
2147 if (is_write && device->raid_level != SA_RAID_0)
2148 return PQI_RAID_BYPASS_INELIGIBLE;
2149
2150 if (unlikely(block_cnt == 0))
2151 return PQI_RAID_BYPASS_INELIGIBLE;
2152
2153 last_block = first_block + block_cnt - 1;
2154 raid_map = device->raid_map;
2155
2156 /* Check for invalid block or wraparound. */
2157 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2158 last_block < first_block)
2159 return PQI_RAID_BYPASS_INELIGIBLE;
2160
2161 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2162 strip_size = get_unaligned_le16(&raid_map->strip_size);
2163 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2164
2165 /* Calculate stripe information for the request. */
2166 blocks_per_row = data_disks_per_row * strip_size;
2167#if BITS_PER_LONG == 32
2168 tmpdiv = first_block;
2169 do_div(tmpdiv, blocks_per_row);
2170 first_row = tmpdiv;
2171 tmpdiv = last_block;
2172 do_div(tmpdiv, blocks_per_row);
2173 last_row = tmpdiv;
2174 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2175 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2176 tmpdiv = first_row_offset;
2177 do_div(tmpdiv, strip_size);
2178 first_column = tmpdiv;
2179 tmpdiv = last_row_offset;
2180 do_div(tmpdiv, strip_size);
2181 last_column = tmpdiv;
2182#else
2183 first_row = first_block / blocks_per_row;
2184 last_row = last_block / blocks_per_row;
2185 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2186 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2187 first_column = first_row_offset / strip_size;
2188 last_column = last_row_offset / strip_size;
2189#endif
2190
2191 /* If this isn't a single row/column then give to the controller. */
2192 if (first_row != last_row || first_column != last_column)
2193 return PQI_RAID_BYPASS_INELIGIBLE;
2194
2195 /* Proceeding with driver mapping. */
2196 total_disks_per_row = data_disks_per_row +
2197 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2198 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2199 get_unaligned_le16(&raid_map->row_cnt);
2200 map_index = (map_row * total_disks_per_row) + first_column;
2201
2202 /* RAID 1 */
2203 if (device->raid_level == SA_RAID_1) {
2204 if (device->offload_to_mirror)
2205 map_index += data_disks_per_row;
2206 device->offload_to_mirror = !device->offload_to_mirror;
2207 } else if (device->raid_level == SA_RAID_ADM) {
2208 /* RAID ADM */
2209 /*
2210 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2211 * divisible by 3.
2212 */
2213 offload_to_mirror = device->offload_to_mirror;
2214 if (offload_to_mirror == 0) {
2215 /* use physical disk in the first mirrored group. */
2216 map_index %= data_disks_per_row;
2217 } else {
2218 do {
2219 /*
2220 * Determine mirror group that map_index
2221 * indicates.
2222 */
2223 current_group = map_index / data_disks_per_row;
2224
2225 if (offload_to_mirror != current_group) {
2226 if (current_group <
2227 layout_map_count - 1) {
2228 /*
2229 * Select raid index from
2230 * next group.
2231 */
2232 map_index += data_disks_per_row;
2233 current_group++;
2234 } else {
2235 /*
2236 * Select raid index from first
2237 * group.
2238 */
2239 map_index %= data_disks_per_row;
2240 current_group = 0;
2241 }
2242 }
2243 } while (offload_to_mirror != current_group);
2244 }
2245
2246 /* Set mirror group to use next time. */
2247 offload_to_mirror =
2248 (offload_to_mirror >= layout_map_count - 1) ?
2249 0 : offload_to_mirror + 1;
2250 WARN_ON(offload_to_mirror >= layout_map_count);
2251 device->offload_to_mirror = offload_to_mirror;
2252 /*
2253 * Avoid direct use of device->offload_to_mirror within this
2254 * function since multiple threads might simultaneously
2255 * increment it beyond the range of device->layout_map_count -1.
2256 */
2257 } else if ((device->raid_level == SA_RAID_5 ||
2258 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2259 /* RAID 50/60 */
2260 /* Verify first and last block are in same RAID group */
2261 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2262 stripesize = r5or6_blocks_per_row * layout_map_count;
2263#if BITS_PER_LONG == 32
2264 tmpdiv = first_block;
2265 first_group = do_div(tmpdiv, stripesize);
2266 tmpdiv = first_group;
2267 do_div(tmpdiv, r5or6_blocks_per_row);
2268 first_group = tmpdiv;
2269 tmpdiv = last_block;
2270 last_group = do_div(tmpdiv, stripesize);
2271 tmpdiv = last_group;
2272 do_div(tmpdiv, r5or6_blocks_per_row);
2273 last_group = tmpdiv;
2274#else
2275 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2276 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2277#endif
2278 if (first_group != last_group)
2279 return PQI_RAID_BYPASS_INELIGIBLE;
2280
2281 /* Verify request is in a single row of RAID 5/6 */
2282#if BITS_PER_LONG == 32
2283 tmpdiv = first_block;
2284 do_div(tmpdiv, stripesize);
2285 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2286 tmpdiv = last_block;
2287 do_div(tmpdiv, stripesize);
2288 r5or6_last_row = r0_last_row = tmpdiv;
2289#else
2290 first_row = r5or6_first_row = r0_first_row =
2291 first_block / stripesize;
2292 r5or6_last_row = r0_last_row = last_block / stripesize;
2293#endif
2294 if (r5or6_first_row != r5or6_last_row)
2295 return PQI_RAID_BYPASS_INELIGIBLE;
2296
2297 /* Verify request is in a single column */
2298#if BITS_PER_LONG == 32
2299 tmpdiv = first_block;
2300 first_row_offset = do_div(tmpdiv, stripesize);
2301 tmpdiv = first_row_offset;
2302 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2303 r5or6_first_row_offset = first_row_offset;
2304 tmpdiv = last_block;
2305 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2306 tmpdiv = r5or6_last_row_offset;
2307 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2308 tmpdiv = r5or6_first_row_offset;
2309 do_div(tmpdiv, strip_size);
2310 first_column = r5or6_first_column = tmpdiv;
2311 tmpdiv = r5or6_last_row_offset;
2312 do_div(tmpdiv, strip_size);
2313 r5or6_last_column = tmpdiv;
2314#else
2315 first_row_offset = r5or6_first_row_offset =
2316 (u32)((first_block % stripesize) %
2317 r5or6_blocks_per_row);
2318
2319 r5or6_last_row_offset =
2320 (u32)((last_block % stripesize) %
2321 r5or6_blocks_per_row);
2322
2323 first_column = r5or6_first_row_offset / strip_size;
2324 r5or6_first_column = first_column;
2325 r5or6_last_column = r5or6_last_row_offset / strip_size;
2326#endif
2327 if (r5or6_first_column != r5or6_last_column)
2328 return PQI_RAID_BYPASS_INELIGIBLE;
2329
2330 /* Request is eligible */
2331 map_row =
2332 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2333 get_unaligned_le16(&raid_map->row_cnt);
2334
2335 map_index = (first_group *
2336 (get_unaligned_le16(&raid_map->row_cnt) *
2337 total_disks_per_row)) +
2338 (map_row * total_disks_per_row) + first_column;
2339 }
2340
2341 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2342 return PQI_RAID_BYPASS_INELIGIBLE;
2343
2344 aio_handle = raid_map->disk_data[map_index].aio_handle;
2345 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2346 first_row * strip_size +
2347 (first_row_offset - first_column * strip_size);
2348 disk_block_cnt = block_cnt;
2349
2350 /* Handle differing logical/physical block sizes. */
2351 if (raid_map->phys_blk_shift) {
2352 disk_block <<= raid_map->phys_blk_shift;
2353 disk_block_cnt <<= raid_map->phys_blk_shift;
2354 }
2355
2356 if (unlikely(disk_block_cnt > 0xffff))
2357 return PQI_RAID_BYPASS_INELIGIBLE;
2358
2359 /* Build the new CDB for the physical disk I/O. */
2360 if (disk_block > 0xffffffff) {
2361 cdb[0] = is_write ? WRITE_16 : READ_16;
2362 cdb[1] = 0;
2363 put_unaligned_be64(disk_block, &cdb[2]);
2364 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2365 cdb[14] = 0;
2366 cdb[15] = 0;
2367 cdb_length = 16;
2368 } else {
2369 cdb[0] = is_write ? WRITE_10 : READ_10;
2370 cdb[1] = 0;
2371 put_unaligned_be32((u32)disk_block, &cdb[2]);
2372 cdb[6] = 0;
2373 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2374 cdb[9] = 0;
2375 cdb_length = 10;
2376 }
2377
2378 if (get_unaligned_le16(&raid_map->flags) &
2379 RAID_MAP_ENCRYPTION_ENABLED) {
2380 pqi_set_encryption_info(&encryption_info, raid_map,
2381 first_block);
2382 encryption_info_ptr = &encryption_info;
2383 } else {
2384 encryption_info_ptr = NULL;
2385 }
2386
2387 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2388 cdb, cdb_length, queue_group, encryption_info_ptr);
2389}
2390
2391#define PQI_STATUS_IDLE 0x0
2392
2393#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2394#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2395
2396#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2397#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2398#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2399#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2400#define PQI_DEVICE_STATE_ERROR 0x4
2401
2402#define PQI_MODE_READY_TIMEOUT_SECS 30
2403#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2404
2405static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2406{
2407 struct pqi_device_registers __iomem *pqi_registers;
2408 unsigned long timeout;
2409 u64 signature;
2410 u8 status;
2411
2412 pqi_registers = ctrl_info->pqi_registers;
2413 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2414
2415 while (1) {
2416 signature = readq(&pqi_registers->signature);
2417 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2418 sizeof(signature)) == 0)
2419 break;
2420 if (time_after(jiffies, timeout)) {
2421 dev_err(&ctrl_info->pci_dev->dev,
2422 "timed out waiting for PQI signature\n");
2423 return -ETIMEDOUT;
2424 }
2425 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2426 }
2427
2428 while (1) {
2429 status = readb(&pqi_registers->function_and_status_code);
2430 if (status == PQI_STATUS_IDLE)
2431 break;
2432 if (time_after(jiffies, timeout)) {
2433 dev_err(&ctrl_info->pci_dev->dev,
2434 "timed out waiting for PQI IDLE\n");
2435 return -ETIMEDOUT;
2436 }
2437 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2438 }
2439
2440 while (1) {
2441 if (readl(&pqi_registers->device_status) ==
2442 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2443 break;
2444 if (time_after(jiffies, timeout)) {
2445 dev_err(&ctrl_info->pci_dev->dev,
2446 "timed out waiting for PQI all registers ready\n");
2447 return -ETIMEDOUT;
2448 }
2449 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2450 }
2451
2452 return 0;
2453}
2454
2455static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2456{
2457 struct pqi_scsi_dev *device;
2458
2459 device = io_request->scmd->device->hostdata;
2460 device->offload_enabled = false;
2461}
2462
d87d5474 2463static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
6c223761
KB
2464{
2465 struct pqi_ctrl_info *ctrl_info;
e58081a7 2466 struct pqi_scsi_dev *device;
6c223761
KB
2467
2468 if (scsi_device_online(sdev)) {
2469 scsi_device_set_state(sdev, SDEV_OFFLINE);
2470 ctrl_info = shost_to_hba(sdev->host);
2471 schedule_delayed_work(&ctrl_info->rescan_work, 0);
e58081a7 2472 device = sdev->hostdata;
d87d5474
KB
2473 dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n",
2474 path, ctrl_info->scsi_host->host_no, device->bus,
e58081a7 2475 device->target, device->lun);
6c223761
KB
2476 }
2477}
2478
2479static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2480{
2481 u8 scsi_status;
2482 u8 host_byte;
2483 struct scsi_cmnd *scmd;
2484 struct pqi_raid_error_info *error_info;
2485 size_t sense_data_length;
2486 int residual_count;
2487 int xfer_count;
2488 struct scsi_sense_hdr sshdr;
2489
2490 scmd = io_request->scmd;
2491 if (!scmd)
2492 return;
2493
2494 error_info = io_request->error_info;
2495 scsi_status = error_info->status;
2496 host_byte = DID_OK;
2497
2498 if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
2499 xfer_count =
2500 get_unaligned_le32(&error_info->data_out_transferred);
2501 residual_count = scsi_bufflen(scmd) - xfer_count;
2502 scsi_set_resid(scmd, residual_count);
2503 if (xfer_count < scmd->underflow)
2504 host_byte = DID_SOFT_ERROR;
2505 }
2506
2507 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2508 if (sense_data_length == 0)
2509 sense_data_length =
2510 get_unaligned_le16(&error_info->response_data_length);
2511 if (sense_data_length) {
2512 if (sense_data_length > sizeof(error_info->data))
2513 sense_data_length = sizeof(error_info->data);
2514
2515 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2516 scsi_normalize_sense(error_info->data,
2517 sense_data_length, &sshdr) &&
2518 sshdr.sense_key == HARDWARE_ERROR &&
2519 sshdr.asc == 0x3e &&
2520 sshdr.ascq == 0x1) {
d87d5474 2521 pqi_take_device_offline(scmd->device, "RAID");
6c223761
KB
2522 host_byte = DID_NO_CONNECT;
2523 }
2524
2525 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2526 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2527 memcpy(scmd->sense_buffer, error_info->data,
2528 sense_data_length);
2529 }
2530
2531 scmd->result = scsi_status;
2532 set_host_byte(scmd, host_byte);
2533}
2534
2535static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2536{
2537 u8 scsi_status;
2538 u8 host_byte;
2539 struct scsi_cmnd *scmd;
2540 struct pqi_aio_error_info *error_info;
2541 size_t sense_data_length;
2542 int residual_count;
2543 int xfer_count;
2544 bool device_offline;
2545
2546 scmd = io_request->scmd;
2547 error_info = io_request->error_info;
2548 host_byte = DID_OK;
2549 sense_data_length = 0;
2550 device_offline = false;
2551
2552 switch (error_info->service_response) {
2553 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2554 scsi_status = error_info->status;
2555 break;
2556 case PQI_AIO_SERV_RESPONSE_FAILURE:
2557 switch (error_info->status) {
2558 case PQI_AIO_STATUS_IO_ABORTED:
2559 scsi_status = SAM_STAT_TASK_ABORTED;
2560 break;
2561 case PQI_AIO_STATUS_UNDERRUN:
2562 scsi_status = SAM_STAT_GOOD;
2563 residual_count = get_unaligned_le32(
2564 &error_info->residual_count);
2565 scsi_set_resid(scmd, residual_count);
2566 xfer_count = scsi_bufflen(scmd) - residual_count;
2567 if (xfer_count < scmd->underflow)
2568 host_byte = DID_SOFT_ERROR;
2569 break;
2570 case PQI_AIO_STATUS_OVERRUN:
2571 scsi_status = SAM_STAT_GOOD;
2572 break;
2573 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2574 pqi_aio_path_disabled(io_request);
2575 scsi_status = SAM_STAT_GOOD;
2576 io_request->status = -EAGAIN;
2577 break;
2578 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2579 case PQI_AIO_STATUS_INVALID_DEVICE:
2580 device_offline = true;
d87d5474 2581 pqi_take_device_offline(scmd->device, "AIO");
6c223761
KB
2582 host_byte = DID_NO_CONNECT;
2583 scsi_status = SAM_STAT_CHECK_CONDITION;
2584 break;
2585 case PQI_AIO_STATUS_IO_ERROR:
2586 default:
2587 scsi_status = SAM_STAT_CHECK_CONDITION;
2588 break;
2589 }
2590 break;
2591 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2592 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2593 scsi_status = SAM_STAT_GOOD;
2594 break;
2595 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2596 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2597 default:
2598 scsi_status = SAM_STAT_CHECK_CONDITION;
2599 break;
2600 }
2601
2602 if (error_info->data_present) {
2603 sense_data_length =
2604 get_unaligned_le16(&error_info->data_length);
2605 if (sense_data_length) {
2606 if (sense_data_length > sizeof(error_info->data))
2607 sense_data_length = sizeof(error_info->data);
2608 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2609 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2610 memcpy(scmd->sense_buffer, error_info->data,
2611 sense_data_length);
2612 }
2613 }
2614
2615 if (device_offline && sense_data_length == 0)
2616 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2617 0x3e, 0x1);
2618
2619 scmd->result = scsi_status;
2620 set_host_byte(scmd, host_byte);
2621}
2622
2623static void pqi_process_io_error(unsigned int iu_type,
2624 struct pqi_io_request *io_request)
2625{
2626 switch (iu_type) {
2627 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2628 pqi_process_raid_io_error(io_request);
2629 break;
2630 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2631 pqi_process_aio_io_error(io_request);
2632 break;
2633 }
2634}
2635
2636static int pqi_interpret_task_management_response(
2637 struct pqi_task_management_response *response)
2638{
2639 int rc;
2640
2641 switch (response->response_code) {
b17f0486
KB
2642 case SOP_TMF_COMPLETE:
2643 case SOP_TMF_FUNCTION_SUCCEEDED:
6c223761
KB
2644 rc = 0;
2645 break;
2646 default:
2647 rc = -EIO;
2648 break;
2649 }
2650
2651 return rc;
2652}
2653
2654static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2655 struct pqi_queue_group *queue_group)
2656{
2657 unsigned int num_responses;
2658 pqi_index_t oq_pi;
2659 pqi_index_t oq_ci;
2660 struct pqi_io_request *io_request;
2661 struct pqi_io_response *response;
2662 u16 request_id;
2663
2664 num_responses = 0;
2665 oq_ci = queue_group->oq_ci_copy;
2666
2667 while (1) {
2668 oq_pi = *queue_group->oq_pi;
2669 if (oq_pi == oq_ci)
2670 break;
2671
2672 num_responses++;
2673 response = queue_group->oq_element_array +
2674 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2675
2676 request_id = get_unaligned_le16(&response->request_id);
2677 WARN_ON(request_id >= ctrl_info->max_io_slots);
2678
2679 io_request = &ctrl_info->io_request_pool[request_id];
2680 WARN_ON(atomic_read(&io_request->refcount) == 0);
2681
2682 switch (response->header.iu_type) {
2683 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2684 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2685 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2686 break;
2687 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2688 io_request->status =
2689 pqi_interpret_task_management_response(
2690 (void *)response);
2691 break;
2692 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2693 pqi_aio_path_disabled(io_request);
2694 io_request->status = -EAGAIN;
2695 break;
2696 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2697 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2698 io_request->error_info = ctrl_info->error_buffer +
2699 (get_unaligned_le16(&response->error_index) *
2700 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2701 pqi_process_io_error(response->header.iu_type,
2702 io_request);
2703 break;
2704 default:
2705 dev_err(&ctrl_info->pci_dev->dev,
2706 "unexpected IU type: 0x%x\n",
2707 response->header.iu_type);
6c223761
KB
2708 break;
2709 }
2710
2711 io_request->io_complete_callback(io_request,
2712 io_request->context);
2713
2714 /*
2715 * Note that the I/O request structure CANNOT BE TOUCHED after
2716 * returning from the I/O completion callback!
2717 */
2718
2719 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2720 }
2721
2722 if (num_responses) {
2723 queue_group->oq_ci_copy = oq_ci;
2724 writel(oq_ci, queue_group->oq_ci);
2725 }
2726
2727 return num_responses;
2728}
2729
2730static inline unsigned int pqi_num_elements_free(unsigned int pi,
df7a1fcf 2731 unsigned int ci, unsigned int elements_in_queue)
6c223761
KB
2732{
2733 unsigned int num_elements_used;
2734
2735 if (pi >= ci)
2736 num_elements_used = pi - ci;
2737 else
2738 num_elements_used = elements_in_queue - ci + pi;
2739
2740 return elements_in_queue - num_elements_used - 1;
2741}
2742
98f87667 2743static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
2744 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2745{
2746 pqi_index_t iq_pi;
2747 pqi_index_t iq_ci;
2748 unsigned long flags;
2749 void *next_element;
6c223761
KB
2750 struct pqi_queue_group *queue_group;
2751
2752 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2753 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2754
6c223761
KB
2755 while (1) {
2756 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2757
2758 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2759 iq_ci = *queue_group->iq_ci[RAID_PATH];
2760
2761 if (pqi_num_elements_free(iq_pi, iq_ci,
2762 ctrl_info->num_elements_per_iq))
2763 break;
2764
2765 spin_unlock_irqrestore(
2766 &queue_group->submit_lock[RAID_PATH], flags);
2767
98f87667 2768 if (pqi_ctrl_offline(ctrl_info))
6c223761 2769 return;
6c223761
KB
2770 }
2771
2772 next_element = queue_group->iq_element_array[RAID_PATH] +
2773 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2774
2775 memcpy(next_element, iu, iu_length);
2776
2777 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
6c223761
KB
2778 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2779
2780 /*
2781 * This write notifies the controller that an IU is available to be
2782 * processed.
2783 */
2784 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2785
2786 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
6c223761
KB
2787}
2788
2789static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2790 struct pqi_event *event)
2791{
2792 struct pqi_event_acknowledge_request request;
2793
2794 memset(&request, 0, sizeof(request));
2795
2796 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2797 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2798 &request.header.iu_length);
2799 request.event_type = event->event_type;
2800 request.event_id = event->event_id;
2801 request.additional_event_id = event->additional_event_id;
2802
98f87667 2803 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
6c223761
KB
2804}
2805
2806static void pqi_event_worker(struct work_struct *work)
2807{
2808 unsigned int i;
2809 struct pqi_ctrl_info *ctrl_info;
6a50d6ad 2810 struct pqi_event *event;
6c223761
KB
2811
2812 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2813
7561a7e4
KB
2814 pqi_ctrl_busy(ctrl_info);
2815 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
2816
6a50d6ad 2817 event = ctrl_info->events;
6c223761 2818 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
6a50d6ad
KB
2819 if (event->pending) {
2820 event->pending = false;
2821 pqi_acknowledge_event(ctrl_info, event);
6c223761 2822 }
6a50d6ad 2823 event++;
6c223761
KB
2824 }
2825
7561a7e4
KB
2826 pqi_ctrl_unbusy(ctrl_info);
2827
2828 pqi_schedule_rescan_worker(ctrl_info);
6c223761
KB
2829}
2830
2831static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
2832{
2833 unsigned int i;
2834 unsigned int path;
2835 struct pqi_queue_group *queue_group;
2836 unsigned long flags;
2837 struct pqi_io_request *io_request;
2838 struct pqi_io_request *next;
2839 struct scsi_cmnd *scmd;
2840
2841 ctrl_info->controller_online = false;
2842 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
5b0fba0f 2843 sis_shutdown_ctrl(ctrl_info);
6c223761
KB
2844
2845 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2846 queue_group = &ctrl_info->queue_groups[i];
2847
2848 for (path = 0; path < 2; path++) {
2849 spin_lock_irqsave(
2850 &queue_group->submit_lock[path], flags);
2851
2852 list_for_each_entry_safe(io_request, next,
2853 &queue_group->request_list[path],
2854 request_list_entry) {
2855
2856 scmd = io_request->scmd;
2857 if (scmd) {
2858 set_host_byte(scmd, DID_NO_CONNECT);
2859 pqi_scsi_done(scmd);
2860 }
2861
2862 list_del(&io_request->request_list_entry);
2863 }
2864
2865 spin_unlock_irqrestore(
2866 &queue_group->submit_lock[path], flags);
2867 }
2868 }
2869}
2870
98f87667 2871#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
6c223761
KB
2872
2873static void pqi_heartbeat_timer_handler(unsigned long data)
2874{
2875 int num_interrupts;
98f87667 2876 u32 heartbeat_count;
6c223761
KB
2877 struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2878
98f87667
KB
2879 pqi_check_ctrl_health(ctrl_info);
2880 if (pqi_ctrl_offline(ctrl_info))
061ef06a
KB
2881 return;
2882
6c223761 2883 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
98f87667 2884 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
6c223761
KB
2885
2886 if (num_interrupts == ctrl_info->previous_num_interrupts) {
98f87667
KB
2887 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
2888 dev_err(&ctrl_info->pci_dev->dev,
2889 "no heartbeat detected - last heartbeat count: %u\n",
2890 heartbeat_count);
6c223761
KB
2891 pqi_take_ctrl_offline(ctrl_info);
2892 return;
2893 }
6c223761 2894 } else {
98f87667 2895 ctrl_info->previous_num_interrupts = num_interrupts;
6c223761
KB
2896 }
2897
98f87667 2898 ctrl_info->previous_heartbeat_count = heartbeat_count;
6c223761
KB
2899 mod_timer(&ctrl_info->heartbeat_timer,
2900 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2901}
2902
2903static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2904{
98f87667
KB
2905 if (!ctrl_info->heartbeat_counter)
2906 return;
2907
6c223761
KB
2908 ctrl_info->previous_num_interrupts =
2909 atomic_read(&ctrl_info->num_interrupts);
98f87667
KB
2910 ctrl_info->previous_heartbeat_count =
2911 pqi_read_heartbeat_counter(ctrl_info);
6c223761 2912
6c223761
KB
2913 ctrl_info->heartbeat_timer.expires =
2914 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2915 ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2916 ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
061ef06a 2917 add_timer(&ctrl_info->heartbeat_timer);
6c223761
KB
2918}
2919
2920static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2921{
98f87667 2922 del_timer_sync(&ctrl_info->heartbeat_timer);
6c223761
KB
2923}
2924
6a50d6ad 2925static inline int pqi_event_type_to_event_index(unsigned int event_type)
6c223761
KB
2926{
2927 int index;
2928
6a50d6ad
KB
2929 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
2930 if (event_type == pqi_supported_event_types[index])
2931 return index;
6c223761 2932
6a50d6ad
KB
2933 return -1;
2934}
2935
2936static inline bool pqi_is_supported_event(unsigned int event_type)
2937{
2938 return pqi_event_type_to_event_index(event_type) != -1;
6c223761
KB
2939}
2940
2941static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2942{
2943 unsigned int num_events;
2944 pqi_index_t oq_pi;
2945 pqi_index_t oq_ci;
2946 struct pqi_event_queue *event_queue;
2947 struct pqi_event_response *response;
6a50d6ad 2948 struct pqi_event *event;
6c223761
KB
2949 int event_index;
2950
2951 event_queue = &ctrl_info->event_queue;
2952 num_events = 0;
6c223761
KB
2953 oq_ci = event_queue->oq_ci_copy;
2954
2955 while (1) {
2956 oq_pi = *event_queue->oq_pi;
2957 if (oq_pi == oq_ci)
2958 break;
2959
2960 num_events++;
2961 response = event_queue->oq_element_array +
2962 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2963
2964 event_index =
2965 pqi_event_type_to_event_index(response->event_type);
2966
2967 if (event_index >= 0) {
2968 if (response->request_acknowlege) {
6a50d6ad
KB
2969 event = &ctrl_info->events[event_index];
2970 event->pending = true;
2971 event->event_type = response->event_type;
2972 event->event_id = response->event_id;
2973 event->additional_event_id =
6c223761 2974 response->additional_event_id;
6c223761
KB
2975 }
2976 }
2977
2978 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2979 }
2980
2981 if (num_events) {
2982 event_queue->oq_ci_copy = oq_ci;
2983 writel(oq_ci, event_queue->oq_ci);
98f87667 2984 schedule_work(&ctrl_info->event_work);
6c223761
KB
2985 }
2986
2987 return num_events;
2988}
2989
061ef06a
KB
2990#define PQI_LEGACY_INTX_MASK 0x1
2991
2992static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
2993 bool enable_intx)
2994{
2995 u32 intx_mask;
2996 struct pqi_device_registers __iomem *pqi_registers;
2997 volatile void __iomem *register_addr;
2998
2999 pqi_registers = ctrl_info->pqi_registers;
3000
3001 if (enable_intx)
3002 register_addr = &pqi_registers->legacy_intx_mask_clear;
3003 else
3004 register_addr = &pqi_registers->legacy_intx_mask_set;
3005
3006 intx_mask = readl(register_addr);
3007 intx_mask |= PQI_LEGACY_INTX_MASK;
3008 writel(intx_mask, register_addr);
3009}
3010
3011static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3012 enum pqi_irq_mode new_mode)
3013{
3014 switch (ctrl_info->irq_mode) {
3015 case IRQ_MODE_MSIX:
3016 switch (new_mode) {
3017 case IRQ_MODE_MSIX:
3018 break;
3019 case IRQ_MODE_INTX:
3020 pqi_configure_legacy_intx(ctrl_info, true);
3021 sis_disable_msix(ctrl_info);
3022 sis_enable_intx(ctrl_info);
3023 break;
3024 case IRQ_MODE_NONE:
3025 sis_disable_msix(ctrl_info);
3026 break;
3027 }
3028 break;
3029 case IRQ_MODE_INTX:
3030 switch (new_mode) {
3031 case IRQ_MODE_MSIX:
3032 pqi_configure_legacy_intx(ctrl_info, false);
3033 sis_disable_intx(ctrl_info);
3034 sis_enable_msix(ctrl_info);
3035 break;
3036 case IRQ_MODE_INTX:
3037 break;
3038 case IRQ_MODE_NONE:
3039 pqi_configure_legacy_intx(ctrl_info, false);
3040 sis_disable_intx(ctrl_info);
3041 break;
3042 }
3043 break;
3044 case IRQ_MODE_NONE:
3045 switch (new_mode) {
3046 case IRQ_MODE_MSIX:
3047 sis_enable_msix(ctrl_info);
3048 break;
3049 case IRQ_MODE_INTX:
3050 pqi_configure_legacy_intx(ctrl_info, true);
3051 sis_enable_intx(ctrl_info);
3052 break;
3053 case IRQ_MODE_NONE:
3054 break;
3055 }
3056 break;
3057 }
3058
3059 ctrl_info->irq_mode = new_mode;
3060}
3061
3062#define PQI_LEGACY_INTX_PENDING 0x1
3063
3064static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3065{
3066 bool valid_irq;
3067 u32 intx_status;
3068
3069 switch (ctrl_info->irq_mode) {
3070 case IRQ_MODE_MSIX:
3071 valid_irq = true;
3072 break;
3073 case IRQ_MODE_INTX:
3074 intx_status =
3075 readl(&ctrl_info->pqi_registers->legacy_intx_status);
3076 if (intx_status & PQI_LEGACY_INTX_PENDING)
3077 valid_irq = true;
3078 else
3079 valid_irq = false;
3080 break;
3081 case IRQ_MODE_NONE:
3082 default:
3083 valid_irq = false;
3084 break;
3085 }
3086
3087 return valid_irq;
3088}
3089
6c223761
KB
3090static irqreturn_t pqi_irq_handler(int irq, void *data)
3091{
3092 struct pqi_ctrl_info *ctrl_info;
3093 struct pqi_queue_group *queue_group;
3094 unsigned int num_responses_handled;
3095
3096 queue_group = data;
3097 ctrl_info = queue_group->ctrl_info;
3098
061ef06a 3099 if (!pqi_is_valid_irq(ctrl_info))
6c223761
KB
3100 return IRQ_NONE;
3101
3102 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3103
3104 if (irq == ctrl_info->event_irq)
3105 num_responses_handled += pqi_process_event_intr(ctrl_info);
3106
3107 if (num_responses_handled)
3108 atomic_inc(&ctrl_info->num_interrupts);
3109
3110 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3111 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3112
3113 return IRQ_HANDLED;
3114}
3115
3116static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3117{
d91d7820 3118 struct pci_dev *pci_dev = ctrl_info->pci_dev;
6c223761
KB
3119 int i;
3120 int rc;
3121
d91d7820 3122 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
6c223761
KB
3123
3124 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
d91d7820 3125 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
52198226 3126 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
6c223761 3127 if (rc) {
d91d7820 3128 dev_err(&pci_dev->dev,
6c223761 3129 "irq %u init failed with error %d\n",
d91d7820 3130 pci_irq_vector(pci_dev, i), rc);
6c223761
KB
3131 return rc;
3132 }
3133 ctrl_info->num_msix_vectors_initialized++;
3134 }
3135
3136 return 0;
3137}
3138
98bf061b
KB
3139static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3140{
3141 int i;
3142
3143 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3144 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3145 &ctrl_info->queue_groups[i]);
3146
3147 ctrl_info->num_msix_vectors_initialized = 0;
3148}
3149
6c223761
KB
3150static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3151{
98bf061b 3152 int num_vectors_enabled;
6c223761 3153
98bf061b 3154 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
52198226
CH
3155 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3156 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
98bf061b 3157 if (num_vectors_enabled < 0) {
6c223761 3158 dev_err(&ctrl_info->pci_dev->dev,
98bf061b
KB
3159 "MSI-X init failed with error %d\n",
3160 num_vectors_enabled);
3161 return num_vectors_enabled;
6c223761
KB
3162 }
3163
98bf061b 3164 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
061ef06a 3165 ctrl_info->irq_mode = IRQ_MODE_MSIX;
6c223761
KB
3166 return 0;
3167}
3168
98bf061b
KB
3169static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3170{
3171 if (ctrl_info->num_msix_vectors_enabled) {
3172 pci_free_irq_vectors(ctrl_info->pci_dev);
3173 ctrl_info->num_msix_vectors_enabled = 0;
3174 }
3175}
3176
6c223761
KB
3177static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3178{
3179 unsigned int i;
3180 size_t alloc_length;
3181 size_t element_array_length_per_iq;
3182 size_t element_array_length_per_oq;
3183 void *element_array;
3184 void *next_queue_index;
3185 void *aligned_pointer;
3186 unsigned int num_inbound_queues;
3187 unsigned int num_outbound_queues;
3188 unsigned int num_queue_indexes;
3189 struct pqi_queue_group *queue_group;
3190
3191 element_array_length_per_iq =
3192 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3193 ctrl_info->num_elements_per_iq;
3194 element_array_length_per_oq =
3195 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3196 ctrl_info->num_elements_per_oq;
3197 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3198 num_outbound_queues = ctrl_info->num_queue_groups;
3199 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3200
3201 aligned_pointer = NULL;
3202
3203 for (i = 0; i < num_inbound_queues; i++) {
3204 aligned_pointer = PTR_ALIGN(aligned_pointer,
3205 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3206 aligned_pointer += element_array_length_per_iq;
3207 }
3208
3209 for (i = 0; i < num_outbound_queues; i++) {
3210 aligned_pointer = PTR_ALIGN(aligned_pointer,
3211 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3212 aligned_pointer += element_array_length_per_oq;
3213 }
3214
3215 aligned_pointer = PTR_ALIGN(aligned_pointer,
3216 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3217 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3218 PQI_EVENT_OQ_ELEMENT_LENGTH;
3219
3220 for (i = 0; i < num_queue_indexes; i++) {
3221 aligned_pointer = PTR_ALIGN(aligned_pointer,
3222 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3223 aligned_pointer += sizeof(pqi_index_t);
3224 }
3225
3226 alloc_length = (size_t)aligned_pointer +
3227 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3228
e1d213bd
KB
3229 alloc_length += PQI_EXTRA_SGL_MEMORY;
3230
6c223761
KB
3231 ctrl_info->queue_memory_base =
3232 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3233 alloc_length,
3234 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3235
d87d5474 3236 if (!ctrl_info->queue_memory_base)
6c223761 3237 return -ENOMEM;
6c223761
KB
3238
3239 ctrl_info->queue_memory_length = alloc_length;
3240
3241 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3242 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3243
3244 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3245 queue_group = &ctrl_info->queue_groups[i];
3246 queue_group->iq_element_array[RAID_PATH] = element_array;
3247 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3248 ctrl_info->queue_memory_base_dma_handle +
3249 (element_array - ctrl_info->queue_memory_base);
3250 element_array += element_array_length_per_iq;
3251 element_array = PTR_ALIGN(element_array,
3252 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3253 queue_group->iq_element_array[AIO_PATH] = element_array;
3254 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3255 ctrl_info->queue_memory_base_dma_handle +
3256 (element_array - ctrl_info->queue_memory_base);
3257 element_array += element_array_length_per_iq;
3258 element_array = PTR_ALIGN(element_array,
3259 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3260 }
3261
3262 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3263 queue_group = &ctrl_info->queue_groups[i];
3264 queue_group->oq_element_array = element_array;
3265 queue_group->oq_element_array_bus_addr =
3266 ctrl_info->queue_memory_base_dma_handle +
3267 (element_array - ctrl_info->queue_memory_base);
3268 element_array += element_array_length_per_oq;
3269 element_array = PTR_ALIGN(element_array,
3270 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3271 }
3272
3273 ctrl_info->event_queue.oq_element_array = element_array;
3274 ctrl_info->event_queue.oq_element_array_bus_addr =
3275 ctrl_info->queue_memory_base_dma_handle +
3276 (element_array - ctrl_info->queue_memory_base);
3277 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3278 PQI_EVENT_OQ_ELEMENT_LENGTH;
3279
3280 next_queue_index = PTR_ALIGN(element_array,
3281 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3282
3283 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3284 queue_group = &ctrl_info->queue_groups[i];
3285 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3286 queue_group->iq_ci_bus_addr[RAID_PATH] =
3287 ctrl_info->queue_memory_base_dma_handle +
3288 (next_queue_index - ctrl_info->queue_memory_base);
3289 next_queue_index += sizeof(pqi_index_t);
3290 next_queue_index = PTR_ALIGN(next_queue_index,
3291 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3292 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3293 queue_group->iq_ci_bus_addr[AIO_PATH] =
3294 ctrl_info->queue_memory_base_dma_handle +
3295 (next_queue_index - ctrl_info->queue_memory_base);
3296 next_queue_index += sizeof(pqi_index_t);
3297 next_queue_index = PTR_ALIGN(next_queue_index,
3298 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3299 queue_group->oq_pi = next_queue_index;
3300 queue_group->oq_pi_bus_addr =
3301 ctrl_info->queue_memory_base_dma_handle +
3302 (next_queue_index - ctrl_info->queue_memory_base);
3303 next_queue_index += sizeof(pqi_index_t);
3304 next_queue_index = PTR_ALIGN(next_queue_index,
3305 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3306 }
3307
3308 ctrl_info->event_queue.oq_pi = next_queue_index;
3309 ctrl_info->event_queue.oq_pi_bus_addr =
3310 ctrl_info->queue_memory_base_dma_handle +
3311 (next_queue_index - ctrl_info->queue_memory_base);
3312
3313 return 0;
3314}
3315
3316static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3317{
3318 unsigned int i;
3319 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3320 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3321
3322 /*
3323 * Initialize the backpointers to the controller structure in
3324 * each operational queue group structure.
3325 */
3326 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3327 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3328
3329 /*
3330 * Assign IDs to all operational queues. Note that the IDs
3331 * assigned to operational IQs are independent of the IDs
3332 * assigned to operational OQs.
3333 */
3334 ctrl_info->event_queue.oq_id = next_oq_id++;
3335 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3336 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3337 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3338 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3339 }
3340
3341 /*
3342 * Assign MSI-X table entry indexes to all queues. Note that the
3343 * interrupt for the event queue is shared with the first queue group.
3344 */
3345 ctrl_info->event_queue.int_msg_num = 0;
3346 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3347 ctrl_info->queue_groups[i].int_msg_num = i;
3348
3349 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3350 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3351 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3352 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3353 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3354 }
3355}
3356
3357static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3358{
3359 size_t alloc_length;
3360 struct pqi_admin_queues_aligned *admin_queues_aligned;
3361 struct pqi_admin_queues *admin_queues;
3362
3363 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3364 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3365
3366 ctrl_info->admin_queue_memory_base =
3367 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3368 alloc_length,
3369 &ctrl_info->admin_queue_memory_base_dma_handle,
3370 GFP_KERNEL);
3371
3372 if (!ctrl_info->admin_queue_memory_base)
3373 return -ENOMEM;
3374
3375 ctrl_info->admin_queue_memory_length = alloc_length;
3376
3377 admin_queues = &ctrl_info->admin_queues;
3378 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3379 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3380 admin_queues->iq_element_array =
3381 &admin_queues_aligned->iq_element_array;
3382 admin_queues->oq_element_array =
3383 &admin_queues_aligned->oq_element_array;
3384 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3385 admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3386
3387 admin_queues->iq_element_array_bus_addr =
3388 ctrl_info->admin_queue_memory_base_dma_handle +
3389 (admin_queues->iq_element_array -
3390 ctrl_info->admin_queue_memory_base);
3391 admin_queues->oq_element_array_bus_addr =
3392 ctrl_info->admin_queue_memory_base_dma_handle +
3393 (admin_queues->oq_element_array -
3394 ctrl_info->admin_queue_memory_base);
3395 admin_queues->iq_ci_bus_addr =
3396 ctrl_info->admin_queue_memory_base_dma_handle +
3397 ((void *)admin_queues->iq_ci -
3398 ctrl_info->admin_queue_memory_base);
3399 admin_queues->oq_pi_bus_addr =
3400 ctrl_info->admin_queue_memory_base_dma_handle +
3401 ((void *)admin_queues->oq_pi -
3402 ctrl_info->admin_queue_memory_base);
3403
3404 return 0;
3405}
3406
3407#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3408#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3409
3410static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3411{
3412 struct pqi_device_registers __iomem *pqi_registers;
3413 struct pqi_admin_queues *admin_queues;
3414 unsigned long timeout;
3415 u8 status;
3416 u32 reg;
3417
3418 pqi_registers = ctrl_info->pqi_registers;
3419 admin_queues = &ctrl_info->admin_queues;
3420
3421 writeq((u64)admin_queues->iq_element_array_bus_addr,
3422 &pqi_registers->admin_iq_element_array_addr);
3423 writeq((u64)admin_queues->oq_element_array_bus_addr,
3424 &pqi_registers->admin_oq_element_array_addr);
3425 writeq((u64)admin_queues->iq_ci_bus_addr,
3426 &pqi_registers->admin_iq_ci_addr);
3427 writeq((u64)admin_queues->oq_pi_bus_addr,
3428 &pqi_registers->admin_oq_pi_addr);
3429
3430 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3431 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3432 (admin_queues->int_msg_num << 16);
3433 writel(reg, &pqi_registers->admin_iq_num_elements);
3434 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3435 &pqi_registers->function_and_status_code);
3436
3437 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3438 while (1) {
3439 status = readb(&pqi_registers->function_and_status_code);
3440 if (status == PQI_STATUS_IDLE)
3441 break;
3442 if (time_after(jiffies, timeout))
3443 return -ETIMEDOUT;
3444 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3445 }
3446
3447 /*
3448 * The offset registers are not initialized to the correct
3449 * offsets until *after* the create admin queue pair command
3450 * completes successfully.
3451 */
3452 admin_queues->iq_pi = ctrl_info->iomem_base +
3453 PQI_DEVICE_REGISTERS_OFFSET +
3454 readq(&pqi_registers->admin_iq_pi_offset);
3455 admin_queues->oq_ci = ctrl_info->iomem_base +
3456 PQI_DEVICE_REGISTERS_OFFSET +
3457 readq(&pqi_registers->admin_oq_ci_offset);
3458
3459 return 0;
3460}
3461
3462static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3463 struct pqi_general_admin_request *request)
3464{
3465 struct pqi_admin_queues *admin_queues;
3466 void *next_element;
3467 pqi_index_t iq_pi;
3468
3469 admin_queues = &ctrl_info->admin_queues;
3470 iq_pi = admin_queues->iq_pi_copy;
3471
3472 next_element = admin_queues->iq_element_array +
3473 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3474
3475 memcpy(next_element, request, sizeof(*request));
3476
3477 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3478 admin_queues->iq_pi_copy = iq_pi;
3479
3480 /*
3481 * This write notifies the controller that an IU is available to be
3482 * processed.
3483 */
3484 writel(iq_pi, admin_queues->iq_pi);
3485}
3486
3487static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3488 struct pqi_general_admin_response *response)
3489{
3490 struct pqi_admin_queues *admin_queues;
3491 pqi_index_t oq_pi;
3492 pqi_index_t oq_ci;
3493 unsigned long timeout;
3494
3495 admin_queues = &ctrl_info->admin_queues;
3496 oq_ci = admin_queues->oq_ci_copy;
3497
3498 timeout = (3 * HZ) + jiffies;
3499
3500 while (1) {
3501 oq_pi = *admin_queues->oq_pi;
3502 if (oq_pi != oq_ci)
3503 break;
3504 if (time_after(jiffies, timeout)) {
3505 dev_err(&ctrl_info->pci_dev->dev,
3506 "timed out waiting for admin response\n");
3507 return -ETIMEDOUT;
3508 }
3509 usleep_range(1000, 2000);
3510 }
3511
3512 memcpy(response, admin_queues->oq_element_array +
3513 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3514
3515 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3516 admin_queues->oq_ci_copy = oq_ci;
3517 writel(oq_ci, admin_queues->oq_ci);
3518
3519 return 0;
3520}
3521
3522static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3523 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3524 struct pqi_io_request *io_request)
3525{
3526 struct pqi_io_request *next;
3527 void *next_element;
3528 pqi_index_t iq_pi;
3529 pqi_index_t iq_ci;
3530 size_t iu_length;
3531 unsigned long flags;
3532 unsigned int num_elements_needed;
3533 unsigned int num_elements_to_end_of_queue;
3534 size_t copy_count;
3535 struct pqi_iu_header *request;
3536
3537 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3538
3539 if (io_request)
3540 list_add_tail(&io_request->request_list_entry,
3541 &queue_group->request_list[path]);
3542
3543 iq_pi = queue_group->iq_pi_copy[path];
3544
3545 list_for_each_entry_safe(io_request, next,
3546 &queue_group->request_list[path], request_list_entry) {
3547
3548 request = io_request->iu;
3549
3550 iu_length = get_unaligned_le16(&request->iu_length) +
3551 PQI_REQUEST_HEADER_LENGTH;
3552 num_elements_needed =
3553 DIV_ROUND_UP(iu_length,
3554 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3555
3556 iq_ci = *queue_group->iq_ci[path];
3557
3558 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3559 ctrl_info->num_elements_per_iq))
3560 break;
3561
3562 put_unaligned_le16(queue_group->oq_id,
3563 &request->response_queue_id);
3564
3565 next_element = queue_group->iq_element_array[path] +
3566 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3567
3568 num_elements_to_end_of_queue =
3569 ctrl_info->num_elements_per_iq - iq_pi;
3570
3571 if (num_elements_needed <= num_elements_to_end_of_queue) {
3572 memcpy(next_element, request, iu_length);
3573 } else {
3574 copy_count = num_elements_to_end_of_queue *
3575 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3576 memcpy(next_element, request, copy_count);
3577 memcpy(queue_group->iq_element_array[path],
3578 (u8 *)request + copy_count,
3579 iu_length - copy_count);
3580 }
3581
3582 iq_pi = (iq_pi + num_elements_needed) %
3583 ctrl_info->num_elements_per_iq;
3584
3585 list_del(&io_request->request_list_entry);
3586 }
3587
3588 if (iq_pi != queue_group->iq_pi_copy[path]) {
3589 queue_group->iq_pi_copy[path] = iq_pi;
3590 /*
3591 * This write notifies the controller that one or more IUs are
3592 * available to be processed.
3593 */
3594 writel(iq_pi, queue_group->iq_pi[path]);
3595 }
3596
3597 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3598}
3599
1f37e992
KB
3600#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
3601
3602static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3603 struct completion *wait)
3604{
3605 int rc;
1f37e992
KB
3606
3607 while (1) {
3608 if (wait_for_completion_io_timeout(wait,
3609 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
3610 rc = 0;
3611 break;
3612 }
3613
3614 pqi_check_ctrl_health(ctrl_info);
3615 if (pqi_ctrl_offline(ctrl_info)) {
3616 rc = -ENXIO;
3617 break;
3618 }
1f37e992
KB
3619 }
3620
3621 return rc;
3622}
3623
6c223761
KB
3624static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3625 void *context)
3626{
3627 struct completion *waiting = context;
3628
3629 complete(waiting);
3630}
3631
3632static int pqi_submit_raid_request_synchronous_with_io_request(
3633 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3634 unsigned long timeout_msecs)
3635{
3636 int rc = 0;
3637 DECLARE_COMPLETION_ONSTACK(wait);
3638
3639 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3640 io_request->context = &wait;
3641
3642 pqi_start_io(ctrl_info,
3643 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3644 io_request);
3645
3646 if (timeout_msecs == NO_TIMEOUT) {
1f37e992 3647 pqi_wait_for_completion_io(ctrl_info, &wait);
6c223761
KB
3648 } else {
3649 if (!wait_for_completion_io_timeout(&wait,
3650 msecs_to_jiffies(timeout_msecs))) {
3651 dev_warn(&ctrl_info->pci_dev->dev,
3652 "command timed out\n");
3653 rc = -ETIMEDOUT;
3654 }
3655 }
3656
3657 return rc;
3658}
3659
3660static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3661 struct pqi_iu_header *request, unsigned int flags,
3662 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3663{
3664 int rc;
3665 struct pqi_io_request *io_request;
3666 unsigned long start_jiffies;
3667 unsigned long msecs_blocked;
3668 size_t iu_length;
3669
3670 /*
3671 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3672 * are mutually exclusive.
3673 */
3674
3675 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3676 if (down_interruptible(&ctrl_info->sync_request_sem))
3677 return -ERESTARTSYS;
3678 } else {
3679 if (timeout_msecs == NO_TIMEOUT) {
3680 down(&ctrl_info->sync_request_sem);
3681 } else {
3682 start_jiffies = jiffies;
3683 if (down_timeout(&ctrl_info->sync_request_sem,
3684 msecs_to_jiffies(timeout_msecs)))
3685 return -ETIMEDOUT;
3686 msecs_blocked =
3687 jiffies_to_msecs(jiffies - start_jiffies);
3688 if (msecs_blocked >= timeout_msecs)
3689 return -ETIMEDOUT;
3690 timeout_msecs -= msecs_blocked;
3691 }
3692 }
3693
7561a7e4
KB
3694 pqi_ctrl_busy(ctrl_info);
3695 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
3696 if (timeout_msecs == 0) {
3697 rc = -ETIMEDOUT;
3698 goto out;
3699 }
3700
6c223761
KB
3701 io_request = pqi_alloc_io_request(ctrl_info);
3702
3703 put_unaligned_le16(io_request->index,
3704 &(((struct pqi_raid_path_request *)request)->request_id));
3705
3706 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3707 ((struct pqi_raid_path_request *)request)->error_index =
3708 ((struct pqi_raid_path_request *)request)->request_id;
3709
3710 iu_length = get_unaligned_le16(&request->iu_length) +
3711 PQI_REQUEST_HEADER_LENGTH;
3712 memcpy(io_request->iu, request, iu_length);
3713
3714 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3715 io_request, timeout_msecs);
3716
3717 if (error_info) {
3718 if (io_request->error_info)
3719 memcpy(error_info, io_request->error_info,
3720 sizeof(*error_info));
3721 else
3722 memset(error_info, 0, sizeof(*error_info));
3723 } else if (rc == 0 && io_request->error_info) {
3724 u8 scsi_status;
3725 struct pqi_raid_error_info *raid_error_info;
3726
3727 raid_error_info = io_request->error_info;
3728 scsi_status = raid_error_info->status;
3729
3730 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3731 raid_error_info->data_out_result ==
3732 PQI_DATA_IN_OUT_UNDERFLOW)
3733 scsi_status = SAM_STAT_GOOD;
3734
3735 if (scsi_status != SAM_STAT_GOOD)
3736 rc = -EIO;
3737 }
3738
3739 pqi_free_io_request(io_request);
3740
7561a7e4
KB
3741out:
3742 pqi_ctrl_unbusy(ctrl_info);
6c223761
KB
3743 up(&ctrl_info->sync_request_sem);
3744
3745 return rc;
3746}
3747
3748static int pqi_validate_admin_response(
3749 struct pqi_general_admin_response *response, u8 expected_function_code)
3750{
3751 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3752 return -EINVAL;
3753
3754 if (get_unaligned_le16(&response->header.iu_length) !=
3755 PQI_GENERAL_ADMIN_IU_LENGTH)
3756 return -EINVAL;
3757
3758 if (response->function_code != expected_function_code)
3759 return -EINVAL;
3760
3761 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3762 return -EINVAL;
3763
3764 return 0;
3765}
3766
3767static int pqi_submit_admin_request_synchronous(
3768 struct pqi_ctrl_info *ctrl_info,
3769 struct pqi_general_admin_request *request,
3770 struct pqi_general_admin_response *response)
3771{
3772 int rc;
3773
3774 pqi_submit_admin_request(ctrl_info, request);
3775
3776 rc = pqi_poll_for_admin_response(ctrl_info, response);
3777
3778 if (rc == 0)
3779 rc = pqi_validate_admin_response(response,
3780 request->function_code);
3781
3782 return rc;
3783}
3784
3785static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3786{
3787 int rc;
3788 struct pqi_general_admin_request request;
3789 struct pqi_general_admin_response response;
3790 struct pqi_device_capability *capability;
3791 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3792
3793 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3794 if (!capability)
3795 return -ENOMEM;
3796
3797 memset(&request, 0, sizeof(request));
3798
3799 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3800 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3801 &request.header.iu_length);
3802 request.function_code =
3803 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3804 put_unaligned_le32(sizeof(*capability),
3805 &request.data.report_device_capability.buffer_length);
3806
3807 rc = pqi_map_single(ctrl_info->pci_dev,
3808 &request.data.report_device_capability.sg_descriptor,
3809 capability, sizeof(*capability),
3810 PCI_DMA_FROMDEVICE);
3811 if (rc)
3812 goto out;
3813
3814 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3815 &response);
3816
3817 pqi_pci_unmap(ctrl_info->pci_dev,
3818 &request.data.report_device_capability.sg_descriptor, 1,
3819 PCI_DMA_FROMDEVICE);
3820
3821 if (rc)
3822 goto out;
3823
3824 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3825 rc = -EIO;
3826 goto out;
3827 }
3828
3829 ctrl_info->max_inbound_queues =
3830 get_unaligned_le16(&capability->max_inbound_queues);
3831 ctrl_info->max_elements_per_iq =
3832 get_unaligned_le16(&capability->max_elements_per_iq);
3833 ctrl_info->max_iq_element_length =
3834 get_unaligned_le16(&capability->max_iq_element_length)
3835 * 16;
3836 ctrl_info->max_outbound_queues =
3837 get_unaligned_le16(&capability->max_outbound_queues);
3838 ctrl_info->max_elements_per_oq =
3839 get_unaligned_le16(&capability->max_elements_per_oq);
3840 ctrl_info->max_oq_element_length =
3841 get_unaligned_le16(&capability->max_oq_element_length)
3842 * 16;
3843
3844 sop_iu_layer_descriptor =
3845 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3846
3847 ctrl_info->max_inbound_iu_length_per_firmware =
3848 get_unaligned_le16(
3849 &sop_iu_layer_descriptor->max_inbound_iu_length);
3850 ctrl_info->inbound_spanning_supported =
3851 sop_iu_layer_descriptor->inbound_spanning_supported;
3852 ctrl_info->outbound_spanning_supported =
3853 sop_iu_layer_descriptor->outbound_spanning_supported;
3854
3855out:
3856 kfree(capability);
3857
3858 return rc;
3859}
3860
3861static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3862{
3863 if (ctrl_info->max_iq_element_length <
3864 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3865 dev_err(&ctrl_info->pci_dev->dev,
3866 "max. inbound queue element length of %d is less than the required length of %d\n",
3867 ctrl_info->max_iq_element_length,
3868 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3869 return -EINVAL;
3870 }
3871
3872 if (ctrl_info->max_oq_element_length <
3873 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3874 dev_err(&ctrl_info->pci_dev->dev,
3875 "max. outbound queue element length of %d is less than the required length of %d\n",
3876 ctrl_info->max_oq_element_length,
3877 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3878 return -EINVAL;
3879 }
3880
3881 if (ctrl_info->max_inbound_iu_length_per_firmware <
3882 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3883 dev_err(&ctrl_info->pci_dev->dev,
3884 "max. inbound IU length of %u is less than the min. required length of %d\n",
3885 ctrl_info->max_inbound_iu_length_per_firmware,
3886 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3887 return -EINVAL;
3888 }
3889
77668f41
KB
3890 if (!ctrl_info->inbound_spanning_supported) {
3891 dev_err(&ctrl_info->pci_dev->dev,
3892 "the controller does not support inbound spanning\n");
3893 return -EINVAL;
3894 }
3895
3896 if (ctrl_info->outbound_spanning_supported) {
3897 dev_err(&ctrl_info->pci_dev->dev,
3898 "the controller supports outbound spanning but this driver does not\n");
3899 return -EINVAL;
3900 }
3901
6c223761
KB
3902 return 0;
3903}
3904
3905static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3906 bool inbound_queue, u16 queue_id)
3907{
3908 struct pqi_general_admin_request request;
3909 struct pqi_general_admin_response response;
3910
3911 memset(&request, 0, sizeof(request));
3912 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3913 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3914 &request.header.iu_length);
3915 if (inbound_queue)
3916 request.function_code =
3917 PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3918 else
3919 request.function_code =
3920 PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3921 put_unaligned_le16(queue_id,
3922 &request.data.delete_operational_queue.queue_id);
3923
3924 return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3925 &response);
3926}
3927
3928static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3929{
3930 int rc;
3931 struct pqi_event_queue *event_queue;
3932 struct pqi_general_admin_request request;
3933 struct pqi_general_admin_response response;
3934
3935 event_queue = &ctrl_info->event_queue;
3936
3937 /*
3938 * Create OQ (Outbound Queue - device to host queue) to dedicate
3939 * to events.
3940 */
3941 memset(&request, 0, sizeof(request));
3942 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3943 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3944 &request.header.iu_length);
3945 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3946 put_unaligned_le16(event_queue->oq_id,
3947 &request.data.create_operational_oq.queue_id);
3948 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3949 &request.data.create_operational_oq.element_array_addr);
3950 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3951 &request.data.create_operational_oq.pi_addr);
3952 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3953 &request.data.create_operational_oq.num_elements);
3954 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3955 &request.data.create_operational_oq.element_length);
3956 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3957 put_unaligned_le16(event_queue->int_msg_num,
3958 &request.data.create_operational_oq.int_msg_num);
3959
3960 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3961 &response);
3962 if (rc)
3963 return rc;
3964
3965 event_queue->oq_ci = ctrl_info->iomem_base +
3966 PQI_DEVICE_REGISTERS_OFFSET +
3967 get_unaligned_le64(
3968 &response.data.create_operational_oq.oq_ci_offset);
3969
3970 return 0;
3971}
3972
061ef06a
KB
3973static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
3974 unsigned int group_number)
6c223761 3975{
6c223761
KB
3976 int rc;
3977 struct pqi_queue_group *queue_group;
3978 struct pqi_general_admin_request request;
3979 struct pqi_general_admin_response response;
3980
061ef06a 3981 queue_group = &ctrl_info->queue_groups[group_number];
6c223761
KB
3982
3983 /*
3984 * Create IQ (Inbound Queue - host to device queue) for
3985 * RAID path.
3986 */
3987 memset(&request, 0, sizeof(request));
3988 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3989 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3990 &request.header.iu_length);
3991 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3992 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3993 &request.data.create_operational_iq.queue_id);
3994 put_unaligned_le64(
3995 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3996 &request.data.create_operational_iq.element_array_addr);
3997 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3998 &request.data.create_operational_iq.ci_addr);
3999 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4000 &request.data.create_operational_iq.num_elements);
4001 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4002 &request.data.create_operational_iq.element_length);
4003 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4004
4005 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4006 &response);
4007 if (rc) {
4008 dev_err(&ctrl_info->pci_dev->dev,
4009 "error creating inbound RAID queue\n");
4010 return rc;
4011 }
4012
4013 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4014 PQI_DEVICE_REGISTERS_OFFSET +
4015 get_unaligned_le64(
4016 &response.data.create_operational_iq.iq_pi_offset);
4017
4018 /*
4019 * Create IQ (Inbound Queue - host to device queue) for
4020 * Advanced I/O (AIO) path.
4021 */
4022 memset(&request, 0, sizeof(request));
4023 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4024 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4025 &request.header.iu_length);
4026 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4027 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4028 &request.data.create_operational_iq.queue_id);
4029 put_unaligned_le64((u64)queue_group->
4030 iq_element_array_bus_addr[AIO_PATH],
4031 &request.data.create_operational_iq.element_array_addr);
4032 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4033 &request.data.create_operational_iq.ci_addr);
4034 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4035 &request.data.create_operational_iq.num_elements);
4036 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4037 &request.data.create_operational_iq.element_length);
4038 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4039
4040 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4041 &response);
4042 if (rc) {
4043 dev_err(&ctrl_info->pci_dev->dev,
4044 "error creating inbound AIO queue\n");
4045 goto delete_inbound_queue_raid;
4046 }
4047
4048 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4049 PQI_DEVICE_REGISTERS_OFFSET +
4050 get_unaligned_le64(
4051 &response.data.create_operational_iq.iq_pi_offset);
4052
4053 /*
4054 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4055 * assumed to be for RAID path I/O unless we change the queue's
4056 * property.
4057 */
4058 memset(&request, 0, sizeof(request));
4059 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4060 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4061 &request.header.iu_length);
4062 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4063 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4064 &request.data.change_operational_iq_properties.queue_id);
4065 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4066 &request.data.change_operational_iq_properties.vendor_specific);
4067
4068 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4069 &response);
4070 if (rc) {
4071 dev_err(&ctrl_info->pci_dev->dev,
4072 "error changing queue property\n");
4073 goto delete_inbound_queue_aio;
4074 }
4075
4076 /*
4077 * Create OQ (Outbound Queue - device to host queue).
4078 */
4079 memset(&request, 0, sizeof(request));
4080 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4081 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4082 &request.header.iu_length);
4083 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4084 put_unaligned_le16(queue_group->oq_id,
4085 &request.data.create_operational_oq.queue_id);
4086 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4087 &request.data.create_operational_oq.element_array_addr);
4088 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4089 &request.data.create_operational_oq.pi_addr);
4090 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4091 &request.data.create_operational_oq.num_elements);
4092 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4093 &request.data.create_operational_oq.element_length);
4094 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4095 put_unaligned_le16(queue_group->int_msg_num,
4096 &request.data.create_operational_oq.int_msg_num);
4097
4098 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4099 &response);
4100 if (rc) {
4101 dev_err(&ctrl_info->pci_dev->dev,
4102 "error creating outbound queue\n");
4103 goto delete_inbound_queue_aio;
4104 }
4105
4106 queue_group->oq_ci = ctrl_info->iomem_base +
4107 PQI_DEVICE_REGISTERS_OFFSET +
4108 get_unaligned_le64(
4109 &response.data.create_operational_oq.oq_ci_offset);
4110
6c223761
KB
4111 return 0;
4112
4113delete_inbound_queue_aio:
4114 pqi_delete_operational_queue(ctrl_info, true,
4115 queue_group->iq_id[AIO_PATH]);
4116
4117delete_inbound_queue_raid:
4118 pqi_delete_operational_queue(ctrl_info, true,
4119 queue_group->iq_id[RAID_PATH]);
4120
4121 return rc;
4122}
4123
4124static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4125{
4126 int rc;
4127 unsigned int i;
4128
4129 rc = pqi_create_event_queue(ctrl_info);
4130 if (rc) {
4131 dev_err(&ctrl_info->pci_dev->dev,
4132 "error creating event queue\n");
4133 return rc;
4134 }
4135
4136 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
061ef06a 4137 rc = pqi_create_queue_group(ctrl_info, i);
6c223761
KB
4138 if (rc) {
4139 dev_err(&ctrl_info->pci_dev->dev,
4140 "error creating queue group number %u/%u\n",
4141 i, ctrl_info->num_queue_groups);
4142 return rc;
4143 }
4144 }
4145
4146 return 0;
4147}
4148
4149#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4150 (offsetof(struct pqi_event_config, descriptors) + \
4151 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4152
6a50d6ad
KB
4153static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4154 bool enable_events)
6c223761
KB
4155{
4156 int rc;
4157 unsigned int i;
4158 struct pqi_event_config *event_config;
6a50d6ad 4159 struct pqi_event_descriptor *event_descriptor;
6c223761
KB
4160 struct pqi_general_management_request request;
4161
4162 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4163 GFP_KERNEL);
4164 if (!event_config)
4165 return -ENOMEM;
4166
4167 memset(&request, 0, sizeof(request));
4168
4169 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4170 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4171 data.report_event_configuration.sg_descriptors[1]) -
4172 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4173 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4174 &request.data.report_event_configuration.buffer_length);
4175
4176 rc = pqi_map_single(ctrl_info->pci_dev,
4177 request.data.report_event_configuration.sg_descriptors,
4178 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4179 PCI_DMA_FROMDEVICE);
4180 if (rc)
4181 goto out;
4182
4183 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4184 0, NULL, NO_TIMEOUT);
4185
4186 pqi_pci_unmap(ctrl_info->pci_dev,
4187 request.data.report_event_configuration.sg_descriptors, 1,
4188 PCI_DMA_FROMDEVICE);
4189
4190 if (rc)
4191 goto out;
4192
6a50d6ad
KB
4193 for (i = 0; i < event_config->num_event_descriptors; i++) {
4194 event_descriptor = &event_config->descriptors[i];
4195 if (enable_events &&
4196 pqi_is_supported_event(event_descriptor->event_type))
4197 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4198 &event_descriptor->oq_id);
4199 else
4200 put_unaligned_le16(0, &event_descriptor->oq_id);
4201 }
6c223761
KB
4202
4203 memset(&request, 0, sizeof(request));
4204
4205 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4206 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4207 data.report_event_configuration.sg_descriptors[1]) -
4208 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4209 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4210 &request.data.report_event_configuration.buffer_length);
4211
4212 rc = pqi_map_single(ctrl_info->pci_dev,
4213 request.data.report_event_configuration.sg_descriptors,
4214 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4215 PCI_DMA_TODEVICE);
4216 if (rc)
4217 goto out;
4218
4219 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4220 NULL, NO_TIMEOUT);
4221
4222 pqi_pci_unmap(ctrl_info->pci_dev,
4223 request.data.report_event_configuration.sg_descriptors, 1,
4224 PCI_DMA_TODEVICE);
4225
4226out:
4227 kfree(event_config);
4228
4229 return rc;
4230}
4231
6a50d6ad
KB
4232static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4233{
4234 return pqi_configure_events(ctrl_info, true);
4235}
4236
4237static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4238{
4239 return pqi_configure_events(ctrl_info, false);
4240}
4241
6c223761
KB
4242static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4243{
4244 unsigned int i;
4245 struct device *dev;
4246 size_t sg_chain_buffer_length;
4247 struct pqi_io_request *io_request;
4248
4249 if (!ctrl_info->io_request_pool)
4250 return;
4251
4252 dev = &ctrl_info->pci_dev->dev;
4253 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4254 io_request = ctrl_info->io_request_pool;
4255
4256 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4257 kfree(io_request->iu);
4258 if (!io_request->sg_chain_buffer)
4259 break;
4260 dma_free_coherent(dev, sg_chain_buffer_length,
4261 io_request->sg_chain_buffer,
4262 io_request->sg_chain_buffer_dma_handle);
4263 io_request++;
4264 }
4265
4266 kfree(ctrl_info->io_request_pool);
4267 ctrl_info->io_request_pool = NULL;
4268}
4269
4270static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4271{
4272 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4273 ctrl_info->error_buffer_length,
4274 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4275
4276 if (!ctrl_info->error_buffer)
4277 return -ENOMEM;
4278
4279 return 0;
4280}
4281
4282static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4283{
4284 unsigned int i;
4285 void *sg_chain_buffer;
4286 size_t sg_chain_buffer_length;
4287 dma_addr_t sg_chain_buffer_dma_handle;
4288 struct device *dev;
4289 struct pqi_io_request *io_request;
4290
4291 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
4292 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4293
4294 if (!ctrl_info->io_request_pool) {
4295 dev_err(&ctrl_info->pci_dev->dev,
4296 "failed to allocate I/O request pool\n");
4297 goto error;
4298 }
4299
4300 dev = &ctrl_info->pci_dev->dev;
4301 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4302 io_request = ctrl_info->io_request_pool;
4303
4304 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4305 io_request->iu =
4306 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4307
4308 if (!io_request->iu) {
4309 dev_err(&ctrl_info->pci_dev->dev,
4310 "failed to allocate IU buffers\n");
4311 goto error;
4312 }
4313
4314 sg_chain_buffer = dma_alloc_coherent(dev,
4315 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4316 GFP_KERNEL);
4317
4318 if (!sg_chain_buffer) {
4319 dev_err(&ctrl_info->pci_dev->dev,
4320 "failed to allocate PQI scatter-gather chain buffers\n");
4321 goto error;
4322 }
4323
4324 io_request->index = i;
4325 io_request->sg_chain_buffer = sg_chain_buffer;
4326 io_request->sg_chain_buffer_dma_handle =
4327 sg_chain_buffer_dma_handle;
4328 io_request++;
4329 }
4330
4331 return 0;
4332
4333error:
4334 pqi_free_all_io_requests(ctrl_info);
4335
4336 return -ENOMEM;
4337}
4338
4339/*
4340 * Calculate required resources that are sized based on max. outstanding
4341 * requests and max. transfer size.
4342 */
4343
4344static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4345{
4346 u32 max_transfer_size;
4347 u32 max_sg_entries;
4348
4349 ctrl_info->scsi_ml_can_queue =
4350 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4351 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4352
4353 ctrl_info->error_buffer_length =
4354 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4355
4356 max_transfer_size =
4357 min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
4358
4359 max_sg_entries = max_transfer_size / PAGE_SIZE;
4360
4361 /* +1 to cover when the buffer is not page-aligned. */
4362 max_sg_entries++;
4363
4364 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4365
4366 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4367
4368 ctrl_info->sg_chain_buffer_length =
e1d213bd
KB
4369 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4370 PQI_EXTRA_SGL_MEMORY;
6c223761
KB
4371 ctrl_info->sg_tablesize = max_sg_entries;
4372 ctrl_info->max_sectors = max_transfer_size / 512;
4373}
4374
4375static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4376{
4377 int num_cpus;
4378 int max_queue_groups;
4379 int num_queue_groups;
4380 u16 num_elements_per_iq;
4381 u16 num_elements_per_oq;
4382
4383 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4384 ctrl_info->max_outbound_queues - 1);
4385 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4386
4387 num_cpus = num_online_cpus();
4388 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4389 num_queue_groups = min(num_queue_groups, max_queue_groups);
4390
4391 ctrl_info->num_queue_groups = num_queue_groups;
061ef06a 4392 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
6c223761 4393
77668f41
KB
4394 /*
4395 * Make sure that the max. inbound IU length is an even multiple
4396 * of our inbound element length.
4397 */
4398 ctrl_info->max_inbound_iu_length =
4399 (ctrl_info->max_inbound_iu_length_per_firmware /
4400 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4401 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
6c223761
KB
4402
4403 num_elements_per_iq =
4404 (ctrl_info->max_inbound_iu_length /
4405 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4406
4407 /* Add one because one element in each queue is unusable. */
4408 num_elements_per_iq++;
4409
4410 num_elements_per_iq = min(num_elements_per_iq,
4411 ctrl_info->max_elements_per_iq);
4412
4413 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4414 num_elements_per_oq = min(num_elements_per_oq,
4415 ctrl_info->max_elements_per_oq);
4416
4417 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4418 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4419
4420 ctrl_info->max_sg_per_iu =
4421 ((ctrl_info->max_inbound_iu_length -
4422 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4423 sizeof(struct pqi_sg_descriptor)) +
4424 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4425}
4426
4427static inline void pqi_set_sg_descriptor(
4428 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4429{
4430 u64 address = (u64)sg_dma_address(sg);
4431 unsigned int length = sg_dma_len(sg);
4432
4433 put_unaligned_le64(address, &sg_descriptor->address);
4434 put_unaligned_le32(length, &sg_descriptor->length);
4435 put_unaligned_le32(0, &sg_descriptor->flags);
4436}
4437
4438static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4439 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4440 struct pqi_io_request *io_request)
4441{
4442 int i;
4443 u16 iu_length;
4444 int sg_count;
4445 bool chained;
4446 unsigned int num_sg_in_iu;
4447 unsigned int max_sg_per_iu;
4448 struct scatterlist *sg;
4449 struct pqi_sg_descriptor *sg_descriptor;
4450
4451 sg_count = scsi_dma_map(scmd);
4452 if (sg_count < 0)
4453 return sg_count;
4454
4455 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4456 PQI_REQUEST_HEADER_LENGTH;
4457
4458 if (sg_count == 0)
4459 goto out;
4460
4461 sg = scsi_sglist(scmd);
4462 sg_descriptor = request->sg_descriptors;
4463 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4464 chained = false;
4465 num_sg_in_iu = 0;
4466 i = 0;
4467
4468 while (1) {
4469 pqi_set_sg_descriptor(sg_descriptor, sg);
4470 if (!chained)
4471 num_sg_in_iu++;
4472 i++;
4473 if (i == sg_count)
4474 break;
4475 sg_descriptor++;
4476 if (i == max_sg_per_iu) {
4477 put_unaligned_le64(
4478 (u64)io_request->sg_chain_buffer_dma_handle,
4479 &sg_descriptor->address);
4480 put_unaligned_le32((sg_count - num_sg_in_iu)
4481 * sizeof(*sg_descriptor),
4482 &sg_descriptor->length);
4483 put_unaligned_le32(CISS_SG_CHAIN,
4484 &sg_descriptor->flags);
4485 chained = true;
4486 num_sg_in_iu++;
4487 sg_descriptor = io_request->sg_chain_buffer;
4488 }
4489 sg = sg_next(sg);
4490 }
4491
4492 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4493 request->partial = chained;
4494 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4495
4496out:
4497 put_unaligned_le16(iu_length, &request->header.iu_length);
4498
4499 return 0;
4500}
4501
4502static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4503 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4504 struct pqi_io_request *io_request)
4505{
4506 int i;
4507 u16 iu_length;
4508 int sg_count;
a60eec02
KB
4509 bool chained;
4510 unsigned int num_sg_in_iu;
4511 unsigned int max_sg_per_iu;
6c223761
KB
4512 struct scatterlist *sg;
4513 struct pqi_sg_descriptor *sg_descriptor;
4514
4515 sg_count = scsi_dma_map(scmd);
4516 if (sg_count < 0)
4517 return sg_count;
a60eec02
KB
4518
4519 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4520 PQI_REQUEST_HEADER_LENGTH;
4521 num_sg_in_iu = 0;
4522
6c223761
KB
4523 if (sg_count == 0)
4524 goto out;
4525
a60eec02
KB
4526 sg = scsi_sglist(scmd);
4527 sg_descriptor = request->sg_descriptors;
4528 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4529 chained = false;
4530 i = 0;
4531
4532 while (1) {
4533 pqi_set_sg_descriptor(sg_descriptor, sg);
4534 if (!chained)
4535 num_sg_in_iu++;
4536 i++;
4537 if (i == sg_count)
4538 break;
4539 sg_descriptor++;
4540 if (i == max_sg_per_iu) {
4541 put_unaligned_le64(
4542 (u64)io_request->sg_chain_buffer_dma_handle,
4543 &sg_descriptor->address);
4544 put_unaligned_le32((sg_count - num_sg_in_iu)
4545 * sizeof(*sg_descriptor),
4546 &sg_descriptor->length);
4547 put_unaligned_le32(CISS_SG_CHAIN,
4548 &sg_descriptor->flags);
4549 chained = true;
4550 num_sg_in_iu++;
4551 sg_descriptor = io_request->sg_chain_buffer;
6c223761 4552 }
a60eec02 4553 sg = sg_next(sg);
6c223761
KB
4554 }
4555
a60eec02
KB
4556 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4557 request->partial = chained;
6c223761 4558 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
a60eec02
KB
4559
4560out:
6c223761
KB
4561 put_unaligned_le16(iu_length, &request->header.iu_length);
4562 request->num_sg_descriptors = num_sg_in_iu;
4563
4564 return 0;
4565}
4566
4567static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4568 void *context)
4569{
4570 struct scsi_cmnd *scmd;
4571
4572 scmd = io_request->scmd;
4573 pqi_free_io_request(io_request);
4574 scsi_dma_unmap(scmd);
4575 pqi_scsi_done(scmd);
4576}
4577
4578static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4579 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4580 struct pqi_queue_group *queue_group)
4581{
4582 int rc;
4583 size_t cdb_length;
4584 struct pqi_io_request *io_request;
4585 struct pqi_raid_path_request *request;
4586
4587 io_request = pqi_alloc_io_request(ctrl_info);
4588 io_request->io_complete_callback = pqi_raid_io_complete;
4589 io_request->scmd = scmd;
4590
4591 scmd->host_scribble = (unsigned char *)io_request;
4592
4593 request = io_request->iu;
4594 memset(request, 0,
4595 offsetof(struct pqi_raid_path_request, sg_descriptors));
4596
4597 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4598 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4599 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4600 put_unaligned_le16(io_request->index, &request->request_id);
4601 request->error_index = request->request_id;
4602 memcpy(request->lun_number, device->scsi3addr,
4603 sizeof(request->lun_number));
4604
4605 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4606 memcpy(request->cdb, scmd->cmnd, cdb_length);
4607
4608 switch (cdb_length) {
4609 case 6:
4610 case 10:
4611 case 12:
4612 case 16:
4613 /* No bytes in the Additional CDB bytes field */
4614 request->additional_cdb_bytes_usage =
4615 SOP_ADDITIONAL_CDB_BYTES_0;
4616 break;
4617 case 20:
4618 /* 4 bytes in the Additional cdb field */
4619 request->additional_cdb_bytes_usage =
4620 SOP_ADDITIONAL_CDB_BYTES_4;
4621 break;
4622 case 24:
4623 /* 8 bytes in the Additional cdb field */
4624 request->additional_cdb_bytes_usage =
4625 SOP_ADDITIONAL_CDB_BYTES_8;
4626 break;
4627 case 28:
4628 /* 12 bytes in the Additional cdb field */
4629 request->additional_cdb_bytes_usage =
4630 SOP_ADDITIONAL_CDB_BYTES_12;
4631 break;
4632 case 32:
4633 default:
4634 /* 16 bytes in the Additional cdb field */
4635 request->additional_cdb_bytes_usage =
4636 SOP_ADDITIONAL_CDB_BYTES_16;
4637 break;
4638 }
4639
4640 switch (scmd->sc_data_direction) {
4641 case DMA_TO_DEVICE:
4642 request->data_direction = SOP_READ_FLAG;
4643 break;
4644 case DMA_FROM_DEVICE:
4645 request->data_direction = SOP_WRITE_FLAG;
4646 break;
4647 case DMA_NONE:
4648 request->data_direction = SOP_NO_DIRECTION_FLAG;
4649 break;
4650 case DMA_BIDIRECTIONAL:
4651 request->data_direction = SOP_BIDIRECTIONAL;
4652 break;
4653 default:
4654 dev_err(&ctrl_info->pci_dev->dev,
4655 "unknown data direction: %d\n",
4656 scmd->sc_data_direction);
6c223761
KB
4657 break;
4658 }
4659
4660 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4661 if (rc) {
4662 pqi_free_io_request(io_request);
4663 return SCSI_MLQUEUE_HOST_BUSY;
4664 }
4665
4666 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4667
4668 return 0;
4669}
4670
4671static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4672 void *context)
4673{
4674 struct scsi_cmnd *scmd;
4675
4676 scmd = io_request->scmd;
4677 scsi_dma_unmap(scmd);
4678 if (io_request->status == -EAGAIN)
4679 set_host_byte(scmd, DID_IMM_RETRY);
4680 pqi_free_io_request(io_request);
4681 pqi_scsi_done(scmd);
4682}
4683
4684static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4685 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4686 struct pqi_queue_group *queue_group)
4687{
4688 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
4689 scmd->cmnd, scmd->cmd_len, queue_group, NULL);
4690}
4691
4692static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4693 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4694 unsigned int cdb_length, struct pqi_queue_group *queue_group,
4695 struct pqi_encryption_info *encryption_info)
4696{
4697 int rc;
4698 struct pqi_io_request *io_request;
4699 struct pqi_aio_path_request *request;
4700
4701 io_request = pqi_alloc_io_request(ctrl_info);
4702 io_request->io_complete_callback = pqi_aio_io_complete;
4703 io_request->scmd = scmd;
4704
4705 scmd->host_scribble = (unsigned char *)io_request;
4706
4707 request = io_request->iu;
4708 memset(request, 0,
4709 offsetof(struct pqi_raid_path_request, sg_descriptors));
4710
4711 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4712 put_unaligned_le32(aio_handle, &request->nexus_id);
4713 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4714 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4715 put_unaligned_le16(io_request->index, &request->request_id);
4716 request->error_index = request->request_id;
4717 if (cdb_length > sizeof(request->cdb))
4718 cdb_length = sizeof(request->cdb);
4719 request->cdb_length = cdb_length;
4720 memcpy(request->cdb, cdb, cdb_length);
4721
4722 switch (scmd->sc_data_direction) {
4723 case DMA_TO_DEVICE:
4724 request->data_direction = SOP_READ_FLAG;
4725 break;
4726 case DMA_FROM_DEVICE:
4727 request->data_direction = SOP_WRITE_FLAG;
4728 break;
4729 case DMA_NONE:
4730 request->data_direction = SOP_NO_DIRECTION_FLAG;
4731 break;
4732 case DMA_BIDIRECTIONAL:
4733 request->data_direction = SOP_BIDIRECTIONAL;
4734 break;
4735 default:
4736 dev_err(&ctrl_info->pci_dev->dev,
4737 "unknown data direction: %d\n",
4738 scmd->sc_data_direction);
6c223761
KB
4739 break;
4740 }
4741
4742 if (encryption_info) {
4743 request->encryption_enable = true;
4744 put_unaligned_le16(encryption_info->data_encryption_key_index,
4745 &request->data_encryption_key_index);
4746 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4747 &request->encrypt_tweak_lower);
4748 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4749 &request->encrypt_tweak_upper);
4750 }
4751
4752 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4753 if (rc) {
4754 pqi_free_io_request(io_request);
4755 return SCSI_MLQUEUE_HOST_BUSY;
4756 }
4757
4758 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4759
4760 return 0;
4761}
4762
061ef06a
KB
4763static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
4764 struct scsi_cmnd *scmd)
4765{
4766 u16 hw_queue;
4767
4768 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4769 if (hw_queue > ctrl_info->max_hw_queue_index)
4770 hw_queue = 0;
4771
4772 return hw_queue;
4773}
4774
7561a7e4
KB
4775/*
4776 * This function gets called just before we hand the completed SCSI request
4777 * back to the SML.
4778 */
4779
4780void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
4781{
4782 struct pqi_scsi_dev *device;
4783
4784 device = scmd->device->hostdata;
4785 atomic_dec(&device->scsi_cmds_outstanding);
4786}
4787
6c223761 4788static int pqi_scsi_queue_command(struct Scsi_Host *shost,
7d81d2b8 4789 struct scsi_cmnd *scmd)
6c223761
KB
4790{
4791 int rc;
4792 struct pqi_ctrl_info *ctrl_info;
4793 struct pqi_scsi_dev *device;
061ef06a 4794 u16 hw_queue;
6c223761
KB
4795 struct pqi_queue_group *queue_group;
4796 bool raid_bypassed;
4797
4798 device = scmd->device->hostdata;
6c223761
KB
4799 ctrl_info = shost_to_hba(shost);
4800
7561a7e4
KB
4801 atomic_inc(&device->scsi_cmds_outstanding);
4802
6c223761
KB
4803 if (pqi_ctrl_offline(ctrl_info)) {
4804 set_host_byte(scmd, DID_NO_CONNECT);
4805 pqi_scsi_done(scmd);
4806 return 0;
4807 }
4808
7561a7e4
KB
4809 pqi_ctrl_busy(ctrl_info);
4810 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
4811 rc = SCSI_MLQUEUE_HOST_BUSY;
4812 goto out;
4813 }
4814
7d81d2b8
KB
4815 /*
4816 * This is necessary because the SML doesn't zero out this field during
4817 * error recovery.
4818 */
4819 scmd->result = 0;
4820
061ef06a
KB
4821 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
4822 queue_group = &ctrl_info->queue_groups[hw_queue];
6c223761
KB
4823
4824 if (pqi_is_logical_device(device)) {
4825 raid_bypassed = false;
4826 if (device->offload_enabled &&
57292b58 4827 !blk_rq_is_passthrough(scmd->request)) {
6c223761
KB
4828 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4829 scmd, queue_group);
4830 if (rc == 0 ||
4831 rc == SCSI_MLQUEUE_HOST_BUSY ||
4832 rc == SAM_STAT_CHECK_CONDITION ||
4833 rc == SAM_STAT_RESERVATION_CONFLICT)
4834 raid_bypassed = true;
4835 }
4836 if (!raid_bypassed)
4837 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4838 queue_group);
4839 } else {
4840 if (device->aio_enabled)
4841 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4842 queue_group);
4843 else
4844 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4845 queue_group);
4846 }
4847
7561a7e4
KB
4848out:
4849 pqi_ctrl_unbusy(ctrl_info);
4850 if (rc)
4851 atomic_dec(&device->scsi_cmds_outstanding);
4852
6c223761
KB
4853 return rc;
4854}
4855
7561a7e4
KB
4856static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
4857 struct pqi_queue_group *queue_group)
4858{
4859 unsigned int path;
4860 unsigned long flags;
4861 bool list_is_empty;
4862
4863 for (path = 0; path < 2; path++) {
4864 while (1) {
4865 spin_lock_irqsave(
4866 &queue_group->submit_lock[path], flags);
4867 list_is_empty =
4868 list_empty(&queue_group->request_list[path]);
4869 spin_unlock_irqrestore(
4870 &queue_group->submit_lock[path], flags);
4871 if (list_is_empty)
4872 break;
4873 pqi_check_ctrl_health(ctrl_info);
4874 if (pqi_ctrl_offline(ctrl_info))
4875 return -ENXIO;
4876 usleep_range(1000, 2000);
4877 }
4878 }
4879
4880 return 0;
4881}
4882
4883static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
4884{
4885 int rc;
4886 unsigned int i;
4887 unsigned int path;
4888 struct pqi_queue_group *queue_group;
4889 pqi_index_t iq_pi;
4890 pqi_index_t iq_ci;
4891
4892 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4893 queue_group = &ctrl_info->queue_groups[i];
4894
4895 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
4896 if (rc)
4897 return rc;
4898
4899 for (path = 0; path < 2; path++) {
4900 iq_pi = queue_group->iq_pi_copy[path];
4901
4902 while (1) {
4903 iq_ci = *queue_group->iq_ci[path];
4904 if (iq_ci == iq_pi)
4905 break;
4906 pqi_check_ctrl_health(ctrl_info);
4907 if (pqi_ctrl_offline(ctrl_info))
4908 return -ENXIO;
4909 usleep_range(1000, 2000);
4910 }
4911 }
4912 }
4913
4914 return 0;
4915}
4916
4917static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
4918 struct pqi_scsi_dev *device)
4919{
4920 unsigned int i;
4921 unsigned int path;
4922 struct pqi_queue_group *queue_group;
4923 unsigned long flags;
4924 struct pqi_io_request *io_request;
4925 struct pqi_io_request *next;
4926 struct scsi_cmnd *scmd;
4927 struct pqi_scsi_dev *scsi_device;
4928
4929 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4930 queue_group = &ctrl_info->queue_groups[i];
4931
4932 for (path = 0; path < 2; path++) {
4933 spin_lock_irqsave(
4934 &queue_group->submit_lock[path], flags);
4935
4936 list_for_each_entry_safe(io_request, next,
4937 &queue_group->request_list[path],
4938 request_list_entry) {
4939 scmd = io_request->scmd;
4940 if (!scmd)
4941 continue;
4942
4943 scsi_device = scmd->device->hostdata;
4944 if (scsi_device != device)
4945 continue;
4946
4947 list_del(&io_request->request_list_entry);
4948 set_host_byte(scmd, DID_RESET);
4949 pqi_scsi_done(scmd);
4950 }
4951
4952 spin_unlock_irqrestore(
4953 &queue_group->submit_lock[path], flags);
4954 }
4955 }
4956}
4957
061ef06a
KB
4958static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
4959 struct pqi_scsi_dev *device)
4960{
4961 while (atomic_read(&device->scsi_cmds_outstanding)) {
4962 pqi_check_ctrl_health(ctrl_info);
4963 if (pqi_ctrl_offline(ctrl_info))
4964 return -ENXIO;
4965 usleep_range(1000, 2000);
4966 }
4967
4968 return 0;
4969}
4970
4971static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
4972{
4973 bool io_pending;
4974 unsigned long flags;
4975 struct pqi_scsi_dev *device;
4976
4977 while (1) {
4978 io_pending = false;
4979
4980 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
4981 list_for_each_entry(device, &ctrl_info->scsi_device_list,
4982 scsi_device_list_entry) {
4983 if (atomic_read(&device->scsi_cmds_outstanding)) {
4984 io_pending = true;
4985 break;
4986 }
4987 }
4988 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
4989 flags);
4990
4991 if (!io_pending)
4992 break;
4993
4994 pqi_check_ctrl_health(ctrl_info);
4995 if (pqi_ctrl_offline(ctrl_info))
4996 return -ENXIO;
4997
4998 usleep_range(1000, 2000);
4999 }
5000
5001 return 0;
5002}
5003
14bb215d
KB
5004static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
5005 void *context)
6c223761 5006{
14bb215d 5007 struct completion *waiting = context;
6c223761 5008
14bb215d
KB
5009 complete(waiting);
5010}
6c223761 5011
14bb215d
KB
5012#define PQI_LUN_RESET_TIMEOUT_SECS 10
5013
5014static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5015 struct pqi_scsi_dev *device, struct completion *wait)
5016{
5017 int rc;
14bb215d
KB
5018
5019 while (1) {
5020 if (wait_for_completion_io_timeout(wait,
5021 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
5022 rc = 0;
5023 break;
6c223761
KB
5024 }
5025
14bb215d
KB
5026 pqi_check_ctrl_health(ctrl_info);
5027 if (pqi_ctrl_offline(ctrl_info)) {
4e8415e3 5028 rc = -ENXIO;
14bb215d
KB
5029 break;
5030 }
6c223761 5031 }
6c223761 5032
14bb215d 5033 return rc;
6c223761
KB
5034}
5035
14bb215d 5036static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
5037 struct pqi_scsi_dev *device)
5038{
5039 int rc;
5040 struct pqi_io_request *io_request;
5041 DECLARE_COMPLETION_ONSTACK(wait);
5042 struct pqi_task_management_request *request;
5043
6c223761 5044 io_request = pqi_alloc_io_request(ctrl_info);
14bb215d 5045 io_request->io_complete_callback = pqi_lun_reset_complete;
6c223761
KB
5046 io_request->context = &wait;
5047
5048 request = io_request->iu;
5049 memset(request, 0, sizeof(*request));
5050
5051 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5052 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5053 &request->header.iu_length);
5054 put_unaligned_le16(io_request->index, &request->request_id);
5055 memcpy(request->lun_number, device->scsi3addr,
5056 sizeof(request->lun_number));
5057 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5058
5059 pqi_start_io(ctrl_info,
5060 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5061 io_request);
5062
14bb215d
KB
5063 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5064 if (rc == 0)
6c223761 5065 rc = io_request->status;
6c223761
KB
5066
5067 pqi_free_io_request(io_request);
6c223761
KB
5068
5069 return rc;
5070}
5071
5072/* Performs a reset at the LUN level. */
5073
5074static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5075 struct pqi_scsi_dev *device)
5076{
5077 int rc;
5078
14bb215d 5079 rc = pqi_lun_reset(ctrl_info, device);
061ef06a
KB
5080 if (rc == 0)
5081 rc = pqi_device_wait_for_pending_io(ctrl_info, device);
6c223761 5082
14bb215d 5083 return rc == 0 ? SUCCESS : FAILED;
6c223761
KB
5084}
5085
5086static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5087{
5088 int rc;
7561a7e4 5089 struct Scsi_Host *shost;
6c223761
KB
5090 struct pqi_ctrl_info *ctrl_info;
5091 struct pqi_scsi_dev *device;
5092
7561a7e4
KB
5093 shost = scmd->device->host;
5094 ctrl_info = shost_to_hba(shost);
6c223761
KB
5095 device = scmd->device->hostdata;
5096
5097 dev_err(&ctrl_info->pci_dev->dev,
5098 "resetting scsi %d:%d:%d:%d\n",
7561a7e4 5099 shost->host_no, device->bus, device->target, device->lun);
6c223761 5100
7561a7e4
KB
5101 pqi_check_ctrl_health(ctrl_info);
5102 if (pqi_ctrl_offline(ctrl_info)) {
5103 rc = FAILED;
5104 goto out;
5105 }
6c223761 5106
7561a7e4
KB
5107 mutex_lock(&ctrl_info->lun_reset_mutex);
5108
5109 pqi_ctrl_block_requests(ctrl_info);
5110 pqi_ctrl_wait_until_quiesced(ctrl_info);
5111 pqi_fail_io_queued_for_device(ctrl_info, device);
5112 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5113 pqi_device_reset_start(device);
5114 pqi_ctrl_unblock_requests(ctrl_info);
5115
5116 if (rc)
5117 rc = FAILED;
5118 else
5119 rc = pqi_device_reset(ctrl_info, device);
5120
5121 pqi_device_reset_done(device);
5122
5123 mutex_unlock(&ctrl_info->lun_reset_mutex);
5124
5125out:
6c223761
KB
5126 dev_err(&ctrl_info->pci_dev->dev,
5127 "reset of scsi %d:%d:%d:%d: %s\n",
7561a7e4 5128 shost->host_no, device->bus, device->target, device->lun,
6c223761
KB
5129 rc == SUCCESS ? "SUCCESS" : "FAILED");
5130
5131 return rc;
5132}
5133
5134static int pqi_slave_alloc(struct scsi_device *sdev)
5135{
5136 struct pqi_scsi_dev *device;
5137 unsigned long flags;
5138 struct pqi_ctrl_info *ctrl_info;
5139 struct scsi_target *starget;
5140 struct sas_rphy *rphy;
5141
5142 ctrl_info = shost_to_hba(sdev->host);
5143
5144 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5145
5146 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5147 starget = scsi_target(sdev);
5148 rphy = target_to_rphy(starget);
5149 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5150 if (device) {
5151 device->target = sdev_id(sdev);
5152 device->lun = sdev->lun;
5153 device->target_lun_valid = true;
5154 }
5155 } else {
5156 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5157 sdev_id(sdev), sdev->lun);
5158 }
5159
5160 if (device && device->expose_device) {
5161 sdev->hostdata = device;
5162 device->sdev = sdev;
5163 if (device->queue_depth) {
5164 device->advertised_queue_depth = device->queue_depth;
5165 scsi_change_queue_depth(sdev,
5166 device->advertised_queue_depth);
5167 }
5168 }
5169
5170 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5171
5172 return 0;
5173}
5174
5175static int pqi_slave_configure(struct scsi_device *sdev)
5176{
5177 struct pqi_scsi_dev *device;
5178
5179 device = sdev->hostdata;
5180 if (!device->expose_device)
5181 sdev->no_uld_attach = true;
5182
5183 return 0;
5184}
5185
52198226
CH
5186static int pqi_map_queues(struct Scsi_Host *shost)
5187{
5188 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5189
5190 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
5191}
5192
6c223761
KB
5193static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5194 void __user *arg)
5195{
5196 struct pci_dev *pci_dev;
5197 u32 subsystem_vendor;
5198 u32 subsystem_device;
5199 cciss_pci_info_struct pciinfo;
5200
5201 if (!arg)
5202 return -EINVAL;
5203
5204 pci_dev = ctrl_info->pci_dev;
5205
5206 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5207 pciinfo.bus = pci_dev->bus->number;
5208 pciinfo.dev_fn = pci_dev->devfn;
5209 subsystem_vendor = pci_dev->subsystem_vendor;
5210 subsystem_device = pci_dev->subsystem_device;
5211 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5212 subsystem_vendor;
5213
5214 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5215 return -EFAULT;
5216
5217 return 0;
5218}
5219
5220static int pqi_getdrivver_ioctl(void __user *arg)
5221{
5222 u32 version;
5223
5224 if (!arg)
5225 return -EINVAL;
5226
5227 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5228 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5229
5230 if (copy_to_user(arg, &version, sizeof(version)))
5231 return -EFAULT;
5232
5233 return 0;
5234}
5235
5236struct ciss_error_info {
5237 u8 scsi_status;
5238 int command_status;
5239 size_t sense_data_length;
5240};
5241
5242static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5243 struct ciss_error_info *ciss_error_info)
5244{
5245 int ciss_cmd_status;
5246 size_t sense_data_length;
5247
5248 switch (pqi_error_info->data_out_result) {
5249 case PQI_DATA_IN_OUT_GOOD:
5250 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5251 break;
5252 case PQI_DATA_IN_OUT_UNDERFLOW:
5253 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5254 break;
5255 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5256 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5257 break;
5258 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5259 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5260 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5261 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5262 case PQI_DATA_IN_OUT_ERROR:
5263 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5264 break;
5265 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5266 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5267 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5268 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5269 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5270 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5271 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5272 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5273 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5274 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5275 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5276 break;
5277 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5278 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5279 break;
5280 case PQI_DATA_IN_OUT_ABORTED:
5281 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5282 break;
5283 case PQI_DATA_IN_OUT_TIMEOUT:
5284 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5285 break;
5286 default:
5287 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5288 break;
5289 }
5290
5291 sense_data_length =
5292 get_unaligned_le16(&pqi_error_info->sense_data_length);
5293 if (sense_data_length == 0)
5294 sense_data_length =
5295 get_unaligned_le16(&pqi_error_info->response_data_length);
5296 if (sense_data_length)
5297 if (sense_data_length > sizeof(pqi_error_info->data))
5298 sense_data_length = sizeof(pqi_error_info->data);
5299
5300 ciss_error_info->scsi_status = pqi_error_info->status;
5301 ciss_error_info->command_status = ciss_cmd_status;
5302 ciss_error_info->sense_data_length = sense_data_length;
5303}
5304
5305static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5306{
5307 int rc;
5308 char *kernel_buffer = NULL;
5309 u16 iu_length;
5310 size_t sense_data_length;
5311 IOCTL_Command_struct iocommand;
5312 struct pqi_raid_path_request request;
5313 struct pqi_raid_error_info pqi_error_info;
5314 struct ciss_error_info ciss_error_info;
5315
5316 if (pqi_ctrl_offline(ctrl_info))
5317 return -ENXIO;
5318 if (!arg)
5319 return -EINVAL;
5320 if (!capable(CAP_SYS_RAWIO))
5321 return -EPERM;
5322 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5323 return -EFAULT;
5324 if (iocommand.buf_size < 1 &&
5325 iocommand.Request.Type.Direction != XFER_NONE)
5326 return -EINVAL;
5327 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5328 return -EINVAL;
5329 if (iocommand.Request.Type.Type != TYPE_CMD)
5330 return -EINVAL;
5331
5332 switch (iocommand.Request.Type.Direction) {
5333 case XFER_NONE:
5334 case XFER_WRITE:
5335 case XFER_READ:
5336 break;
5337 default:
5338 return -EINVAL;
5339 }
5340
5341 if (iocommand.buf_size > 0) {
5342 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
5343 if (!kernel_buffer)
5344 return -ENOMEM;
5345 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5346 if (copy_from_user(kernel_buffer, iocommand.buf,
5347 iocommand.buf_size)) {
5348 rc = -EFAULT;
5349 goto out;
5350 }
5351 } else {
5352 memset(kernel_buffer, 0, iocommand.buf_size);
5353 }
5354 }
5355
5356 memset(&request, 0, sizeof(request));
5357
5358 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5359 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5360 PQI_REQUEST_HEADER_LENGTH;
5361 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
5362 sizeof(request.lun_number));
5363 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
5364 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5365
5366 switch (iocommand.Request.Type.Direction) {
5367 case XFER_NONE:
5368 request.data_direction = SOP_NO_DIRECTION_FLAG;
5369 break;
5370 case XFER_WRITE:
5371 request.data_direction = SOP_WRITE_FLAG;
5372 break;
5373 case XFER_READ:
5374 request.data_direction = SOP_READ_FLAG;
5375 break;
5376 }
5377
5378 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5379
5380 if (iocommand.buf_size > 0) {
5381 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
5382
5383 rc = pqi_map_single(ctrl_info->pci_dev,
5384 &request.sg_descriptors[0], kernel_buffer,
5385 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
5386 if (rc)
5387 goto out;
5388
5389 iu_length += sizeof(request.sg_descriptors[0]);
5390 }
5391
5392 put_unaligned_le16(iu_length, &request.header.iu_length);
5393
5394 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
5395 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
5396
5397 if (iocommand.buf_size > 0)
5398 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
5399 PCI_DMA_BIDIRECTIONAL);
5400
5401 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
5402
5403 if (rc == 0) {
5404 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
5405 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
5406 iocommand.error_info.CommandStatus =
5407 ciss_error_info.command_status;
5408 sense_data_length = ciss_error_info.sense_data_length;
5409 if (sense_data_length) {
5410 if (sense_data_length >
5411 sizeof(iocommand.error_info.SenseInfo))
5412 sense_data_length =
5413 sizeof(iocommand.error_info.SenseInfo);
5414 memcpy(iocommand.error_info.SenseInfo,
5415 pqi_error_info.data, sense_data_length);
5416 iocommand.error_info.SenseLen = sense_data_length;
5417 }
5418 }
5419
5420 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
5421 rc = -EFAULT;
5422 goto out;
5423 }
5424
5425 if (rc == 0 && iocommand.buf_size > 0 &&
5426 (iocommand.Request.Type.Direction & XFER_READ)) {
5427 if (copy_to_user(iocommand.buf, kernel_buffer,
5428 iocommand.buf_size)) {
5429 rc = -EFAULT;
5430 }
5431 }
5432
5433out:
5434 kfree(kernel_buffer);
5435
5436 return rc;
5437}
5438
5439static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5440{
5441 int rc;
5442 struct pqi_ctrl_info *ctrl_info;
5443
5444 ctrl_info = shost_to_hba(sdev->host);
5445
5446 switch (cmd) {
5447 case CCISS_DEREGDISK:
5448 case CCISS_REGNEWDISK:
5449 case CCISS_REGNEWD:
5450 rc = pqi_scan_scsi_devices(ctrl_info);
5451 break;
5452 case CCISS_GETPCIINFO:
5453 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5454 break;
5455 case CCISS_GETDRIVVER:
5456 rc = pqi_getdrivver_ioctl(arg);
5457 break;
5458 case CCISS_PASSTHRU:
5459 rc = pqi_passthru_ioctl(ctrl_info, arg);
5460 break;
5461 default:
5462 rc = -EINVAL;
5463 break;
5464 }
5465
5466 return rc;
5467}
5468
5469static ssize_t pqi_version_show(struct device *dev,
5470 struct device_attribute *attr, char *buffer)
5471{
5472 ssize_t count = 0;
5473 struct Scsi_Host *shost;
5474 struct pqi_ctrl_info *ctrl_info;
5475
5476 shost = class_to_shost(dev);
5477 ctrl_info = shost_to_hba(shost);
5478
5479 count += snprintf(buffer + count, PAGE_SIZE - count,
5480 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5481
5482 count += snprintf(buffer + count, PAGE_SIZE - count,
5483 "firmware: %s\n", ctrl_info->firmware_version);
5484
5485 return count;
5486}
5487
5488static ssize_t pqi_host_rescan_store(struct device *dev,
5489 struct device_attribute *attr, const char *buffer, size_t count)
5490{
5491 struct Scsi_Host *shost = class_to_shost(dev);
5492
5493 pqi_scan_start(shost);
5494
5495 return count;
5496}
5497
cbe0c7b1
KB
5498static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
5499static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
6c223761
KB
5500
5501static struct device_attribute *pqi_shost_attrs[] = {
5502 &dev_attr_version,
5503 &dev_attr_rescan,
5504 NULL
5505};
5506
5507static ssize_t pqi_sas_address_show(struct device *dev,
5508 struct device_attribute *attr, char *buffer)
5509{
5510 struct pqi_ctrl_info *ctrl_info;
5511 struct scsi_device *sdev;
5512 struct pqi_scsi_dev *device;
5513 unsigned long flags;
5514 u64 sas_address;
5515
5516 sdev = to_scsi_device(dev);
5517 ctrl_info = shost_to_hba(sdev->host);
5518
5519 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5520
5521 device = sdev->hostdata;
5522 if (pqi_is_logical_device(device)) {
5523 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5524 flags);
5525 return -ENODEV;
5526 }
5527 sas_address = device->sas_address;
5528
5529 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5530
5531 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5532}
5533
5534static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5535 struct device_attribute *attr, char *buffer)
5536{
5537 struct pqi_ctrl_info *ctrl_info;
5538 struct scsi_device *sdev;
5539 struct pqi_scsi_dev *device;
5540 unsigned long flags;
5541
5542 sdev = to_scsi_device(dev);
5543 ctrl_info = shost_to_hba(sdev->host);
5544
5545 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5546
5547 device = sdev->hostdata;
5548 buffer[0] = device->offload_enabled ? '1' : '0';
5549 buffer[1] = '\n';
5550 buffer[2] = '\0';
5551
5552 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5553
5554 return 2;
5555}
5556
cbe0c7b1
KB
5557static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
5558static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
6c223761
KB
5559 pqi_ssd_smart_path_enabled_show, NULL);
5560
5561static struct device_attribute *pqi_sdev_attrs[] = {
5562 &dev_attr_sas_address,
5563 &dev_attr_ssd_smart_path_enabled,
5564 NULL
5565};
5566
5567static struct scsi_host_template pqi_driver_template = {
5568 .module = THIS_MODULE,
5569 .name = DRIVER_NAME_SHORT,
5570 .proc_name = DRIVER_NAME_SHORT,
5571 .queuecommand = pqi_scsi_queue_command,
5572 .scan_start = pqi_scan_start,
5573 .scan_finished = pqi_scan_finished,
5574 .this_id = -1,
5575 .use_clustering = ENABLE_CLUSTERING,
5576 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5577 .ioctl = pqi_ioctl,
5578 .slave_alloc = pqi_slave_alloc,
5579 .slave_configure = pqi_slave_configure,
52198226 5580 .map_queues = pqi_map_queues,
6c223761
KB
5581 .sdev_attrs = pqi_sdev_attrs,
5582 .shost_attrs = pqi_shost_attrs,
5583};
5584
5585static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5586{
5587 int rc;
5588 struct Scsi_Host *shost;
5589
5590 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5591 if (!shost) {
5592 dev_err(&ctrl_info->pci_dev->dev,
5593 "scsi_host_alloc failed for controller %u\n",
5594 ctrl_info->ctrl_id);
5595 return -ENOMEM;
5596 }
5597
5598 shost->io_port = 0;
5599 shost->n_io_port = 0;
5600 shost->this_id = -1;
5601 shost->max_channel = PQI_MAX_BUS;
5602 shost->max_cmd_len = MAX_COMMAND_SIZE;
5603 shost->max_lun = ~0;
5604 shost->max_id = ~0;
5605 shost->max_sectors = ctrl_info->max_sectors;
5606 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5607 shost->cmd_per_lun = shost->can_queue;
5608 shost->sg_tablesize = ctrl_info->sg_tablesize;
5609 shost->transportt = pqi_sas_transport_template;
52198226 5610 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
6c223761
KB
5611 shost->unique_id = shost->irq;
5612 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5613 shost->hostdata[0] = (unsigned long)ctrl_info;
5614
5615 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5616 if (rc) {
5617 dev_err(&ctrl_info->pci_dev->dev,
5618 "scsi_add_host failed for controller %u\n",
5619 ctrl_info->ctrl_id);
5620 goto free_host;
5621 }
5622
5623 rc = pqi_add_sas_host(shost, ctrl_info);
5624 if (rc) {
5625 dev_err(&ctrl_info->pci_dev->dev,
5626 "add SAS host failed for controller %u\n",
5627 ctrl_info->ctrl_id);
5628 goto remove_host;
5629 }
5630
5631 ctrl_info->scsi_host = shost;
5632
5633 return 0;
5634
5635remove_host:
5636 scsi_remove_host(shost);
5637free_host:
5638 scsi_host_put(shost);
5639
5640 return rc;
5641}
5642
5643static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5644{
5645 struct Scsi_Host *shost;
5646
5647 pqi_delete_sas_host(ctrl_info);
5648
5649 shost = ctrl_info->scsi_host;
5650 if (!shost)
5651 return;
5652
5653 scsi_remove_host(shost);
5654 scsi_host_put(shost);
5655}
5656
5657#define PQI_RESET_ACTION_RESET 0x1
5658
5659#define PQI_RESET_TYPE_NO_RESET 0x0
5660#define PQI_RESET_TYPE_SOFT_RESET 0x1
5661#define PQI_RESET_TYPE_FIRM_RESET 0x2
5662#define PQI_RESET_TYPE_HARD_RESET 0x3
5663
5664static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5665{
5666 int rc;
5667 u32 reset_params;
5668
5669 reset_params = (PQI_RESET_ACTION_RESET << 5) |
5670 PQI_RESET_TYPE_HARD_RESET;
5671
5672 writel(reset_params,
5673 &ctrl_info->pqi_registers->device_reset);
5674
5675 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5676 if (rc)
5677 dev_err(&ctrl_info->pci_dev->dev,
5678 "PQI reset failed\n");
5679
5680 return rc;
5681}
5682
5683static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5684{
5685 int rc;
5686 struct bmic_identify_controller *identify;
5687
5688 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5689 if (!identify)
5690 return -ENOMEM;
5691
5692 rc = pqi_identify_controller(ctrl_info, identify);
5693 if (rc)
5694 goto out;
5695
5696 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5697 sizeof(identify->firmware_version));
5698 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5699 snprintf(ctrl_info->firmware_version +
5700 strlen(ctrl_info->firmware_version),
5701 sizeof(ctrl_info->firmware_version),
5702 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5703
5704out:
5705 kfree(identify);
5706
5707 return rc;
5708}
5709
98f87667
KB
5710static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
5711{
5712 u32 table_length;
5713 u32 section_offset;
5714 void __iomem *table_iomem_addr;
5715 struct pqi_config_table *config_table;
5716 struct pqi_config_table_section_header *section;
5717
5718 table_length = ctrl_info->config_table_length;
5719
5720 config_table = kmalloc(table_length, GFP_KERNEL);
5721 if (!config_table) {
5722 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 5723 "failed to allocate memory for PQI configuration table\n");
98f87667
KB
5724 return -ENOMEM;
5725 }
5726
5727 /*
5728 * Copy the config table contents from I/O memory space into the
5729 * temporary buffer.
5730 */
5731 table_iomem_addr = ctrl_info->iomem_base +
5732 ctrl_info->config_table_offset;
5733 memcpy_fromio(config_table, table_iomem_addr, table_length);
5734
5735 section_offset =
5736 get_unaligned_le32(&config_table->first_section_offset);
5737
5738 while (section_offset) {
5739 section = (void *)config_table + section_offset;
5740
5741 switch (get_unaligned_le16(&section->section_id)) {
5742 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
5743 ctrl_info->heartbeat_counter = table_iomem_addr +
5744 section_offset +
5745 offsetof(struct pqi_config_table_heartbeat,
5746 heartbeat_counter);
5747 break;
5748 }
5749
5750 section_offset =
5751 get_unaligned_le16(&section->next_section_offset);
5752 }
5753
5754 kfree(config_table);
5755
5756 return 0;
5757}
5758
162d7753
KB
5759/* Switches the controller from PQI mode back into SIS mode. */
5760
5761static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
5762{
5763 int rc;
5764
061ef06a 5765 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
162d7753
KB
5766 rc = pqi_reset(ctrl_info);
5767 if (rc)
5768 return rc;
5769 sis_reenable_sis_mode(ctrl_info);
5770 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
5771
5772 return 0;
5773}
5774
5775/*
5776 * If the controller isn't already in SIS mode, this function forces it into
5777 * SIS mode.
5778 */
5779
5780static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
ff6abb73
KB
5781{
5782 if (!sis_is_firmware_running(ctrl_info))
5783 return -ENXIO;
5784
162d7753
KB
5785 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
5786 return 0;
5787
5788 if (sis_is_kernel_up(ctrl_info)) {
5789 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
5790 return 0;
ff6abb73
KB
5791 }
5792
162d7753 5793 return pqi_revert_to_sis_mode(ctrl_info);
ff6abb73
KB
5794}
5795
6c223761
KB
5796static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5797{
5798 int rc;
5799
162d7753
KB
5800 rc = pqi_force_sis_mode(ctrl_info);
5801 if (rc)
5802 return rc;
6c223761
KB
5803
5804 /*
5805 * Wait until the controller is ready to start accepting SIS
5806 * commands.
5807 */
5808 rc = sis_wait_for_ctrl_ready(ctrl_info);
8845fdfa 5809 if (rc)
6c223761 5810 return rc;
6c223761
KB
5811
5812 /*
5813 * Get the controller properties. This allows us to determine
5814 * whether or not it supports PQI mode.
5815 */
5816 rc = sis_get_ctrl_properties(ctrl_info);
5817 if (rc) {
5818 dev_err(&ctrl_info->pci_dev->dev,
5819 "error obtaining controller properties\n");
5820 return rc;
5821 }
5822
5823 rc = sis_get_pqi_capabilities(ctrl_info);
5824 if (rc) {
5825 dev_err(&ctrl_info->pci_dev->dev,
5826 "error obtaining controller capabilities\n");
5827 return rc;
5828 }
5829
5830 if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
5831 ctrl_info->max_outstanding_requests =
5832 PQI_MAX_OUTSTANDING_REQUESTS;
5833
5834 pqi_calculate_io_resources(ctrl_info);
5835
5836 rc = pqi_alloc_error_buffer(ctrl_info);
5837 if (rc) {
5838 dev_err(&ctrl_info->pci_dev->dev,
5839 "failed to allocate PQI error buffer\n");
5840 return rc;
5841 }
5842
5843 /*
5844 * If the function we are about to call succeeds, the
5845 * controller will transition from legacy SIS mode
5846 * into PQI mode.
5847 */
5848 rc = sis_init_base_struct_addr(ctrl_info);
5849 if (rc) {
5850 dev_err(&ctrl_info->pci_dev->dev,
5851 "error initializing PQI mode\n");
5852 return rc;
5853 }
5854
5855 /* Wait for the controller to complete the SIS -> PQI transition. */
5856 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5857 if (rc) {
5858 dev_err(&ctrl_info->pci_dev->dev,
5859 "transition to PQI mode failed\n");
5860 return rc;
5861 }
5862
5863 /* From here on, we are running in PQI mode. */
5864 ctrl_info->pqi_mode_enabled = true;
ff6abb73 5865 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6c223761 5866
98f87667
KB
5867 rc = pqi_process_config_table(ctrl_info);
5868 if (rc)
5869 return rc;
5870
6c223761
KB
5871 rc = pqi_alloc_admin_queues(ctrl_info);
5872 if (rc) {
5873 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 5874 "failed to allocate admin queues\n");
6c223761
KB
5875 return rc;
5876 }
5877
5878 rc = pqi_create_admin_queues(ctrl_info);
5879 if (rc) {
5880 dev_err(&ctrl_info->pci_dev->dev,
5881 "error creating admin queues\n");
5882 return rc;
5883 }
5884
5885 rc = pqi_report_device_capability(ctrl_info);
5886 if (rc) {
5887 dev_err(&ctrl_info->pci_dev->dev,
5888 "obtaining device capability failed\n");
5889 return rc;
5890 }
5891
5892 rc = pqi_validate_device_capability(ctrl_info);
5893 if (rc)
5894 return rc;
5895
5896 pqi_calculate_queue_resources(ctrl_info);
5897
5898 rc = pqi_enable_msix_interrupts(ctrl_info);
5899 if (rc)
5900 return rc;
5901
5902 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
5903 ctrl_info->max_msix_vectors =
5904 ctrl_info->num_msix_vectors_enabled;
5905 pqi_calculate_queue_resources(ctrl_info);
5906 }
5907
5908 rc = pqi_alloc_io_resources(ctrl_info);
5909 if (rc)
5910 return rc;
5911
5912 rc = pqi_alloc_operational_queues(ctrl_info);
d87d5474
KB
5913 if (rc) {
5914 dev_err(&ctrl_info->pci_dev->dev,
5915 "failed to allocate operational queues\n");
6c223761 5916 return rc;
d87d5474 5917 }
6c223761
KB
5918
5919 pqi_init_operational_queues(ctrl_info);
5920
5921 rc = pqi_request_irqs(ctrl_info);
5922 if (rc)
5923 return rc;
5924
6c223761
KB
5925 rc = pqi_create_queues(ctrl_info);
5926 if (rc)
5927 return rc;
5928
061ef06a
KB
5929 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
5930
5931 ctrl_info->controller_online = true;
5932 pqi_start_heartbeat_timer(ctrl_info);
6c223761 5933
6a50d6ad 5934 rc = pqi_enable_events(ctrl_info);
6c223761
KB
5935 if (rc) {
5936 dev_err(&ctrl_info->pci_dev->dev,
6a50d6ad 5937 "error enabling events\n");
6c223761
KB
5938 return rc;
5939 }
5940
6c223761
KB
5941 /* Register with the SCSI subsystem. */
5942 rc = pqi_register_scsi(ctrl_info);
5943 if (rc)
5944 return rc;
5945
5946 rc = pqi_get_ctrl_firmware_version(ctrl_info);
5947 if (rc) {
5948 dev_err(&ctrl_info->pci_dev->dev,
5949 "error obtaining firmware version\n");
5950 return rc;
5951 }
5952
5953 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5954 if (rc) {
5955 dev_err(&ctrl_info->pci_dev->dev,
5956 "error updating host wellness\n");
5957 return rc;
5958 }
5959
5960 pqi_schedule_update_time_worker(ctrl_info);
5961
5962 pqi_scan_scsi_devices(ctrl_info);
5963
5964 return 0;
5965}
5966
061ef06a
KB
5967#if defined(CONFIG_PM)
5968
5969static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
5970{
5971 unsigned int i;
5972 struct pqi_admin_queues *admin_queues;
5973 struct pqi_event_queue *event_queue;
5974
5975 admin_queues = &ctrl_info->admin_queues;
5976 admin_queues->iq_pi_copy = 0;
5977 admin_queues->oq_ci_copy = 0;
5978 *admin_queues->oq_pi = 0;
5979
5980 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5981 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
5982 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
5983 ctrl_info->queue_groups[i].oq_ci_copy = 0;
5984
5985 *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0;
5986 *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0;
5987 *ctrl_info->queue_groups[i].oq_pi = 0;
5988 }
5989
5990 event_queue = &ctrl_info->event_queue;
5991 *event_queue->oq_pi = 0;
5992 event_queue->oq_ci_copy = 0;
5993}
5994
5995static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
5996{
5997 int rc;
5998
5999 rc = pqi_force_sis_mode(ctrl_info);
6000 if (rc)
6001 return rc;
6002
6003 /*
6004 * Wait until the controller is ready to start accepting SIS
6005 * commands.
6006 */
6007 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
6008 if (rc)
6009 return rc;
6010
6011 /*
6012 * If the function we are about to call succeeds, the
6013 * controller will transition from legacy SIS mode
6014 * into PQI mode.
6015 */
6016 rc = sis_init_base_struct_addr(ctrl_info);
6017 if (rc) {
6018 dev_err(&ctrl_info->pci_dev->dev,
6019 "error initializing PQI mode\n");
6020 return rc;
6021 }
6022
6023 /* Wait for the controller to complete the SIS -> PQI transition. */
6024 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6025 if (rc) {
6026 dev_err(&ctrl_info->pci_dev->dev,
6027 "transition to PQI mode failed\n");
6028 return rc;
6029 }
6030
6031 /* From here on, we are running in PQI mode. */
6032 ctrl_info->pqi_mode_enabled = true;
6033 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6034
6035 pqi_reinit_queues(ctrl_info);
6036
6037 rc = pqi_create_admin_queues(ctrl_info);
6038 if (rc) {
6039 dev_err(&ctrl_info->pci_dev->dev,
6040 "error creating admin queues\n");
6041 return rc;
6042 }
6043
6044 rc = pqi_create_queues(ctrl_info);
6045 if (rc)
6046 return rc;
6047
6048 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6049
6050 ctrl_info->controller_online = true;
6051 pqi_start_heartbeat_timer(ctrl_info);
6052 pqi_ctrl_unblock_requests(ctrl_info);
6053
6054 rc = pqi_enable_events(ctrl_info);
6055 if (rc) {
6056 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 6057 "error enabling events\n");
061ef06a
KB
6058 return rc;
6059 }
6060
6061 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6062 if (rc) {
6063 dev_err(&ctrl_info->pci_dev->dev,
6064 "error updating host wellness\n");
6065 return rc;
6066 }
6067
6068 pqi_schedule_update_time_worker(ctrl_info);
6069
6070 pqi_scan_scsi_devices(ctrl_info);
6071
6072 return 0;
6073}
6074
6075#endif /* CONFIG_PM */
6076
a81ed5f3
KB
6077static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
6078 u16 timeout)
6079{
6080 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
6081 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
6082}
6083
6c223761
KB
6084static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
6085{
6086 int rc;
6087 u64 mask;
6088
6089 rc = pci_enable_device(ctrl_info->pci_dev);
6090 if (rc) {
6091 dev_err(&ctrl_info->pci_dev->dev,
6092 "failed to enable PCI device\n");
6093 return rc;
6094 }
6095
6096 if (sizeof(dma_addr_t) > 4)
6097 mask = DMA_BIT_MASK(64);
6098 else
6099 mask = DMA_BIT_MASK(32);
6100
6101 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
6102 if (rc) {
6103 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
6104 goto disable_device;
6105 }
6106
6107 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
6108 if (rc) {
6109 dev_err(&ctrl_info->pci_dev->dev,
6110 "failed to obtain PCI resources\n");
6111 goto disable_device;
6112 }
6113
6114 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
6115 ctrl_info->pci_dev, 0),
6116 sizeof(struct pqi_ctrl_registers));
6117 if (!ctrl_info->iomem_base) {
6118 dev_err(&ctrl_info->pci_dev->dev,
6119 "failed to map memory for controller registers\n");
6120 rc = -ENOMEM;
6121 goto release_regions;
6122 }
6123
a81ed5f3
KB
6124#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
6125
6126 /* Increase the PCIe completion timeout. */
6127 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
6128 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
6129 if (rc) {
6130 dev_err(&ctrl_info->pci_dev->dev,
6131 "failed to set PCIe completion timeout\n");
6132 goto release_regions;
6133 }
6134
6c223761
KB
6135 /* Enable bus mastering. */
6136 pci_set_master(ctrl_info->pci_dev);
6137
cbe0c7b1
KB
6138 ctrl_info->registers = ctrl_info->iomem_base;
6139 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
6140
6c223761
KB
6141 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
6142
6143 return 0;
6144
6145release_regions:
6146 pci_release_regions(ctrl_info->pci_dev);
6147disable_device:
6148 pci_disable_device(ctrl_info->pci_dev);
6149
6150 return rc;
6151}
6152
6153static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
6154{
6155 iounmap(ctrl_info->iomem_base);
6156 pci_release_regions(ctrl_info->pci_dev);
cbe0c7b1
KB
6157 if (pci_is_enabled(ctrl_info->pci_dev))
6158 pci_disable_device(ctrl_info->pci_dev);
6c223761
KB
6159 pci_set_drvdata(ctrl_info->pci_dev, NULL);
6160}
6161
6162static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
6163{
6164 struct pqi_ctrl_info *ctrl_info;
6165
6166 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
6167 GFP_KERNEL, numa_node);
6168 if (!ctrl_info)
6169 return NULL;
6170
6171 mutex_init(&ctrl_info->scan_mutex);
7561a7e4 6172 mutex_init(&ctrl_info->lun_reset_mutex);
6c223761
KB
6173
6174 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
6175 spin_lock_init(&ctrl_info->scsi_device_list_lock);
6176
6177 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
6178 atomic_set(&ctrl_info->num_interrupts, 0);
6179
6180 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
6181 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
6182
98f87667
KB
6183 init_timer(&ctrl_info->heartbeat_timer);
6184
6c223761
KB
6185 sema_init(&ctrl_info->sync_request_sem,
6186 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
7561a7e4 6187 init_waitqueue_head(&ctrl_info->block_requests_wait);
6c223761
KB
6188
6189 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
061ef06a 6190 ctrl_info->irq_mode = IRQ_MODE_NONE;
6c223761
KB
6191 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
6192
6193 return ctrl_info;
6194}
6195
6196static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
6197{
6198 kfree(ctrl_info);
6199}
6200
6201static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
6202{
98bf061b
KB
6203 pqi_free_irqs(ctrl_info);
6204 pqi_disable_msix_interrupts(ctrl_info);
6c223761
KB
6205}
6206
6207static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
6208{
6209 pqi_stop_heartbeat_timer(ctrl_info);
6210 pqi_free_interrupts(ctrl_info);
6211 if (ctrl_info->queue_memory_base)
6212 dma_free_coherent(&ctrl_info->pci_dev->dev,
6213 ctrl_info->queue_memory_length,
6214 ctrl_info->queue_memory_base,
6215 ctrl_info->queue_memory_base_dma_handle);
6216 if (ctrl_info->admin_queue_memory_base)
6217 dma_free_coherent(&ctrl_info->pci_dev->dev,
6218 ctrl_info->admin_queue_memory_length,
6219 ctrl_info->admin_queue_memory_base,
6220 ctrl_info->admin_queue_memory_base_dma_handle);
6221 pqi_free_all_io_requests(ctrl_info);
6222 if (ctrl_info->error_buffer)
6223 dma_free_coherent(&ctrl_info->pci_dev->dev,
6224 ctrl_info->error_buffer_length,
6225 ctrl_info->error_buffer,
6226 ctrl_info->error_buffer_dma_handle);
6227 if (ctrl_info->iomem_base)
6228 pqi_cleanup_pci_init(ctrl_info);
6229 pqi_free_ctrl_info(ctrl_info);
6230}
6231
6232static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
6233{
061ef06a
KB
6234 pqi_cancel_rescan_worker(ctrl_info);
6235 pqi_cancel_update_time_worker(ctrl_info);
e57a1f9b
KB
6236 pqi_remove_all_scsi_devices(ctrl_info);
6237 pqi_unregister_scsi(ctrl_info);
162d7753
KB
6238 if (ctrl_info->pqi_mode_enabled)
6239 pqi_revert_to_sis_mode(ctrl_info);
6c223761
KB
6240 pqi_free_ctrl_resources(ctrl_info);
6241}
6242
d91d7820 6243static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
6c223761
KB
6244 const struct pci_device_id *id)
6245{
6246 char *ctrl_description;
6247
6248 if (id->driver_data) {
6249 ctrl_description = (char *)id->driver_data;
6250 } else {
6251 switch (id->subvendor) {
6252 case PCI_VENDOR_ID_HP:
6253 ctrl_description = hpe_branded_controller;
6254 break;
6255 case PCI_VENDOR_ID_ADAPTEC2:
6256 default:
6257 ctrl_description = microsemi_branded_controller;
6258 break;
6259 }
6260 }
6261
d91d7820 6262 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
6c223761
KB
6263}
6264
d91d7820
KB
6265static int pqi_pci_probe(struct pci_dev *pci_dev,
6266 const struct pci_device_id *id)
6c223761
KB
6267{
6268 int rc;
6269 int node;
6270 struct pqi_ctrl_info *ctrl_info;
6271
d91d7820 6272 pqi_print_ctrl_info(pci_dev, id);
6c223761
KB
6273
6274 if (pqi_disable_device_id_wildcards &&
6275 id->subvendor == PCI_ANY_ID &&
6276 id->subdevice == PCI_ANY_ID) {
d91d7820 6277 dev_warn(&pci_dev->dev,
6c223761
KB
6278 "controller not probed because device ID wildcards are disabled\n");
6279 return -ENODEV;
6280 }
6281
6282 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
d91d7820 6283 dev_warn(&pci_dev->dev,
6c223761
KB
6284 "controller device ID matched using wildcards\n");
6285
d91d7820 6286 node = dev_to_node(&pci_dev->dev);
6c223761 6287 if (node == NUMA_NO_NODE)
d91d7820 6288 set_dev_node(&pci_dev->dev, 0);
6c223761
KB
6289
6290 ctrl_info = pqi_alloc_ctrl_info(node);
6291 if (!ctrl_info) {
d91d7820 6292 dev_err(&pci_dev->dev,
6c223761
KB
6293 "failed to allocate controller info block\n");
6294 return -ENOMEM;
6295 }
6296
d91d7820 6297 ctrl_info->pci_dev = pci_dev;
6c223761
KB
6298
6299 rc = pqi_pci_init(ctrl_info);
6300 if (rc)
6301 goto error;
6302
6303 rc = pqi_ctrl_init(ctrl_info);
6304 if (rc)
6305 goto error;
6306
6307 return 0;
6308
6309error:
6310 pqi_remove_ctrl(ctrl_info);
6311
6312 return rc;
6313}
6314
d91d7820 6315static void pqi_pci_remove(struct pci_dev *pci_dev)
6c223761
KB
6316{
6317 struct pqi_ctrl_info *ctrl_info;
6318
d91d7820 6319 ctrl_info = pci_get_drvdata(pci_dev);
6c223761
KB
6320 if (!ctrl_info)
6321 return;
6322
6323 pqi_remove_ctrl(ctrl_info);
6324}
6325
d91d7820 6326static void pqi_shutdown(struct pci_dev *pci_dev)
6c223761
KB
6327{
6328 int rc;
6329 struct pqi_ctrl_info *ctrl_info;
6330
d91d7820 6331 ctrl_info = pci_get_drvdata(pci_dev);
6c223761
KB
6332 if (!ctrl_info)
6333 goto error;
6334
6335 /*
6336 * Write all data in the controller's battery-backed cache to
6337 * storage.
6338 */
6339 rc = pqi_flush_cache(ctrl_info);
6340 if (rc == 0)
6341 return;
6342
6343error:
d91d7820 6344 dev_warn(&pci_dev->dev,
6c223761
KB
6345 "unable to flush controller cache\n");
6346}
6347
061ef06a
KB
6348#if defined(CONFIG_PM)
6349
6350static int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
6351{
6352 struct pqi_ctrl_info *ctrl_info;
6353
6354 ctrl_info = pci_get_drvdata(pci_dev);
6355
6356 pqi_disable_events(ctrl_info);
6357 pqi_cancel_update_time_worker(ctrl_info);
6358 pqi_cancel_rescan_worker(ctrl_info);
6359 pqi_wait_until_scan_finished(ctrl_info);
6360 pqi_wait_until_lun_reset_finished(ctrl_info);
6361 pqi_flush_cache(ctrl_info);
6362 pqi_ctrl_block_requests(ctrl_info);
6363 pqi_ctrl_wait_until_quiesced(ctrl_info);
6364 pqi_wait_until_inbound_queues_empty(ctrl_info);
6365 pqi_ctrl_wait_for_pending_io(ctrl_info);
6366 pqi_stop_heartbeat_timer(ctrl_info);
6367
6368 if (state.event == PM_EVENT_FREEZE)
6369 return 0;
6370
6371 pci_save_state(pci_dev);
6372 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
6373
6374 ctrl_info->controller_online = false;
6375 ctrl_info->pqi_mode_enabled = false;
6376
6377 return 0;
6378}
6379
6380static int pqi_resume(struct pci_dev *pci_dev)
6381{
6382 int rc;
6383 struct pqi_ctrl_info *ctrl_info;
6384
6385 ctrl_info = pci_get_drvdata(pci_dev);
6386
6387 if (pci_dev->current_state != PCI_D0) {
6388 ctrl_info->max_hw_queue_index = 0;
6389 pqi_free_interrupts(ctrl_info);
6390 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
6391 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
6392 IRQF_SHARED, DRIVER_NAME_SHORT,
6393 &ctrl_info->queue_groups[0]);
6394 if (rc) {
6395 dev_err(&ctrl_info->pci_dev->dev,
6396 "irq %u init failed with error %d\n",
6397 pci_dev->irq, rc);
6398 return rc;
6399 }
6400 pqi_start_heartbeat_timer(ctrl_info);
6401 pqi_ctrl_unblock_requests(ctrl_info);
6402 return 0;
6403 }
6404
6405 pci_set_power_state(pci_dev, PCI_D0);
6406 pci_restore_state(pci_dev);
6407
6408 return pqi_ctrl_init_resume(ctrl_info);
6409}
6410
6411#endif /* CONFIG_PM */
6412
6c223761
KB
6413/* Define the PCI IDs for the controllers that we support. */
6414static const struct pci_device_id pqi_pci_id_table[] = {
7eddabff
KB
6415 {
6416 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6417 0x152d, 0x8a22)
6418 },
6419 {
6420 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6421 0x152d, 0x8a23)
6422 },
6423 {
6424 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6425 0x152d, 0x8a24)
6426 },
6427 {
6428 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6429 0x152d, 0x8a36)
6430 },
6431 {
6432 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6433 0x152d, 0x8a37)
6434 },
6c223761
KB
6435 {
6436 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6437 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
6438 },
6439 {
6440 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6441 PCI_VENDOR_ID_ADAPTEC2, 0x0605)
6c223761
KB
6442 },
6443 {
6444 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6445 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
6c223761
KB
6446 },
6447 {
6448 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6449 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
6c223761
KB
6450 },
6451 {
6452 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6453 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
6c223761
KB
6454 },
6455 {
6456 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6457 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
6c223761
KB
6458 },
6459 {
6460 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6461 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
6c223761
KB
6462 },
6463 {
6464 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6465 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
6c223761
KB
6466 },
6467 {
6468 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6469 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
6c223761
KB
6470 },
6471 {
6472 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6473 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
6c223761
KB
6474 },
6475 {
6476 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6477 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
6c223761
KB
6478 },
6479 {
6480 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6481 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
6c223761
KB
6482 },
6483 {
6484 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6485 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
6c223761
KB
6486 },
6487 {
6488 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6489 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
6c223761
KB
6490 },
6491 {
6492 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6493 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
6c223761
KB
6494 },
6495 {
6496 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6497 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
6c223761
KB
6498 },
6499 {
6500 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6501 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
6c223761
KB
6502 },
6503 {
6504 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6505 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
6c223761
KB
6506 },
6507 {
6508 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6509 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
6c223761
KB
6510 },
6511 {
6512 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6513 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
6c223761
KB
6514 },
6515 {
6516 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6517 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
6c223761
KB
6518 },
6519 {
6520 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6521 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
6c223761
KB
6522 },
6523 {
6524 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6525 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
6c223761
KB
6526 },
6527 {
6528 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6529 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
6c223761
KB
6530 },
6531 {
6532 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6533 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
6c223761
KB
6534 },
6535 {
6536 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff
KB
6537 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
6538 },
6539 {
6540 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6541 PCI_VENDOR_ID_HP, 0x0600)
6542 },
6543 {
6544 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6545 PCI_VENDOR_ID_HP, 0x0601)
6546 },
6547 {
6548 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6549 PCI_VENDOR_ID_HP, 0x0602)
6550 },
6551 {
6552 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6553 PCI_VENDOR_ID_HP, 0x0603)
6554 },
6555 {
6556 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6557 PCI_VENDOR_ID_HP, 0x0604)
6558 },
6559 {
6560 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6561 PCI_VENDOR_ID_HP, 0x0606)
6562 },
6563 {
6564 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6565 PCI_VENDOR_ID_HP, 0x0650)
6566 },
6567 {
6568 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6569 PCI_VENDOR_ID_HP, 0x0651)
6570 },
6571 {
6572 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6573 PCI_VENDOR_ID_HP, 0x0652)
6574 },
6575 {
6576 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6577 PCI_VENDOR_ID_HP, 0x0653)
6578 },
6579 {
6580 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6581 PCI_VENDOR_ID_HP, 0x0654)
6582 },
6583 {
6584 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6585 PCI_VENDOR_ID_HP, 0x0655)
6586 },
6587 {
6588 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6589 PCI_VENDOR_ID_HP, 0x0656)
6590 },
6591 {
6592 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6593 PCI_VENDOR_ID_HP, 0x0657)
6594 },
6595 {
6596 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6597 PCI_VENDOR_ID_HP, 0x0700)
6598 },
6599 {
6600 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6601 PCI_VENDOR_ID_HP, 0x0701)
6c223761
KB
6602 },
6603 {
6604 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6605 PCI_VENDOR_ID_HP, 0x1001)
6606 },
6607 {
6608 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6609 PCI_VENDOR_ID_HP, 0x1100)
6610 },
6611 {
6612 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6613 PCI_VENDOR_ID_HP, 0x1101)
6614 },
6615 {
6616 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6617 PCI_VENDOR_ID_HP, 0x1102)
6618 },
6619 {
6620 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6621 PCI_VENDOR_ID_HP, 0x1150)
6622 },
6623 {
6624 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6625 PCI_ANY_ID, PCI_ANY_ID)
6626 },
6627 { 0 }
6628};
6629
6630MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
6631
6632static struct pci_driver pqi_pci_driver = {
6633 .name = DRIVER_NAME_SHORT,
6634 .id_table = pqi_pci_id_table,
6635 .probe = pqi_pci_probe,
6636 .remove = pqi_pci_remove,
6637 .shutdown = pqi_shutdown,
061ef06a
KB
6638#if defined(CONFIG_PM)
6639 .suspend = pqi_suspend,
6640 .resume = pqi_resume,
6641#endif
6c223761
KB
6642};
6643
6644static int __init pqi_init(void)
6645{
6646 int rc;
6647
6648 pr_info(DRIVER_NAME "\n");
6649
6650 pqi_sas_transport_template =
6651 sas_attach_transport(&pqi_sas_transport_functions);
6652 if (!pqi_sas_transport_template)
6653 return -ENODEV;
6654
6655 rc = pci_register_driver(&pqi_pci_driver);
6656 if (rc)
6657 sas_release_transport(pqi_sas_transport_template);
6658
6659 return rc;
6660}
6661
6662static void __exit pqi_cleanup(void)
6663{
6664 pci_unregister_driver(&pqi_pci_driver);
6665 sas_release_transport(pqi_sas_transport_template);
6666}
6667
6668module_init(pqi_init);
6669module_exit(pqi_cleanup);
6670
6671static void __attribute__((unused)) verify_structures(void)
6672{
6673 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6674 sis_host_to_ctrl_doorbell) != 0x20);
6675 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6676 sis_interrupt_mask) != 0x34);
6677 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6678 sis_ctrl_to_host_doorbell) != 0x9c);
6679 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6680 sis_ctrl_to_host_doorbell_clear) != 0xa0);
ff6abb73
KB
6681 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6682 sis_driver_scratch) != 0xb0);
6c223761
KB
6683 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6684 sis_firmware_status) != 0xbc);
6685 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6686 sis_mailbox) != 0x1000);
6687 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6688 pqi_registers) != 0x4000);
6689
6690 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6691 iu_type) != 0x0);
6692 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6693 iu_length) != 0x2);
6694 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6695 response_queue_id) != 0x4);
6696 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6697 work_area) != 0x6);
6698 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
6699
6700 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6701 status) != 0x0);
6702 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6703 service_response) != 0x1);
6704 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6705 data_present) != 0x2);
6706 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6707 reserved) != 0x3);
6708 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6709 residual_count) != 0x4);
6710 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6711 data_length) != 0x8);
6712 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6713 reserved1) != 0xa);
6714 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6715 data) != 0xc);
6716 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
6717
6718 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6719 data_in_result) != 0x0);
6720 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6721 data_out_result) != 0x1);
6722 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6723 reserved) != 0x2);
6724 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6725 status) != 0x5);
6726 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6727 status_qualifier) != 0x6);
6728 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6729 sense_data_length) != 0x8);
6730 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6731 response_data_length) != 0xa);
6732 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6733 data_in_transferred) != 0xc);
6734 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6735 data_out_transferred) != 0x10);
6736 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6737 data) != 0x14);
6738 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
6739
6740 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6741 signature) != 0x0);
6742 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6743 function_and_status_code) != 0x8);
6744 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6745 max_admin_iq_elements) != 0x10);
6746 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6747 max_admin_oq_elements) != 0x11);
6748 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6749 admin_iq_element_length) != 0x12);
6750 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6751 admin_oq_element_length) != 0x13);
6752 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6753 max_reset_timeout) != 0x14);
6754 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6755 legacy_intx_status) != 0x18);
6756 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6757 legacy_intx_mask_set) != 0x1c);
6758 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6759 legacy_intx_mask_clear) != 0x20);
6760 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6761 device_status) != 0x40);
6762 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6763 admin_iq_pi_offset) != 0x48);
6764 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6765 admin_oq_ci_offset) != 0x50);
6766 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6767 admin_iq_element_array_addr) != 0x58);
6768 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6769 admin_oq_element_array_addr) != 0x60);
6770 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6771 admin_iq_ci_addr) != 0x68);
6772 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6773 admin_oq_pi_addr) != 0x70);
6774 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6775 admin_iq_num_elements) != 0x78);
6776 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6777 admin_oq_num_elements) != 0x79);
6778 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6779 admin_queue_int_msg_num) != 0x7a);
6780 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6781 device_error) != 0x80);
6782 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6783 error_details) != 0x88);
6784 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6785 device_reset) != 0x90);
6786 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6787 power_action) != 0x94);
6788 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
6789
6790 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6791 header.iu_type) != 0);
6792 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6793 header.iu_length) != 2);
6794 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6795 header.work_area) != 6);
6796 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6797 request_id) != 8);
6798 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6799 function_code) != 10);
6800 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6801 data.report_device_capability.buffer_length) != 44);
6802 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6803 data.report_device_capability.sg_descriptor) != 48);
6804 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6805 data.create_operational_iq.queue_id) != 12);
6806 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6807 data.create_operational_iq.element_array_addr) != 16);
6808 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6809 data.create_operational_iq.ci_addr) != 24);
6810 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6811 data.create_operational_iq.num_elements) != 32);
6812 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6813 data.create_operational_iq.element_length) != 34);
6814 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6815 data.create_operational_iq.queue_protocol) != 36);
6816 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6817 data.create_operational_oq.queue_id) != 12);
6818 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6819 data.create_operational_oq.element_array_addr) != 16);
6820 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6821 data.create_operational_oq.pi_addr) != 24);
6822 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6823 data.create_operational_oq.num_elements) != 32);
6824 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6825 data.create_operational_oq.element_length) != 34);
6826 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6827 data.create_operational_oq.queue_protocol) != 36);
6828 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6829 data.create_operational_oq.int_msg_num) != 40);
6830 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6831 data.create_operational_oq.coalescing_count) != 42);
6832 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6833 data.create_operational_oq.min_coalescing_time) != 44);
6834 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6835 data.create_operational_oq.max_coalescing_time) != 48);
6836 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6837 data.delete_operational_queue.queue_id) != 12);
6838 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
6839 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6840 data.create_operational_iq) != 64 - 11);
6841 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6842 data.create_operational_oq) != 64 - 11);
6843 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6844 data.delete_operational_queue) != 64 - 11);
6845
6846 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6847 header.iu_type) != 0);
6848 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6849 header.iu_length) != 2);
6850 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6851 header.work_area) != 6);
6852 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6853 request_id) != 8);
6854 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6855 function_code) != 10);
6856 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6857 status) != 11);
6858 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6859 data.create_operational_iq.status_descriptor) != 12);
6860 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6861 data.create_operational_iq.iq_pi_offset) != 16);
6862 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6863 data.create_operational_oq.status_descriptor) != 12);
6864 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6865 data.create_operational_oq.oq_ci_offset) != 16);
6866 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
6867
6868 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6869 header.iu_type) != 0);
6870 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6871 header.iu_length) != 2);
6872 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6873 header.response_queue_id) != 4);
6874 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6875 header.work_area) != 6);
6876 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6877 request_id) != 8);
6878 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6879 nexus_id) != 10);
6880 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6881 buffer_length) != 12);
6882 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6883 lun_number) != 16);
6884 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6885 protocol_specific) != 24);
6886 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6887 error_index) != 27);
6888 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6889 cdb) != 32);
6890 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6891 sg_descriptors) != 64);
6892 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
6893 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6894
6895 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6896 header.iu_type) != 0);
6897 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6898 header.iu_length) != 2);
6899 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6900 header.response_queue_id) != 4);
6901 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6902 header.work_area) != 6);
6903 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6904 request_id) != 8);
6905 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6906 nexus_id) != 12);
6907 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6908 buffer_length) != 16);
6909 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6910 data_encryption_key_index) != 22);
6911 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6912 encrypt_tweak_lower) != 24);
6913 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6914 encrypt_tweak_upper) != 28);
6915 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6916 cdb) != 32);
6917 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6918 error_index) != 48);
6919 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6920 num_sg_descriptors) != 50);
6921 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6922 cdb_length) != 51);
6923 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6924 lun_number) != 52);
6925 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6926 sg_descriptors) != 64);
6927 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
6928 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6929
6930 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6931 header.iu_type) != 0);
6932 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6933 header.iu_length) != 2);
6934 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6935 request_id) != 8);
6936 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6937 error_index) != 10);
6938
6939 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6940 header.iu_type) != 0);
6941 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6942 header.iu_length) != 2);
6943 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6944 header.response_queue_id) != 4);
6945 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6946 request_id) != 8);
6947 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6948 data.report_event_configuration.buffer_length) != 12);
6949 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6950 data.report_event_configuration.sg_descriptors) != 16);
6951 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6952 data.set_event_configuration.global_event_oq_id) != 10);
6953 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6954 data.set_event_configuration.buffer_length) != 12);
6955 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6956 data.set_event_configuration.sg_descriptors) != 16);
6957
6958 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6959 max_inbound_iu_length) != 6);
6960 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6961 max_outbound_iu_length) != 14);
6962 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
6963
6964 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6965 data_length) != 0);
6966 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6967 iq_arbitration_priority_support_bitmask) != 8);
6968 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6969 maximum_aw_a) != 9);
6970 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6971 maximum_aw_b) != 10);
6972 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6973 maximum_aw_c) != 11);
6974 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6975 max_inbound_queues) != 16);
6976 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6977 max_elements_per_iq) != 18);
6978 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6979 max_iq_element_length) != 24);
6980 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6981 min_iq_element_length) != 26);
6982 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6983 max_outbound_queues) != 30);
6984 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6985 max_elements_per_oq) != 32);
6986 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6987 intr_coalescing_time_granularity) != 34);
6988 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6989 max_oq_element_length) != 36);
6990 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6991 min_oq_element_length) != 38);
6992 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6993 iu_layer_descriptors) != 64);
6994 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
6995
6996 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6997 event_type) != 0);
6998 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6999 oq_id) != 2);
7000 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
7001
7002 BUILD_BUG_ON(offsetof(struct pqi_event_config,
7003 num_event_descriptors) != 2);
7004 BUILD_BUG_ON(offsetof(struct pqi_event_config,
7005 descriptors) != 4);
7006
061ef06a
KB
7007 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
7008 ARRAY_SIZE(pqi_supported_event_types));
7009
6c223761
KB
7010 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7011 header.iu_type) != 0);
7012 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7013 header.iu_length) != 2);
7014 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7015 event_type) != 8);
7016 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7017 event_id) != 10);
7018 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7019 additional_event_id) != 12);
7020 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7021 data) != 16);
7022 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
7023
7024 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7025 header.iu_type) != 0);
7026 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7027 header.iu_length) != 2);
7028 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7029 event_type) != 8);
7030 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7031 event_id) != 10);
7032 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7033 additional_event_id) != 12);
7034 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
7035
7036 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7037 header.iu_type) != 0);
7038 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7039 header.iu_length) != 2);
7040 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7041 request_id) != 8);
7042 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7043 nexus_id) != 10);
7044 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7045 lun_number) != 16);
7046 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7047 protocol_specific) != 24);
7048 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7049 outbound_queue_id_to_manage) != 26);
7050 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7051 request_id_to_manage) != 28);
7052 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7053 task_management_function) != 30);
7054 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
7055
7056 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7057 header.iu_type) != 0);
7058 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7059 header.iu_length) != 2);
7060 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7061 request_id) != 8);
7062 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7063 nexus_id) != 10);
7064 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7065 additional_response_info) != 12);
7066 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7067 response_code) != 15);
7068 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
7069
7070 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7071 configured_logical_drive_count) != 0);
7072 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7073 configuration_signature) != 1);
7074 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7075 firmware_version) != 5);
7076 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7077 extended_logical_unit_count) != 154);
7078 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7079 firmware_build_number) != 190);
7080 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7081 controller_mode) != 292);
7082
1be42f46
KB
7083 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7084 phys_bay_in_box) != 115);
7085 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7086 device_type) != 120);
7087 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7088 redundant_path_present_map) != 1736);
7089 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7090 active_path_number) != 1738);
7091 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7092 alternate_paths_phys_connector) != 1739);
7093 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7094 alternate_paths_phys_box_on_port) != 1755);
7095 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7096 current_queue_depth_limit) != 1796);
7097 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
7098
6c223761
KB
7099 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
7100 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
7101 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
7102 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7103 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
7104 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7105 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
7106 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
7107 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7108 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
7109 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
7110 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7111
7112 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
7113}