drivers/scsi/smartpqi/smartpqi_init.c
/*
 * driver for Microsemi PQI-based storage controllers
 * Copyright (c) 2016-2017 Microsemi Corporation
 * Copyright (c) 2016 PMC-Sierra, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.1.4-130"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		4
#define DRIVER_REVISION		130

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
	void *hostdata = shost_priv(shost);

	return *((struct pqi_ctrl_info **)hostdata);
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

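/*
 * Wait for the controller to stop blocking new requests.  Returns the
 * number of milliseconds left from the caller's timeout; with NO_TIMEOUT
 * the wait is unbounded and the original value is returned unchanged.
 */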
static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, int data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
		return 0;

	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
		data_direction);
	if (pci_dma_mapping_error(pci_dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	int data_direction)
{
	int i;

	if (data_direction == PCI_DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		pci_unmap_single(pci_dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

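/*
 * Build a RAID path request IU for the given CISS/BMIC command and map the
 * caller's buffer for DMA.  The required DMA direction is derived from the
 * command and returned through *pci_direction so the caller can unmap later.
 */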
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, int *pci_direction)
{
	u8 *cdb;
	int pci_dir;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)buffer_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case SOP_WRITE_FLAG:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
		break;
	}

	*pci_direction = pci_dir;

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, pci_dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

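/*
 * Grab a free slot from the pre-allocated I/O request pool.  The refcount
 * of each slot arbitrates ownership, so the scan can start from a racily
 * read index without taking a lock.
 */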
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
		sizeof(*buffer), 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
		&pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer,
	size_t buffer_length)
{
	int rc;
	int pci_direction;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;
	struct bmic_flush_cache *flush_cache;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
		sizeof(*flush_cache), 0, &pci_direction);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

out:
	kfree(flush_cache);

	return rc;
}

static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

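/*
 * Issue a CISS REPORT PHYSICAL/LOGICAL LUNS command.  The list length is
 * queried first and the full request is retried if the list grew between
 * the two commands.
 */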
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;
	unsigned int num_phys_disks;
	unsigned int num_raid_map_entries;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (raid_map_size > sizeof(*raid_map)) {
		err_msg = "RAID map too large";
		goto bad_raid_map;
	}

	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
		(get_unaligned_le16(&raid_map->data_disks_per_row) +
		get_unaligned_le16(&raid_map->metadata_disks_per_row));
	num_raid_map_entries = num_phys_disks *
		get_unaligned_le16(&raid_map->row_cnt);

	if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
		err_msg = "invalid number of map entries in RAID map";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
		sizeof(*raid_map), 0, &pci_direction);
	if (rc)
		goto error;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	if (rc)
		goto error;

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS	4
#define RAID_BYPASS_CONFIGURED	0x1
#define RAID_BYPASS_ENABLED	0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0)
		device->raid_bypass_enabled = true;

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

#define PQI_INQUIRY_PAGE0_RETRIES	3

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	unsigned int retries;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	for (retries = 0;;) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
			buffer, 64);
		if (rc == 0)
			break;
		if (pqi_is_logical_device(device) ||
			rc != PQI_CMD_STATUS_ABORTED ||
			++retries > PQI_INQUIRY_PAGE0_RETRIES)
			goto out;
	}

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}

static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		if (device->bus == bus && device->target == target &&
			device->lun == lun)
			return device;

	return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find,
	struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
			device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx", device->sas_address);

	count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		scsi_device_type(device->devtype),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += snprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += snprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}

/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_to_mirror = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;
	existing_device->raid_bypass_configured =
		new_device->raid_bypass_configured;
	existing_device->raid_bypass_enabled =
		new_device->raid_bypass_enabled;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}

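/*
 * Reconcile the freshly scanned device list with the driver's internal
 * list.  Additions and removals are staged on local lists under the
 * spinlock, and the actual SCSI ML updates are performed after it is
 * dropped.
 */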
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	LIST_HEAD(add_list);
	LIST_HEAD(delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock.  That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list,
		delete_list_entry) {
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		} else {
			pqi_dev_info(ctrl_info, "removed", device);
		}
		if (device->sdev)
			pqi_remove_device(ctrl_info, device);
		list_del(&device->delete_list_entry);
		pqi_free_device(device);
	}

	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev && device->queue_depth !=
			device->advertised_queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(device->sdev,
				device->advertised_queue_depth);
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (!device->sdev) {
			pqi_dev_info(ctrl_info, "added", device);
			rc = pqi_add_device(ctrl_info, device);
			if (rc) {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				pqi_fixup_botched_add(ctrl_info, device);
			}
		}
	}
}

static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	bool is_supported = false;

	switch (device->devtype) {
	case TYPE_DISK:
	case TYPE_ZBC:
	case TYPE_TAPE:
	case TYPE_MEDIUM_CHANGER:
	case TYPE_ENCLOSURE:
		is_supported = true;
		break;
	case TYPE_RAID:
		/*
		 * Only support the HBA controller itself as a RAID
		 * controller.  If it's a RAID controller other than
		 * the HBA itself (an external RAID controller, for
		 * example), we don't support it.
		 */
		if (pqi_is_hba_lunid(device->scsi3addr))
			is_supported = true;
		break;
	}

	return is_supported;
}

static inline bool pqi_skip_device(u8 *scsi3addr)
{
	/* Ignore all masked devices. */
	if (MASKED_DEVICE(scsi3addr))
		return true;

	return false;
}

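/*
 * Rebuild the set of SCSI devices exposed by the controller: fetch the
 * physical and logical LUN lists, gather per-device details, then hand the
 * result to pqi_update_device_list() to apply the changes.
 */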
1775static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1776{
1777 int i;
1778 int rc;
8a994a04 1779 LIST_HEAD(new_device_list_head);
6c223761
KB
1780 struct report_phys_lun_extended *physdev_list = NULL;
1781 struct report_log_lun_extended *logdev_list = NULL;
1782 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1783 struct report_log_lun_extended_entry *log_lun_ext_entry;
1784 struct bmic_identify_physical_device *id_phys = NULL;
1785 u32 num_physicals;
1786 u32 num_logicals;
1787 struct pqi_scsi_dev **new_device_list = NULL;
1788 struct pqi_scsi_dev *device;
1789 struct pqi_scsi_dev *next;
1790 unsigned int num_new_devices;
1791 unsigned int num_valid_devices;
1792 bool is_physical_device;
1793 u8 *scsi3addr;
1794 static char *out_of_memory_msg =
6de783f6 1795 "failed to allocate memory, device discovery stopped";
6c223761 1796
6c223761
KB
1797 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1798 if (rc)
1799 goto out;
1800
1801 if (physdev_list)
1802 num_physicals =
1803 get_unaligned_be32(&physdev_list->header.list_length)
1804 / sizeof(physdev_list->lun_entries[0]);
1805 else
1806 num_physicals = 0;
1807
1808 if (logdev_list)
1809 num_logicals =
1810 get_unaligned_be32(&logdev_list->header.list_length)
1811 / sizeof(logdev_list->lun_entries[0]);
1812 else
1813 num_logicals = 0;
1814
1815 if (num_physicals) {
1816 /*
1817 * We need this buffer for calls to pqi_get_physical_disk_info()
1818 * below. We allocate it here instead of inside
1819 * pqi_get_physical_disk_info() because it's a fairly large
1820 * buffer.
1821 */
1822 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1823 if (!id_phys) {
1824 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1825 out_of_memory_msg);
1826 rc = -ENOMEM;
1827 goto out;
1828 }
1829 }
1830
1831 num_new_devices = num_physicals + num_logicals;
1832
6da2ec56
KC
1833 new_device_list = kmalloc_array(num_new_devices,
1834 sizeof(*new_device_list),
1835 GFP_KERNEL);
6c223761
KB
1836 if (!new_device_list) {
1837 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1838 rc = -ENOMEM;
1839 goto out;
1840 }
1841
1842 for (i = 0; i < num_new_devices; i++) {
1843 device = kzalloc(sizeof(*device), GFP_KERNEL);
1844 if (!device) {
1845 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1846 out_of_memory_msg);
1847 rc = -ENOMEM;
1848 goto out;
1849 }
1850 list_add_tail(&device->new_device_list_entry,
1851 &new_device_list_head);
1852 }
1853
1854 device = NULL;
1855 num_valid_devices = 0;
1856
1857 for (i = 0; i < num_new_devices; i++) {
1858
1859 if (i < num_physicals) {
1860 is_physical_device = true;
1861 phys_lun_ext_entry = &physdev_list->lun_entries[i];
1862 log_lun_ext_entry = NULL;
1863 scsi3addr = phys_lun_ext_entry->lunid;
1864 } else {
1865 is_physical_device = false;
1866 phys_lun_ext_entry = NULL;
1867 log_lun_ext_entry =
1868 &logdev_list->lun_entries[i - num_physicals];
1869 scsi3addr = log_lun_ext_entry->lunid;
1870 }
1871
94086f5b 1872 if (is_physical_device && pqi_skip_device(scsi3addr))
6c223761
KB
1873 continue;
1874
1875 if (device)
1876 device = list_next_entry(device, new_device_list_entry);
1877 else
1878 device = list_first_entry(&new_device_list_head,
1879 struct pqi_scsi_dev, new_device_list_entry);
1880
1881 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1882 device->is_physical_device = is_physical_device;
bd10cf0b
KB
1883 if (!is_physical_device)
1884 device->is_external_raid_device =
1885 pqi_is_external_raid_addr(scsi3addr);
6c223761
KB
1886
1887 /* Gather information about the device. */
1888 rc = pqi_get_device_info(ctrl_info, device);
1889 if (rc == -ENOMEM) {
1890 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1891 out_of_memory_msg);
1892 goto out;
1893 }
1894 if (rc) {
6de783f6
KB
1895 if (device->is_physical_device)
1896 dev_warn(&ctrl_info->pci_dev->dev,
1897 "obtaining device info failed, skipping physical device %016llx\n",
1898 get_unaligned_be64(
1899 &phys_lun_ext_entry->wwid));
1900 else
1901 dev_warn(&ctrl_info->pci_dev->dev,
1902 "obtaining device info failed, skipping logical device %08x%08x\n",
1903 *((u32 *)&device->scsi3addr),
1904 *((u32 *)&device->scsi3addr[4]));
6c223761
KB
1905 rc = 0;
1906 continue;
1907 }
1908
1909 if (!pqi_is_supported_device(device))
1910 continue;
1911
1912 pqi_assign_bus_target_lun(device);
1913
6c223761
KB
1914 if (device->is_physical_device) {
1915 device->wwid = phys_lun_ext_entry->wwid;
1916 if ((phys_lun_ext_entry->device_flags &
1917 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1918 phys_lun_ext_entry->aio_handle)
1919 device->aio_enabled = true;
1920 } else {
1921 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1922 sizeof(device->volume_id));
1923 }
1924
1925 switch (device->devtype) {
1926 case TYPE_DISK:
1927 case TYPE_ZBC:
1928 case TYPE_ENCLOSURE:
1929 if (device->is_physical_device) {
1930 device->sas_address =
1931 get_unaligned_be64(&device->wwid);
1932 if (device->devtype == TYPE_DISK ||
1933 device->devtype == TYPE_ZBC) {
1934 device->aio_handle =
1935 phys_lun_ext_entry->aio_handle;
1936 pqi_get_physical_disk_info(ctrl_info,
1937 device, id_phys);
1938 }
1939 }
1940 break;
1941 }
1942
1943 new_device_list[num_valid_devices++] = device;
1944 }
1945
1946 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1947
1948out:
1949 list_for_each_entry_safe(device, next, &new_device_list_head,
1950 new_device_list_entry) {
1951 if (device->keep_device)
1952 continue;
1953 list_del(&device->new_device_list_entry);
1954 pqi_free_device(device);
1955 }
1956
1957 kfree(new_device_list);
1958 kfree(physdev_list);
1959 kfree(logdev_list);
1960 kfree(id_phys);
1961
1962 return rc;
1963}
1964
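/*
 * Illustrative note, not part of the driver: the main discovery loop above
 * walks physical entries first and logical entries second, so with
 * num_physicals = 3 and num_logicals = 2 the indices 0..2 come from
 * physdev_list and indices 3..4 map to logdev_list->lun_entries[0..1]
 * via i - num_physicals.
 */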
1965static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1966{
1967 unsigned long flags;
1968 struct pqi_scsi_dev *device;
6c223761 1969
a37ef745
KB
1970 while (1) {
1971 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1972
1973 device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
1974 struct pqi_scsi_dev, scsi_device_list_entry);
1975 if (device)
1976 list_del(&device->scsi_device_list_entry);
1977
1978 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
1979 flags);
1980
1981 if (!device)
1982 break;
6c223761 1983
6c223761
KB
1984 if (device->sdev)
1985 pqi_remove_device(ctrl_info, device);
6c223761
KB
1986 pqi_free_device(device);
1987 }
6c223761
KB
1988}
1989
1990static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1991{
1992 int rc;
1993
1994 if (pqi_ctrl_offline(ctrl_info))
1995 return -ENXIO;
1996
1997 mutex_lock(&ctrl_info->scan_mutex);
1998
1999 rc = pqi_update_scsi_devices(ctrl_info);
2000 if (rc)
5f310425 2001 pqi_schedule_rescan_worker_delayed(ctrl_info);
6c223761
KB
2002
2003 mutex_unlock(&ctrl_info->scan_mutex);
2004
2005 return rc;
2006}
2007
2008static void pqi_scan_start(struct Scsi_Host *shost)
2009{
2010 pqi_scan_scsi_devices(shost_to_hba(shost));
2011}
2012
2013/* Returns TRUE if scan is finished. */
2014
2015static int pqi_scan_finished(struct Scsi_Host *shost,
2016 unsigned long elapsed_time)
2017{
2018 struct pqi_ctrl_info *ctrl_info;
2019
2020 ctrl_info = shost_priv(shost);
2021
2022 return !mutex_is_locked(&ctrl_info->scan_mutex);
2023}
2024
061ef06a
KB
2025static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
2026{
2027 mutex_lock(&ctrl_info->scan_mutex);
2028 mutex_unlock(&ctrl_info->scan_mutex);
2029}
2030
2031static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
2032{
2033 mutex_lock(&ctrl_info->lun_reset_mutex);
2034 mutex_unlock(&ctrl_info->lun_reset_mutex);
2035}
2036
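/*
 * Illustrative note, not part of the driver: the lock-then-unlock pairs in
 * the two helpers above act purely as completion barriers. Taking the mutex
 * blocks the caller until any scan or LUN reset that currently holds it has
 * finished; dropping it immediately afterwards means nothing prevents a new
 * operation from starting once the helper returns.
 */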
6c223761
KB
2037static inline void pqi_set_encryption_info(
2038 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
2039 u64 first_block)
2040{
2041 u32 volume_blk_size;
2042
2043 /*
2044 * Set the encryption tweak values based on logical block address.
2045 * If the block size is 512, the tweak value is equal to the LBA.
2046 * For other block sizes, the tweak value is (LBA * block size) / 512.
2047 */
2048 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2049 if (volume_blk_size != 512)
2050 first_block = (first_block * volume_blk_size) / 512;
2051
2052 encryption_info->data_encryption_key_index =
2053 get_unaligned_le16(&raid_map->data_encryption_key_index);
2054 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2055 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2056}
2057
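/*
 * Illustrative sketch, not part of the driver: how the tweak scaling above
 * works out for a non-512-byte block size. The helper name below is
 * hypothetical and exists only for this example.
 */
static inline u64 example_encryption_tweak_lba(u64 first_block,
	u32 volume_blk_size)
{
	/* For 512-byte blocks the tweak is simply the LBA. */
	if (volume_blk_size == 512)
		return first_block;

	/*
	 * Otherwise scale the LBA to 512-byte units: LBA 100 on a
	 * 4096-byte-block volume becomes (100 * 4096) / 512 = 800.
	 */
	return (first_block * volume_blk_size) / 512;
}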
2058/*
588a63fe 2059 * Attempt to perform RAID bypass mapping for a logical volume I/O.
6c223761
KB
2060 */
2061
2062#define PQI_RAID_BYPASS_INELIGIBLE 1
2063
2064static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2065 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2066 struct pqi_queue_group *queue_group)
2067{
2068 struct raid_map *raid_map;
2069 bool is_write = false;
2070 u32 map_index;
2071 u64 first_block;
2072 u64 last_block;
2073 u32 block_cnt;
2074 u32 blocks_per_row;
2075 u64 first_row;
2076 u64 last_row;
2077 u32 first_row_offset;
2078 u32 last_row_offset;
2079 u32 first_column;
2080 u32 last_column;
2081 u64 r0_first_row;
2082 u64 r0_last_row;
2083 u32 r5or6_blocks_per_row;
2084 u64 r5or6_first_row;
2085 u64 r5or6_last_row;
2086 u32 r5or6_first_row_offset;
2087 u32 r5or6_last_row_offset;
2088 u32 r5or6_first_column;
2089 u32 r5or6_last_column;
2090 u16 data_disks_per_row;
2091 u32 total_disks_per_row;
2092 u16 layout_map_count;
2093 u32 stripesize;
2094 u16 strip_size;
2095 u32 first_group;
2096 u32 last_group;
2097 u32 current_group;
2098 u32 map_row;
2099 u32 aio_handle;
2100 u64 disk_block;
2101 u32 disk_block_cnt;
2102 u8 cdb[16];
2103 u8 cdb_length;
2104 int offload_to_mirror;
2105 struct pqi_encryption_info *encryption_info_ptr;
2106 struct pqi_encryption_info encryption_info;
2107#if BITS_PER_LONG == 32
2108 u64 tmpdiv;
2109#endif
2110
2111 /* Check for valid opcode, get LBA and block count. */
2112 switch (scmd->cmnd[0]) {
2113 case WRITE_6:
2114 is_write = true;
2115 /* fall through */
2116 case READ_6:
e018ef57
B
2117 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2118 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
6c223761
KB
2119 block_cnt = (u32)scmd->cmnd[4];
2120 if (block_cnt == 0)
2121 block_cnt = 256;
2122 break;
2123 case WRITE_10:
2124 is_write = true;
2125 /* fall through */
2126 case READ_10:
2127 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2128 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2129 break;
2130 case WRITE_12:
2131 is_write = true;
2132 /* fall through */
2133 case READ_12:
2134 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2135 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2136 break;
2137 case WRITE_16:
2138 is_write = true;
2139 /* fall through */
2140 case READ_16:
2141 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2142 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2143 break;
2144 default:
2145 /* Process via normal I/O path. */
2146 return PQI_RAID_BYPASS_INELIGIBLE;
2147 }
2148
2149 /* Check for write to non-RAID-0. */
2150 if (is_write && device->raid_level != SA_RAID_0)
2151 return PQI_RAID_BYPASS_INELIGIBLE;
2152
2153 if (unlikely(block_cnt == 0))
2154 return PQI_RAID_BYPASS_INELIGIBLE;
2155
2156 last_block = first_block + block_cnt - 1;
2157 raid_map = device->raid_map;
2158
2159 /* Check for invalid block or wraparound. */
2160 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2161 last_block < first_block)
2162 return PQI_RAID_BYPASS_INELIGIBLE;
2163
2164 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2165 strip_size = get_unaligned_le16(&raid_map->strip_size);
2166 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2167
2168 /* Calculate stripe information for the request. */
2169 blocks_per_row = data_disks_per_row * strip_size;
2170#if BITS_PER_LONG == 32
2171 tmpdiv = first_block;
2172 do_div(tmpdiv, blocks_per_row);
2173 first_row = tmpdiv;
2174 tmpdiv = last_block;
2175 do_div(tmpdiv, blocks_per_row);
2176 last_row = tmpdiv;
2177 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2178 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2179 tmpdiv = first_row_offset;
2180 do_div(tmpdiv, strip_size);
2181 first_column = tmpdiv;
2182 tmpdiv = last_row_offset;
2183 do_div(tmpdiv, strip_size);
2184 last_column = tmpdiv;
2185#else
2186 first_row = first_block / blocks_per_row;
2187 last_row = last_block / blocks_per_row;
2188 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2189 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2190 first_column = first_row_offset / strip_size;
2191 last_column = last_row_offset / strip_size;
2192#endif
2193
2194 /* If this isn't a single row/column then give to the controller. */
2195 if (first_row != last_row || first_column != last_column)
2196 return PQI_RAID_BYPASS_INELIGIBLE;
2197
2198 /* Proceeding with driver mapping. */
2199 total_disks_per_row = data_disks_per_row +
2200 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2201 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2202 get_unaligned_le16(&raid_map->row_cnt);
2203 map_index = (map_row * total_disks_per_row) + first_column;
2204
2205 /* RAID 1 */
2206 if (device->raid_level == SA_RAID_1) {
2207 if (device->offload_to_mirror)
2208 map_index += data_disks_per_row;
2209 device->offload_to_mirror = !device->offload_to_mirror;
2210 } else if (device->raid_level == SA_RAID_ADM) {
2211 /* RAID ADM */
2212 /*
2213 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2214 * divisible by 3.
2215 */
2216 offload_to_mirror = device->offload_to_mirror;
2217 if (offload_to_mirror == 0) {
2218 /* use physical disk in the first mirrored group. */
2219 map_index %= data_disks_per_row;
2220 } else {
2221 do {
2222 /*
2223 * Determine mirror group that map_index
2224 * indicates.
2225 */
2226 current_group = map_index / data_disks_per_row;
2227
2228 if (offload_to_mirror != current_group) {
2229 if (current_group <
2230 layout_map_count - 1) {
2231 /*
2232 * Select raid index from
2233 * next group.
2234 */
2235 map_index += data_disks_per_row;
2236 current_group++;
2237 } else {
2238 /*
2239 * Select raid index from first
2240 * group.
2241 */
2242 map_index %= data_disks_per_row;
2243 current_group = 0;
2244 }
2245 }
2246 } while (offload_to_mirror != current_group);
2247 }
2248
2249 /* Set mirror group to use next time. */
2250 offload_to_mirror =
2251 (offload_to_mirror >= layout_map_count - 1) ?
2252 0 : offload_to_mirror + 1;
2253 WARN_ON(offload_to_mirror >= layout_map_count);
2254 device->offload_to_mirror = offload_to_mirror;
2255 /*
2256 * Avoid direct use of device->offload_to_mirror within this
2257 * function since multiple threads might simultaneously
2258 * increment it beyond the range of device->layout_map_count - 1.
2259 */
2260 } else if ((device->raid_level == SA_RAID_5 ||
2261 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2262 /* RAID 50/60 */
2263 /* Verify first and last block are in same RAID group */
2264 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2265 stripesize = r5or6_blocks_per_row * layout_map_count;
2266#if BITS_PER_LONG == 32
2267 tmpdiv = first_block;
2268 first_group = do_div(tmpdiv, stripesize);
2269 tmpdiv = first_group;
2270 do_div(tmpdiv, r5or6_blocks_per_row);
2271 first_group = tmpdiv;
2272 tmpdiv = last_block;
2273 last_group = do_div(tmpdiv, stripesize);
2274 tmpdiv = last_group;
2275 do_div(tmpdiv, r5or6_blocks_per_row);
2276 last_group = tmpdiv;
2277#else
2278 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2279 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2280#endif
2281 if (first_group != last_group)
2282 return PQI_RAID_BYPASS_INELIGIBLE;
2283
2284 /* Verify request is in a single row of RAID 5/6 */
2285#if BITS_PER_LONG == 32
2286 tmpdiv = first_block;
2287 do_div(tmpdiv, stripesize);
2288 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2289 tmpdiv = last_block;
2290 do_div(tmpdiv, stripesize);
2291 r5or6_last_row = r0_last_row = tmpdiv;
2292#else
2293 first_row = r5or6_first_row = r0_first_row =
2294 first_block / stripesize;
2295 r5or6_last_row = r0_last_row = last_block / stripesize;
2296#endif
2297 if (r5or6_first_row != r5or6_last_row)
2298 return PQI_RAID_BYPASS_INELIGIBLE;
2299
2300 /* Verify request is in a single column */
2301#if BITS_PER_LONG == 32
2302 tmpdiv = first_block;
2303 first_row_offset = do_div(tmpdiv, stripesize);
2304 tmpdiv = first_row_offset;
2305 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2306 r5or6_first_row_offset = first_row_offset;
2307 tmpdiv = last_block;
2308 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2309 tmpdiv = r5or6_last_row_offset;
2310 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2311 tmpdiv = r5or6_first_row_offset;
2312 do_div(tmpdiv, strip_size);
2313 first_column = r5or6_first_column = tmpdiv;
2314 tmpdiv = r5or6_last_row_offset;
2315 do_div(tmpdiv, strip_size);
2316 r5or6_last_column = tmpdiv;
2317#else
2318 first_row_offset = r5or6_first_row_offset =
2319 (u32)((first_block % stripesize) %
2320 r5or6_blocks_per_row);
2321
2322 r5or6_last_row_offset =
2323 (u32)((last_block % stripesize) %
2324 r5or6_blocks_per_row);
2325
2326 first_column = r5or6_first_row_offset / strip_size;
2327 r5or6_first_column = first_column;
2328 r5or6_last_column = r5or6_last_row_offset / strip_size;
2329#endif
2330 if (r5or6_first_column != r5or6_last_column)
2331 return PQI_RAID_BYPASS_INELIGIBLE;
2332
2333 /* Request is eligible */
2334 map_row =
2335 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2336 get_unaligned_le16(&raid_map->row_cnt);
2337
2338 map_index = (first_group *
2339 (get_unaligned_le16(&raid_map->row_cnt) *
2340 total_disks_per_row)) +
2341 (map_row * total_disks_per_row) + first_column;
2342 }
2343
2344 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2345 return PQI_RAID_BYPASS_INELIGIBLE;
2346
2347 aio_handle = raid_map->disk_data[map_index].aio_handle;
2348 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2349 first_row * strip_size +
2350 (first_row_offset - first_column * strip_size);
2351 disk_block_cnt = block_cnt;
2352
2353 /* Handle differing logical/physical block sizes. */
2354 if (raid_map->phys_blk_shift) {
2355 disk_block <<= raid_map->phys_blk_shift;
2356 disk_block_cnt <<= raid_map->phys_blk_shift;
2357 }
2358
2359 if (unlikely(disk_block_cnt > 0xffff))
2360 return PQI_RAID_BYPASS_INELIGIBLE;
2361
2362 /* Build the new CDB for the physical disk I/O. */
2363 if (disk_block > 0xffffffff) {
2364 cdb[0] = is_write ? WRITE_16 : READ_16;
2365 cdb[1] = 0;
2366 put_unaligned_be64(disk_block, &cdb[2]);
2367 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2368 cdb[14] = 0;
2369 cdb[15] = 0;
2370 cdb_length = 16;
2371 } else {
2372 cdb[0] = is_write ? WRITE_10 : READ_10;
2373 cdb[1] = 0;
2374 put_unaligned_be32((u32)disk_block, &cdb[2]);
2375 cdb[6] = 0;
2376 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2377 cdb[9] = 0;
2378 cdb_length = 10;
2379 }
2380
2381 if (get_unaligned_le16(&raid_map->flags) &
2382 RAID_MAP_ENCRYPTION_ENABLED) {
2383 pqi_set_encryption_info(&encryption_info, raid_map,
2384 first_block);
2385 encryption_info_ptr = &encryption_info;
2386 } else {
2387 encryption_info_ptr = NULL;
2388 }
2389
2390 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
376fb880 2391 cdb, cdb_length, queue_group, encryption_info_ptr, true);
6c223761
KB
2392}
2393
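/*
 * Worked example, illustrative only: for a RAID-0 volume with
 * strip_size = 128 blocks and data_disks_per_row = 4, blocks_per_row = 512.
 * A request starting at first_block = 1000 lands in
 * first_row = 1000 / 512 = 1, first_row_offset = 1000 - 512 = 488 and
 * first_column = 488 / 128 = 3, so map_index picks the fourth data drive of
 * the second row and the physical start block becomes
 * disk_starting_blk + (1 * 128) + (488 - 3 * 128) = disk_starting_blk + 232.
 */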
2394#define PQI_STATUS_IDLE 0x0
2395
2396#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2397#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2398
2399#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2400#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2401#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2402#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2403#define PQI_DEVICE_STATE_ERROR 0x4
2404
2405#define PQI_MODE_READY_TIMEOUT_SECS 30
2406#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2407
2408static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2409{
2410 struct pqi_device_registers __iomem *pqi_registers;
2411 unsigned long timeout;
2412 u64 signature;
2413 u8 status;
2414
2415 pqi_registers = ctrl_info->pqi_registers;
2416 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2417
2418 while (1) {
2419 signature = readq(&pqi_registers->signature);
2420 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2421 sizeof(signature)) == 0)
2422 break;
2423 if (time_after(jiffies, timeout)) {
2424 dev_err(&ctrl_info->pci_dev->dev,
2425 "timed out waiting for PQI signature\n");
2426 return -ETIMEDOUT;
2427 }
2428 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2429 }
2430
2431 while (1) {
2432 status = readb(&pqi_registers->function_and_status_code);
2433 if (status == PQI_STATUS_IDLE)
2434 break;
2435 if (time_after(jiffies, timeout)) {
2436 dev_err(&ctrl_info->pci_dev->dev,
2437 "timed out waiting for PQI IDLE\n");
2438 return -ETIMEDOUT;
2439 }
2440 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2441 }
2442
2443 while (1) {
2444 if (readl(&pqi_registers->device_status) ==
2445 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2446 break;
2447 if (time_after(jiffies, timeout)) {
2448 dev_err(&ctrl_info->pci_dev->dev,
2449 "timed out waiting for PQI all registers ready\n");
2450 return -ETIMEDOUT;
2451 }
2452 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2453 }
2454
2455 return 0;
2456}
2457
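/*
 * Illustrative sketch, not part of the driver: the three loops above are all
 * instances of the same poll-until-deadline pattern. A minimal u32 flavor of
 * that pattern (hypothetical helper, shown only for explanation):
 */
static inline int example_poll_reg_u32(void __iomem *addr, u32 expected,
	unsigned long deadline)
{
	while (readl(addr) != expected) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	return 0;
}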
2458static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2459{
2460 struct pqi_scsi_dev *device;
2461
2462 device = io_request->scmd->device->hostdata;
588a63fe 2463 device->raid_bypass_enabled = false;
376fb880 2464 device->aio_enabled = false;
6c223761
KB
2465}
2466
d87d5474 2467static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
6c223761
KB
2468{
2469 struct pqi_ctrl_info *ctrl_info;
e58081a7 2470 struct pqi_scsi_dev *device;
6c223761 2471
03b288cf
KB
2472 device = sdev->hostdata;
2473 if (device->device_offline)
2474 return;
2475
2476 device->device_offline = true;
2477 scsi_device_set_state(sdev, SDEV_OFFLINE);
2478 ctrl_info = shost_to_hba(sdev->host);
2479 pqi_schedule_rescan_worker(ctrl_info);
2480 dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n",
2481 path, ctrl_info->scsi_host->host_no, device->bus,
2482 device->target, device->lun);
6c223761
KB
2483}
2484
2485static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2486{
2487 u8 scsi_status;
2488 u8 host_byte;
2489 struct scsi_cmnd *scmd;
2490 struct pqi_raid_error_info *error_info;
2491 size_t sense_data_length;
2492 int residual_count;
2493 int xfer_count;
2494 struct scsi_sense_hdr sshdr;
2495
2496 scmd = io_request->scmd;
2497 if (!scmd)
2498 return;
2499
2500 error_info = io_request->error_info;
2501 scsi_status = error_info->status;
2502 host_byte = DID_OK;
2503
f5b63206
KB
2504 switch (error_info->data_out_result) {
2505 case PQI_DATA_IN_OUT_GOOD:
2506 break;
2507 case PQI_DATA_IN_OUT_UNDERFLOW:
6c223761
KB
2508 xfer_count =
2509 get_unaligned_le32(&error_info->data_out_transferred);
2510 residual_count = scsi_bufflen(scmd) - xfer_count;
2511 scsi_set_resid(scmd, residual_count);
2512 if (xfer_count < scmd->underflow)
2513 host_byte = DID_SOFT_ERROR;
f5b63206
KB
2514 break;
2515 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2516 case PQI_DATA_IN_OUT_ABORTED:
2517 host_byte = DID_ABORT;
2518 break;
2519 case PQI_DATA_IN_OUT_TIMEOUT:
2520 host_byte = DID_TIME_OUT;
2521 break;
2522 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2523 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2524 case PQI_DATA_IN_OUT_BUFFER_ERROR:
2525 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2526 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2527 case PQI_DATA_IN_OUT_ERROR:
2528 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2529 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2530 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2531 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2532 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2533 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2534 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2535 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2536 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2537 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2538 default:
2539 host_byte = DID_ERROR;
2540 break;
6c223761
KB
2541 }
2542
2543 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2544 if (sense_data_length == 0)
2545 sense_data_length =
2546 get_unaligned_le16(&error_info->response_data_length);
2547 if (sense_data_length) {
2548 if (sense_data_length > sizeof(error_info->data))
2549 sense_data_length = sizeof(error_info->data);
2550
2551 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2552 scsi_normalize_sense(error_info->data,
2553 sense_data_length, &sshdr) &&
2554 sshdr.sense_key == HARDWARE_ERROR &&
2555 sshdr.asc == 0x3e &&
2556 sshdr.ascq == 0x1) {
d87d5474 2557 pqi_take_device_offline(scmd->device, "RAID");
6c223761
KB
2558 host_byte = DID_NO_CONNECT;
2559 }
2560
2561 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2562 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2563 memcpy(scmd->sense_buffer, error_info->data,
2564 sense_data_length);
2565 }
2566
2567 scmd->result = scsi_status;
2568 set_host_byte(scmd, host_byte);
2569}
2570
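/*
 * Worked example, illustrative only, of the underflow handling above: for a
 * 65536-byte request that actually transferred 61440 bytes, the driver
 * reports residual_count = 4096 via scsi_set_resid() and downgrades
 * host_byte to DID_SOFT_ERROR only if the 61440 bytes that did move fall
 * short of the command's scmd->underflow requirement.
 */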
2571static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2572{
2573 u8 scsi_status;
2574 u8 host_byte;
2575 struct scsi_cmnd *scmd;
2576 struct pqi_aio_error_info *error_info;
2577 size_t sense_data_length;
2578 int residual_count;
2579 int xfer_count;
2580 bool device_offline;
2581
2582 scmd = io_request->scmd;
2583 error_info = io_request->error_info;
2584 host_byte = DID_OK;
2585 sense_data_length = 0;
2586 device_offline = false;
2587
2588 switch (error_info->service_response) {
2589 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2590 scsi_status = error_info->status;
2591 break;
2592 case PQI_AIO_SERV_RESPONSE_FAILURE:
2593 switch (error_info->status) {
2594 case PQI_AIO_STATUS_IO_ABORTED:
2595 scsi_status = SAM_STAT_TASK_ABORTED;
2596 break;
2597 case PQI_AIO_STATUS_UNDERRUN:
2598 scsi_status = SAM_STAT_GOOD;
2599 residual_count = get_unaligned_le32(
2600 &error_info->residual_count);
2601 scsi_set_resid(scmd, residual_count);
2602 xfer_count = scsi_bufflen(scmd) - residual_count;
2603 if (xfer_count < scmd->underflow)
2604 host_byte = DID_SOFT_ERROR;
2605 break;
2606 case PQI_AIO_STATUS_OVERRUN:
2607 scsi_status = SAM_STAT_GOOD;
2608 break;
2609 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2610 pqi_aio_path_disabled(io_request);
2611 scsi_status = SAM_STAT_GOOD;
2612 io_request->status = -EAGAIN;
2613 break;
2614 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2615 case PQI_AIO_STATUS_INVALID_DEVICE:
376fb880
KB
2616 if (!io_request->raid_bypass) {
2617 device_offline = true;
2618 pqi_take_device_offline(scmd->device, "AIO");
2619 host_byte = DID_NO_CONNECT;
2620 }
6c223761
KB
2621 scsi_status = SAM_STAT_CHECK_CONDITION;
2622 break;
2623 case PQI_AIO_STATUS_IO_ERROR:
2624 default:
2625 scsi_status = SAM_STAT_CHECK_CONDITION;
2626 break;
2627 }
2628 break;
2629 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2630 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2631 scsi_status = SAM_STAT_GOOD;
2632 break;
2633 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2634 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2635 default:
2636 scsi_status = SAM_STAT_CHECK_CONDITION;
2637 break;
2638 }
2639
2640 if (error_info->data_present) {
2641 sense_data_length =
2642 get_unaligned_le16(&error_info->data_length);
2643 if (sense_data_length) {
2644 if (sense_data_length > sizeof(error_info->data))
2645 sense_data_length = sizeof(error_info->data);
2646 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2647 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2648 memcpy(scmd->sense_buffer, error_info->data,
2649 sense_data_length);
2650 }
2651 }
2652
2653 if (device_offline && sense_data_length == 0)
2654 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2655 0x3e, 0x1);
2656
2657 scmd->result = scsi_status;
2658 set_host_byte(scmd, host_byte);
2659}
2660
2661static void pqi_process_io_error(unsigned int iu_type,
2662 struct pqi_io_request *io_request)
2663{
2664 switch (iu_type) {
2665 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2666 pqi_process_raid_io_error(io_request);
2667 break;
2668 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2669 pqi_process_aio_io_error(io_request);
2670 break;
2671 }
2672}
2673
2674static int pqi_interpret_task_management_response(
2675 struct pqi_task_management_response *response)
2676{
2677 int rc;
2678
2679 switch (response->response_code) {
b17f0486
KB
2680 case SOP_TMF_COMPLETE:
2681 case SOP_TMF_FUNCTION_SUCCEEDED:
6c223761
KB
2682 rc = 0;
2683 break;
2684 default:
2685 rc = -EIO;
2686 break;
2687 }
2688
2689 return rc;
2690}
2691
2692static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2693 struct pqi_queue_group *queue_group)
2694{
2695 unsigned int num_responses;
2696 pqi_index_t oq_pi;
2697 pqi_index_t oq_ci;
2698 struct pqi_io_request *io_request;
2699 struct pqi_io_response *response;
2700 u16 request_id;
2701
2702 num_responses = 0;
2703 oq_ci = queue_group->oq_ci_copy;
2704
2705 while (1) {
dac12fbc 2706 oq_pi = readl(queue_group->oq_pi);
6c223761
KB
2707 if (oq_pi == oq_ci)
2708 break;
2709
2710 num_responses++;
2711 response = queue_group->oq_element_array +
2712 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2713
2714 request_id = get_unaligned_le16(&response->request_id);
2715 WARN_ON(request_id >= ctrl_info->max_io_slots);
2716
2717 io_request = &ctrl_info->io_request_pool[request_id];
2718 WARN_ON(atomic_read(&io_request->refcount) == 0);
2719
2720 switch (response->header.iu_type) {
2721 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2722 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2723 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2724 break;
2725 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2726 io_request->status =
2727 pqi_interpret_task_management_response(
2728 (void *)response);
2729 break;
2730 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2731 pqi_aio_path_disabled(io_request);
2732 io_request->status = -EAGAIN;
2733 break;
2734 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2735 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2736 io_request->error_info = ctrl_info->error_buffer +
2737 (get_unaligned_le16(&response->error_index) *
2738 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2739 pqi_process_io_error(response->header.iu_type,
2740 io_request);
2741 break;
2742 default:
2743 dev_err(&ctrl_info->pci_dev->dev,
2744 "unexpected IU type: 0x%x\n",
2745 response->header.iu_type);
6c223761
KB
2746 break;
2747 }
2748
2749 io_request->io_complete_callback(io_request,
2750 io_request->context);
2751
2752 /*
2753 * Note that the I/O request structure CANNOT BE TOUCHED after
2754 * returning from the I/O completion callback!
2755 */
2756
2757 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2758 }
2759
2760 if (num_responses) {
2761 queue_group->oq_ci_copy = oq_ci;
2762 writel(oq_ci, queue_group->oq_ci);
2763 }
2764
2765 return num_responses;
2766}
2767
2768static inline unsigned int pqi_num_elements_free(unsigned int pi,
df7a1fcf 2769 unsigned int ci, unsigned int elements_in_queue)
6c223761
KB
2770{
2771 unsigned int num_elements_used;
2772
2773 if (pi >= ci)
2774 num_elements_used = pi - ci;
2775 else
2776 num_elements_used = elements_in_queue - ci + pi;
2777
2778 return elements_in_queue - num_elements_used - 1;
2779}
2780
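/*
 * Worked example, illustrative only: pqi_num_elements_free() reserves one
 * element so a full queue (pi one behind ci) can be told apart from an empty
 * one (pi == ci). With elements_in_queue = 16, pi = 5 and ci = 9, the queue
 * holds 16 - 9 + 5 = 12 used elements and 16 - 12 - 1 = 3 are reported free.
 */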
98f87667 2781static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
2782 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2783{
2784 pqi_index_t iq_pi;
2785 pqi_index_t iq_ci;
2786 unsigned long flags;
2787 void *next_element;
6c223761
KB
2788 struct pqi_queue_group *queue_group;
2789
2790 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2791 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2792
6c223761
KB
2793 while (1) {
2794 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2795
2796 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
dac12fbc 2797 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
6c223761
KB
2798
2799 if (pqi_num_elements_free(iq_pi, iq_ci,
2800 ctrl_info->num_elements_per_iq))
2801 break;
2802
2803 spin_unlock_irqrestore(
2804 &queue_group->submit_lock[RAID_PATH], flags);
2805
98f87667 2806 if (pqi_ctrl_offline(ctrl_info))
6c223761 2807 return;
6c223761
KB
2808 }
2809
2810 next_element = queue_group->iq_element_array[RAID_PATH] +
2811 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2812
2813 memcpy(next_element, iu, iu_length);
2814
2815 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
6c223761
KB
2816 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2817
2818 /*
2819 * This write notifies the controller that an IU is available to be
2820 * processed.
2821 */
2822 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2823
2824 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
6c223761
KB
2825}
2826
2827static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2828 struct pqi_event *event)
2829{
2830 struct pqi_event_acknowledge_request request;
2831
2832 memset(&request, 0, sizeof(request));
2833
2834 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2835 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2836 &request.header.iu_length);
2837 request.event_type = event->event_type;
2838 request.event_id = event->event_id;
2839 request.additional_event_id = event->additional_event_id;
2840
98f87667 2841 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
6c223761
KB
2842}
2843
2844static void pqi_event_worker(struct work_struct *work)
2845{
2846 unsigned int i;
2847 struct pqi_ctrl_info *ctrl_info;
6a50d6ad 2848 struct pqi_event *event;
6c223761
KB
2849
2850 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2851
7561a7e4
KB
2852 pqi_ctrl_busy(ctrl_info);
2853 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
5f310425
KB
2854 if (pqi_ctrl_offline(ctrl_info))
2855 goto out;
2856
2857 pqi_schedule_rescan_worker_delayed(ctrl_info);
7561a7e4 2858
6a50d6ad 2859 event = ctrl_info->events;
6c223761 2860 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
6a50d6ad
KB
2861 if (event->pending) {
2862 event->pending = false;
2863 pqi_acknowledge_event(ctrl_info, event);
6c223761 2864 }
6a50d6ad 2865 event++;
6c223761
KB
2866 }
2867
5f310425 2868out:
7561a7e4 2869 pqi_ctrl_unbusy(ctrl_info);
6c223761
KB
2870}
2871
98f87667 2872#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
6c223761 2873
74a0f573 2874static void pqi_heartbeat_timer_handler(struct timer_list *t)
6c223761
KB
2875{
2876 int num_interrupts;
98f87667 2877 u32 heartbeat_count;
74a0f573
KC
2878 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
2879 heartbeat_timer);
6c223761 2880
98f87667
KB
2881 pqi_check_ctrl_health(ctrl_info);
2882 if (pqi_ctrl_offline(ctrl_info))
061ef06a
KB
2883 return;
2884
6c223761 2885 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
98f87667 2886 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
6c223761
KB
2887
2888 if (num_interrupts == ctrl_info->previous_num_interrupts) {
98f87667
KB
2889 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
2890 dev_err(&ctrl_info->pci_dev->dev,
2891 "no heartbeat detected - last heartbeat count: %u\n",
2892 heartbeat_count);
6c223761
KB
2893 pqi_take_ctrl_offline(ctrl_info);
2894 return;
2895 }
6c223761 2896 } else {
98f87667 2897 ctrl_info->previous_num_interrupts = num_interrupts;
6c223761
KB
2898 }
2899
98f87667 2900 ctrl_info->previous_heartbeat_count = heartbeat_count;
6c223761
KB
2901 mod_timer(&ctrl_info->heartbeat_timer,
2902 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2903}
2904
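/*
 * Illustrative sketch, not part of the driver: the handler above declares
 * the controller dead only when both signs of life stall between two timer
 * ticks. Restated as a hypothetical helper:
 */
static inline bool example_ctrl_appears_dead(int prev_num_interrupts,
	int num_interrupts, u32 prev_heartbeat_count, u32 heartbeat_count)
{
	/* New interrupts or an advancing heartbeat counter both count as alive. */
	return num_interrupts == prev_num_interrupts &&
		heartbeat_count == prev_heartbeat_count;
}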
2905static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2906{
98f87667
KB
2907 if (!ctrl_info->heartbeat_counter)
2908 return;
2909
6c223761
KB
2910 ctrl_info->previous_num_interrupts =
2911 atomic_read(&ctrl_info->num_interrupts);
98f87667
KB
2912 ctrl_info->previous_heartbeat_count =
2913 pqi_read_heartbeat_counter(ctrl_info);
6c223761 2914
6c223761
KB
2915 ctrl_info->heartbeat_timer.expires =
2916 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
061ef06a 2917 add_timer(&ctrl_info->heartbeat_timer);
6c223761
KB
2918}
2919
2920static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2921{
98f87667 2922 del_timer_sync(&ctrl_info->heartbeat_timer);
6c223761
KB
2923}
2924
6a50d6ad 2925static inline int pqi_event_type_to_event_index(unsigned int event_type)
6c223761
KB
2926{
2927 int index;
2928
6a50d6ad
KB
2929 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
2930 if (event_type == pqi_supported_event_types[index])
2931 return index;
6c223761 2932
6a50d6ad
KB
2933 return -1;
2934}
2935
2936static inline bool pqi_is_supported_event(unsigned int event_type)
2937{
2938 return pqi_event_type_to_event_index(event_type) != -1;
6c223761
KB
2939}
2940
2941static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2942{
2943 unsigned int num_events;
2944 pqi_index_t oq_pi;
2945 pqi_index_t oq_ci;
2946 struct pqi_event_queue *event_queue;
2947 struct pqi_event_response *response;
6a50d6ad 2948 struct pqi_event *event;
6c223761
KB
2949 int event_index;
2950
2951 event_queue = &ctrl_info->event_queue;
2952 num_events = 0;
6c223761
KB
2953 oq_ci = event_queue->oq_ci_copy;
2954
2955 while (1) {
dac12fbc 2956 oq_pi = readl(event_queue->oq_pi);
6c223761
KB
2957 if (oq_pi == oq_ci)
2958 break;
2959
2960 num_events++;
2961 response = event_queue->oq_element_array +
2962 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2963
2964 event_index =
2965 pqi_event_type_to_event_index(response->event_type);
2966
2967 if (event_index >= 0) {
2968 if (response->request_acknowlege) {
6a50d6ad
KB
2969 event = &ctrl_info->events[event_index];
2970 event->pending = true;
2971 event->event_type = response->event_type;
2972 event->event_id = response->event_id;
2973 event->additional_event_id =
6c223761 2974 response->additional_event_id;
6c223761
KB
2975 }
2976 }
2977
2978 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2979 }
2980
2981 if (num_events) {
2982 event_queue->oq_ci_copy = oq_ci;
2983 writel(oq_ci, event_queue->oq_ci);
98f87667 2984 schedule_work(&ctrl_info->event_work);
6c223761
KB
2985 }
2986
2987 return num_events;
2988}
2989
061ef06a
KB
2990#define PQI_LEGACY_INTX_MASK 0x1
2991
2992static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
2993 bool enable_intx)
2994{
2995 u32 intx_mask;
2996 struct pqi_device_registers __iomem *pqi_registers;
2997 volatile void __iomem *register_addr;
2998
2999 pqi_registers = ctrl_info->pqi_registers;
3000
3001 if (enable_intx)
3002 register_addr = &pqi_registers->legacy_intx_mask_clear;
3003 else
3004 register_addr = &pqi_registers->legacy_intx_mask_set;
3005
3006 intx_mask = readl(register_addr);
3007 intx_mask |= PQI_LEGACY_INTX_MASK;
3008 writel(intx_mask, register_addr);
3009}
3010
3011static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3012 enum pqi_irq_mode new_mode)
3013{
3014 switch (ctrl_info->irq_mode) {
3015 case IRQ_MODE_MSIX:
3016 switch (new_mode) {
3017 case IRQ_MODE_MSIX:
3018 break;
3019 case IRQ_MODE_INTX:
3020 pqi_configure_legacy_intx(ctrl_info, true);
061ef06a
KB
3021 sis_enable_intx(ctrl_info);
3022 break;
3023 case IRQ_MODE_NONE:
061ef06a
KB
3024 break;
3025 }
3026 break;
3027 case IRQ_MODE_INTX:
3028 switch (new_mode) {
3029 case IRQ_MODE_MSIX:
3030 pqi_configure_legacy_intx(ctrl_info, false);
061ef06a
KB
3031 sis_enable_msix(ctrl_info);
3032 break;
3033 case IRQ_MODE_INTX:
3034 break;
3035 case IRQ_MODE_NONE:
3036 pqi_configure_legacy_intx(ctrl_info, false);
061ef06a
KB
3037 break;
3038 }
3039 break;
3040 case IRQ_MODE_NONE:
3041 switch (new_mode) {
3042 case IRQ_MODE_MSIX:
3043 sis_enable_msix(ctrl_info);
3044 break;
3045 case IRQ_MODE_INTX:
3046 pqi_configure_legacy_intx(ctrl_info, true);
3047 sis_enable_intx(ctrl_info);
3048 break;
3049 case IRQ_MODE_NONE:
3050 break;
3051 }
3052 break;
3053 }
3054
3055 ctrl_info->irq_mode = new_mode;
3056}
3057
3058#define PQI_LEGACY_INTX_PENDING 0x1
3059
3060static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3061{
3062 bool valid_irq;
3063 u32 intx_status;
3064
3065 switch (ctrl_info->irq_mode) {
3066 case IRQ_MODE_MSIX:
3067 valid_irq = true;
3068 break;
3069 case IRQ_MODE_INTX:
3070 intx_status =
3071 readl(&ctrl_info->pqi_registers->legacy_intx_status);
3072 if (intx_status & PQI_LEGACY_INTX_PENDING)
3073 valid_irq = true;
3074 else
3075 valid_irq = false;
3076 break;
3077 case IRQ_MODE_NONE:
3078 default:
3079 valid_irq = false;
3080 break;
3081 }
3082
3083 return valid_irq;
3084}
3085
6c223761
KB
3086static irqreturn_t pqi_irq_handler(int irq, void *data)
3087{
3088 struct pqi_ctrl_info *ctrl_info;
3089 struct pqi_queue_group *queue_group;
3090 unsigned int num_responses_handled;
3091
3092 queue_group = data;
3093 ctrl_info = queue_group->ctrl_info;
3094
061ef06a 3095 if (!pqi_is_valid_irq(ctrl_info))
6c223761
KB
3096 return IRQ_NONE;
3097
3098 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3099
3100 if (irq == ctrl_info->event_irq)
3101 num_responses_handled += pqi_process_event_intr(ctrl_info);
3102
3103 if (num_responses_handled)
3104 atomic_inc(&ctrl_info->num_interrupts);
3105
3106 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3107 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3108
3109 return IRQ_HANDLED;
3110}
3111
3112static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3113{
d91d7820 3114 struct pci_dev *pci_dev = ctrl_info->pci_dev;
6c223761
KB
3115 int i;
3116 int rc;
3117
d91d7820 3118 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
6c223761
KB
3119
3120 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
d91d7820 3121 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
52198226 3122 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
6c223761 3123 if (rc) {
d91d7820 3124 dev_err(&pci_dev->dev,
6c223761 3125 "irq %u init failed with error %d\n",
d91d7820 3126 pci_irq_vector(pci_dev, i), rc);
6c223761
KB
3127 return rc;
3128 }
3129 ctrl_info->num_msix_vectors_initialized++;
3130 }
3131
3132 return 0;
3133}
3134
98bf061b
KB
3135static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3136{
3137 int i;
3138
3139 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3140 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3141 &ctrl_info->queue_groups[i]);
3142
3143 ctrl_info->num_msix_vectors_initialized = 0;
3144}
3145
6c223761
KB
3146static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3147{
98bf061b 3148 int num_vectors_enabled;
6c223761 3149
98bf061b 3150 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
52198226
CH
3151 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3152 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
98bf061b 3153 if (num_vectors_enabled < 0) {
6c223761 3154 dev_err(&ctrl_info->pci_dev->dev,
98bf061b
KB
3155 "MSI-X init failed with error %d\n",
3156 num_vectors_enabled);
3157 return num_vectors_enabled;
6c223761
KB
3158 }
3159
98bf061b 3160 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
061ef06a 3161 ctrl_info->irq_mode = IRQ_MODE_MSIX;
6c223761
KB
3162 return 0;
3163}
3164
98bf061b
KB
3165static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3166{
3167 if (ctrl_info->num_msix_vectors_enabled) {
3168 pci_free_irq_vectors(ctrl_info->pci_dev);
3169 ctrl_info->num_msix_vectors_enabled = 0;
3170 }
3171}
3172
6c223761
KB
3173static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3174{
3175 unsigned int i;
3176 size_t alloc_length;
3177 size_t element_array_length_per_iq;
3178 size_t element_array_length_per_oq;
3179 void *element_array;
dac12fbc 3180 void __iomem *next_queue_index;
6c223761
KB
3181 void *aligned_pointer;
3182 unsigned int num_inbound_queues;
3183 unsigned int num_outbound_queues;
3184 unsigned int num_queue_indexes;
3185 struct pqi_queue_group *queue_group;
3186
3187 element_array_length_per_iq =
3188 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3189 ctrl_info->num_elements_per_iq;
3190 element_array_length_per_oq =
3191 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3192 ctrl_info->num_elements_per_oq;
3193 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3194 num_outbound_queues = ctrl_info->num_queue_groups;
3195 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3196
3197 aligned_pointer = NULL;
3198
3199 for (i = 0; i < num_inbound_queues; i++) {
3200 aligned_pointer = PTR_ALIGN(aligned_pointer,
3201 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3202 aligned_pointer += element_array_length_per_iq;
3203 }
3204
3205 for (i = 0; i < num_outbound_queues; i++) {
3206 aligned_pointer = PTR_ALIGN(aligned_pointer,
3207 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3208 aligned_pointer += element_array_length_per_oq;
3209 }
3210
3211 aligned_pointer = PTR_ALIGN(aligned_pointer,
3212 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3213 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3214 PQI_EVENT_OQ_ELEMENT_LENGTH;
3215
3216 for (i = 0; i < num_queue_indexes; i++) {
3217 aligned_pointer = PTR_ALIGN(aligned_pointer,
3218 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3219 aligned_pointer += sizeof(pqi_index_t);
3220 }
3221
3222 alloc_length = (size_t)aligned_pointer +
3223 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3224
e1d213bd
KB
3225 alloc_length += PQI_EXTRA_SGL_MEMORY;
3226
6c223761
KB
3227 ctrl_info->queue_memory_base =
3228 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3229 alloc_length,
3230 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3231
d87d5474 3232 if (!ctrl_info->queue_memory_base)
6c223761 3233 return -ENOMEM;
6c223761
KB
3234
3235 ctrl_info->queue_memory_length = alloc_length;
3236
3237 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3238 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3239
3240 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3241 queue_group = &ctrl_info->queue_groups[i];
3242 queue_group->iq_element_array[RAID_PATH] = element_array;
3243 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3244 ctrl_info->queue_memory_base_dma_handle +
3245 (element_array - ctrl_info->queue_memory_base);
3246 element_array += element_array_length_per_iq;
3247 element_array = PTR_ALIGN(element_array,
3248 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3249 queue_group->iq_element_array[AIO_PATH] = element_array;
3250 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3251 ctrl_info->queue_memory_base_dma_handle +
3252 (element_array - ctrl_info->queue_memory_base);
3253 element_array += element_array_length_per_iq;
3254 element_array = PTR_ALIGN(element_array,
3255 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3256 }
3257
3258 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3259 queue_group = &ctrl_info->queue_groups[i];
3260 queue_group->oq_element_array = element_array;
3261 queue_group->oq_element_array_bus_addr =
3262 ctrl_info->queue_memory_base_dma_handle +
3263 (element_array - ctrl_info->queue_memory_base);
3264 element_array += element_array_length_per_oq;
3265 element_array = PTR_ALIGN(element_array,
3266 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3267 }
3268
3269 ctrl_info->event_queue.oq_element_array = element_array;
3270 ctrl_info->event_queue.oq_element_array_bus_addr =
3271 ctrl_info->queue_memory_base_dma_handle +
3272 (element_array - ctrl_info->queue_memory_base);
3273 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3274 PQI_EVENT_OQ_ELEMENT_LENGTH;
3275
dac12fbc 3276 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
6c223761
KB
3277 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3278
3279 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3280 queue_group = &ctrl_info->queue_groups[i];
3281 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3282 queue_group->iq_ci_bus_addr[RAID_PATH] =
3283 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
3284 (next_queue_index -
3285 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
3286 next_queue_index += sizeof(pqi_index_t);
3287 next_queue_index = PTR_ALIGN(next_queue_index,
3288 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3289 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3290 queue_group->iq_ci_bus_addr[AIO_PATH] =
3291 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
3292 (next_queue_index -
3293 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
3294 next_queue_index += sizeof(pqi_index_t);
3295 next_queue_index = PTR_ALIGN(next_queue_index,
3296 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3297 queue_group->oq_pi = next_queue_index;
3298 queue_group->oq_pi_bus_addr =
3299 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
3300 (next_queue_index -
3301 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
3302 next_queue_index += sizeof(pqi_index_t);
3303 next_queue_index = PTR_ALIGN(next_queue_index,
3304 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3305 }
3306
3307 ctrl_info->event_queue.oq_pi = next_queue_index;
3308 ctrl_info->event_queue.oq_pi_bus_addr =
3309 ctrl_info->queue_memory_base_dma_handle +
dac12fbc
KB
3310 (next_queue_index -
3311 (void __iomem *)ctrl_info->queue_memory_base);
6c223761
KB
3312
3313 return 0;
3314}
3315
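/*
 * Illustrative sketch, not part of the driver: the sizing pass above walks a
 * NULL-based pointer through the same PTR_ALIGN()/advance steps that are
 * later applied to the real buffer, so the pointer's final value is the
 * worst-case length and a single coherent allocation can then be carved up
 * with identical alignment. A minimal, hypothetical version of that idea:
 */
static inline size_t example_aligned_layout_length(size_t region_length,
	unsigned int region_count, unsigned long alignment)
{
	void *cursor = NULL;
	unsigned int i;

	for (i = 0; i < region_count; i++) {
		cursor = PTR_ALIGN(cursor, alignment);
		cursor += region_length;
	}

	/* Extra headroom so the real base pointer can itself be aligned. */
	return (size_t)cursor + alignment;
}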
3316static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3317{
3318 unsigned int i;
3319 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3320 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3321
3322 /*
3323 * Initialize the backpointers to the controller structure in
3324 * each operational queue group structure.
3325 */
3326 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3327 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3328
3329 /*
3330 * Assign IDs to all operational queues. Note that the IDs
3331 * assigned to operational IQs are independent of the IDs
3332 * assigned to operational OQs.
3333 */
3334 ctrl_info->event_queue.oq_id = next_oq_id++;
3335 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3336 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3337 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3338 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3339 }
3340
3341 /*
3342 * Assign MSI-X table entry indexes to all queues. Note that the
3343 * interrupt for the event queue is shared with the first queue group.
3344 */
3345 ctrl_info->event_queue.int_msg_num = 0;
3346 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3347 ctrl_info->queue_groups[i].int_msg_num = i;
3348
3349 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3350 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3351 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3352 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3353 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3354 }
3355}
3356
3357static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3358{
3359 size_t alloc_length;
3360 struct pqi_admin_queues_aligned *admin_queues_aligned;
3361 struct pqi_admin_queues *admin_queues;
3362
3363 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3364 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3365
3366 ctrl_info->admin_queue_memory_base =
3367 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3368 alloc_length,
3369 &ctrl_info->admin_queue_memory_base_dma_handle,
3370 GFP_KERNEL);
3371
3372 if (!ctrl_info->admin_queue_memory_base)
3373 return -ENOMEM;
3374
3375 ctrl_info->admin_queue_memory_length = alloc_length;
3376
3377 admin_queues = &ctrl_info->admin_queues;
3378 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3379 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3380 admin_queues->iq_element_array =
3381 &admin_queues_aligned->iq_element_array;
3382 admin_queues->oq_element_array =
3383 &admin_queues_aligned->oq_element_array;
3384 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
dac12fbc
KB
3385 admin_queues->oq_pi =
3386 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
6c223761
KB
3387
3388 admin_queues->iq_element_array_bus_addr =
3389 ctrl_info->admin_queue_memory_base_dma_handle +
3390 (admin_queues->iq_element_array -
3391 ctrl_info->admin_queue_memory_base);
3392 admin_queues->oq_element_array_bus_addr =
3393 ctrl_info->admin_queue_memory_base_dma_handle +
3394 (admin_queues->oq_element_array -
3395 ctrl_info->admin_queue_memory_base);
3396 admin_queues->iq_ci_bus_addr =
3397 ctrl_info->admin_queue_memory_base_dma_handle +
3398 ((void *)admin_queues->iq_ci -
3399 ctrl_info->admin_queue_memory_base);
3400 admin_queues->oq_pi_bus_addr =
3401 ctrl_info->admin_queue_memory_base_dma_handle +
dac12fbc
KB
3402 ((void __iomem *)admin_queues->oq_pi -
3403 (void __iomem *)ctrl_info->admin_queue_memory_base);
6c223761
KB
3404
3405 return 0;
3406}
3407
3408#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3409#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3410
3411static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3412{
3413 struct pqi_device_registers __iomem *pqi_registers;
3414 struct pqi_admin_queues *admin_queues;
3415 unsigned long timeout;
3416 u8 status;
3417 u32 reg;
3418
3419 pqi_registers = ctrl_info->pqi_registers;
3420 admin_queues = &ctrl_info->admin_queues;
3421
3422 writeq((u64)admin_queues->iq_element_array_bus_addr,
3423 &pqi_registers->admin_iq_element_array_addr);
3424 writeq((u64)admin_queues->oq_element_array_bus_addr,
3425 &pqi_registers->admin_oq_element_array_addr);
3426 writeq((u64)admin_queues->iq_ci_bus_addr,
3427 &pqi_registers->admin_iq_ci_addr);
3428 writeq((u64)admin_queues->oq_pi_bus_addr,
3429 &pqi_registers->admin_oq_pi_addr);
3430
3431 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3432 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3433 (admin_queues->int_msg_num << 16);
3434 writel(reg, &pqi_registers->admin_iq_num_elements);
3435 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3436 &pqi_registers->function_and_status_code);
3437
3438 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3439 while (1) {
3440 status = readb(&pqi_registers->function_and_status_code);
3441 if (status == PQI_STATUS_IDLE)
3442 break;
3443 if (time_after(jiffies, timeout))
3444 return -ETIMEDOUT;
3445 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3446 }
3447
3448 /*
3449 * The offset registers are not initialized to the correct
3450 * offsets until *after* the create admin queue pair command
3451 * completes successfully.
3452 */
3453 admin_queues->iq_pi = ctrl_info->iomem_base +
3454 PQI_DEVICE_REGISTERS_OFFSET +
3455 readq(&pqi_registers->admin_iq_pi_offset);
3456 admin_queues->oq_ci = ctrl_info->iomem_base +
3457 PQI_DEVICE_REGISTERS_OFFSET +
3458 readq(&pqi_registers->admin_oq_ci_offset);
3459
3460 return 0;
3461}
3462
3463static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3464 struct pqi_general_admin_request *request)
3465{
3466 struct pqi_admin_queues *admin_queues;
3467 void *next_element;
3468 pqi_index_t iq_pi;
3469
3470 admin_queues = &ctrl_info->admin_queues;
3471 iq_pi = admin_queues->iq_pi_copy;
3472
3473 next_element = admin_queues->iq_element_array +
3474 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3475
3476 memcpy(next_element, request, sizeof(*request));
3477
3478 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3479 admin_queues->iq_pi_copy = iq_pi;
3480
3481 /*
3482 * This write notifies the controller that an IU is available to be
3483 * processed.
3484 */
3485 writel(iq_pi, admin_queues->iq_pi);
3486}
3487
13bede67
KB
3488#define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
3489
6c223761
KB
3490static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3491 struct pqi_general_admin_response *response)
3492{
3493 struct pqi_admin_queues *admin_queues;
3494 pqi_index_t oq_pi;
3495 pqi_index_t oq_ci;
3496 unsigned long timeout;
3497
3498 admin_queues = &ctrl_info->admin_queues;
3499 oq_ci = admin_queues->oq_ci_copy;
3500
13bede67 3501 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
6c223761
KB
3502
3503 while (1) {
dac12fbc 3504 oq_pi = readl(admin_queues->oq_pi);
6c223761
KB
3505 if (oq_pi != oq_ci)
3506 break;
3507 if (time_after(jiffies, timeout)) {
3508 dev_err(&ctrl_info->pci_dev->dev,
3509 "timed out waiting for admin response\n");
3510 return -ETIMEDOUT;
3511 }
13bede67
KB
3512 if (!sis_is_firmware_running(ctrl_info))
3513 return -ENXIO;
6c223761
KB
3514 usleep_range(1000, 2000);
3515 }
3516
3517 memcpy(response, admin_queues->oq_element_array +
3518 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3519
3520 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3521 admin_queues->oq_ci_copy = oq_ci;
3522 writel(oq_ci, admin_queues->oq_ci);
3523
3524 return 0;
3525}
3526
3527static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3528 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3529 struct pqi_io_request *io_request)
3530{
3531 struct pqi_io_request *next;
3532 void *next_element;
3533 pqi_index_t iq_pi;
3534 pqi_index_t iq_ci;
3535 size_t iu_length;
3536 unsigned long flags;
3537 unsigned int num_elements_needed;
3538 unsigned int num_elements_to_end_of_queue;
3539 size_t copy_count;
3540 struct pqi_iu_header *request;
3541
3542 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3543
376fb880
KB
3544 if (io_request) {
3545 io_request->queue_group = queue_group;
6c223761
KB
3546 list_add_tail(&io_request->request_list_entry,
3547 &queue_group->request_list[path]);
376fb880 3548 }
6c223761
KB
3549
3550 iq_pi = queue_group->iq_pi_copy[path];
3551
3552 list_for_each_entry_safe(io_request, next,
3553 &queue_group->request_list[path], request_list_entry) {
3554
3555 request = io_request->iu;
3556
3557 iu_length = get_unaligned_le16(&request->iu_length) +
3558 PQI_REQUEST_HEADER_LENGTH;
3559 num_elements_needed =
3560 DIV_ROUND_UP(iu_length,
3561 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3562
dac12fbc 3563 iq_ci = readl(queue_group->iq_ci[path]);
6c223761
KB
3564
3565 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3566 ctrl_info->num_elements_per_iq))
3567 break;
3568
3569 put_unaligned_le16(queue_group->oq_id,
3570 &request->response_queue_id);
3571
3572 next_element = queue_group->iq_element_array[path] +
3573 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3574
3575 num_elements_to_end_of_queue =
3576 ctrl_info->num_elements_per_iq - iq_pi;
3577
3578 if (num_elements_needed <= num_elements_to_end_of_queue) {
3579 memcpy(next_element, request, iu_length);
3580 } else {
3581 copy_count = num_elements_to_end_of_queue *
3582 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3583 memcpy(next_element, request, copy_count);
3584 memcpy(queue_group->iq_element_array[path],
3585 (u8 *)request + copy_count,
3586 iu_length - copy_count);
3587 }
3588
3589 iq_pi = (iq_pi + num_elements_needed) %
3590 ctrl_info->num_elements_per_iq;
3591
3592 list_del(&io_request->request_list_entry);
3593 }
3594
3595 if (iq_pi != queue_group->iq_pi_copy[path]) {
3596 queue_group->iq_pi_copy[path] = iq_pi;
3597 /*
3598 * This write notifies the controller that one or more IUs are
3599 * available to be processed.
3600 */
3601 writel(iq_pi, queue_group->iq_pi[path]);
3602 }
3603
3604 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3605}
3606
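/*
 * Worked example, illustrative only, of the wrap-around copy above: with 32
 * elements per IQ, iq_pi = 30 and an IU needing 4 elements, only
 * num_elements_to_end_of_queue = 2 elements fit before the end of the ring.
 * copy_count = 2 * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH bytes land at the tail,
 * the remaining iu_length - copy_count bytes continue at element 0, and the
 * new producer index becomes (30 + 4) % 32 = 2.
 */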
1f37e992
KB
3607#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
3608
3609static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3610 struct completion *wait)
3611{
3612 int rc;
1f37e992
KB
3613
3614 while (1) {
3615 if (wait_for_completion_io_timeout(wait,
3616 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
3617 rc = 0;
3618 break;
3619 }
3620
3621 pqi_check_ctrl_health(ctrl_info);
3622 if (pqi_ctrl_offline(ctrl_info)) {
3623 rc = -ENXIO;
3624 break;
3625 }
1f37e992
KB
3626 }
3627
3628 return rc;
3629}
3630
6c223761
KB
3631static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3632 void *context)
3633{
3634 struct completion *waiting = context;
3635
3636 complete(waiting);
3637}
3638
26b390ab
KB
3639static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info
3640 *error_info)
3641{
3642 int rc = -EIO;
3643
3644 switch (error_info->data_out_result) {
3645 case PQI_DATA_IN_OUT_GOOD:
3646 if (error_info->status == SAM_STAT_GOOD)
3647 rc = 0;
3648 break;
3649 case PQI_DATA_IN_OUT_UNDERFLOW:
3650 if (error_info->status == SAM_STAT_GOOD ||
3651 error_info->status == SAM_STAT_CHECK_CONDITION)
3652 rc = 0;
3653 break;
3654 case PQI_DATA_IN_OUT_ABORTED:
3655 rc = PQI_CMD_STATUS_ABORTED;
3656 break;
3657 }
3658
3659 return rc;
3660}
3661
6c223761
KB
3662static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3663 struct pqi_iu_header *request, unsigned int flags,
3664 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3665{
957c5ab1 3666 int rc = 0;
3667 struct pqi_io_request *io_request;
3668 unsigned long start_jiffies;
3669 unsigned long msecs_blocked;
3670 size_t iu_length;
957c5ab1 3671 DECLARE_COMPLETION_ONSTACK(wait);
3672
3673 /*
3674 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3675 * are mutually exclusive.
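	 * (Callers that pass PQI_SYNC_FLAGS_INTERRUPTABLE are expected to use
	 * NO_TIMEOUT; only the uninterruptible branch below applies
	 * timeout_msecs while acquiring sync_request_sem.)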
3676 */
3677
3678 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3679 if (down_interruptible(&ctrl_info->sync_request_sem))
3680 return -ERESTARTSYS;
3681 } else {
3682 if (timeout_msecs == NO_TIMEOUT) {
3683 down(&ctrl_info->sync_request_sem);
3684 } else {
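			/*
			 * Charge the time spent waiting for sync_request_sem
			 * against the caller's timeout so the overall
			 * deadline is preserved.
			 */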
3685 start_jiffies = jiffies;
3686 if (down_timeout(&ctrl_info->sync_request_sem,
3687 msecs_to_jiffies(timeout_msecs)))
3688 return -ETIMEDOUT;
3689 msecs_blocked =
3690 jiffies_to_msecs(jiffies - start_jiffies);
3691 if (msecs_blocked >= timeout_msecs)
3692 return -ETIMEDOUT;
3693 timeout_msecs -= msecs_blocked;
3694 }
3695 }
3696
3697 pqi_ctrl_busy(ctrl_info);
3698 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
3699 if (timeout_msecs == 0) {
957c5ab1 3700 pqi_ctrl_unbusy(ctrl_info);
3701 rc = -ETIMEDOUT;
3702 goto out;
3703 }
3704
376fb880 3705 if (pqi_ctrl_offline(ctrl_info)) {
957c5ab1 3706 pqi_ctrl_unbusy(ctrl_info);
3707 rc = -ENXIO;
3708 goto out;
3709 }
3710
3711 io_request = pqi_alloc_io_request(ctrl_info);
3712
3713 put_unaligned_le16(io_request->index,
3714 &(((struct pqi_raid_path_request *)request)->request_id));
3715
3716 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3717 ((struct pqi_raid_path_request *)request)->error_index =
3718 ((struct pqi_raid_path_request *)request)->request_id;
3719
3720 iu_length = get_unaligned_le16(&request->iu_length) +
3721 PQI_REQUEST_HEADER_LENGTH;
3722 memcpy(io_request->iu, request, iu_length);
3723
3724 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3725 io_request->context = &wait;
3726
3727 pqi_start_io(ctrl_info,
3728 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3729 io_request);
3730
3731 pqi_ctrl_unbusy(ctrl_info);
3732
3733 if (timeout_msecs == NO_TIMEOUT) {
3734 pqi_wait_for_completion_io(ctrl_info, &wait);
3735 } else {
3736 if (!wait_for_completion_io_timeout(&wait,
3737 msecs_to_jiffies(timeout_msecs))) {
3738 dev_warn(&ctrl_info->pci_dev->dev,
3739 "command timed out\n");
3740 rc = -ETIMEDOUT;
3741 }
3742 }
3743
3744 if (error_info) {
3745 if (io_request->error_info)
3746 memcpy(error_info, io_request->error_info,
3747 sizeof(*error_info));
3748 else
3749 memset(error_info, 0, sizeof(*error_info));
3750 } else if (rc == 0 && io_request->error_info) {
3751 rc = pqi_process_raid_io_error_synchronous(
3752 io_request->error_info);
3753 }
3754
3755 pqi_free_io_request(io_request);
3756
7561a7e4 3757out:
3758 up(&ctrl_info->sync_request_sem);
3759
3760 return rc;
3761}
3762
3763static int pqi_validate_admin_response(
3764 struct pqi_general_admin_response *response, u8 expected_function_code)
3765{
3766 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3767 return -EINVAL;
3768
3769 if (get_unaligned_le16(&response->header.iu_length) !=
3770 PQI_GENERAL_ADMIN_IU_LENGTH)
3771 return -EINVAL;
3772
3773 if (response->function_code != expected_function_code)
3774 return -EINVAL;
3775
3776 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3777 return -EINVAL;
3778
3779 return 0;
3780}
3781
3782static int pqi_submit_admin_request_synchronous(
3783 struct pqi_ctrl_info *ctrl_info,
3784 struct pqi_general_admin_request *request,
3785 struct pqi_general_admin_response *response)
3786{
3787 int rc;
3788
3789 pqi_submit_admin_request(ctrl_info, request);
3790
3791 rc = pqi_poll_for_admin_response(ctrl_info, response);
3792
3793 if (rc == 0)
3794 rc = pqi_validate_admin_response(response,
3795 request->function_code);
3796
3797 return rc;
3798}
3799
3800static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3801{
3802 int rc;
3803 struct pqi_general_admin_request request;
3804 struct pqi_general_admin_response response;
3805 struct pqi_device_capability *capability;
3806 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3807
3808 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3809 if (!capability)
3810 return -ENOMEM;
3811
3812 memset(&request, 0, sizeof(request));
3813
3814 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3815 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3816 &request.header.iu_length);
3817 request.function_code =
3818 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3819 put_unaligned_le32(sizeof(*capability),
3820 &request.data.report_device_capability.buffer_length);
3821
3822 rc = pqi_map_single(ctrl_info->pci_dev,
3823 &request.data.report_device_capability.sg_descriptor,
3824 capability, sizeof(*capability),
3825 PCI_DMA_FROMDEVICE);
3826 if (rc)
3827 goto out;
3828
3829 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3830 &response);
3831
3832 pqi_pci_unmap(ctrl_info->pci_dev,
3833 &request.data.report_device_capability.sg_descriptor, 1,
3834 PCI_DMA_FROMDEVICE);
3835
3836 if (rc)
3837 goto out;
3838
3839 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3840 rc = -EIO;
3841 goto out;
3842 }
3843
3844 ctrl_info->max_inbound_queues =
3845 get_unaligned_le16(&capability->max_inbound_queues);
3846 ctrl_info->max_elements_per_iq =
3847 get_unaligned_le16(&capability->max_elements_per_iq);
3848 ctrl_info->max_iq_element_length =
3849 get_unaligned_le16(&capability->max_iq_element_length)
3850 * 16;
3851 ctrl_info->max_outbound_queues =
3852 get_unaligned_le16(&capability->max_outbound_queues);
3853 ctrl_info->max_elements_per_oq =
3854 get_unaligned_le16(&capability->max_elements_per_oq);
3855 ctrl_info->max_oq_element_length =
3856 get_unaligned_le16(&capability->max_oq_element_length)
3857 * 16;
3858
3859 sop_iu_layer_descriptor =
3860 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3861
3862 ctrl_info->max_inbound_iu_length_per_firmware =
3863 get_unaligned_le16(
3864 &sop_iu_layer_descriptor->max_inbound_iu_length);
3865 ctrl_info->inbound_spanning_supported =
3866 sop_iu_layer_descriptor->inbound_spanning_supported;
3867 ctrl_info->outbound_spanning_supported =
3868 sop_iu_layer_descriptor->outbound_spanning_supported;
3869
3870out:
3871 kfree(capability);
3872
3873 return rc;
3874}
3875
3876static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3877{
3878 if (ctrl_info->max_iq_element_length <
3879 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3880 dev_err(&ctrl_info->pci_dev->dev,
3881 "max. inbound queue element length of %d is less than the required length of %d\n",
3882 ctrl_info->max_iq_element_length,
3883 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3884 return -EINVAL;
3885 }
3886
3887 if (ctrl_info->max_oq_element_length <
3888 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3889 dev_err(&ctrl_info->pci_dev->dev,
3890 "max. outbound queue element length of %d is less than the required length of %d\n",
3891 ctrl_info->max_oq_element_length,
3892 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3893 return -EINVAL;
3894 }
3895
3896 if (ctrl_info->max_inbound_iu_length_per_firmware <
3897 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3898 dev_err(&ctrl_info->pci_dev->dev,
3899 "max. inbound IU length of %u is less than the min. required length of %d\n",
3900 ctrl_info->max_inbound_iu_length_per_firmware,
3901 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3902 return -EINVAL;
3903 }
3904
3905 if (!ctrl_info->inbound_spanning_supported) {
3906 dev_err(&ctrl_info->pci_dev->dev,
3907 "the controller does not support inbound spanning\n");
3908 return -EINVAL;
3909 }
3910
3911 if (ctrl_info->outbound_spanning_supported) {
3912 dev_err(&ctrl_info->pci_dev->dev,
3913 "the controller supports outbound spanning but this driver does not\n");
3914 return -EINVAL;
3915 }
3916
3917 return 0;
3918}
3919
3920static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3921{
3922 int rc;
3923 struct pqi_event_queue *event_queue;
3924 struct pqi_general_admin_request request;
3925 struct pqi_general_admin_response response;
3926
3927 event_queue = &ctrl_info->event_queue;
3928
3929 /*
3930 * Create OQ (Outbound Queue - device to host queue) to dedicate
3931 * to events.
3932 */
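	/*
	 * The event queue is sized by PQI_NUM_EVENT_QUEUE_ELEMENTS with
	 * PQI_EVENT_OQ_ELEMENT_LENGTH-byte elements (element lengths are
	 * encoded in 16-byte units below), and event completions are steered
	 * to the interrupt message number in event_queue->int_msg_num.
	 */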
3933 memset(&request, 0, sizeof(request));
3934 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3935 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3936 &request.header.iu_length);
3937 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3938 put_unaligned_le16(event_queue->oq_id,
3939 &request.data.create_operational_oq.queue_id);
3940 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3941 &request.data.create_operational_oq.element_array_addr);
3942 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3943 &request.data.create_operational_oq.pi_addr);
3944 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3945 &request.data.create_operational_oq.num_elements);
3946 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3947 &request.data.create_operational_oq.element_length);
3948 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3949 put_unaligned_le16(event_queue->int_msg_num,
3950 &request.data.create_operational_oq.int_msg_num);
3951
3952 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3953 &response);
3954 if (rc)
3955 return rc;
3956
3957 event_queue->oq_ci = ctrl_info->iomem_base +
3958 PQI_DEVICE_REGISTERS_OFFSET +
3959 get_unaligned_le64(
3960 &response.data.create_operational_oq.oq_ci_offset);
3961
3962 return 0;
3963}
3964
3965static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
3966 unsigned int group_number)
3967{
3968 int rc;
3969 struct pqi_queue_group *queue_group;
3970 struct pqi_general_admin_request request;
3971 struct pqi_general_admin_response response;
3972
061ef06a 3973 queue_group = &ctrl_info->queue_groups[group_number];
3974
3975 /*
3976 * Create IQ (Inbound Queue - host to device queue) for
3977 * RAID path.
3978 */
3979 memset(&request, 0, sizeof(request));
3980 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3981 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3982 &request.header.iu_length);
3983 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3984 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3985 &request.data.create_operational_iq.queue_id);
3986 put_unaligned_le64(
3987 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3988 &request.data.create_operational_iq.element_array_addr);
3989 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3990 &request.data.create_operational_iq.ci_addr);
3991 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3992 &request.data.create_operational_iq.num_elements);
3993 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3994 &request.data.create_operational_iq.element_length);
3995 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3996
3997 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3998 &response);
3999 if (rc) {
4000 dev_err(&ctrl_info->pci_dev->dev,
4001 "error creating inbound RAID queue\n");
4002 return rc;
4003 }
4004
4005 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4006 PQI_DEVICE_REGISTERS_OFFSET +
4007 get_unaligned_le64(
4008 &response.data.create_operational_iq.iq_pi_offset);
4009
4010 /*
4011 * Create IQ (Inbound Queue - host to device queue) for
4012 * Advanced I/O (AIO) path.
4013 */
4014 memset(&request, 0, sizeof(request));
4015 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4016 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4017 &request.header.iu_length);
4018 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4019 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4020 &request.data.create_operational_iq.queue_id);
4021 put_unaligned_le64((u64)queue_group->
4022 iq_element_array_bus_addr[AIO_PATH],
4023 &request.data.create_operational_iq.element_array_addr);
4024 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4025 &request.data.create_operational_iq.ci_addr);
4026 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4027 &request.data.create_operational_iq.num_elements);
4028 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4029 &request.data.create_operational_iq.element_length);
4030 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4031
4032 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4033 &response);
4034 if (rc) {
4035 dev_err(&ctrl_info->pci_dev->dev,
4036 "error creating inbound AIO queue\n");
339faa81 4037 return rc;
4038 }
4039
4040 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4041 PQI_DEVICE_REGISTERS_OFFSET +
4042 get_unaligned_le64(
4043 &response.data.create_operational_iq.iq_pi_offset);
4044
4045 /*
4046 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4047 * assumed to be for RAID path I/O unless we change the queue's
4048 * property.
4049 */
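	/*
	 * After this property change each queue group has two inbound
	 * queues -- one for the RAID path and one for the AIO path --
	 * which share the single outbound queue created below.
	 */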
4050 memset(&request, 0, sizeof(request));
4051 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4052 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4053 &request.header.iu_length);
4054 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4055 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4056 &request.data.change_operational_iq_properties.queue_id);
4057 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4058 &request.data.change_operational_iq_properties.vendor_specific);
4059
4060 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4061 &response);
4062 if (rc) {
4063 dev_err(&ctrl_info->pci_dev->dev,
4064 "error changing queue property\n");
339faa81 4065 return rc;
4066 }
4067
4068 /*
4069 * Create OQ (Outbound Queue - device to host queue).
4070 */
4071 memset(&request, 0, sizeof(request));
4072 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4073 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4074 &request.header.iu_length);
4075 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4076 put_unaligned_le16(queue_group->oq_id,
4077 &request.data.create_operational_oq.queue_id);
4078 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4079 &request.data.create_operational_oq.element_array_addr);
4080 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4081 &request.data.create_operational_oq.pi_addr);
4082 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4083 &request.data.create_operational_oq.num_elements);
4084 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4085 &request.data.create_operational_oq.element_length);
4086 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4087 put_unaligned_le16(queue_group->int_msg_num,
4088 &request.data.create_operational_oq.int_msg_num);
4089
4090 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4091 &response);
4092 if (rc) {
4093 dev_err(&ctrl_info->pci_dev->dev,
4094 "error creating outbound queue\n");
339faa81 4095 return rc;
4096 }
4097
4098 queue_group->oq_ci = ctrl_info->iomem_base +
4099 PQI_DEVICE_REGISTERS_OFFSET +
4100 get_unaligned_le64(
4101 &response.data.create_operational_oq.oq_ci_offset);
4102
6c223761 4103 return 0;
4104}
4105
4106static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4107{
4108 int rc;
4109 unsigned int i;
4110
4111 rc = pqi_create_event_queue(ctrl_info);
4112 if (rc) {
4113 dev_err(&ctrl_info->pci_dev->dev,
4114 "error creating event queue\n");
4115 return rc;
4116 }
4117
4118 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
061ef06a 4119 rc = pqi_create_queue_group(ctrl_info, i);
4120 if (rc) {
4121 dev_err(&ctrl_info->pci_dev->dev,
4122 "error creating queue group number %u/%u\n",
4123 i, ctrl_info->num_queue_groups);
4124 return rc;
4125 }
4126 }
4127
4128 return 0;
4129}
4130
4131#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4132 (offsetof(struct pqi_event_config, descriptors) + \
4133 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4134
4135static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4136 bool enable_events)
4137{
4138 int rc;
4139 unsigned int i;
4140 struct pqi_event_config *event_config;
6a50d6ad 4141 struct pqi_event_descriptor *event_descriptor;
4142 struct pqi_general_management_request request;
4143
4144 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4145 GFP_KERNEL);
4146 if (!event_config)
4147 return -ENOMEM;
4148
4149 memset(&request, 0, sizeof(request));
4150
4151 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4152 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4153 data.report_event_configuration.sg_descriptors[1]) -
4154 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4155 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4156 &request.data.report_event_configuration.buffer_length);
4157
4158 rc = pqi_map_single(ctrl_info->pci_dev,
4159 request.data.report_event_configuration.sg_descriptors,
4160 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4161 PCI_DMA_FROMDEVICE);
4162 if (rc)
4163 goto out;
4164
4165 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4166 0, NULL, NO_TIMEOUT);
4167
4168 pqi_pci_unmap(ctrl_info->pci_dev,
4169 request.data.report_event_configuration.sg_descriptors, 1,
4170 PCI_DMA_FROMDEVICE);
4171
4172 if (rc)
4173 goto out;
4174
4175 for (i = 0; i < event_config->num_event_descriptors; i++) {
4176 event_descriptor = &event_config->descriptors[i];
4177 if (enable_events &&
4178 pqi_is_supported_event(event_descriptor->event_type))
4179 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4180 &event_descriptor->oq_id);
4181 else
4182 put_unaligned_le16(0, &event_descriptor->oq_id);
4183 }
4184
4185 memset(&request, 0, sizeof(request));
4186
4187 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4188 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4189 data.report_event_configuration.sg_descriptors[1]) -
4190 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4191 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4192 &request.data.report_event_configuration.buffer_length);
4193
4194 rc = pqi_map_single(ctrl_info->pci_dev,
4195 request.data.report_event_configuration.sg_descriptors,
4196 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4197 PCI_DMA_TODEVICE);
4198 if (rc)
4199 goto out;
4200
4201 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4202 NULL, NO_TIMEOUT);
4203
4204 pqi_pci_unmap(ctrl_info->pci_dev,
4205 request.data.report_event_configuration.sg_descriptors, 1,
4206 PCI_DMA_TODEVICE);
4207
4208out:
4209 kfree(event_config);
4210
4211 return rc;
4212}
4213
4214static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4215{
4216 return pqi_configure_events(ctrl_info, true);
4217}
4218
4219static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4220{
4221 return pqi_configure_events(ctrl_info, false);
4222}
4223
4224static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4225{
4226 unsigned int i;
4227 struct device *dev;
4228 size_t sg_chain_buffer_length;
4229 struct pqi_io_request *io_request;
4230
4231 if (!ctrl_info->io_request_pool)
4232 return;
4233
4234 dev = &ctrl_info->pci_dev->dev;
4235 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4236 io_request = ctrl_info->io_request_pool;
4237
4238 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4239 kfree(io_request->iu);
4240 if (!io_request->sg_chain_buffer)
4241 break;
4242 dma_free_coherent(dev, sg_chain_buffer_length,
4243 io_request->sg_chain_buffer,
4244 io_request->sg_chain_buffer_dma_handle);
4245 io_request++;
4246 }
4247
4248 kfree(ctrl_info->io_request_pool);
4249 ctrl_info->io_request_pool = NULL;
4250}
4251
4252static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4253{
4254 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4255 ctrl_info->error_buffer_length,
4256 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4257
4258 if (!ctrl_info->error_buffer)
4259 return -ENOMEM;
4260
4261 return 0;
4262}
4263
4264static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4265{
4266 unsigned int i;
4267 void *sg_chain_buffer;
4268 size_t sg_chain_buffer_length;
4269 dma_addr_t sg_chain_buffer_dma_handle;
4270 struct device *dev;
4271 struct pqi_io_request *io_request;
4272
4273 ctrl_info->io_request_pool =
4274 kcalloc(ctrl_info->max_io_slots,
4275 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4276
4277 if (!ctrl_info->io_request_pool) {
4278 dev_err(&ctrl_info->pci_dev->dev,
4279 "failed to allocate I/O request pool\n");
4280 goto error;
4281 }
4282
4283 dev = &ctrl_info->pci_dev->dev;
4284 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4285 io_request = ctrl_info->io_request_pool;
4286
4287 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4288 io_request->iu =
4289 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4290
4291 if (!io_request->iu) {
4292 dev_err(&ctrl_info->pci_dev->dev,
4293 "failed to allocate IU buffers\n");
4294 goto error;
4295 }
4296
4297 sg_chain_buffer = dma_alloc_coherent(dev,
4298 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4299 GFP_KERNEL);
4300
4301 if (!sg_chain_buffer) {
4302 dev_err(&ctrl_info->pci_dev->dev,
4303 "failed to allocate PQI scatter-gather chain buffers\n");
4304 goto error;
4305 }
4306
4307 io_request->index = i;
4308 io_request->sg_chain_buffer = sg_chain_buffer;
4309 io_request->sg_chain_buffer_dma_handle =
4310 sg_chain_buffer_dma_handle;
4311 io_request++;
4312 }
4313
4314 return 0;
4315
4316error:
4317 pqi_free_all_io_requests(ctrl_info);
4318
4319 return -ENOMEM;
4320}
4321
4322/*
4323 * Calculate required resources that are sized based on max. outstanding
4324 * requests and max. transfer size.
4325 */
4326
4327static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4328{
4329 u32 max_transfer_size;
4330 u32 max_sg_entries;
4331
4332 ctrl_info->scsi_ml_can_queue =
4333 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4334 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4335
4336 ctrl_info->error_buffer_length =
4337 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4338
4339 if (reset_devices)
4340 max_transfer_size = min(ctrl_info->max_transfer_size,
4341 PQI_MAX_TRANSFER_SIZE_KDUMP);
4342 else
4343 max_transfer_size = min(ctrl_info->max_transfer_size,
4344 PQI_MAX_TRANSFER_SIZE);
4345
4346 max_sg_entries = max_transfer_size / PAGE_SIZE;
4347
4348 /* +1 to cover when the buffer is not page-aligned. */
4349 max_sg_entries++;
4350
4351 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4352
4353 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
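	/*
	 * For example, with 4 KiB pages and a 1 MiB transfer limit:
	 * 256 SG entries plus one spare for a misaligned buffer gives 257;
	 * if the controller supports that many, the usable transfer size
	 * works back out to (257 - 1) * 4 KiB = 1 MiB.
	 */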
4354
4355 ctrl_info->sg_chain_buffer_length =
4356 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4357 PQI_EXTRA_SGL_MEMORY;
4358 ctrl_info->sg_tablesize = max_sg_entries;
4359 ctrl_info->max_sectors = max_transfer_size / 512;
4360}
4361
4362static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4363{
4364 int num_queue_groups;
4365 u16 num_elements_per_iq;
4366 u16 num_elements_per_oq;
4367
4368 if (reset_devices) {
4369 num_queue_groups = 1;
4370 } else {
4371 int num_cpus;
4372 int max_queue_groups;
4373
4374 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4375 ctrl_info->max_outbound_queues - 1);
4376 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
6c223761 4377
4378 num_cpus = num_online_cpus();
4379 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4380 num_queue_groups = min(num_queue_groups, max_queue_groups);
4381 }
4382
4383 ctrl_info->num_queue_groups = num_queue_groups;
061ef06a 4384 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
6c223761 4385
4386 /*
4387 * Make sure that the max. inbound IU length is an even multiple
4388 * of our inbound element length.
4389 */
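	/*
	 * For example, with a 128-byte IQ element, a firmware-reported
	 * maximum of 1200 bytes is rounded down to 1152 (9 elements).
	 */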
4390 ctrl_info->max_inbound_iu_length =
4391 (ctrl_info->max_inbound_iu_length_per_firmware /
4392 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4393 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4394
4395 num_elements_per_iq =
4396 (ctrl_info->max_inbound_iu_length /
4397 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4398
4399 /* Add one because one element in each queue is unusable. */
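	/*
	 * (A circular queue can never be completely full -- PI == CI means
	 * empty -- so one extra element is requested to reach the intended
	 * usable depth.)
	 */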
4400 num_elements_per_iq++;
4401
4402 num_elements_per_iq = min(num_elements_per_iq,
4403 ctrl_info->max_elements_per_iq);
4404
4405 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4406 num_elements_per_oq = min(num_elements_per_oq,
4407 ctrl_info->max_elements_per_oq);
4408
4409 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4410 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4411
4412 ctrl_info->max_sg_per_iu =
4413 ((ctrl_info->max_inbound_iu_length -
4414 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4415 sizeof(struct pqi_sg_descriptor)) +
4416 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4417}
4418
4419static inline void pqi_set_sg_descriptor(
4420 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4421{
4422 u64 address = (u64)sg_dma_address(sg);
4423 unsigned int length = sg_dma_len(sg);
4424
4425 put_unaligned_le64(address, &sg_descriptor->address);
4426 put_unaligned_le32(length, &sg_descriptor->length);
4427 put_unaligned_le32(0, &sg_descriptor->flags);
4428}
4429
4430static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4431 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4432 struct pqi_io_request *io_request)
4433{
4434 int i;
4435 u16 iu_length;
4436 int sg_count;
4437 bool chained;
4438 unsigned int num_sg_in_iu;
4439 unsigned int max_sg_per_iu;
4440 struct scatterlist *sg;
4441 struct pqi_sg_descriptor *sg_descriptor;
4442
4443 sg_count = scsi_dma_map(scmd);
4444 if (sg_count < 0)
4445 return sg_count;
4446
4447 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4448 PQI_REQUEST_HEADER_LENGTH;
4449
4450 if (sg_count == 0)
4451 goto out;
4452
4453 sg = scsi_sglist(scmd);
4454 sg_descriptor = request->sg_descriptors;
4455 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4456 chained = false;
4457 num_sg_in_iu = 0;
4458 i = 0;
4459
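	/*
	 * Fill the descriptors embedded in the IU.  When the last embedded
	 * slot is reached, convert it into a CISS_SG_CHAIN descriptor that
	 * points at the pre-allocated DMA'able chain buffer and continue
	 * filling descriptors there.
	 */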
4460 while (1) {
4461 pqi_set_sg_descriptor(sg_descriptor, sg);
4462 if (!chained)
4463 num_sg_in_iu++;
4464 i++;
4465 if (i == sg_count)
4466 break;
4467 sg_descriptor++;
4468 if (i == max_sg_per_iu) {
4469 put_unaligned_le64(
4470 (u64)io_request->sg_chain_buffer_dma_handle,
4471 &sg_descriptor->address);
4472 put_unaligned_le32((sg_count - num_sg_in_iu)
4473 * sizeof(*sg_descriptor),
4474 &sg_descriptor->length);
4475 put_unaligned_le32(CISS_SG_CHAIN,
4476 &sg_descriptor->flags);
4477 chained = true;
4478 num_sg_in_iu++;
4479 sg_descriptor = io_request->sg_chain_buffer;
4480 }
4481 sg = sg_next(sg);
4482 }
4483
4484 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4485 request->partial = chained;
4486 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4487
4488out:
4489 put_unaligned_le16(iu_length, &request->header.iu_length);
4490
4491 return 0;
4492}
4493
4494static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4495 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4496 struct pqi_io_request *io_request)
4497{
4498 int i;
4499 u16 iu_length;
4500 int sg_count;
4501 bool chained;
4502 unsigned int num_sg_in_iu;
4503 unsigned int max_sg_per_iu;
4504 struct scatterlist *sg;
4505 struct pqi_sg_descriptor *sg_descriptor;
4506
4507 sg_count = scsi_dma_map(scmd);
4508 if (sg_count < 0)
4509 return sg_count;
4510
4511 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4512 PQI_REQUEST_HEADER_LENGTH;
4513 num_sg_in_iu = 0;
4514
4515 if (sg_count == 0)
4516 goto out;
4517
4518 sg = scsi_sglist(scmd);
4519 sg_descriptor = request->sg_descriptors;
4520 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4521 chained = false;
4522 i = 0;
4523
4524 while (1) {
4525 pqi_set_sg_descriptor(sg_descriptor, sg);
4526 if (!chained)
4527 num_sg_in_iu++;
4528 i++;
4529 if (i == sg_count)
4530 break;
4531 sg_descriptor++;
4532 if (i == max_sg_per_iu) {
4533 put_unaligned_le64(
4534 (u64)io_request->sg_chain_buffer_dma_handle,
4535 &sg_descriptor->address);
4536 put_unaligned_le32((sg_count - num_sg_in_iu)
4537 * sizeof(*sg_descriptor),
4538 &sg_descriptor->length);
4539 put_unaligned_le32(CISS_SG_CHAIN,
4540 &sg_descriptor->flags);
4541 chained = true;
4542 num_sg_in_iu++;
4543 sg_descriptor = io_request->sg_chain_buffer;
6c223761 4544 }
a60eec02 4545 sg = sg_next(sg);
4546 }
4547
4548 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4549 request->partial = chained;
6c223761 4550 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4551
4552out:
4553 put_unaligned_le16(iu_length, &request->header.iu_length);
4554 request->num_sg_descriptors = num_sg_in_iu;
4555
4556 return 0;
4557}
4558
4559static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4560 void *context)
4561{
4562 struct scsi_cmnd *scmd;
4563
4564 scmd = io_request->scmd;
4565 pqi_free_io_request(io_request);
4566 scsi_dma_unmap(scmd);
4567 pqi_scsi_done(scmd);
4568}
4569
4570static int pqi_raid_submit_scsi_cmd_with_io_request(
4571 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
4572 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4573 struct pqi_queue_group *queue_group)
4574{
4575 int rc;
4576 size_t cdb_length;
4577 struct pqi_raid_path_request *request;
4578
4579 io_request->io_complete_callback = pqi_raid_io_complete;
4580 io_request->scmd = scmd;
4581
4582 request = io_request->iu;
4583 memset(request, 0,
4584 offsetof(struct pqi_raid_path_request, sg_descriptors));
4585
4586 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4587 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4588 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4589 put_unaligned_le16(io_request->index, &request->request_id);
4590 request->error_index = request->request_id;
4591 memcpy(request->lun_number, device->scsi3addr,
4592 sizeof(request->lun_number));
4593
4594 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4595 memcpy(request->cdb, scmd->cmnd, cdb_length);
4596
4597 switch (cdb_length) {
4598 case 6:
4599 case 10:
4600 case 12:
4601 case 16:
4602 /* No bytes in the Additional CDB bytes field */
4603 request->additional_cdb_bytes_usage =
4604 SOP_ADDITIONAL_CDB_BYTES_0;
4605 break;
4606 case 20:
4607 /* 4 bytes in the Additional cdb field */
4608 request->additional_cdb_bytes_usage =
4609 SOP_ADDITIONAL_CDB_BYTES_4;
4610 break;
4611 case 24:
4612 /* 8 bytes in the Additional cdb field */
4613 request->additional_cdb_bytes_usage =
4614 SOP_ADDITIONAL_CDB_BYTES_8;
4615 break;
4616 case 28:
4617 /* 12 bytes in the Additional cdb field */
4618 request->additional_cdb_bytes_usage =
4619 SOP_ADDITIONAL_CDB_BYTES_12;
4620 break;
4621 case 32:
4622 default:
4623 /* 16 bytes in the Additional cdb field */
4624 request->additional_cdb_bytes_usage =
4625 SOP_ADDITIONAL_CDB_BYTES_16;
4626 break;
4627 }
4628
4629 switch (scmd->sc_data_direction) {
4630 case DMA_TO_DEVICE:
4631 request->data_direction = SOP_READ_FLAG;
4632 break;
4633 case DMA_FROM_DEVICE:
4634 request->data_direction = SOP_WRITE_FLAG;
4635 break;
4636 case DMA_NONE:
4637 request->data_direction = SOP_NO_DIRECTION_FLAG;
4638 break;
4639 case DMA_BIDIRECTIONAL:
4640 request->data_direction = SOP_BIDIRECTIONAL;
4641 break;
4642 default:
4643 dev_err(&ctrl_info->pci_dev->dev,
4644 "unknown data direction: %d\n",
4645 scmd->sc_data_direction);
4646 break;
4647 }
4648
4649 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4650 if (rc) {
4651 pqi_free_io_request(io_request);
4652 return SCSI_MLQUEUE_HOST_BUSY;
4653 }
4654
4655 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4656
4657 return 0;
4658}
4659
4660static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4661 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4662 struct pqi_queue_group *queue_group)
4663{
4664 struct pqi_io_request *io_request;
4665
4666 io_request = pqi_alloc_io_request(ctrl_info);
4667
4668 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
4669 device, scmd, queue_group);
4670}
4671
4672static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
4673{
4674 if (!pqi_ctrl_blocked(ctrl_info))
4675 schedule_work(&ctrl_info->raid_bypass_retry_work);
4676}
4677
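/*
 * A failed RAID bypass (AIO) request is not completed with an error.
 * Instead it is queued on raid_bypass_retry_list and resubmitted down the
 * normal RAID path by pqi_raid_bypass_retry_worker(), unless the failure
 * indicates that the device or the controller is gone.
 */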
4678static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
4679{
4680 struct scsi_cmnd *scmd;
03b288cf 4681 struct pqi_scsi_dev *device;
4682 struct pqi_ctrl_info *ctrl_info;
4683
4684 if (!io_request->raid_bypass)
4685 return false;
4686
4687 scmd = io_request->scmd;
4688 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
4689 return false;
4690 if (host_byte(scmd->result) == DID_NO_CONNECT)
4691 return false;
4692
4693 device = scmd->device->hostdata;
4694 if (pqi_device_offline(device))
4695 return false;
4696
4697 ctrl_info = shost_to_hba(scmd->device->host);
4698 if (pqi_ctrl_offline(ctrl_info))
4699 return false;
4700
4701 return true;
4702}
4703
4704static inline void pqi_add_to_raid_bypass_retry_list(
4705 struct pqi_ctrl_info *ctrl_info,
4706 struct pqi_io_request *io_request, bool at_head)
4707{
4708 unsigned long flags;
4709
4710 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4711 if (at_head)
4712 list_add(&io_request->request_list_entry,
4713 &ctrl_info->raid_bypass_retry_list);
4714 else
4715 list_add_tail(&io_request->request_list_entry,
4716 &ctrl_info->raid_bypass_retry_list);
4717 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4718}
4719
4720static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
4721 void *context)
4722{
4723 struct scsi_cmnd *scmd;
4724
4725 scmd = io_request->scmd;
4726 pqi_free_io_request(io_request);
4727 pqi_scsi_done(scmd);
4728}
4729
4730static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
4731{
4732 struct scsi_cmnd *scmd;
4733 struct pqi_ctrl_info *ctrl_info;
4734
4735 io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
4736 scmd = io_request->scmd;
4737 scmd->result = 0;
4738 ctrl_info = shost_to_hba(scmd->device->host);
4739
4740 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
4741 pqi_schedule_bypass_retry(ctrl_info);
4742}
4743
4744static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
4745{
4746 struct scsi_cmnd *scmd;
4747 struct pqi_scsi_dev *device;
4748 struct pqi_ctrl_info *ctrl_info;
4749 struct pqi_queue_group *queue_group;
4750
4751 scmd = io_request->scmd;
4752 device = scmd->device->hostdata;
4753 if (pqi_device_in_reset(device)) {
4754 pqi_free_io_request(io_request);
4755 set_host_byte(scmd, DID_RESET);
4756 pqi_scsi_done(scmd);
4757 return 0;
4758 }
4759
4760 ctrl_info = shost_to_hba(scmd->device->host);
4761 queue_group = io_request->queue_group;
4762
4763 pqi_reinit_io_request(io_request);
4764
4765 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
4766 device, scmd, queue_group);
4767}
4768
4769static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
4770 struct pqi_ctrl_info *ctrl_info)
4771{
4772 unsigned long flags;
4773 struct pqi_io_request *io_request;
4774
4775 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4776 io_request = list_first_entry_or_null(
4777 &ctrl_info->raid_bypass_retry_list,
4778 struct pqi_io_request, request_list_entry);
4779 if (io_request)
4780 list_del(&io_request->request_list_entry);
4781 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4782
4783 return io_request;
4784}
4785
4786static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
4787{
4788 int rc;
4789 struct pqi_io_request *io_request;
4790
4791 pqi_ctrl_busy(ctrl_info);
4792
4793 while (1) {
4794 if (pqi_ctrl_blocked(ctrl_info))
4795 break;
4796 io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
4797 if (!io_request)
4798 break;
4799 rc = pqi_retry_raid_bypass(io_request);
4800 if (rc) {
4801 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
4802 true);
4803 pqi_schedule_bypass_retry(ctrl_info);
4804 break;
4805 }
4806 }
4807
4808 pqi_ctrl_unbusy(ctrl_info);
4809}
4810
4811static void pqi_raid_bypass_retry_worker(struct work_struct *work)
4812{
4813 struct pqi_ctrl_info *ctrl_info;
4814
4815 ctrl_info = container_of(work, struct pqi_ctrl_info,
4816 raid_bypass_retry_work);
4817 pqi_retry_raid_bypass_requests(ctrl_info);
4818}
4819
4820static void pqi_clear_all_queued_raid_bypass_retries(
4821 struct pqi_ctrl_info *ctrl_info)
4822{
4823 unsigned long flags;
4824
4825 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5f310425 4826 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
4827 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4828}
4829
4830static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4831 void *context)
4832{
4833 struct scsi_cmnd *scmd;
4834
4835 scmd = io_request->scmd;
4836 scsi_dma_unmap(scmd);
4837 if (io_request->status == -EAGAIN)
4838 set_host_byte(scmd, DID_IMM_RETRY);
4839 else if (pqi_raid_bypass_retry_needed(io_request)) {
4840 pqi_queue_raid_bypass_retry(io_request);
4841 return;
4842 }
4843 pqi_free_io_request(io_request);
4844 pqi_scsi_done(scmd);
4845}
4846
4847static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4848 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4849 struct pqi_queue_group *queue_group)
4850{
4851 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
376fb880 4852 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
4853}
4854
4855static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4856 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4857 unsigned int cdb_length, struct pqi_queue_group *queue_group,
376fb880 4858 struct pqi_encryption_info *encryption_info, bool raid_bypass)
4859{
4860 int rc;
4861 struct pqi_io_request *io_request;
4862 struct pqi_aio_path_request *request;
4863
4864 io_request = pqi_alloc_io_request(ctrl_info);
4865 io_request->io_complete_callback = pqi_aio_io_complete;
4866 io_request->scmd = scmd;
376fb880 4867 io_request->raid_bypass = raid_bypass;
4868
4869 request = io_request->iu;
4870 memset(request, 0,
4871 offsetof(struct pqi_raid_path_request, sg_descriptors));
4872
4873 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4874 put_unaligned_le32(aio_handle, &request->nexus_id);
4875 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4876 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4877 put_unaligned_le16(io_request->index, &request->request_id);
4878 request->error_index = request->request_id;
4879 if (cdb_length > sizeof(request->cdb))
4880 cdb_length = sizeof(request->cdb);
4881 request->cdb_length = cdb_length;
4882 memcpy(request->cdb, cdb, cdb_length);
4883
4884 switch (scmd->sc_data_direction) {
4885 case DMA_TO_DEVICE:
4886 request->data_direction = SOP_READ_FLAG;
4887 break;
4888 case DMA_FROM_DEVICE:
4889 request->data_direction = SOP_WRITE_FLAG;
4890 break;
4891 case DMA_NONE:
4892 request->data_direction = SOP_NO_DIRECTION_FLAG;
4893 break;
4894 case DMA_BIDIRECTIONAL:
4895 request->data_direction = SOP_BIDIRECTIONAL;
4896 break;
4897 default:
4898 dev_err(&ctrl_info->pci_dev->dev,
4899 "unknown data direction: %d\n",
4900 scmd->sc_data_direction);
4901 break;
4902 }
4903
4904 if (encryption_info) {
4905 request->encryption_enable = true;
4906 put_unaligned_le16(encryption_info->data_encryption_key_index,
4907 &request->data_encryption_key_index);
4908 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4909 &request->encrypt_tweak_lower);
4910 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4911 &request->encrypt_tweak_upper);
4912 }
4913
4914 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4915 if (rc) {
4916 pqi_free_io_request(io_request);
4917 return SCSI_MLQUEUE_HOST_BUSY;
4918 }
4919
4920 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4921
4922 return 0;
4923}
4924
4925static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
4926 struct scsi_cmnd *scmd)
4927{
4928 u16 hw_queue;
4929
4930 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4931 if (hw_queue > ctrl_info->max_hw_queue_index)
4932 hw_queue = 0;
4933
4934 return hw_queue;
4935}
4936
4937/*
4938 * This function gets called just before we hand the completed SCSI request
4939 * back to the SML.
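 * It drops the per-device count of outstanding commands taken in
 * pqi_scsi_queue_command(); pqi_device_wait_for_pending_io() polls that
 * count when draining a device during reset handling.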
4940 */
4941
4942void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
4943{
4944 struct pqi_scsi_dev *device;
4945
4946 device = scmd->device->hostdata;
4947 atomic_dec(&device->scsi_cmds_outstanding);
4948}
4949
4950static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4951	struct scsi_cmnd *scmd)
4952{
4953 int rc;
4954 struct pqi_ctrl_info *ctrl_info;
4955 struct pqi_scsi_dev *device;
061ef06a 4956 u16 hw_queue;
4957 struct pqi_queue_group *queue_group;
4958 bool raid_bypassed;
4959
4960 device = scmd->device->hostdata;
4961 ctrl_info = shost_to_hba(shost);
4962
4963 atomic_inc(&device->scsi_cmds_outstanding);
4964
4965 if (pqi_ctrl_offline(ctrl_info)) {
4966 set_host_byte(scmd, DID_NO_CONNECT);
4967 pqi_scsi_done(scmd);
4968 return 0;
4969 }
4970
4971 pqi_ctrl_busy(ctrl_info);
4972 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
4973 rc = SCSI_MLQUEUE_HOST_BUSY;
4974 goto out;
4975 }
4976
4977 /*
4978 * This is necessary because the SML doesn't zero out this field during
4979 * error recovery.
4980 */
4981 scmd->result = 0;
4982
4983 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
4984 queue_group = &ctrl_info->queue_groups[hw_queue];
4985
4986 if (pqi_is_logical_device(device)) {
4987 raid_bypassed = false;
4988		if (device->raid_bypass_enabled &&
4989			!blk_rq_is_passthrough(scmd->request)) {
4990 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4991 scmd, queue_group);
4992 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
4993 raid_bypassed = true;
4994 }
4995 if (!raid_bypassed)
4996 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4997 queue_group);
4998 } else {
4999 if (device->aio_enabled)
5000 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
5001 queue_group);
5002 else
5003 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5004 queue_group);
5005 }
5006
5007out:
5008 pqi_ctrl_unbusy(ctrl_info);
5009 if (rc)
5010 atomic_dec(&device->scsi_cmds_outstanding);
5011
5012 return rc;
5013}
5014
5015static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
5016 struct pqi_queue_group *queue_group)
5017{
5018 unsigned int path;
5019 unsigned long flags;
5020 bool list_is_empty;
5021
5022 for (path = 0; path < 2; path++) {
5023 while (1) {
5024 spin_lock_irqsave(
5025 &queue_group->submit_lock[path], flags);
5026 list_is_empty =
5027 list_empty(&queue_group->request_list[path]);
5028 spin_unlock_irqrestore(
5029 &queue_group->submit_lock[path], flags);
5030 if (list_is_empty)
5031 break;
5032 pqi_check_ctrl_health(ctrl_info);
5033 if (pqi_ctrl_offline(ctrl_info))
5034 return -ENXIO;
5035 usleep_range(1000, 2000);
5036 }
5037 }
5038
5039 return 0;
5040}
5041
5042static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5043{
5044 int rc;
5045 unsigned int i;
5046 unsigned int path;
5047 struct pqi_queue_group *queue_group;
5048 pqi_index_t iq_pi;
5049 pqi_index_t iq_ci;
5050
5051 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5052 queue_group = &ctrl_info->queue_groups[i];
5053
5054 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5055 if (rc)
5056 return rc;
5057
5058 for (path = 0; path < 2; path++) {
5059 iq_pi = queue_group->iq_pi_copy[path];
5060
5061 while (1) {
dac12fbc 5062 iq_ci = readl(queue_group->iq_ci[path]);
5063 if (iq_ci == iq_pi)
5064 break;
5065 pqi_check_ctrl_health(ctrl_info);
5066 if (pqi_ctrl_offline(ctrl_info))
5067 return -ENXIO;
5068 usleep_range(1000, 2000);
5069 }
5070 }
5071 }
5072
5073 return 0;
5074}
5075
5076static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5077 struct pqi_scsi_dev *device)
5078{
5079 unsigned int i;
5080 unsigned int path;
5081 struct pqi_queue_group *queue_group;
5082 unsigned long flags;
5083 struct pqi_io_request *io_request;
5084 struct pqi_io_request *next;
5085 struct scsi_cmnd *scmd;
5086 struct pqi_scsi_dev *scsi_device;
5087
5088 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5089 queue_group = &ctrl_info->queue_groups[i];
5090
5091 for (path = 0; path < 2; path++) {
5092 spin_lock_irqsave(
5093 &queue_group->submit_lock[path], flags);
5094
5095 list_for_each_entry_safe(io_request, next,
5096 &queue_group->request_list[path],
5097 request_list_entry) {
5098 scmd = io_request->scmd;
5099 if (!scmd)
5100 continue;
5101
5102 scsi_device = scmd->device->hostdata;
5103 if (scsi_device != device)
5104 continue;
5105
5106 list_del(&io_request->request_list_entry);
5107 set_host_byte(scmd, DID_RESET);
5108 pqi_scsi_done(scmd);
5109 }
5110
5111 spin_unlock_irqrestore(
5112 &queue_group->submit_lock[path], flags);
5113 }
5114 }
5115}
5116
5117static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5118 struct pqi_scsi_dev *device)
5119{
5120 while (atomic_read(&device->scsi_cmds_outstanding)) {
5121 pqi_check_ctrl_health(ctrl_info);
5122 if (pqi_ctrl_offline(ctrl_info))
5123 return -ENXIO;
5124 usleep_range(1000, 2000);
5125 }
5126
5127 return 0;
5128}
5129
5130static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
5131{
5132 bool io_pending;
5133 unsigned long flags;
5134 struct pqi_scsi_dev *device;
5135
5136 while (1) {
5137 io_pending = false;
5138
5139 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5140 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5141 scsi_device_list_entry) {
5142 if (atomic_read(&device->scsi_cmds_outstanding)) {
5143 io_pending = true;
5144 break;
5145 }
5146 }
5147 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5148 flags);
5149
5150 if (!io_pending)
5151 break;
5152
5153 pqi_check_ctrl_health(ctrl_info);
5154 if (pqi_ctrl_offline(ctrl_info))
5155 return -ENXIO;
5156
5157 usleep_range(1000, 2000);
5158 }
5159
5160 return 0;
5161}
5162
5163static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
5164 void *context)
5165{
5166	struct completion *waiting = context;
5167
5168 complete(waiting);
5169}
6c223761 5170
5171#define PQI_LUN_RESET_TIMEOUT_SECS 10
5172
5173static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5174 struct pqi_scsi_dev *device, struct completion *wait)
5175{
5176 int rc;
5177
5178 while (1) {
5179 if (wait_for_completion_io_timeout(wait,
5180 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
5181 rc = 0;
5182 break;
5183 }
5184
5185 pqi_check_ctrl_health(ctrl_info);
5186 if (pqi_ctrl_offline(ctrl_info)) {
4e8415e3 5187 rc = -ENXIO;
5188 break;
5189 }
6c223761 5190 }
6c223761 5191
14bb215d 5192 return rc;
5193}
5194
14bb215d 5195static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
5196 struct pqi_scsi_dev *device)
5197{
5198 int rc;
5199 struct pqi_io_request *io_request;
5200 DECLARE_COMPLETION_ONSTACK(wait);
5201 struct pqi_task_management_request *request;
5202
6c223761 5203 io_request = pqi_alloc_io_request(ctrl_info);
14bb215d 5204 io_request->io_complete_callback = pqi_lun_reset_complete;
5205 io_request->context = &wait;
5206
5207 request = io_request->iu;
5208 memset(request, 0, sizeof(*request));
5209
5210 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5211 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5212 &request->header.iu_length);
5213 put_unaligned_le16(io_request->index, &request->request_id);
5214 memcpy(request->lun_number, device->scsi3addr,
5215 sizeof(request->lun_number));
5216 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5217
5218 pqi_start_io(ctrl_info,
5219 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5220 io_request);
5221
5222 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5223 if (rc == 0)
6c223761 5224 rc = io_request->status;
5225
5226 pqi_free_io_request(io_request);
5227
5228 return rc;
5229}
5230
5231/* Performs a reset at the LUN level. */
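/*
 * Issues a SOP LUN RESET task management request and, if it completes
 * successfully, waits for the device's outstanding commands to drain
 * before reporting SUCCESS back to the SCSI error handler.
 */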
5232
5233static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5234 struct pqi_scsi_dev *device)
5235{
5236 int rc;
5237
14bb215d 5238 rc = pqi_lun_reset(ctrl_info, device);
5239 if (rc == 0)
5240 rc = pqi_device_wait_for_pending_io(ctrl_info, device);
6c223761 5241
14bb215d 5242 return rc == 0 ? SUCCESS : FAILED;
5243}
5244
5245static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5246{
5247 int rc;
7561a7e4 5248 struct Scsi_Host *shost;
5249 struct pqi_ctrl_info *ctrl_info;
5250 struct pqi_scsi_dev *device;
5251
5252 shost = scmd->device->host;
5253 ctrl_info = shost_to_hba(shost);
5254 device = scmd->device->hostdata;
5255
5256 dev_err(&ctrl_info->pci_dev->dev,
5257 "resetting scsi %d:%d:%d:%d\n",
7561a7e4 5258 shost->host_no, device->bus, device->target, device->lun);
6c223761 5259
5260 pqi_check_ctrl_health(ctrl_info);
5261 if (pqi_ctrl_offline(ctrl_info)) {
5262 rc = FAILED;
5263 goto out;
5264 }
6c223761 5265
5266 mutex_lock(&ctrl_info->lun_reset_mutex);
5267
5268 pqi_ctrl_block_requests(ctrl_info);
5269 pqi_ctrl_wait_until_quiesced(ctrl_info);
5270 pqi_fail_io_queued_for_device(ctrl_info, device);
5271 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5272 pqi_device_reset_start(device);
5273 pqi_ctrl_unblock_requests(ctrl_info);
5274
5275 if (rc)
5276 rc = FAILED;
5277 else
5278 rc = pqi_device_reset(ctrl_info, device);
5279
5280 pqi_device_reset_done(device);
5281
5282 mutex_unlock(&ctrl_info->lun_reset_mutex);
5283
5284out:
5285 dev_err(&ctrl_info->pci_dev->dev,
5286 "reset of scsi %d:%d:%d:%d: %s\n",
7561a7e4 5287 shost->host_no, device->bus, device->target, device->lun,
5288 rc == SUCCESS ? "SUCCESS" : "FAILED");
5289
5290 return rc;
5291}
5292
5293static int pqi_slave_alloc(struct scsi_device *sdev)
5294{
5295 struct pqi_scsi_dev *device;
5296 unsigned long flags;
5297 struct pqi_ctrl_info *ctrl_info;
5298 struct scsi_target *starget;
5299 struct sas_rphy *rphy;
5300
5301 ctrl_info = shost_to_hba(sdev->host);
5302
5303 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5304
5305 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5306 starget = scsi_target(sdev);
5307 rphy = target_to_rphy(starget);
5308 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5309 if (device) {
5310 device->target = sdev_id(sdev);
5311 device->lun = sdev->lun;
5312 device->target_lun_valid = true;
5313 }
5314 } else {
5315 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5316 sdev_id(sdev), sdev->lun);
5317 }
5318
94086f5b 5319 if (device) {
5320 sdev->hostdata = device;
5321 device->sdev = sdev;
5322 if (device->queue_depth) {
5323 device->advertised_queue_depth = device->queue_depth;
5324 scsi_change_queue_depth(sdev,
5325 device->advertised_queue_depth);
5326 }
5327 }
5328
5329 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5330
5331 return 0;
5332}
5333
5334static int pqi_map_queues(struct Scsi_Host *shost)
5335{
5336 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5337
f23f5bec 5338 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0);
5339}
5340
5341static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5342 void __user *arg)
5343{
5344 struct pci_dev *pci_dev;
5345 u32 subsystem_vendor;
5346 u32 subsystem_device;
5347 cciss_pci_info_struct pciinfo;
5348
5349 if (!arg)
5350 return -EINVAL;
5351
5352 pci_dev = ctrl_info->pci_dev;
5353
5354 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5355 pciinfo.bus = pci_dev->bus->number;
5356 pciinfo.dev_fn = pci_dev->devfn;
5357 subsystem_vendor = pci_dev->subsystem_vendor;
5358 subsystem_device = pci_dev->subsystem_device;
5359 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5360 subsystem_vendor;
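	/*
	 * board_id packs the PCI subsystem device ID into the upper 16 bits
	 * and the subsystem vendor ID into the lower 16 bits, as expected by
	 * the legacy cciss ioctl interface this structure comes from.
	 */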
5361
5362 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5363 return -EFAULT;
5364
5365 return 0;
5366}
5367
5368static int pqi_getdrivver_ioctl(void __user *arg)
5369{
5370 u32 version;
5371
5372 if (!arg)
5373 return -EINVAL;
5374
5375 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5376 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5377
5378 if (copy_to_user(arg, &version, sizeof(version)))
5379 return -EFAULT;
5380
5381 return 0;
5382}
5383
5384struct ciss_error_info {
5385 u8 scsi_status;
5386 int command_status;
5387 size_t sense_data_length;
5388};
5389
5390static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5391 struct ciss_error_info *ciss_error_info)
5392{
5393 int ciss_cmd_status;
5394 size_t sense_data_length;
5395
5396 switch (pqi_error_info->data_out_result) {
5397 case PQI_DATA_IN_OUT_GOOD:
5398 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5399 break;
5400 case PQI_DATA_IN_OUT_UNDERFLOW:
5401 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5402 break;
5403 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5404 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5405 break;
5406 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5407 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5408 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5409 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5410 case PQI_DATA_IN_OUT_ERROR:
5411 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5412 break;
5413 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5414 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5415 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5416 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5417 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5418 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5419 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5420 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5421 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5422 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5423 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5424 break;
5425 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5426 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5427 break;
5428 case PQI_DATA_IN_OUT_ABORTED:
5429 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5430 break;
5431 case PQI_DATA_IN_OUT_TIMEOUT:
5432 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5433 break;
5434 default:
5435 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5436 break;
5437 }
5438
5439 sense_data_length =
5440 get_unaligned_le16(&pqi_error_info->sense_data_length);
5441 if (sense_data_length == 0)
5442 sense_data_length =
5443 get_unaligned_le16(&pqi_error_info->response_data_length);
5444 if (sense_data_length)
5445 if (sense_data_length > sizeof(pqi_error_info->data))
5446 sense_data_length = sizeof(pqi_error_info->data);
5447
5448 ciss_error_info->scsi_status = pqi_error_info->status;
5449 ciss_error_info->command_status = ciss_cmd_status;
5450 ciss_error_info->sense_data_length = sense_data_length;
5451}
5452
5453static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5454{
5455 int rc;
5456 char *kernel_buffer = NULL;
5457 u16 iu_length;
5458 size_t sense_data_length;
5459 IOCTL_Command_struct iocommand;
5460 struct pqi_raid_path_request request;
5461 struct pqi_raid_error_info pqi_error_info;
5462 struct ciss_error_info ciss_error_info;
5463
5464 if (pqi_ctrl_offline(ctrl_info))
5465 return -ENXIO;
5466 if (!arg)
5467 return -EINVAL;
5468 if (!capable(CAP_SYS_RAWIO))
5469 return -EPERM;
5470 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5471 return -EFAULT;
5472 if (iocommand.buf_size < 1 &&
5473 iocommand.Request.Type.Direction != XFER_NONE)
5474 return -EINVAL;
5475 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5476 return -EINVAL;
5477 if (iocommand.Request.Type.Type != TYPE_CMD)
5478 return -EINVAL;
5479
5480 switch (iocommand.Request.Type.Direction) {
5481 case XFER_NONE:
5482 case XFER_WRITE:
5483 case XFER_READ:
41555d54 5484 case XFER_READ | XFER_WRITE:
6c223761
KB
5485 break;
5486 default:
5487 return -EINVAL;
5488 }
5489
5490 if (iocommand.buf_size > 0) {
5491 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
5492 if (!kernel_buffer)
5493 return -ENOMEM;
5494 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5495 if (copy_from_user(kernel_buffer, iocommand.buf,
5496 iocommand.buf_size)) {
5497 rc = -EFAULT;
5498 goto out;
5499 }
5500 } else {
5501 memset(kernel_buffer, 0, iocommand.buf_size);
5502 }
5503 }
5504
5505 memset(&request, 0, sizeof(request));
5506
5507 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5508 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5509 PQI_REQUEST_HEADER_LENGTH;
5510 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
5511 sizeof(request.lun_number));
5512 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
5513 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5514
5515 switch (iocommand.Request.Type.Direction) {
5516 case XFER_NONE:
5517 request.data_direction = SOP_NO_DIRECTION_FLAG;
5518 break;
5519 case XFER_WRITE:
5520 request.data_direction = SOP_WRITE_FLAG;
5521 break;
5522 case XFER_READ:
5523 request.data_direction = SOP_READ_FLAG;
5524 break;
41555d54
KB
5525 case XFER_READ | XFER_WRITE:
5526 request.data_direction = SOP_BIDIRECTIONAL;
5527 break;
6c223761
KB
5528 }
5529
5530 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5531
5532 if (iocommand.buf_size > 0) {
5533 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
5534
5535 rc = pqi_map_single(ctrl_info->pci_dev,
5536 &request.sg_descriptors[0], kernel_buffer,
5537 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
5538 if (rc)
5539 goto out;
5540
5541 iu_length += sizeof(request.sg_descriptors[0]);
5542 }
5543
5544 put_unaligned_le16(iu_length, &request.header.iu_length);
5545
5546 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
5547 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
5548
5549 if (iocommand.buf_size > 0)
5550 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
5551 PCI_DMA_BIDIRECTIONAL);
5552
5553 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
5554
5555 if (rc == 0) {
5556 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
5557 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
5558 iocommand.error_info.CommandStatus =
5559 ciss_error_info.command_status;
5560 sense_data_length = ciss_error_info.sense_data_length;
5561 if (sense_data_length) {
5562 if (sense_data_length >
5563 sizeof(iocommand.error_info.SenseInfo))
5564 sense_data_length =
5565 sizeof(iocommand.error_info.SenseInfo);
5566 memcpy(iocommand.error_info.SenseInfo,
5567 pqi_error_info.data, sense_data_length);
5568 iocommand.error_info.SenseLen = sense_data_length;
5569 }
5570 }
5571
5572 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
5573 rc = -EFAULT;
5574 goto out;
5575 }
5576
5577 if (rc == 0 && iocommand.buf_size > 0 &&
5578 (iocommand.Request.Type.Direction & XFER_READ)) {
5579 if (copy_to_user(iocommand.buf, kernel_buffer,
5580 iocommand.buf_size)) {
5581 rc = -EFAULT;
5582 }
5583 }
5584
5585out:
5586 kfree(kernel_buffer);
5587
5588 return rc;
5589}
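/*
 * Flow of the CCISS_PASSTHRU handler above, in brief: validate the request
 * and copy it in, optionally allocate and fill a bounce buffer, build a RAID
 * path IU with a single SG descriptor mapped for bidirectional DMA, issue it
 * synchronously, translate any PQI error info to CISS form, then copy the
 * command structure (and read data, if any) back to user space.
 */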
5590
5591static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5592{
5593 int rc;
5594 struct pqi_ctrl_info *ctrl_info;
5595
5596 ctrl_info = shost_to_hba(sdev->host);
5597
5598 switch (cmd) {
5599 case CCISS_DEREGDISK:
5600 case CCISS_REGNEWDISK:
5601 case CCISS_REGNEWD:
5602 rc = pqi_scan_scsi_devices(ctrl_info);
5603 break;
5604 case CCISS_GETPCIINFO:
5605 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5606 break;
5607 case CCISS_GETDRIVVER:
5608 rc = pqi_getdrivver_ioctl(arg);
5609 break;
5610 case CCISS_PASSTHRU:
5611 rc = pqi_passthru_ioctl(ctrl_info, arg);
5612 break;
5613 default:
5614 rc = -EINVAL;
5615 break;
5616 }
5617
5618 return rc;
5619}
5620
5621static ssize_t pqi_version_show(struct device *dev,
5622 struct device_attribute *attr, char *buffer)
5623{
5624 ssize_t count = 0;
5625 struct Scsi_Host *shost;
5626 struct pqi_ctrl_info *ctrl_info;
5627
5628 shost = class_to_shost(dev);
5629 ctrl_info = shost_to_hba(shost);
5630
5631 count += snprintf(buffer + count, PAGE_SIZE - count,
5632 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5633
5634 count += snprintf(buffer + count, PAGE_SIZE - count,
5635 "firmware: %s\n", ctrl_info->firmware_version);
5636
5637 return count;
5638}
5639
5640static ssize_t pqi_host_rescan_store(struct device *dev,
5641 struct device_attribute *attr, const char *buffer, size_t count)
5642{
5643 struct Scsi_Host *shost = class_to_shost(dev);
5644
5645 pqi_scan_start(shost);
5646
5647 return count;
5648}
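/*
 * Usage sketch for the host "rescan" attribute above (illustrative; the
 * sysfs path assumes the standard scsi_host layout and is not taken from
 * this file): writing anything to it triggers a rescan of attached devices,
 * e.g.
 *
 *	echo 1 > /sys/class/scsi_host/hostN/rescan
 */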
5649
3c50976f
KB
5650static ssize_t pqi_lockup_action_show(struct device *dev,
5651 struct device_attribute *attr, char *buffer)
5652{
5653 int count = 0;
5654 unsigned int i;
5655
5656 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
5657 if (pqi_lockup_actions[i].action == pqi_lockup_action)
5658 count += snprintf(buffer + count, PAGE_SIZE - count,
5659 "[%s] ", pqi_lockup_actions[i].name);
5660 else
5661 count += snprintf(buffer + count, PAGE_SIZE - count,
5662 "%s ", pqi_lockup_actions[i].name);
5663 }
5664
5665 count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
5666
5667 return count;
5668}
5669
5670static ssize_t pqi_lockup_action_store(struct device *dev,
5671 struct device_attribute *attr, const char *buffer, size_t count)
5672{
5673 unsigned int i;
5674 char *action_name;
5675 char action_name_buffer[32];
5676
5677 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
5678 action_name = strstrip(action_name_buffer);
5679
5680 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
5681 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
5682 pqi_lockup_action = pqi_lockup_actions[i].action;
5683 return count;
5684 }
5685 }
5686
5687 return -EINVAL;
5688}
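/*
 * Usage sketch for the lockup_action host attribute (illustrative; path
 * assumes the standard scsi_host sysfs layout): reading it prints all
 * supported actions with the current one in brackets, e.g. "[none] reboot
 * panic"; writing one of those names selects it:
 *
 *	echo panic > /sys/class/scsi_host/hostN/lockup_action
 */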
5689
cbe0c7b1
KB
5690static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
5691static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
3c50976f
KB
5692static DEVICE_ATTR(lockup_action, 0644,
5693 pqi_lockup_action_show, pqi_lockup_action_store);
6c223761
KB
5694
5695static struct device_attribute *pqi_shost_attrs[] = {
5696 &dev_attr_version,
5697 &dev_attr_rescan,
3c50976f 5698 &dev_attr_lockup_action,
6c223761
KB
5699 NULL
5700};
5701
5702static ssize_t pqi_sas_address_show(struct device *dev,
5703 struct device_attribute *attr, char *buffer)
5704{
5705 struct pqi_ctrl_info *ctrl_info;
5706 struct scsi_device *sdev;
5707 struct pqi_scsi_dev *device;
5708 unsigned long flags;
5709 u64 sas_address;
5710
5711 sdev = to_scsi_device(dev);
5712 ctrl_info = shost_to_hba(sdev->host);
5713
5714 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5715
5716 device = sdev->hostdata;
5717 if (pqi_is_logical_device(device)) {
5718 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5719 flags);
5720 return -ENODEV;
5721 }
5722 sas_address = device->sas_address;
5723
5724 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5725
5726 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5727}
5728
5729static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5730 struct device_attribute *attr, char *buffer)
5731{
5732 struct pqi_ctrl_info *ctrl_info;
5733 struct scsi_device *sdev;
5734 struct pqi_scsi_dev *device;
5735 unsigned long flags;
5736
5737 sdev = to_scsi_device(dev);
5738 ctrl_info = shost_to_hba(sdev->host);
5739
5740 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5741
5742 device = sdev->hostdata;
588a63fe 5743 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
6c223761
KB
5744 buffer[1] = '\n';
5745 buffer[2] = '\0';
5746
5747 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5748
5749 return 2;
5750}
5751
a9f93392
KB
5752static ssize_t pqi_raid_level_show(struct device *dev,
5753 struct device_attribute *attr, char *buffer)
5754{
5755 struct pqi_ctrl_info *ctrl_info;
5756 struct scsi_device *sdev;
5757 struct pqi_scsi_dev *device;
5758 unsigned long flags;
5759 char *raid_level;
5760
5761 sdev = to_scsi_device(dev);
5762 ctrl_info = shost_to_hba(sdev->host);
5763
5764 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5765
5766 device = sdev->hostdata;
5767
5768 if (pqi_is_logical_device(device))
5769 raid_level = pqi_raid_level_to_string(device->raid_level);
5770 else
5771 raid_level = "N/A";
5772
5773 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5774
5775 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
5776}
5777
cbe0c7b1
KB
5778static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
5779static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
6c223761 5780 pqi_ssd_smart_path_enabled_show, NULL);
a9f93392 5781static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
6c223761
KB
5782
5783static struct device_attribute *pqi_sdev_attrs[] = {
5784 &dev_attr_sas_address,
5785 &dev_attr_ssd_smart_path_enabled,
a9f93392 5786 &dev_attr_raid_level,
6c223761
KB
5787 NULL
5788};
5789
5790static struct scsi_host_template pqi_driver_template = {
5791 .module = THIS_MODULE,
5792 .name = DRIVER_NAME_SHORT,
5793 .proc_name = DRIVER_NAME_SHORT,
5794 .queuecommand = pqi_scsi_queue_command,
5795 .scan_start = pqi_scan_start,
5796 .scan_finished = pqi_scan_finished,
5797 .this_id = -1,
5798 .use_clustering = ENABLE_CLUSTERING,
5799 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5800 .ioctl = pqi_ioctl,
5801 .slave_alloc = pqi_slave_alloc,
52198226 5802 .map_queues = pqi_map_queues,
6c223761
KB
5803 .sdev_attrs = pqi_sdev_attrs,
5804 .shost_attrs = pqi_shost_attrs,
5805};
5806
5807static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5808{
5809 int rc;
5810 struct Scsi_Host *shost;
5811
5812 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5813 if (!shost) {
5814 dev_err(&ctrl_info->pci_dev->dev,
5815 "scsi_host_alloc failed for controller %u\n",
5816 ctrl_info->ctrl_id);
5817 return -ENOMEM;
5818 }
5819
5820 shost->io_port = 0;
5821 shost->n_io_port = 0;
5822 shost->this_id = -1;
5823 shost->max_channel = PQI_MAX_BUS;
5824 shost->max_cmd_len = MAX_COMMAND_SIZE;
5825 shost->max_lun = ~0;
5826 shost->max_id = ~0;
5827 shost->max_sectors = ctrl_info->max_sectors;
5828 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5829 shost->cmd_per_lun = shost->can_queue;
5830 shost->sg_tablesize = ctrl_info->sg_tablesize;
5831 shost->transportt = pqi_sas_transport_template;
52198226 5832 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
6c223761
KB
5833 shost->unique_id = shost->irq;
5834 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5835 shost->hostdata[0] = (unsigned long)ctrl_info;
5836
5837 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5838 if (rc) {
5839 dev_err(&ctrl_info->pci_dev->dev,
5840 "scsi_add_host failed for controller %u\n",
5841 ctrl_info->ctrl_id);
5842 goto free_host;
5843 }
5844
5845 rc = pqi_add_sas_host(shost, ctrl_info);
5846 if (rc) {
5847 dev_err(&ctrl_info->pci_dev->dev,
5848 "add SAS host failed for controller %u\n",
5849 ctrl_info->ctrl_id);
5850 goto remove_host;
5851 }
5852
5853 ctrl_info->scsi_host = shost;
5854
5855 return 0;
5856
5857remove_host:
5858 scsi_remove_host(shost);
5859free_host:
5860 scsi_host_put(shost);
5861
5862 return rc;
5863}
5864
5865static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5866{
5867 struct Scsi_Host *shost;
5868
5869 pqi_delete_sas_host(ctrl_info);
5870
5871 shost = ctrl_info->scsi_host;
5872 if (!shost)
5873 return;
5874
5875 scsi_remove_host(shost);
5876 scsi_host_put(shost);
5877}
5878
336b6819
KB
5879static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
5880{
5881 int rc = 0;
5882 struct pqi_device_registers __iomem *pqi_registers;
5883 unsigned long timeout;
5884 unsigned int timeout_msecs;
5885 union pqi_reset_register reset_reg;
6c223761 5886
336b6819
KB
5887 pqi_registers = ctrl_info->pqi_registers;
5888 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
5889 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
5890
5891 while (1) {
5892 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
5893 reset_reg.all_bits = readl(&pqi_registers->device_reset);
5894 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
5895 break;
5896 pqi_check_ctrl_health(ctrl_info);
5897 if (pqi_ctrl_offline(ctrl_info)) {
5898 rc = -ENXIO;
5899 break;
5900 }
5901 if (time_after(jiffies, timeout)) {
5902 rc = -ETIMEDOUT;
5903 break;
5904 }
5905 }
5906
5907 return rc;
5908}
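/*
 * Note on the wait loop above: the max_reset_timeout register is read in
 * units of 100 ms (hence the "* 100" conversion to milliseconds), and the
 * loop polls the device reset register every PQI_RESET_POLL_INTERVAL_MSECS,
 * giving up early if the controller is detected offline.
 */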
6c223761
KB
5909
5910static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5911{
5912 int rc;
336b6819
KB
5913 union pqi_reset_register reset_reg;
5914
5915 if (ctrl_info->pqi_reset_quiesce_supported) {
5916 rc = sis_pqi_reset_quiesce(ctrl_info);
5917 if (rc) {
5918 dev_err(&ctrl_info->pci_dev->dev,
5919 "PQI reset failed during quiesce with error %d\n",
5920 rc);
5921 return rc;
5922 }
5923 }
6c223761 5924
336b6819
KB
5925 reset_reg.all_bits = 0;
5926 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
5927 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
6c223761 5928
336b6819 5929 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
6c223761 5930
336b6819 5931 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
6c223761
KB
5932 if (rc)
5933 dev_err(&ctrl_info->pci_dev->dev,
336b6819 5934 "PQI reset failed with error %d\n", rc);
6c223761
KB
5935
5936 return rc;
5937}
5938
5939static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5940{
5941 int rc;
5942 struct bmic_identify_controller *identify;
5943
5944 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5945 if (!identify)
5946 return -ENOMEM;
5947
5948 rc = pqi_identify_controller(ctrl_info, identify);
5949 if (rc)
5950 goto out;
5951
5952 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5953 sizeof(identify->firmware_version));
5954 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5955 snprintf(ctrl_info->firmware_version +
5956 strlen(ctrl_info->firmware_version),
5957 sizeof(ctrl_info->firmware_version) - strlen(ctrl_info->firmware_version),
5958 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5959
5960out:
5961 kfree(identify);
5962
5963 return rc;
5964}
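/*
 * The cached firmware version string built above ends up in the form
 * "<firmware version>-<build number>", which is what the "version" host
 * attribute reports alongside the driver version.
 */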
5965
98f87667
KB
5966static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
5967{
5968 u32 table_length;
5969 u32 section_offset;
5970 void __iomem *table_iomem_addr;
5971 struct pqi_config_table *config_table;
5972 struct pqi_config_table_section_header *section;
5973
5974 table_length = ctrl_info->config_table_length;
5975
5976 config_table = kmalloc(table_length, GFP_KERNEL);
5977 if (!config_table) {
5978 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 5979 "failed to allocate memory for PQI configuration table\n");
98f87667
KB
5980 return -ENOMEM;
5981 }
5982
5983 /*
5984 * Copy the config table contents from I/O memory space into the
5985 * temporary buffer.
5986 */
5987 table_iomem_addr = ctrl_info->iomem_base +
5988 ctrl_info->config_table_offset;
5989 memcpy_fromio(config_table, table_iomem_addr, table_length);
5990
5991 section_offset =
5992 get_unaligned_le32(&config_table->first_section_offset);
5993
5994 while (section_offset) {
5995 section = (void *)config_table + section_offset;
5996
5997 switch (get_unaligned_le16(&section->section_id)) {
5998 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
5a259e32
KB
5999 if (pqi_disable_heartbeat)
6000 dev_warn(&ctrl_info->pci_dev->dev,
6001 "heartbeat disabled by module parameter\n");
6002 else
6003 ctrl_info->heartbeat_counter =
6004 table_iomem_addr +
6005 section_offset +
6006 offsetof(
6007 struct pqi_config_table_heartbeat,
6008 heartbeat_counter);
98f87667
KB
6009 break;
6010 }
6011
6012 section_offset =
6013 get_unaligned_le16(&section->next_section_offset);
6014 }
6015
6016 kfree(config_table);
6017
6018 return 0;
6019}
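/*
 * The config table walk above chains through sections via
 * next_section_offset (an offset of 0 terminates the walk).  Only the
 * heartbeat section is consumed here: unless disabled by module parameter,
 * a pointer to the heartbeat counter inside BAR memory is retained so the
 * heartbeat timer can read it directly later.
 */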
6020
162d7753
KB
6021/* Switches the controller from PQI mode back into SIS mode. */
6022
6023static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
6024{
6025 int rc;
6026
061ef06a 6027 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
162d7753
KB
6028 rc = pqi_reset(ctrl_info);
6029 if (rc)
6030 return rc;
4f078e24
KB
6031 rc = sis_reenable_sis_mode(ctrl_info);
6032 if (rc) {
6033 dev_err(&ctrl_info->pci_dev->dev,
6034 "re-enabling SIS mode failed with error %d\n", rc);
6035 return rc;
6036 }
162d7753
KB
6037 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
6038
6039 return 0;
6040}
6041
6042/*
6043 * If the controller isn't already in SIS mode, this function forces it into
6044 * SIS mode.
6045 */
6046
6047static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
ff6abb73
KB
6048{
6049 if (!sis_is_firmware_running(ctrl_info))
6050 return -ENXIO;
6051
162d7753
KB
6052 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
6053 return 0;
6054
6055 if (sis_is_kernel_up(ctrl_info)) {
6056 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
6057 return 0;
ff6abb73
KB
6058 }
6059
162d7753 6060 return pqi_revert_to_sis_mode(ctrl_info);
ff6abb73
KB
6061}
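/*
 * Decision summary for pqi_force_sis_mode() above: if SIS firmware is not
 * running, fail with -ENXIO; if the saved mode is already SIS, there is
 * nothing to do; if the firmware "kernel" is up, just record SIS mode;
 * otherwise perform a full PQI reset and re-enable SIS mode via
 * pqi_revert_to_sis_mode().
 */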
6062
6c223761
KB
6063static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
6064{
6065 int rc;
6066
162d7753
KB
6067 rc = pqi_force_sis_mode(ctrl_info);
6068 if (rc)
6069 return rc;
6c223761
KB
6070
6071 /*
6072 * Wait until the controller is ready to start accepting SIS
6073 * commands.
6074 */
6075 rc = sis_wait_for_ctrl_ready(ctrl_info);
8845fdfa 6076 if (rc)
6c223761 6077 return rc;
6c223761
KB
6078
6079 /*
6080 * Get the controller properties. This allows us to determine
6081 * whether or not it supports PQI mode.
6082 */
6083 rc = sis_get_ctrl_properties(ctrl_info);
6084 if (rc) {
6085 dev_err(&ctrl_info->pci_dev->dev,
6086 "error obtaining controller properties\n");
6087 return rc;
6088 }
6089
6090 rc = sis_get_pqi_capabilities(ctrl_info);
6091 if (rc) {
6092 dev_err(&ctrl_info->pci_dev->dev,
6093 "error obtaining controller capabilities\n");
6094 return rc;
6095 }
6096
d727a776
KB
6097 if (reset_devices) {
6098 if (ctrl_info->max_outstanding_requests >
6099 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
6100 ctrl_info->max_outstanding_requests =
6101 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
6102 } else {
6103 if (ctrl_info->max_outstanding_requests >
6104 PQI_MAX_OUTSTANDING_REQUESTS)
6105 ctrl_info->max_outstanding_requests =
6106 PQI_MAX_OUTSTANDING_REQUESTS;
6107 }
6c223761
KB
6108
6109 pqi_calculate_io_resources(ctrl_info);
6110
6111 rc = pqi_alloc_error_buffer(ctrl_info);
6112 if (rc) {
6113 dev_err(&ctrl_info->pci_dev->dev,
6114 "failed to allocate PQI error buffer\n");
6115 return rc;
6116 }
6117
6118 /*
6119 * If the function we are about to call succeeds, the
6120 * controller will transition from legacy SIS mode
6121 * into PQI mode.
6122 */
6123 rc = sis_init_base_struct_addr(ctrl_info);
6124 if (rc) {
6125 dev_err(&ctrl_info->pci_dev->dev,
6126 "error initializing PQI mode\n");
6127 return rc;
6128 }
6129
6130 /* Wait for the controller to complete the SIS -> PQI transition. */
6131 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6132 if (rc) {
6133 dev_err(&ctrl_info->pci_dev->dev,
6134 "transition to PQI mode failed\n");
6135 return rc;
6136 }
6137
6138 /* From here on, we are running in PQI mode. */
6139 ctrl_info->pqi_mode_enabled = true;
ff6abb73 6140 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6c223761 6141
98f87667
KB
6142 rc = pqi_process_config_table(ctrl_info);
6143 if (rc)
6144 return rc;
6145
6c223761
KB
6146 rc = pqi_alloc_admin_queues(ctrl_info);
6147 if (rc) {
6148 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 6149 "failed to allocate admin queues\n");
6c223761
KB
6150 return rc;
6151 }
6152
6153 rc = pqi_create_admin_queues(ctrl_info);
6154 if (rc) {
6155 dev_err(&ctrl_info->pci_dev->dev,
6156 "error creating admin queues\n");
6157 return rc;
6158 }
6159
6160 rc = pqi_report_device_capability(ctrl_info);
6161 if (rc) {
6162 dev_err(&ctrl_info->pci_dev->dev,
6163 "obtaining device capability failed\n");
6164 return rc;
6165 }
6166
6167 rc = pqi_validate_device_capability(ctrl_info);
6168 if (rc)
6169 return rc;
6170
6171 pqi_calculate_queue_resources(ctrl_info);
6172
6173 rc = pqi_enable_msix_interrupts(ctrl_info);
6174 if (rc)
6175 return rc;
6176
6177 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
6178 ctrl_info->max_msix_vectors =
6179 ctrl_info->num_msix_vectors_enabled;
6180 pqi_calculate_queue_resources(ctrl_info);
6181 }
6182
6183 rc = pqi_alloc_io_resources(ctrl_info);
6184 if (rc)
6185 return rc;
6186
6187 rc = pqi_alloc_operational_queues(ctrl_info);
d87d5474
KB
6188 if (rc) {
6189 dev_err(&ctrl_info->pci_dev->dev,
6190 "failed to allocate operational queues\n");
6c223761 6191 return rc;
d87d5474 6192 }
6c223761
KB
6193
6194 pqi_init_operational_queues(ctrl_info);
6195
6196 rc = pqi_request_irqs(ctrl_info);
6197 if (rc)
6198 return rc;
6199
6c223761
KB
6200 rc = pqi_create_queues(ctrl_info);
6201 if (rc)
6202 return rc;
6203
061ef06a
KB
6204 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6205
6206 ctrl_info->controller_online = true;
6207 pqi_start_heartbeat_timer(ctrl_info);
6c223761 6208
6a50d6ad 6209 rc = pqi_enable_events(ctrl_info);
6c223761
KB
6210 if (rc) {
6211 dev_err(&ctrl_info->pci_dev->dev,
6a50d6ad 6212 "error enabling events\n");
6c223761
KB
6213 return rc;
6214 }
6215
6c223761
KB
6216 /* Register with the SCSI subsystem. */
6217 rc = pqi_register_scsi(ctrl_info);
6218 if (rc)
6219 return rc;
6220
6221 rc = pqi_get_ctrl_firmware_version(ctrl_info);
6222 if (rc) {
6223 dev_err(&ctrl_info->pci_dev->dev,
6224 "error obtaining firmware version\n");
6225 return rc;
6226 }
6227
6228 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6229 if (rc) {
6230 dev_err(&ctrl_info->pci_dev->dev,
6231 "error updating host wellness\n");
6232 return rc;
6233 }
6234
6235 pqi_schedule_update_time_worker(ctrl_info);
6236
6237 pqi_scan_scsi_devices(ctrl_info);
6238
6239 return 0;
6240}
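/*
 * Initialization order in pqi_ctrl_init() above, in brief: force SIS mode,
 * wait for the controller, read its properties and PQI capabilities (capping
 * outstanding requests for kdump), size I/O resources, switch to PQI mode,
 * process the config table, create admin and operational queues, enable
 * MSI-X and events, register with the SCSI midlayer, fetch the firmware
 * version, update host wellness, and kick off the first device scan.
 */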
6241
061ef06a
KB
6242static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
6243{
6244 unsigned int i;
6245 struct pqi_admin_queues *admin_queues;
6246 struct pqi_event_queue *event_queue;
6247
6248 admin_queues = &ctrl_info->admin_queues;
6249 admin_queues->iq_pi_copy = 0;
6250 admin_queues->oq_ci_copy = 0;
dac12fbc 6251 writel(0, admin_queues->oq_pi);
061ef06a
KB
6252
6253 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6254 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
6255 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
6256 ctrl_info->queue_groups[i].oq_ci_copy = 0;
6257
dac12fbc
KB
6258 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
6259 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
6260 writel(0, ctrl_info->queue_groups[i].oq_pi);
061ef06a
KB
6261 }
6262
6263 event_queue = &ctrl_info->event_queue;
dac12fbc 6264 writel(0, event_queue->oq_pi);
061ef06a
KB
6265 event_queue->oq_ci_copy = 0;
6266}
6267
6268static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
6269{
6270 int rc;
6271
6272 rc = pqi_force_sis_mode(ctrl_info);
6273 if (rc)
6274 return rc;
6275
6276 /*
6277 * Wait until the controller is ready to start accepting SIS
6278 * commands.
6279 */
6280 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
6281 if (rc)
6282 return rc;
6283
6284 /*
6285 * If the function we are about to call succeeds, the
6286 * controller will transition from legacy SIS mode
6287 * into PQI mode.
6288 */
6289 rc = sis_init_base_struct_addr(ctrl_info);
6290 if (rc) {
6291 dev_err(&ctrl_info->pci_dev->dev,
6292 "error initializing PQI mode\n");
6293 return rc;
6294 }
6295
6296 /* Wait for the controller to complete the SIS -> PQI transition. */
6297 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6298 if (rc) {
6299 dev_err(&ctrl_info->pci_dev->dev,
6300 "transition to PQI mode failed\n");
6301 return rc;
6302 }
6303
6304 /* From here on, we are running in PQI mode. */
6305 ctrl_info->pqi_mode_enabled = true;
6306 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6307
6308 pqi_reinit_queues(ctrl_info);
6309
6310 rc = pqi_create_admin_queues(ctrl_info);
6311 if (rc) {
6312 dev_err(&ctrl_info->pci_dev->dev,
6313 "error creating admin queues\n");
6314 return rc;
6315 }
6316
6317 rc = pqi_create_queues(ctrl_info);
6318 if (rc)
6319 return rc;
6320
6321 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6322
6323 ctrl_info->controller_online = true;
6324 pqi_start_heartbeat_timer(ctrl_info);
6325 pqi_ctrl_unblock_requests(ctrl_info);
6326
6327 rc = pqi_enable_events(ctrl_info);
6328 if (rc) {
6329 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 6330 "error enabling events\n");
061ef06a
KB
6331 return rc;
6332 }
6333
6334 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6335 if (rc) {
6336 dev_err(&ctrl_info->pci_dev->dev,
6337 "error updating host wellness\n");
6338 return rc;
6339 }
6340
6341 pqi_schedule_update_time_worker(ctrl_info);
6342
6343 pqi_scan_scsi_devices(ctrl_info);
6344
6345 return 0;
6346}
6347
a81ed5f3
KB
6348static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
6349 u16 timeout)
6350{
6351 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
6352 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
6353}
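/*
 * pqi_set_pcie_completion_timeout() above rewrites only the completion
 * timeout field of the PCIe Device Control 2 register; pqi_pci_init() uses
 * it to select the 65 ms - 210 ms range (encoding 0x6), presumably so that
 * slower controller responses are not flagged as completion timeouts.
 */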
6354
6c223761
KB
6355static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
6356{
6357 int rc;
6358 u64 mask;
6359
6360 rc = pci_enable_device(ctrl_info->pci_dev);
6361 if (rc) {
6362 dev_err(&ctrl_info->pci_dev->dev,
6363 "failed to enable PCI device\n");
6364 return rc;
6365 }
6366
6367 if (sizeof(dma_addr_t) > 4)
6368 mask = DMA_BIT_MASK(64);
6369 else
6370 mask = DMA_BIT_MASK(32);
6371
6372 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
6373 if (rc) {
6374 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
6375 goto disable_device;
6376 }
6377
6378 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
6379 if (rc) {
6380 dev_err(&ctrl_info->pci_dev->dev,
6381 "failed to obtain PCI resources\n");
6382 goto disable_device;
6383 }
6384
6385 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
6386 ctrl_info->pci_dev, 0),
6387 sizeof(struct pqi_ctrl_registers));
6388 if (!ctrl_info->iomem_base) {
6389 dev_err(&ctrl_info->pci_dev->dev,
6390 "failed to map memory for controller registers\n");
6391 rc = -ENOMEM;
6392 goto release_regions;
6393 }
6394
a81ed5f3
KB
6395#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
6396
6397 /* Increase the PCIe completion timeout. */
6398 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
6399 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
6400 if (rc) {
6401 dev_err(&ctrl_info->pci_dev->dev,
6402 "failed to set PCIe completion timeout\n");
6403 goto release_regions;
6404 }
6405
6c223761
KB
6406 /* Enable bus mastering. */
6407 pci_set_master(ctrl_info->pci_dev);
6408
cbe0c7b1
KB
6409 ctrl_info->registers = ctrl_info->iomem_base;
6410 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
6411
6c223761
KB
6412 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
6413
6414 return 0;
6415
6416release_regions:
6417 pci_release_regions(ctrl_info->pci_dev);
6418disable_device:
6419 pci_disable_device(ctrl_info->pci_dev);
6420
6421 return rc;
6422}
6423
6424static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
6425{
6426 iounmap(ctrl_info->iomem_base);
6427 pci_release_regions(ctrl_info->pci_dev);
cbe0c7b1
KB
6428 if (pci_is_enabled(ctrl_info->pci_dev))
6429 pci_disable_device(ctrl_info->pci_dev);
6c223761
KB
6430 pci_set_drvdata(ctrl_info->pci_dev, NULL);
6431}
6432
6433static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
6434{
6435 struct pqi_ctrl_info *ctrl_info;
6436
6437 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
6438 GFP_KERNEL, numa_node);
6439 if (!ctrl_info)
6440 return NULL;
6441
6442 mutex_init(&ctrl_info->scan_mutex);
7561a7e4 6443 mutex_init(&ctrl_info->lun_reset_mutex);
6c223761
KB
6444
6445 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
6446 spin_lock_init(&ctrl_info->scsi_device_list_lock);
6447
6448 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
6449 atomic_set(&ctrl_info->num_interrupts, 0);
6450
6451 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
6452 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
6453
74a0f573 6454 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
5f310425 6455 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
98f87667 6456
6c223761
KB
6457 sema_init(&ctrl_info->sync_request_sem,
6458 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
7561a7e4 6459 init_waitqueue_head(&ctrl_info->block_requests_wait);
6c223761 6460
376fb880
KB
6461 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
6462 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
6463 INIT_WORK(&ctrl_info->raid_bypass_retry_work,
6464 pqi_raid_bypass_retry_worker);
6465
6c223761 6466 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
061ef06a 6467 ctrl_info->irq_mode = IRQ_MODE_NONE;
6c223761
KB
6468 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
6469
6470 return ctrl_info;
6471}
6472
6473static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
6474{
6475 kfree(ctrl_info);
6476}
6477
6478static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
6479{
98bf061b
KB
6480 pqi_free_irqs(ctrl_info);
6481 pqi_disable_msix_interrupts(ctrl_info);
6c223761
KB
6482}
6483
6484static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
6485{
6486 pqi_stop_heartbeat_timer(ctrl_info);
6487 pqi_free_interrupts(ctrl_info);
6488 if (ctrl_info->queue_memory_base)
6489 dma_free_coherent(&ctrl_info->pci_dev->dev,
6490 ctrl_info->queue_memory_length,
6491 ctrl_info->queue_memory_base,
6492 ctrl_info->queue_memory_base_dma_handle);
6493 if (ctrl_info->admin_queue_memory_base)
6494 dma_free_coherent(&ctrl_info->pci_dev->dev,
6495 ctrl_info->admin_queue_memory_length,
6496 ctrl_info->admin_queue_memory_base,
6497 ctrl_info->admin_queue_memory_base_dma_handle);
6498 pqi_free_all_io_requests(ctrl_info);
6499 if (ctrl_info->error_buffer)
6500 dma_free_coherent(&ctrl_info->pci_dev->dev,
6501 ctrl_info->error_buffer_length,
6502 ctrl_info->error_buffer,
6503 ctrl_info->error_buffer_dma_handle);
6504 if (ctrl_info->iomem_base)
6505 pqi_cleanup_pci_init(ctrl_info);
6506 pqi_free_ctrl_info(ctrl_info);
6507}
6508
6509static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
6510{
061ef06a
KB
6511 pqi_cancel_rescan_worker(ctrl_info);
6512 pqi_cancel_update_time_worker(ctrl_info);
e57a1f9b
KB
6513 pqi_remove_all_scsi_devices(ctrl_info);
6514 pqi_unregister_scsi(ctrl_info);
162d7753
KB
6515 if (ctrl_info->pqi_mode_enabled)
6516 pqi_revert_to_sis_mode(ctrl_info);
6c223761
KB
6517 pqi_free_ctrl_resources(ctrl_info);
6518}
6519
3c50976f
KB
6520static void pqi_perform_lockup_action(void)
6521{
6522 switch (pqi_lockup_action) {
6523 case PANIC:
6524 panic("FATAL: Smart Family Controller lockup detected");
6525 break;
6526 case REBOOT:
6527 emergency_restart();
6528 break;
6529 case NONE:
6530 default:
6531 break;
6532 }
6533}
6534
5f310425
KB
6535static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
6536 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
6537 .status = SAM_STAT_CHECK_CONDITION,
6538};
6539
6540static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
376fb880
KB
6541{
6542 unsigned int i;
376fb880 6543 struct pqi_io_request *io_request;
376fb880
KB
6544 struct scsi_cmnd *scmd;
6545
5f310425
KB
6546 for (i = 0; i < ctrl_info->max_io_slots; i++) {
6547 io_request = &ctrl_info->io_request_pool[i];
6548 if (atomic_read(&io_request->refcount) == 0)
6549 continue;
376fb880 6550
5f310425
KB
6551 scmd = io_request->scmd;
6552 if (scmd) {
6553 set_host_byte(scmd, DID_NO_CONNECT);
6554 } else {
6555 io_request->status = -ENXIO;
6556 io_request->error_info =
6557 &pqi_ctrl_offline_raid_error_info;
376fb880 6558 }
5f310425
KB
6559
6560 io_request->io_complete_callback(io_request,
6561 io_request->context);
376fb880
KB
6562 }
6563}
6564
5f310425 6565static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
376fb880 6566{
5f310425
KB
6567 pqi_perform_lockup_action();
6568 pqi_stop_heartbeat_timer(ctrl_info);
6569 pqi_free_interrupts(ctrl_info);
6570 pqi_cancel_rescan_worker(ctrl_info);
6571 pqi_cancel_update_time_worker(ctrl_info);
6572 pqi_ctrl_wait_until_quiesced(ctrl_info);
6573 pqi_fail_all_outstanding_requests(ctrl_info);
6574 pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
6575 pqi_ctrl_unblock_requests(ctrl_info);
6576}
6577
6578static void pqi_ctrl_offline_worker(struct work_struct *work)
6579{
6580 struct pqi_ctrl_info *ctrl_info;
6581
6582 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
6583 pqi_take_ctrl_offline_deferred(ctrl_info);
376fb880
KB
6584}
6585
6586static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
6587{
5f310425
KB
6588 if (!ctrl_info->controller_online)
6589 return;
6590
376fb880 6591 ctrl_info->controller_online = false;
5f310425
KB
6592 ctrl_info->pqi_mode_enabled = false;
6593 pqi_ctrl_block_requests(ctrl_info);
5a259e32
KB
6594 if (!pqi_disable_ctrl_shutdown)
6595 sis_shutdown_ctrl(ctrl_info);
376fb880
KB
6596 pci_disable_device(ctrl_info->pci_dev);
6597 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
5f310425 6598 schedule_work(&ctrl_info->ctrl_offline_work);
376fb880
KB
6599}
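/*
 * Offline handling is split in two above: pqi_take_ctrl_offline() runs in
 * the detecting context (mark the controller offline, block requests,
 * optionally shut the controller down, disable the PCI device) and defers
 * the heavier work to pqi_ctrl_offline_worker(), which performs the
 * configured lockup action, stops timers and workers, and fails all
 * outstanding requests with DID_NO_CONNECT or a synthetic hardware-error/
 * check-condition result.
 */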
6600
d91d7820 6601static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
6c223761
KB
6602 const struct pci_device_id *id)
6603{
6604 char *ctrl_description;
6605
37b36847 6606 if (id->driver_data)
6c223761 6607 ctrl_description = (char *)id->driver_data;
37b36847
KB
6608 else
6609 ctrl_description = "Microsemi Smart Family Controller";
6c223761 6610
d91d7820 6611 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
6c223761
KB
6612}
6613
d91d7820
KB
6614static int pqi_pci_probe(struct pci_dev *pci_dev,
6615 const struct pci_device_id *id)
6c223761
KB
6616{
6617 int rc;
6618 int node;
6619 struct pqi_ctrl_info *ctrl_info;
6620
d91d7820 6621 pqi_print_ctrl_info(pci_dev, id);
6c223761
KB
6622
6623 if (pqi_disable_device_id_wildcards &&
6624 id->subvendor == PCI_ANY_ID &&
6625 id->subdevice == PCI_ANY_ID) {
d91d7820 6626 dev_warn(&pci_dev->dev,
6c223761
KB
6627 "controller not probed because device ID wildcards are disabled\n");
6628 return -ENODEV;
6629 }
6630
6631 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
d91d7820 6632 dev_warn(&pci_dev->dev,
6c223761
KB
6633 "controller device ID matched using wildcards\n");
6634
d91d7820 6635 node = dev_to_node(&pci_dev->dev);
6c223761 6636 if (node == NUMA_NO_NODE)
d91d7820 6637 set_dev_node(&pci_dev->dev, 0);
6c223761
KB
6638
6639 ctrl_info = pqi_alloc_ctrl_info(node);
6640 if (!ctrl_info) {
d91d7820 6641 dev_err(&pci_dev->dev,
6c223761
KB
6642 "failed to allocate controller info block\n");
6643 return -ENOMEM;
6644 }
6645
d91d7820 6646 ctrl_info->pci_dev = pci_dev;
6c223761
KB
6647
6648 rc = pqi_pci_init(ctrl_info);
6649 if (rc)
6650 goto error;
6651
6652 rc = pqi_ctrl_init(ctrl_info);
6653 if (rc)
6654 goto error;
6655
6656 return 0;
6657
6658error:
6659 pqi_remove_ctrl(ctrl_info);
6660
6661 return rc;
6662}
6663
d91d7820 6664static void pqi_pci_remove(struct pci_dev *pci_dev)
6c223761
KB
6665{
6666 struct pqi_ctrl_info *ctrl_info;
6667
d91d7820 6668 ctrl_info = pci_get_drvdata(pci_dev);
6c223761
KB
6669 if (!ctrl_info)
6670 return;
6671
6672 pqi_remove_ctrl(ctrl_info);
6673}
6674
d91d7820 6675static void pqi_shutdown(struct pci_dev *pci_dev)
6c223761
KB
6676{
6677 int rc;
6678 struct pqi_ctrl_info *ctrl_info;
6679
d91d7820 6680 ctrl_info = pci_get_drvdata(pci_dev);
6c223761
KB
6681 if (!ctrl_info)
6682 goto error;
6683
6684 /*
6685 * Write all data in the controller's battery-backed cache to
6686 * storage.
6687 */
58322fe0 6688 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
b6d47811 6689 pqi_reset(ctrl_info);
6c223761
KB
6690 if (rc == 0)
6691 return;
6692
6693error:
d91d7820 6694 dev_warn(&pci_dev->dev,
6c223761
KB
6695 "unable to flush controller cache\n");
6696}
6697
3c50976f
KB
6698static void pqi_process_lockup_action_param(void)
6699{
6700 unsigned int i;
6701
6702 if (!pqi_lockup_action_param)
6703 return;
6704
6705 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6706 if (strcmp(pqi_lockup_action_param,
6707 pqi_lockup_actions[i].name) == 0) {
6708 pqi_lockup_action = pqi_lockup_actions[i].action;
6709 return;
6710 }
6711 }
6712
6713 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
6714 DRIVER_NAME_SHORT, pqi_lockup_action_param);
6715}
6716
6717static void pqi_process_module_params(void)
6718{
6719 pqi_process_lockup_action_param();
6720}
6721
5c146686 6722static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
061ef06a
KB
6723{
6724 struct pqi_ctrl_info *ctrl_info;
6725
6726 ctrl_info = pci_get_drvdata(pci_dev);
6727
6728 pqi_disable_events(ctrl_info);
6729 pqi_cancel_update_time_worker(ctrl_info);
6730 pqi_cancel_rescan_worker(ctrl_info);
6731 pqi_wait_until_scan_finished(ctrl_info);
6732 pqi_wait_until_lun_reset_finished(ctrl_info);
58322fe0 6733 pqi_flush_cache(ctrl_info, SUSPEND);
061ef06a
KB
6734 pqi_ctrl_block_requests(ctrl_info);
6735 pqi_ctrl_wait_until_quiesced(ctrl_info);
6736 pqi_wait_until_inbound_queues_empty(ctrl_info);
6737 pqi_ctrl_wait_for_pending_io(ctrl_info);
6738 pqi_stop_heartbeat_timer(ctrl_info);
6739
6740 if (state.event == PM_EVENT_FREEZE)
6741 return 0;
6742
6743 pci_save_state(pci_dev);
6744 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
6745
6746 ctrl_info->controller_online = false;
6747 ctrl_info->pqi_mode_enabled = false;
6748
6749 return 0;
6750}
6751
5c146686 6752static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
061ef06a
KB
6753{
6754 int rc;
6755 struct pqi_ctrl_info *ctrl_info;
6756
6757 ctrl_info = pci_get_drvdata(pci_dev);
6758
6759 if (pci_dev->current_state != PCI_D0) {
6760 ctrl_info->max_hw_queue_index = 0;
6761 pqi_free_interrupts(ctrl_info);
6762 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
6763 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
6764 IRQF_SHARED, DRIVER_NAME_SHORT,
6765 &ctrl_info->queue_groups[0]);
6766 if (rc) {
6767 dev_err(&ctrl_info->pci_dev->dev,
6768 "irq %u init failed with error %d\n",
6769 pci_dev->irq, rc);
6770 return rc;
6771 }
6772 pqi_start_heartbeat_timer(ctrl_info);
6773 pqi_ctrl_unblock_requests(ctrl_info);
6774 return 0;
6775 }
6776
6777 pci_set_power_state(pci_dev, PCI_D0);
6778 pci_restore_state(pci_dev);
6779
6780 return pqi_ctrl_init_resume(ctrl_info);
6781}
6782
6c223761
KB
6783/* Define the PCI IDs for the controllers that we support. */
6784static const struct pci_device_id pqi_pci_id_table[] = {
b0f9408b
KB
6785 {
6786 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6787 0x105b, 0x1211)
6788 },
6789 {
6790 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6791 0x105b, 0x1321)
6792 },
7eddabff
KB
6793 {
6794 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6795 0x152d, 0x8a22)
6796 },
6797 {
6798 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6799 0x152d, 0x8a23)
6800 },
6801 {
6802 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6803 0x152d, 0x8a24)
6804 },
6805 {
6806 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6807 0x152d, 0x8a36)
6808 },
6809 {
6810 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6811 0x152d, 0x8a37)
6812 },
b0f9408b
KB
6813 {
6814 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6815 0x193d, 0x8460)
6816 },
6817 {
6818 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6819 0x193d, 0x8461)
6820 },
6821 {
6822 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6823 0x193d, 0xf460)
6824 },
6825 {
6826 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6827 0x193d, 0xf461)
6828 },
6829 {
6830 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6831 0x1bd4, 0x0045)
6832 },
6833 {
6834 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6835 0x1bd4, 0x0046)
6836 },
6837 {
6838 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6839 0x1bd4, 0x0047)
6840 },
6841 {
6842 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6843 0x1bd4, 0x0048)
6844 },
9f8d05fa
KB
6845 {
6846 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6847 0x1bd4, 0x004a)
6848 },
6849 {
6850 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6851 0x1bd4, 0x004b)
6852 },
6853 {
6854 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6855 0x1bd4, 0x004c)
6856 },
6c223761
KB
6857 {
6858 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6859 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
6860 },
6861 {
6862 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
55790064 6863 PCI_VENDOR_ID_ADAPTEC2, 0x0608)
6c223761
KB
6864 },
6865 {
6866 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6867 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
6c223761
KB
6868 },
6869 {
6870 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6871 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
6c223761
KB
6872 },
6873 {
6874 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6875 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
6c223761
KB
6876 },
6877 {
6878 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6879 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
6c223761
KB
6880 },
6881 {
6882 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6883 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
6c223761
KB
6884 },
6885 {
6886 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6887 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
6c223761
KB
6888 },
6889 {
6890 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6891 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
6c223761 6892 },
55790064
KB
6893 {
6894 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6895 PCI_VENDOR_ID_ADAPTEC2, 0x0807)
6896 },
6c223761
KB
6897 {
6898 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6899 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
6c223761
KB
6900 },
6901 {
6902 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6903 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
6c223761
KB
6904 },
6905 {
6906 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6907 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
6c223761
KB
6908 },
6909 {
6910 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6911 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
6c223761
KB
6912 },
6913 {
6914 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6915 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
6c223761
KB
6916 },
6917 {
6918 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6919 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
6c223761
KB
6920 },
6921 {
6922 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6923 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
6c223761
KB
6924 },
6925 {
6926 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6927 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
6c223761
KB
6928 },
6929 {
6930 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6931 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
6c223761 6932 },
55790064
KB
6933 {
6934 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6935 PCI_VENDOR_ID_ADAPTEC2, 0x090a)
6936 },
6c223761
KB
6937 {
6938 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6939 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
6c223761
KB
6940 },
6941 {
6942 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6943 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
6c223761
KB
6944 },
6945 {
6946 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6947 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
6c223761
KB
6948 },
6949 {
6950 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6951 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
6c223761
KB
6952 },
6953 {
6954 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6955 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
6c223761 6956 },
b0f9408b
KB
6957 {
6958 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6959 PCI_VENDOR_ID_ADAPTEC2, 0x1282)
6960 },
6c223761
KB
6961 {
6962 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6963 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
6c223761
KB
6964 },
6965 {
6966 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6967 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
6c223761 6968 },
bd809e8d
KB
6969 {
6970 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6971 PCI_VENDOR_ID_ADAPTEC2, 0x1302)
6972 },
6973 {
6974 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6975 PCI_VENDOR_ID_ADAPTEC2, 0x1303)
6976 },
6c223761
KB
6977 {
6978 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff
KB
6979 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
6980 },
9f8d05fa
KB
6981 {
6982 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6983 PCI_VENDOR_ID_ADVANTECH, 0x8312)
6984 },
55790064
KB
6985 {
6986 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6987 PCI_VENDOR_ID_DELL, 0x1fe0)
6988 },
7eddabff
KB
6989 {
6990 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6991 PCI_VENDOR_ID_HP, 0x0600)
6992 },
6993 {
6994 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6995 PCI_VENDOR_ID_HP, 0x0601)
6996 },
6997 {
6998 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6999 PCI_VENDOR_ID_HP, 0x0602)
7000 },
7001 {
7002 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7003 PCI_VENDOR_ID_HP, 0x0603)
7004 },
7005 {
7006 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
55790064 7007 PCI_VENDOR_ID_HP, 0x0609)
7eddabff
KB
7008 },
7009 {
7010 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7011 PCI_VENDOR_ID_HP, 0x0650)
7012 },
7013 {
7014 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7015 PCI_VENDOR_ID_HP, 0x0651)
7016 },
7017 {
7018 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7019 PCI_VENDOR_ID_HP, 0x0652)
7020 },
7021 {
7022 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7023 PCI_VENDOR_ID_HP, 0x0653)
7024 },
7025 {
7026 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7027 PCI_VENDOR_ID_HP, 0x0654)
7028 },
7029 {
7030 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7031 PCI_VENDOR_ID_HP, 0x0655)
7032 },
7eddabff
KB
7033 {
7034 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7035 PCI_VENDOR_ID_HP, 0x0700)
7036 },
7037 {
7038 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7039 PCI_VENDOR_ID_HP, 0x0701)
6c223761
KB
7040 },
7041 {
7042 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7043 PCI_VENDOR_ID_HP, 0x1001)
7044 },
7045 {
7046 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7047 PCI_VENDOR_ID_HP, 0x1100)
7048 },
7049 {
7050 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7051 PCI_VENDOR_ID_HP, 0x1101)
7052 },
6c223761
KB
7053 {
7054 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7055 PCI_ANY_ID, PCI_ANY_ID)
7056 },
7057 { 0 }
7058};
7059
7060MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
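/*
 * The final PCI_ANY_ID/PCI_ANY_ID entry in the table above is a catch-all
 * for any 0x028f device from Adaptec; pqi_pci_probe() warns when a
 * controller is matched this way and refuses to bind at all if
 * pqi_disable_device_id_wildcards is set.
 */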
7061
7062static struct pci_driver pqi_pci_driver = {
7063 .name = DRIVER_NAME_SHORT,
7064 .id_table = pqi_pci_id_table,
7065 .probe = pqi_pci_probe,
7066 .remove = pqi_pci_remove,
7067 .shutdown = pqi_shutdown,
061ef06a
KB
7068#if defined(CONFIG_PM)
7069 .suspend = pqi_suspend,
7070 .resume = pqi_resume,
7071#endif
6c223761
KB
7072};
7073
7074static int __init pqi_init(void)
7075{
7076 int rc;
7077
7078 pr_info(DRIVER_NAME "\n");
7079
7080 pqi_sas_transport_template =
7081 sas_attach_transport(&pqi_sas_transport_functions);
7082 if (!pqi_sas_transport_template)
7083 return -ENODEV;
7084
3c50976f
KB
7085 pqi_process_module_params();
7086
6c223761
KB
7087 rc = pci_register_driver(&pqi_pci_driver);
7088 if (rc)
7089 sas_release_transport(pqi_sas_transport_template);
7090
7091 return rc;
7092}
7093
7094static void __exit pqi_cleanup(void)
7095{
7096 pci_unregister_driver(&pqi_pci_driver);
7097 sas_release_transport(pqi_sas_transport_template);
7098}
7099
7100module_init(pqi_init);
7101module_exit(pqi_cleanup);
7102
7103static void __attribute__((unused)) verify_structures(void)
7104{
7105 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7106 sis_host_to_ctrl_doorbell) != 0x20);
7107 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7108 sis_interrupt_mask) != 0x34);
7109 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7110 sis_ctrl_to_host_doorbell) != 0x9c);
7111 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7112 sis_ctrl_to_host_doorbell_clear) != 0xa0);
ff6abb73
KB
7113 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7114 sis_driver_scratch) != 0xb0);
6c223761
KB
7115 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7116 sis_firmware_status) != 0xbc);
7117 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7118 sis_mailbox) != 0x1000);
7119 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7120 pqi_registers) != 0x4000);
7121
7122 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7123 iu_type) != 0x0);
7124 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7125 iu_length) != 0x2);
7126 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7127 response_queue_id) != 0x4);
7128 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7129 work_area) != 0x6);
7130 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
7131
7132 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7133 status) != 0x0);
7134 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7135 service_response) != 0x1);
7136 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7137 data_present) != 0x2);
7138 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7139 reserved) != 0x3);
7140 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7141 residual_count) != 0x4);
7142 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7143 data_length) != 0x8);
7144 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7145 reserved1) != 0xa);
7146 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7147 data) != 0xc);
7148 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
7149
7150 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7151 data_in_result) != 0x0);
7152 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7153 data_out_result) != 0x1);
7154 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7155 reserved) != 0x2);
7156 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7157 status) != 0x5);
7158 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7159 status_qualifier) != 0x6);
7160 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7161 sense_data_length) != 0x8);
7162 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7163 response_data_length) != 0xa);
7164 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7165 data_in_transferred) != 0xc);
7166 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7167 data_out_transferred) != 0x10);
7168 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7169 data) != 0x14);
7170 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
7171
7172 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7173 signature) != 0x0);
7174 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7175 function_and_status_code) != 0x8);
7176 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7177 max_admin_iq_elements) != 0x10);
7178 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7179 max_admin_oq_elements) != 0x11);
7180 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7181 admin_iq_element_length) != 0x12);
7182 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7183 admin_oq_element_length) != 0x13);
7184 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7185 max_reset_timeout) != 0x14);
7186 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7187 legacy_intx_status) != 0x18);
7188 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7189 legacy_intx_mask_set) != 0x1c);
7190 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7191 legacy_intx_mask_clear) != 0x20);
7192 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7193 device_status) != 0x40);
7194 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7195 admin_iq_pi_offset) != 0x48);
7196 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7197 admin_oq_ci_offset) != 0x50);
7198 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7199 admin_iq_element_array_addr) != 0x58);
7200 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7201 admin_oq_element_array_addr) != 0x60);
7202 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7203 admin_iq_ci_addr) != 0x68);
7204 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7205 admin_oq_pi_addr) != 0x70);
7206 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7207 admin_iq_num_elements) != 0x78);
7208 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7209 admin_oq_num_elements) != 0x79);
7210 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7211 admin_queue_int_msg_num) != 0x7a);
7212 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7213 device_error) != 0x80);
7214 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7215 error_details) != 0x88);
7216 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7217 device_reset) != 0x90);
7218 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7219 power_action) != 0x94);
7220 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
7221
7222 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7223 header.iu_type) != 0);
7224 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7225 header.iu_length) != 2);
7226 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7227 header.work_area) != 6);
7228 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7229 request_id) != 8);
7230 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7231 function_code) != 10);
7232 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7233 data.report_device_capability.buffer_length) != 44);
7234 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7235 data.report_device_capability.sg_descriptor) != 48);
7236 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7237 data.create_operational_iq.queue_id) != 12);
7238 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7239 data.create_operational_iq.element_array_addr) != 16);
7240 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7241 data.create_operational_iq.ci_addr) != 24);
7242 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7243 data.create_operational_iq.num_elements) != 32);
7244 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7245 data.create_operational_iq.element_length) != 34);
7246 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7247 data.create_operational_iq.queue_protocol) != 36);
7248 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7249 data.create_operational_oq.queue_id) != 12);
7250 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7251 data.create_operational_oq.element_array_addr) != 16);
7252 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7253 data.create_operational_oq.pi_addr) != 24);
7254 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7255 data.create_operational_oq.num_elements) != 32);
7256 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7257 data.create_operational_oq.element_length) != 34);
7258 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7259 data.create_operational_oq.queue_protocol) != 36);
7260 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7261 data.create_operational_oq.int_msg_num) != 40);
7262 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7263 data.create_operational_oq.coalescing_count) != 42);
7264 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7265 data.create_operational_oq.min_coalescing_time) != 44);
7266 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7267 data.create_operational_oq.max_coalescing_time) != 48);
7268 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7269 data.delete_operational_queue.queue_id) != 12);
7270 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
7271 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7272 data.create_operational_iq) != 64 - 11);
7273 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7274 data.create_operational_oq) != 64 - 11);
7275 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7276 data.delete_operational_queue) != 64 - 11);
7277
7278 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7279 header.iu_type) != 0);
7280 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7281 header.iu_length) != 2);
7282 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7283 header.work_area) != 6);
7284 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7285 request_id) != 8);
7286 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7287 function_code) != 10);
7288 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7289 status) != 11);
7290 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7291 data.create_operational_iq.status_descriptor) != 12);
7292 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7293 data.create_operational_iq.iq_pi_offset) != 16);
7294 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7295 data.create_operational_oq.status_descriptor) != 12);
7296 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7297 data.create_operational_oq.oq_ci_offset) != 16);
7298 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
7299
7300 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7301 header.iu_type) != 0);
7302 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7303 header.iu_length) != 2);
7304 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7305 header.response_queue_id) != 4);
7306 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7307 header.work_area) != 6);
7308 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7309 request_id) != 8);
7310 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7311 nexus_id) != 10);
7312 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7313 buffer_length) != 12);
7314 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7315 lun_number) != 16);
7316 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7317 protocol_specific) != 24);
7318 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7319 error_index) != 27);
7320 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7321 cdb) != 32);
7322 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7323 sg_descriptors) != 64);
7324 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
7325 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
7326
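	/* verify the layout of the PQI AIO path request IU */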
7327 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7328 header.iu_type) != 0);
7329 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7330 header.iu_length) != 2);
7331 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7332 header.response_queue_id) != 4);
7333 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7334 header.work_area) != 6);
7335 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7336 request_id) != 8);
7337 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7338 nexus_id) != 12);
7339 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7340 buffer_length) != 16);
7341 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7342 data_encryption_key_index) != 22);
7343 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7344 encrypt_tweak_lower) != 24);
7345 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7346 encrypt_tweak_upper) != 28);
7347 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7348 cdb) != 32);
7349 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7350 error_index) != 48);
7351 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7352 num_sg_descriptors) != 50);
7353 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7354 cdb_length) != 51);
7355 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7356 lun_number) != 52);
7357 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7358 sg_descriptors) != 64);
7359 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
7360 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
7361
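	/* verify the layout of the PQI I/O response IU */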
7362 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7363 header.iu_type) != 0);
7364 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7365 header.iu_length) != 2);
7366 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7367 request_id) != 8);
7368 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7369 error_index) != 10);
7370
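	/* verify the layout of the PQI general management (event configuration) request IU */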
7371 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7372 header.iu_type) != 0);
7373 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7374 header.iu_length) != 2);
7375 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7376 header.response_queue_id) != 4);
7377 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7378 request_id) != 8);
7379 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7380 data.report_event_configuration.buffer_length) != 12);
7381 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7382 data.report_event_configuration.sg_descriptors) != 16);
7383 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7384 data.set_event_configuration.global_event_oq_id) != 10);
7385 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7386 data.set_event_configuration.buffer_length) != 12);
7387 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7388 data.set_event_configuration.sg_descriptors) != 16);
7389
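	/* verify the layout of the PQI IU layer descriptor */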
7390 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
7391 max_inbound_iu_length) != 6);
7392 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
7393 max_outbound_iu_length) != 14);
7394 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
7395
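	/* verify the layout of the PQI device capability structure */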
7396 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7397 data_length) != 0);
7398 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7399 iq_arbitration_priority_support_bitmask) != 8);
7400 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7401 maximum_aw_a) != 9);
7402 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7403 maximum_aw_b) != 10);
7404 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7405 maximum_aw_c) != 11);
7406 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7407 max_inbound_queues) != 16);
7408 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7409 max_elements_per_iq) != 18);
7410 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7411 max_iq_element_length) != 24);
7412 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7413 min_iq_element_length) != 26);
7414 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7415 max_outbound_queues) != 30);
7416 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7417 max_elements_per_oq) != 32);
7418 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7419 intr_coalescing_time_granularity) != 34);
7420 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7421 max_oq_element_length) != 36);
7422 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7423 min_oq_element_length) != 38);
7424 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7425 iu_layer_descriptors) != 64);
7426 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
7427
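	/* verify the layout of the PQI event descriptor */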
7428 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
7429 event_type) != 0);
7430 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
7431 oq_id) != 2);
7432 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
7433
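	/* verify the layout of the PQI event configuration structure */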
7434 BUILD_BUG_ON(offsetof(struct pqi_event_config,
7435 num_event_descriptors) != 2);
7436 BUILD_BUG_ON(offsetof(struct pqi_event_config,
7437 descriptors) != 4);
7438
061ef06a
KB
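	/* the supported event type table must cover every supported event */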
7439 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
7440 ARRAY_SIZE(pqi_supported_event_types));
7441
6c223761
KB
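	/* verify the layout of the PQI event response IU */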
7442 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7443 header.iu_type) != 0);
7444 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7445 header.iu_length) != 2);
7446 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7447 event_type) != 8);
7448 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7449 event_id) != 10);
7450 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7451 additional_event_id) != 12);
7452 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7453 data) != 16);
7454 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
7455
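	/* verify the layout of the PQI event acknowledge request IU */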
7456 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7457 header.iu_type) != 0);
7458 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7459 header.iu_length) != 2);
7460 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7461 event_type) != 8);
7462 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7463 event_id) != 10);
7464 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7465 additional_event_id) != 12);
7466 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
7467
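	/* verify the layout of the PQI task management request IU */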
7468 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7469 header.iu_type) != 0);
7470 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7471 header.iu_length) != 2);
7472 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7473 request_id) != 8);
7474 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7475 nexus_id) != 10);
7476 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7477 lun_number) != 16);
7478 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7479 protocol_specific) != 24);
7480 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7481 outbound_queue_id_to_manage) != 26);
7482 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7483 request_id_to_manage) != 28);
7484 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7485 task_management_function) != 30);
7486 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
7487
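	/* verify the layout of the PQI task management response IU */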
7488 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7489 header.iu_type) != 0);
7490 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7491 header.iu_length) != 2);
7492 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7493 request_id) != 8);
7494 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7495 nexus_id) != 10);
7496 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7497 additional_response_info) != 12);
7498 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7499 response_code) != 15);
7500 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
7501
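	/* verify the layout of the BMIC identify controller buffer */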
7502 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7503 configured_logical_drive_count) != 0);
7504 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7505 configuration_signature) != 1);
7506 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7507 firmware_version) != 5);
7508 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7509 extended_logical_unit_count) != 154);
7510 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7511 firmware_build_number) != 190);
7512 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7513 controller_mode) != 292);
7514
1be42f46
KB
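	/* verify the layout of the BMIC identify physical device buffer */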
7515 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7516 phys_bay_in_box) != 115);
7517 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7518 device_type) != 120);
7519 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7520 redundant_path_present_map) != 1736);
7521 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7522 active_path_number) != 1738);
7523 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7524 alternate_paths_phys_connector) != 1739);
7525 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7526 alternate_paths_phys_box_on_port) != 1755);
7527 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7528 current_queue_depth_limit) != 1796);
7529 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
7530
6c223761
KB
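	/* sanity-check admin queue depths and queue element length limits/alignment */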
7531 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
7532 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
7533 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
7534 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7535 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
7536 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7537 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
7538 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
7539 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7540 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
7541 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
7542 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7543
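	/* reserved I/O slots must be fewer than the maximum outstanding requests */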
7544 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
d727a776
KB
7545 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
7546 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
6c223761
KB
7547}