scsi: smartpqi: update pqi passthru ioctl
[linux-2.6-block.git] / drivers / scsi / smartpqi / smartpqi_init.c
CommitLineData
6c223761
KB
1/*
2 * driver for Microsemi PQI-based storage controllers
b805dbfe 3 * Copyright (c) 2016-2017 Microsemi Corporation
6c223761
KB
4 * Copyright (c) 2016 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/rtc.h>
26#include <linux/bcd.h>
3c50976f 27#include <linux/reboot.h>
6c223761 28#include <linux/cciss_ioctl.h>
52198226 29#include <linux/blk-mq-pci.h>
6c223761
KB
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_cmnd.h>
32#include <scsi/scsi_device.h>
33#include <scsi/scsi_eh.h>
34#include <scsi/scsi_transport_sas.h>
35#include <asm/unaligned.h>
36#include "smartpqi.h"
37#include "smartpqi_sis.h"
38
39#if !defined(BUILD_TIMESTAMP)
40#define BUILD_TIMESTAMP
41#endif
42
2d154f5f
KB
43#define DRIVER_VERSION "1.0.4-100"
44#define DRIVER_MAJOR 1
45#define DRIVER_MINOR 0
46#define DRIVER_RELEASE 4
47#define DRIVER_REVISION 100
6c223761 48
2d154f5f
KB
49#define DRIVER_NAME "Microsemi PQI Driver (v" \
50 DRIVER_VERSION BUILD_TIMESTAMP ")"
6c223761
KB
51#define DRIVER_NAME_SHORT "smartpqi"
52
e1d213bd
KB
53#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))
54
6c223761
KB
55MODULE_AUTHOR("Microsemi");
56MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
57 DRIVER_VERSION);
58MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
59MODULE_VERSION(DRIVER_VERSION);
60MODULE_LICENSE("GPL");
61
6c223761 62static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
5f310425 63static void pqi_ctrl_offline_worker(struct work_struct *work);
376fb880 64static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
6c223761
KB
65static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
66static void pqi_scan_start(struct Scsi_Host *shost);
67static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
68 struct pqi_queue_group *queue_group, enum pqi_io_path path,
69 struct pqi_io_request *io_request);
70static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
71 struct pqi_iu_header *request, unsigned int flags,
72 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
73static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
74 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
75 unsigned int cdb_length, struct pqi_queue_group *queue_group,
376fb880 76 struct pqi_encryption_info *encryption_info, bool raid_bypass);
6c223761
KB
77
78/* for flags argument to pqi_submit_raid_request_synchronous() */
79#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
80
81static struct scsi_transport_template *pqi_sas_transport_template;
82
83static atomic_t pqi_controller_count = ATOMIC_INIT(0);
84
3c50976f
KB
85enum pqi_lockup_action {
86 NONE,
87 REBOOT,
88 PANIC
89};
90
91static enum pqi_lockup_action pqi_lockup_action = NONE;
92
93static struct {
94 enum pqi_lockup_action action;
95 char *name;
96} pqi_lockup_actions[] = {
97 {
98 .action = NONE,
99 .name = "none",
100 },
101 {
102 .action = REBOOT,
103 .name = "reboot",
104 },
105 {
106 .action = PANIC,
107 .name = "panic",
108 },
109};
110
6a50d6ad
KB
111static unsigned int pqi_supported_event_types[] = {
112 PQI_EVENT_TYPE_HOTPLUG,
113 PQI_EVENT_TYPE_HARDWARE,
114 PQI_EVENT_TYPE_PHYSICAL_DEVICE,
115 PQI_EVENT_TYPE_LOGICAL_DEVICE,
116 PQI_EVENT_TYPE_AIO_STATE_CHANGE,
117 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
118};
119
6c223761
KB
120static int pqi_disable_device_id_wildcards;
121module_param_named(disable_device_id_wildcards,
cbe0c7b1 122 pqi_disable_device_id_wildcards, int, 0644);
6c223761
KB
123MODULE_PARM_DESC(disable_device_id_wildcards,
124 "Disable device ID wildcards.");
125
5a259e32
KB
126static int pqi_disable_heartbeat;
127module_param_named(disable_heartbeat,
128 pqi_disable_heartbeat, int, 0644);
129MODULE_PARM_DESC(disable_heartbeat,
130 "Disable heartbeat.");
131
132static int pqi_disable_ctrl_shutdown;
133module_param_named(disable_ctrl_shutdown,
134 pqi_disable_ctrl_shutdown, int, 0644);
135MODULE_PARM_DESC(disable_ctrl_shutdown,
136 "Disable controller shutdown when controller locked up.");
137
3c50976f
KB
138static char *pqi_lockup_action_param;
139module_param_named(lockup_action,
140 pqi_lockup_action_param, charp, 0644);
141MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
142 "\t\tSupported: none, reboot, panic\n"
143 "\t\tDefault: none");
144
6c223761
KB
145static char *raid_levels[] = {
146 "RAID-0",
147 "RAID-4",
148 "RAID-1(1+0)",
149 "RAID-5",
150 "RAID-5+1",
151 "RAID-ADG",
152 "RAID-1(ADM)",
153};
154
155static char *pqi_raid_level_to_string(u8 raid_level)
156{
157 if (raid_level < ARRAY_SIZE(raid_levels))
158 return raid_levels[raid_level];
159
a9f93392 160 return "RAID UNKNOWN";
6c223761
KB
161}
162
163#define SA_RAID_0 0
164#define SA_RAID_4 1
165#define SA_RAID_1 2 /* also used for RAID 10 */
166#define SA_RAID_5 3 /* also used for RAID 50 */
167#define SA_RAID_51 4
168#define SA_RAID_6 5 /* also used for RAID 60 */
169#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
170#define SA_RAID_MAX SA_RAID_ADM
171#define SA_RAID_UNKNOWN 0xff
172
173static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
174{
7561a7e4 175 pqi_prep_for_scsi_done(scmd);
6c223761
KB
176 scmd->scsi_done(scmd);
177}
178
179static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
180{
181 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
182}
183
184static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
185{
186 void *hostdata = shost_priv(shost);
187
188 return *((struct pqi_ctrl_info **)hostdata);
189}
190
191static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
192{
193 return !device->is_physical_device;
194}
195
bd10cf0b
KB
196static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
197{
198 return scsi3addr[2] != 0;
199}
200
6c223761
KB
201static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
202{
203 return !ctrl_info->controller_online;
204}
205
206static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
207{
208 if (ctrl_info->controller_online)
209 if (!sis_is_firmware_running(ctrl_info))
210 pqi_take_ctrl_offline(ctrl_info);
211}
212
213static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
214{
215 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
216}
217
ff6abb73
KB
218static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
219 struct pqi_ctrl_info *ctrl_info)
220{
221 return sis_read_driver_scratch(ctrl_info);
222}
223
224static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
225 enum pqi_ctrl_mode mode)
226{
227 sis_write_driver_scratch(ctrl_info, mode);
228}
229
7561a7e4
KB
230static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
231{
232 ctrl_info->block_requests = true;
233 scsi_block_requests(ctrl_info->scsi_host);
234}
235
236static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
237{
238 ctrl_info->block_requests = false;
239 wake_up_all(&ctrl_info->block_requests_wait);
376fb880 240 pqi_retry_raid_bypass_requests(ctrl_info);
7561a7e4
KB
241 scsi_unblock_requests(ctrl_info->scsi_host);
242}
243
244static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
245{
246 return ctrl_info->block_requests;
247}
248
249static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
250 unsigned long timeout_msecs)
251{
252 unsigned long remaining_msecs;
253
254 if (!pqi_ctrl_blocked(ctrl_info))
255 return timeout_msecs;
256
257 atomic_inc(&ctrl_info->num_blocked_threads);
258
259 if (timeout_msecs == NO_TIMEOUT) {
260 wait_event(ctrl_info->block_requests_wait,
261 !pqi_ctrl_blocked(ctrl_info));
262 remaining_msecs = timeout_msecs;
263 } else {
264 unsigned long remaining_jiffies;
265
266 remaining_jiffies =
267 wait_event_timeout(ctrl_info->block_requests_wait,
268 !pqi_ctrl_blocked(ctrl_info),
269 msecs_to_jiffies(timeout_msecs));
270 remaining_msecs = jiffies_to_msecs(remaining_jiffies);
271 }
272
273 atomic_dec(&ctrl_info->num_blocked_threads);
274
275 return remaining_msecs;
276}
277
278static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
279{
280 atomic_inc(&ctrl_info->num_busy_threads);
281}
282
283static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
284{
285 atomic_dec(&ctrl_info->num_busy_threads);
286}
287
288static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
289{
290 while (atomic_read(&ctrl_info->num_busy_threads) >
291 atomic_read(&ctrl_info->num_blocked_threads))
292 usleep_range(1000, 2000);
293}
294
03b288cf
KB
295static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
296{
297 return device->device_offline;
298}
299
7561a7e4
KB
300static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
301{
302 device->in_reset = true;
303}
304
305static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
306{
307 device->in_reset = false;
308}
309
310static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
311{
312 return device->in_reset;
313}
6c223761 314
5f310425
KB
315static inline void pqi_schedule_rescan_worker_with_delay(
316 struct pqi_ctrl_info *ctrl_info, unsigned long delay)
317{
318 if (pqi_ctrl_offline(ctrl_info))
319 return;
320
321 schedule_delayed_work(&ctrl_info->rescan_work, delay);
322}
323
6c223761
KB
324static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
325{
5f310425
KB
326 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
327}
328
329#define PQI_RESCAN_WORK_DELAY (10 * HZ)
330
331static inline void pqi_schedule_rescan_worker_delayed(
332 struct pqi_ctrl_info *ctrl_info)
333{
334 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
6c223761
KB
335}
336
061ef06a
KB
337static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
338{
339 cancel_delayed_work_sync(&ctrl_info->rescan_work);
340}
341
98f87667
KB
342static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
343{
344 if (!ctrl_info->heartbeat_counter)
345 return 0;
346
347 return readl(ctrl_info->heartbeat_counter);
348}
349
6c223761
KB
350static int pqi_map_single(struct pci_dev *pci_dev,
351 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
352 size_t buffer_length, int data_direction)
353{
354 dma_addr_t bus_address;
355
356 if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
357 return 0;
358
359 bus_address = pci_map_single(pci_dev, buffer, buffer_length,
360 data_direction);
361 if (pci_dma_mapping_error(pci_dev, bus_address))
362 return -ENOMEM;
363
364 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
365 put_unaligned_le32(buffer_length, &sg_descriptor->length);
366 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
367
368 return 0;
369}
370
371static void pqi_pci_unmap(struct pci_dev *pci_dev,
372 struct pqi_sg_descriptor *descriptors, int num_descriptors,
373 int data_direction)
374{
375 int i;
376
377 if (data_direction == PCI_DMA_NONE)
378 return;
379
380 for (i = 0; i < num_descriptors; i++)
381 pci_unmap_single(pci_dev,
382 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
383 get_unaligned_le32(&descriptors[i].length),
384 data_direction);
385}
386
387static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
388 struct pqi_raid_path_request *request, u8 cmd,
389 u8 *scsi3addr, void *buffer, size_t buffer_length,
390 u16 vpd_page, int *pci_direction)
391{
392 u8 *cdb;
393 int pci_dir;
394
395 memset(request, 0, sizeof(*request));
396
397 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
398 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
399 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
400 &request->header.iu_length);
401 put_unaligned_le32(buffer_length, &request->buffer_length);
402 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
403 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
404 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
405
406 cdb = request->cdb;
407
408 switch (cmd) {
409 case INQUIRY:
410 request->data_direction = SOP_READ_FLAG;
411 cdb[0] = INQUIRY;
412 if (vpd_page & VPD_PAGE) {
413 cdb[1] = 0x1;
414 cdb[2] = (u8)vpd_page;
415 }
416 cdb[4] = (u8)buffer_length;
417 break;
418 case CISS_REPORT_LOG:
419 case CISS_REPORT_PHYS:
420 request->data_direction = SOP_READ_FLAG;
421 cdb[0] = cmd;
422 if (cmd == CISS_REPORT_PHYS)
423 cdb[1] = CISS_REPORT_PHYS_EXTENDED;
424 else
425 cdb[1] = CISS_REPORT_LOG_EXTENDED;
426 put_unaligned_be32(buffer_length, &cdb[6]);
427 break;
428 case CISS_GET_RAID_MAP:
429 request->data_direction = SOP_READ_FLAG;
430 cdb[0] = CISS_READ;
431 cdb[1] = CISS_GET_RAID_MAP;
432 put_unaligned_be32(buffer_length, &cdb[6]);
433 break;
58322fe0 434 case SA_FLUSH_CACHE:
6c223761
KB
435 request->data_direction = SOP_WRITE_FLAG;
436 cdb[0] = BMIC_WRITE;
58322fe0 437 cdb[6] = BMIC_FLUSH_CACHE;
6c223761
KB
438 put_unaligned_be16(buffer_length, &cdb[7]);
439 break;
440 case BMIC_IDENTIFY_CONTROLLER:
441 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
442 request->data_direction = SOP_READ_FLAG;
443 cdb[0] = BMIC_READ;
444 cdb[6] = cmd;
445 put_unaligned_be16(buffer_length, &cdb[7]);
446 break;
447 case BMIC_WRITE_HOST_WELLNESS:
448 request->data_direction = SOP_WRITE_FLAG;
449 cdb[0] = BMIC_WRITE;
450 cdb[6] = cmd;
451 put_unaligned_be16(buffer_length, &cdb[7]);
452 break;
453 default:
454 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
455 cmd);
6c223761
KB
456 break;
457 }
458
459 switch (request->data_direction) {
460 case SOP_READ_FLAG:
461 pci_dir = PCI_DMA_FROMDEVICE;
462 break;
463 case SOP_WRITE_FLAG:
464 pci_dir = PCI_DMA_TODEVICE;
465 break;
466 case SOP_NO_DIRECTION_FLAG:
467 pci_dir = PCI_DMA_NONE;
468 break;
469 default:
470 pci_dir = PCI_DMA_BIDIRECTIONAL;
471 break;
472 }
473
474 *pci_direction = pci_dir;
475
476 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
477 buffer, buffer_length, pci_dir);
478}
479
376fb880
KB
480static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
481{
482 io_request->scmd = NULL;
483 io_request->status = 0;
484 io_request->error_info = NULL;
485 io_request->raid_bypass = false;
486}
487
6c223761
KB
488static struct pqi_io_request *pqi_alloc_io_request(
489 struct pqi_ctrl_info *ctrl_info)
490{
491 struct pqi_io_request *io_request;
492 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
493
494 while (1) {
495 io_request = &ctrl_info->io_request_pool[i];
496 if (atomic_inc_return(&io_request->refcount) == 1)
497 break;
498 atomic_dec(&io_request->refcount);
499 i = (i + 1) % ctrl_info->max_io_slots;
500 }
501
502 /* benignly racy */
503 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
504
376fb880 505 pqi_reinit_io_request(io_request);
6c223761
KB
506
507 return io_request;
508}
509
510static void pqi_free_io_request(struct pqi_io_request *io_request)
511{
512 atomic_dec(&io_request->refcount);
513}
514
515static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
516 struct bmic_identify_controller *buffer)
517{
518 int rc;
519 int pci_direction;
520 struct pqi_raid_path_request request;
521
522 rc = pqi_build_raid_path_request(ctrl_info, &request,
523 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
524 sizeof(*buffer), 0, &pci_direction);
525 if (rc)
526 return rc;
527
528 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
529 NULL, NO_TIMEOUT);
530
531 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
532 pci_direction);
533
534 return rc;
535}
536
537static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
538 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
539{
540 int rc;
541 int pci_direction;
542 struct pqi_raid_path_request request;
543
544 rc = pqi_build_raid_path_request(ctrl_info, &request,
545 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
546 &pci_direction);
547 if (rc)
548 return rc;
549
550 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
551 NULL, NO_TIMEOUT);
552
553 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
554 pci_direction);
555
556 return rc;
557}
558
559static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
560 struct pqi_scsi_dev *device,
561 struct bmic_identify_physical_device *buffer,
562 size_t buffer_length)
563{
564 int rc;
565 int pci_direction;
566 u16 bmic_device_index;
567 struct pqi_raid_path_request request;
568
569 rc = pqi_build_raid_path_request(ctrl_info, &request,
570 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
571 buffer_length, 0, &pci_direction);
572 if (rc)
573 return rc;
574
575 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
576 request.cdb[2] = (u8)bmic_device_index;
577 request.cdb[9] = (u8)(bmic_device_index >> 8);
578
579 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
580 0, NULL, NO_TIMEOUT);
581
582 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
583 pci_direction);
584
585 return rc;
586}
587
58322fe0
KB
588static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
589 enum bmic_flush_cache_shutdown_event shutdown_event)
6c223761
KB
590{
591 int rc;
592 struct pqi_raid_path_request request;
593 int pci_direction;
58322fe0 594 struct bmic_flush_cache *flush_cache;
6c223761
KB
595
596 /*
597 * Don't bother trying to flush the cache if the controller is
598 * locked up.
599 */
600 if (pqi_ctrl_offline(ctrl_info))
601 return -ENXIO;
602
58322fe0
KB
603 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
604 if (!flush_cache)
6c223761
KB
605 return -ENOMEM;
606
58322fe0
KB
607 flush_cache->shutdown_event = shutdown_event;
608
6c223761 609 rc = pqi_build_raid_path_request(ctrl_info, &request,
58322fe0
KB
610 SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
611 sizeof(*flush_cache), 0, &pci_direction);
6c223761
KB
612 if (rc)
613 goto out;
614
615 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
d48f8fad 616 0, NULL, NO_TIMEOUT);
6c223761
KB
617
618 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
619 pci_direction);
620
621out:
58322fe0 622 kfree(flush_cache);
6c223761
KB
623
624 return rc;
625}
626
627static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
628 void *buffer, size_t buffer_length)
629{
630 int rc;
631 struct pqi_raid_path_request request;
632 int pci_direction;
633
634 rc = pqi_build_raid_path_request(ctrl_info, &request,
635 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
636 buffer_length, 0, &pci_direction);
637 if (rc)
638 return rc;
639
640 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
641 0, NULL, NO_TIMEOUT);
642
643 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
644 pci_direction);
645
646 return rc;
647}
648
649#pragma pack(1)
650
651struct bmic_host_wellness_driver_version {
652 u8 start_tag[4];
653 u8 driver_version_tag[2];
654 __le16 driver_version_length;
655 char driver_version[32];
656 u8 end_tag[2];
657};
658
659#pragma pack()
660
661static int pqi_write_driver_version_to_host_wellness(
662 struct pqi_ctrl_info *ctrl_info)
663{
664 int rc;
665 struct bmic_host_wellness_driver_version *buffer;
666 size_t buffer_length;
667
668 buffer_length = sizeof(*buffer);
669
670 buffer = kmalloc(buffer_length, GFP_KERNEL);
671 if (!buffer)
672 return -ENOMEM;
673
674 buffer->start_tag[0] = '<';
675 buffer->start_tag[1] = 'H';
676 buffer->start_tag[2] = 'W';
677 buffer->start_tag[3] = '>';
678 buffer->driver_version_tag[0] = 'D';
679 buffer->driver_version_tag[1] = 'V';
680 put_unaligned_le16(sizeof(buffer->driver_version),
681 &buffer->driver_version_length);
061ef06a 682 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
6c223761
KB
683 sizeof(buffer->driver_version) - 1);
684 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
685 buffer->end_tag[0] = 'Z';
686 buffer->end_tag[1] = 'Z';
687
688 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
689
690 kfree(buffer);
691
692 return rc;
693}
694
695#pragma pack(1)
696
697struct bmic_host_wellness_time {
698 u8 start_tag[4];
699 u8 time_tag[2];
700 __le16 time_length;
701 u8 time[8];
702 u8 dont_write_tag[2];
703 u8 end_tag[2];
704};
705
706#pragma pack()
707
708static int pqi_write_current_time_to_host_wellness(
709 struct pqi_ctrl_info *ctrl_info)
710{
711 int rc;
712 struct bmic_host_wellness_time *buffer;
713 size_t buffer_length;
714 time64_t local_time;
715 unsigned int year;
ed10858e 716 struct tm tm;
6c223761
KB
717
718 buffer_length = sizeof(*buffer);
719
720 buffer = kmalloc(buffer_length, GFP_KERNEL);
721 if (!buffer)
722 return -ENOMEM;
723
724 buffer->start_tag[0] = '<';
725 buffer->start_tag[1] = 'H';
726 buffer->start_tag[2] = 'W';
727 buffer->start_tag[3] = '>';
728 buffer->time_tag[0] = 'T';
729 buffer->time_tag[1] = 'D';
730 put_unaligned_le16(sizeof(buffer->time),
731 &buffer->time_length);
732
ed10858e
AB
733 local_time = ktime_get_real_seconds();
734 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
6c223761
KB
735 year = tm.tm_year + 1900;
736
737 buffer->time[0] = bin2bcd(tm.tm_hour);
738 buffer->time[1] = bin2bcd(tm.tm_min);
739 buffer->time[2] = bin2bcd(tm.tm_sec);
740 buffer->time[3] = 0;
741 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
742 buffer->time[5] = bin2bcd(tm.tm_mday);
743 buffer->time[6] = bin2bcd(year / 100);
744 buffer->time[7] = bin2bcd(year % 100);
745
746 buffer->dont_write_tag[0] = 'D';
747 buffer->dont_write_tag[1] = 'W';
748 buffer->end_tag[0] = 'Z';
749 buffer->end_tag[1] = 'Z';
750
751 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
752
753 kfree(buffer);
754
755 return rc;
756}
757
758#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
759
760static void pqi_update_time_worker(struct work_struct *work)
761{
762 int rc;
763 struct pqi_ctrl_info *ctrl_info;
764
765 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
766 update_time_work);
767
5f310425
KB
768 if (pqi_ctrl_offline(ctrl_info))
769 return;
770
6c223761
KB
771 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
772 if (rc)
773 dev_warn(&ctrl_info->pci_dev->dev,
774 "error updating time on controller\n");
775
776 schedule_delayed_work(&ctrl_info->update_time_work,
777 PQI_UPDATE_TIME_WORK_INTERVAL);
778}
779
780static inline void pqi_schedule_update_time_worker(
4fbebf1a 781 struct pqi_ctrl_info *ctrl_info)
6c223761 782{
4fbebf1a 783 schedule_delayed_work(&ctrl_info->update_time_work, 0);
061ef06a
KB
784}
785
786static inline void pqi_cancel_update_time_worker(
787 struct pqi_ctrl_info *ctrl_info)
788{
061ef06a 789 cancel_delayed_work_sync(&ctrl_info->update_time_work);
6c223761
KB
790}
791
792static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
793 void *buffer, size_t buffer_length)
794{
795 int rc;
796 int pci_direction;
797 struct pqi_raid_path_request request;
798
799 rc = pqi_build_raid_path_request(ctrl_info, &request,
800 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
801 if (rc)
802 return rc;
803
804 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
805 NULL, NO_TIMEOUT);
806
807 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
808 pci_direction);
809
810 return rc;
811}
812
813static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
814 void **buffer)
815{
816 int rc;
817 size_t lun_list_length;
818 size_t lun_data_length;
819 size_t new_lun_list_length;
820 void *lun_data = NULL;
821 struct report_lun_header *report_lun_header;
822
823 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
824 if (!report_lun_header) {
825 rc = -ENOMEM;
826 goto out;
827 }
828
829 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
830 sizeof(*report_lun_header));
831 if (rc)
832 goto out;
833
834 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
835
836again:
837 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
838
839 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
840 if (!lun_data) {
841 rc = -ENOMEM;
842 goto out;
843 }
844
845 if (lun_list_length == 0) {
846 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
847 goto out;
848 }
849
850 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
851 if (rc)
852 goto out;
853
854 new_lun_list_length = get_unaligned_be32(
855 &((struct report_lun_header *)lun_data)->list_length);
856
857 if (new_lun_list_length > lun_list_length) {
858 lun_list_length = new_lun_list_length;
859 kfree(lun_data);
860 goto again;
861 }
862
863out:
864 kfree(report_lun_header);
865
866 if (rc) {
867 kfree(lun_data);
868 lun_data = NULL;
869 }
870
871 *buffer = lun_data;
872
873 return rc;
874}
875
876static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
877 void **buffer)
878{
879 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
880 buffer);
881}
882
883static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
884 void **buffer)
885{
886 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
887}
888
889static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
890 struct report_phys_lun_extended **physdev_list,
891 struct report_log_lun_extended **logdev_list)
892{
893 int rc;
894 size_t logdev_list_length;
895 size_t logdev_data_length;
896 struct report_log_lun_extended *internal_logdev_list;
897 struct report_log_lun_extended *logdev_data;
898 struct report_lun_header report_lun_header;
899
900 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
901 if (rc)
902 dev_err(&ctrl_info->pci_dev->dev,
903 "report physical LUNs failed\n");
904
905 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
906 if (rc)
907 dev_err(&ctrl_info->pci_dev->dev,
908 "report logical LUNs failed\n");
909
910 /*
911 * Tack the controller itself onto the end of the logical device list.
912 */
913
914 logdev_data = *logdev_list;
915
916 if (logdev_data) {
917 logdev_list_length =
918 get_unaligned_be32(&logdev_data->header.list_length);
919 } else {
920 memset(&report_lun_header, 0, sizeof(report_lun_header));
921 logdev_data =
922 (struct report_log_lun_extended *)&report_lun_header;
923 logdev_list_length = 0;
924 }
925
926 logdev_data_length = sizeof(struct report_lun_header) +
927 logdev_list_length;
928
929 internal_logdev_list = kmalloc(logdev_data_length +
930 sizeof(struct report_log_lun_extended), GFP_KERNEL);
931 if (!internal_logdev_list) {
932 kfree(*logdev_list);
933 *logdev_list = NULL;
934 return -ENOMEM;
935 }
936
937 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
938 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
939 sizeof(struct report_log_lun_extended_entry));
940 put_unaligned_be32(logdev_list_length +
941 sizeof(struct report_log_lun_extended_entry),
942 &internal_logdev_list->header.list_length);
943
944 kfree(*logdev_list);
945 *logdev_list = internal_logdev_list;
946
947 return 0;
948}
949
950static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
951 int bus, int target, int lun)
952{
953 device->bus = bus;
954 device->target = target;
955 device->lun = lun;
956}
957
958static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
959{
960 u8 *scsi3addr;
961 u32 lunid;
bd10cf0b
KB
962 int bus;
963 int target;
964 int lun;
6c223761
KB
965
966 scsi3addr = device->scsi3addr;
967 lunid = get_unaligned_le32(scsi3addr);
968
969 if (pqi_is_hba_lunid(scsi3addr)) {
970 /* The specified device is the controller. */
971 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
972 device->target_lun_valid = true;
973 return;
974 }
975
976 if (pqi_is_logical_device(device)) {
bd10cf0b
KB
977 if (device->is_external_raid_device) {
978 bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
979 target = (lunid >> 16) & 0x3fff;
980 lun = lunid & 0xff;
981 } else {
982 bus = PQI_RAID_VOLUME_BUS;
983 target = 0;
984 lun = lunid & 0x3fff;
985 }
986 pqi_set_bus_target_lun(device, bus, target, lun);
6c223761
KB
987 device->target_lun_valid = true;
988 return;
989 }
990
991 /*
992 * Defer target and LUN assignment for non-controller physical devices
993 * because the SAS transport layer will make these assignments later.
994 */
995 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
996}
997
998static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
999 struct pqi_scsi_dev *device)
1000{
1001 int rc;
1002 u8 raid_level;
1003 u8 *buffer;
1004
1005 raid_level = SA_RAID_UNKNOWN;
1006
1007 buffer = kmalloc(64, GFP_KERNEL);
1008 if (buffer) {
1009 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1010 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
1011 if (rc == 0) {
1012 raid_level = buffer[8];
1013 if (raid_level > SA_RAID_MAX)
1014 raid_level = SA_RAID_UNKNOWN;
1015 }
1016 kfree(buffer);
1017 }
1018
1019 device->raid_level = raid_level;
1020}
1021
1022static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1023 struct pqi_scsi_dev *device, struct raid_map *raid_map)
1024{
1025 char *err_msg;
1026 u32 raid_map_size;
1027 u32 r5or6_blocks_per_row;
1028 unsigned int num_phys_disks;
1029 unsigned int num_raid_map_entries;
1030
1031 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1032
1033 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1034 err_msg = "RAID map too small";
1035 goto bad_raid_map;
1036 }
1037
1038 if (raid_map_size > sizeof(*raid_map)) {
1039 err_msg = "RAID map too large";
1040 goto bad_raid_map;
1041 }
1042
1043 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
1044 (get_unaligned_le16(&raid_map->data_disks_per_row) +
1045 get_unaligned_le16(&raid_map->metadata_disks_per_row));
1046 num_raid_map_entries = num_phys_disks *
1047 get_unaligned_le16(&raid_map->row_cnt);
1048
1049 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
1050 err_msg = "invalid number of map entries in RAID map";
1051 goto bad_raid_map;
1052 }
1053
1054 if (device->raid_level == SA_RAID_1) {
1055 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1056 err_msg = "invalid RAID-1 map";
1057 goto bad_raid_map;
1058 }
1059 } else if (device->raid_level == SA_RAID_ADM) {
1060 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
1061 err_msg = "invalid RAID-1(ADM) map";
1062 goto bad_raid_map;
1063 }
1064 } else if ((device->raid_level == SA_RAID_5 ||
1065 device->raid_level == SA_RAID_6) &&
1066 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1067 /* RAID 50/60 */
1068 r5or6_blocks_per_row =
1069 get_unaligned_le16(&raid_map->strip_size) *
1070 get_unaligned_le16(&raid_map->data_disks_per_row);
1071 if (r5or6_blocks_per_row == 0) {
1072 err_msg = "invalid RAID-5 or RAID-6 map";
1073 goto bad_raid_map;
1074 }
1075 }
1076
1077 return 0;
1078
1079bad_raid_map:
d87d5474
KB
1080 dev_warn(&ctrl_info->pci_dev->dev,
1081 "scsi %d:%d:%d:%d %s\n",
1082 ctrl_info->scsi_host->host_no,
1083 device->bus, device->target, device->lun, err_msg);
6c223761
KB
1084
1085 return -EINVAL;
1086}
1087
1088static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1089 struct pqi_scsi_dev *device)
1090{
1091 int rc;
1092 int pci_direction;
1093 struct pqi_raid_path_request request;
1094 struct raid_map *raid_map;
1095
1096 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1097 if (!raid_map)
1098 return -ENOMEM;
1099
1100 rc = pqi_build_raid_path_request(ctrl_info, &request,
1101 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
1102 sizeof(*raid_map), 0, &pci_direction);
1103 if (rc)
1104 goto error;
1105
1106 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
1107 NULL, NO_TIMEOUT);
1108
1109 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
1110 pci_direction);
1111
1112 if (rc)
1113 goto error;
1114
1115 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1116 if (rc)
1117 goto error;
1118
1119 device->raid_map = raid_map;
1120
1121 return 0;
1122
1123error:
1124 kfree(raid_map);
1125
1126 return rc;
1127}
1128
588a63fe 1129static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
1130 struct pqi_scsi_dev *device)
1131{
1132 int rc;
1133 u8 *buffer;
588a63fe 1134 u8 bypass_status;
6c223761
KB
1135
1136 buffer = kmalloc(64, GFP_KERNEL);
1137 if (!buffer)
1138 return;
1139
1140 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
588a63fe 1141 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
6c223761
KB
1142 if (rc)
1143 goto out;
1144
588a63fe
KB
1145#define RAID_BYPASS_STATUS 4
1146#define RAID_BYPASS_CONFIGURED 0x1
1147#define RAID_BYPASS_ENABLED 0x2
6c223761 1148
588a63fe
KB
1149 bypass_status = buffer[RAID_BYPASS_STATUS];
1150 device->raid_bypass_configured =
1151 (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1152 if (device->raid_bypass_configured &&
1153 (bypass_status & RAID_BYPASS_ENABLED) &&
1154 pqi_get_raid_map(ctrl_info, device) == 0)
1155 device->raid_bypass_enabled = true;
6c223761
KB
1156
1157out:
1158 kfree(buffer);
1159}
1160
1161/*
1162 * Use vendor-specific VPD to determine online/offline status of a volume.
1163 */
1164
1165static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1166 struct pqi_scsi_dev *device)
1167{
1168 int rc;
1169 size_t page_length;
1170 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1171 bool volume_offline = true;
1172 u32 volume_flags;
1173 struct ciss_vpd_logical_volume_status *vpd;
1174
1175 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1176 if (!vpd)
1177 goto no_buffer;
1178
1179 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1180 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1181 if (rc)
1182 goto out;
1183
1184 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1185 volume_status) + vpd->page_length;
1186 if (page_length < sizeof(*vpd))
1187 goto out;
1188
1189 volume_status = vpd->volume_status;
1190 volume_flags = get_unaligned_be32(&vpd->flags);
1191 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1192
1193out:
1194 kfree(vpd);
1195no_buffer:
1196 device->volume_status = volume_status;
1197 device->volume_offline = volume_offline;
1198}
1199
1200static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1201 struct pqi_scsi_dev *device)
1202{
1203 int rc;
1204 u8 *buffer;
1205
1206 buffer = kmalloc(64, GFP_KERNEL);
1207 if (!buffer)
1208 return -ENOMEM;
1209
1210 /* Send an inquiry to the device to see what it is. */
1211 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1212 if (rc)
1213 goto out;
1214
1215 scsi_sanitize_inquiry_string(&buffer[8], 8);
1216 scsi_sanitize_inquiry_string(&buffer[16], 16);
1217
1218 device->devtype = buffer[0] & 0x1f;
cbe0c7b1
KB
1219 memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1220 memcpy(device->model, &buffer[16], sizeof(device->model));
6c223761
KB
1221
1222 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
bd10cf0b
KB
1223 if (device->is_external_raid_device) {
1224 device->raid_level = SA_RAID_UNKNOWN;
1225 device->volume_status = CISS_LV_OK;
1226 device->volume_offline = false;
1227 } else {
1228 pqi_get_raid_level(ctrl_info, device);
588a63fe 1229 pqi_get_raid_bypass_status(ctrl_info, device);
bd10cf0b
KB
1230 pqi_get_volume_status(ctrl_info, device);
1231 }
6c223761
KB
1232 }
1233
1234out:
1235 kfree(buffer);
1236
1237 return rc;
1238}
1239
1240static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1241 struct pqi_scsi_dev *device,
1242 struct bmic_identify_physical_device *id_phys)
1243{
1244 int rc;
1245
1246 memset(id_phys, 0, sizeof(*id_phys));
1247
1248 rc = pqi_identify_physical_device(ctrl_info, device,
1249 id_phys, sizeof(*id_phys));
1250 if (rc) {
1251 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1252 return;
1253 }
1254
1255 device->queue_depth =
1256 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1257 device->device_type = id_phys->device_type;
1258 device->active_path_index = id_phys->active_path_number;
1259 device->path_map = id_phys->redundant_path_present_map;
1260 memcpy(&device->box,
1261 &id_phys->alternate_paths_phys_box_on_port,
1262 sizeof(device->box));
1263 memcpy(&device->phys_connector,
1264 &id_phys->alternate_paths_phys_connector,
1265 sizeof(device->phys_connector));
1266 device->bay = id_phys->phys_bay_in_box;
1267}
1268
1269static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1270 struct pqi_scsi_dev *device)
1271{
1272 char *status;
1273 static const char unknown_state_str[] =
1274 "Volume is in an unknown state (%u)";
1275 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1276
1277 switch (device->volume_status) {
1278 case CISS_LV_OK:
1279 status = "Volume online";
1280 break;
1281 case CISS_LV_FAILED:
1282 status = "Volume failed";
1283 break;
1284 case CISS_LV_NOT_CONFIGURED:
1285 status = "Volume not configured";
1286 break;
1287 case CISS_LV_DEGRADED:
1288 status = "Volume degraded";
1289 break;
1290 case CISS_LV_READY_FOR_RECOVERY:
1291 status = "Volume ready for recovery operation";
1292 break;
1293 case CISS_LV_UNDERGOING_RECOVERY:
1294 status = "Volume undergoing recovery";
1295 break;
1296 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1297 status = "Wrong physical drive was replaced";
1298 break;
1299 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1300 status = "A physical drive not properly connected";
1301 break;
1302 case CISS_LV_HARDWARE_OVERHEATING:
1303 status = "Hardware is overheating";
1304 break;
1305 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1306 status = "Hardware has overheated";
1307 break;
1308 case CISS_LV_UNDERGOING_EXPANSION:
1309 status = "Volume undergoing expansion";
1310 break;
1311 case CISS_LV_NOT_AVAILABLE:
1312 status = "Volume waiting for transforming volume";
1313 break;
1314 case CISS_LV_QUEUED_FOR_EXPANSION:
1315 status = "Volume queued for expansion";
1316 break;
1317 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1318 status = "Volume disabled due to SCSI ID conflict";
1319 break;
1320 case CISS_LV_EJECTED:
1321 status = "Volume has been ejected";
1322 break;
1323 case CISS_LV_UNDERGOING_ERASE:
1324 status = "Volume undergoing background erase";
1325 break;
1326 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1327 status = "Volume ready for predictive spare rebuild";
1328 break;
1329 case CISS_LV_UNDERGOING_RPI:
1330 status = "Volume undergoing rapid parity initialization";
1331 break;
1332 case CISS_LV_PENDING_RPI:
1333 status = "Volume queued for rapid parity initialization";
1334 break;
1335 case CISS_LV_ENCRYPTED_NO_KEY:
1336 status = "Encrypted volume inaccessible - key not present";
1337 break;
1338 case CISS_LV_UNDERGOING_ENCRYPTION:
1339 status = "Volume undergoing encryption process";
1340 break;
1341 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1342 status = "Volume undergoing encryption re-keying process";
1343 break;
1344 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
d87d5474 1345 status = "Volume encrypted but encryption is disabled";
6c223761
KB
1346 break;
1347 case CISS_LV_PENDING_ENCRYPTION:
1348 status = "Volume pending migration to encrypted state";
1349 break;
1350 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1351 status = "Volume pending encryption rekeying";
1352 break;
1353 case CISS_LV_NOT_SUPPORTED:
1354 status = "Volume not supported on this controller";
1355 break;
1356 case CISS_LV_STATUS_UNAVAILABLE:
1357 status = "Volume status not available";
1358 break;
1359 default:
1360 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1361 unknown_state_str, device->volume_status);
1362 status = unknown_state_buffer;
1363 break;
1364 }
1365
1366 dev_info(&ctrl_info->pci_dev->dev,
1367 "scsi %d:%d:%d:%d %s\n",
1368 ctrl_info->scsi_host->host_no,
1369 device->bus, device->target, device->lun, status);
1370}
1371
6c223761
KB
1372static void pqi_rescan_worker(struct work_struct *work)
1373{
1374 struct pqi_ctrl_info *ctrl_info;
1375
1376 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1377 rescan_work);
1378
1379 pqi_scan_scsi_devices(ctrl_info);
1380}
1381
1382static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1383 struct pqi_scsi_dev *device)
1384{
1385 int rc;
1386
1387 if (pqi_is_logical_device(device))
1388 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1389 device->target, device->lun);
1390 else
1391 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1392
1393 return rc;
1394}
1395
1396static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1397 struct pqi_scsi_dev *device)
1398{
1399 if (pqi_is_logical_device(device))
1400 scsi_remove_device(device->sdev);
1401 else
1402 pqi_remove_sas_device(device);
1403}
1404
1405/* Assumes the SCSI device list lock is held. */
1406
1407static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1408 int bus, int target, int lun)
1409{
1410 struct pqi_scsi_dev *device;
1411
1412 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1413 scsi_device_list_entry)
1414 if (device->bus == bus && device->target == target &&
1415 device->lun == lun)
1416 return device;
1417
1418 return NULL;
1419}
1420
1421static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1422 struct pqi_scsi_dev *dev2)
1423{
1424 if (dev1->is_physical_device != dev2->is_physical_device)
1425 return false;
1426
1427 if (dev1->is_physical_device)
1428 return dev1->wwid == dev2->wwid;
1429
1430 return memcmp(dev1->volume_id, dev2->volume_id,
1431 sizeof(dev1->volume_id)) == 0;
1432}
1433
1434enum pqi_find_result {
1435 DEVICE_NOT_FOUND,
1436 DEVICE_CHANGED,
1437 DEVICE_SAME,
1438};
1439
1440static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1441 struct pqi_scsi_dev *device_to_find,
1442 struct pqi_scsi_dev **matching_device)
1443{
1444 struct pqi_scsi_dev *device;
1445
1446 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1447 scsi_device_list_entry) {
1448 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1449 device->scsi3addr)) {
1450 *matching_device = device;
1451 if (pqi_device_equal(device_to_find, device)) {
1452 if (device_to_find->volume_offline)
1453 return DEVICE_CHANGED;
1454 return DEVICE_SAME;
1455 }
1456 return DEVICE_CHANGED;
1457 }
1458 }
1459
1460 return DEVICE_NOT_FOUND;
1461}
1462
6de783f6
KB
1463#define PQI_DEV_INFO_BUFFER_LENGTH 128
1464
6c223761
KB
1465static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1466 char *action, struct pqi_scsi_dev *device)
1467{
6de783f6
KB
1468 ssize_t count;
1469 char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1470
1471 count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
1472 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1473
1474 if (device->target_lun_valid)
1475 count += snprintf(buffer + count,
1476 PQI_DEV_INFO_BUFFER_LENGTH - count,
1477 "%d:%d",
1478 device->target,
1479 device->lun);
1480 else
1481 count += snprintf(buffer + count,
1482 PQI_DEV_INFO_BUFFER_LENGTH - count,
1483 "-:-");
1484
1485 if (pqi_is_logical_device(device))
1486 count += snprintf(buffer + count,
1487 PQI_DEV_INFO_BUFFER_LENGTH - count,
1488 " %08x%08x",
1489 *((u32 *)&device->scsi3addr),
1490 *((u32 *)&device->scsi3addr[4]));
1491 else
1492 count += snprintf(buffer + count,
1493 PQI_DEV_INFO_BUFFER_LENGTH - count,
1494 " %016llx", device->sas_address);
1495
1496 count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
1497 " %s %.8s %.16s ",
6c223761
KB
1498 scsi_device_type(device->devtype),
1499 device->vendor,
6de783f6
KB
1500 device->model);
1501
1502 if (pqi_is_logical_device(device)) {
1503 if (device->devtype == TYPE_DISK)
1504 count += snprintf(buffer + count,
1505 PQI_DEV_INFO_BUFFER_LENGTH - count,
1506 "SSDSmartPathCap%c En%c %-12s",
588a63fe
KB
1507 device->raid_bypass_configured ? '+' : '-',
1508 device->raid_bypass_enabled ? '+' : '-',
6de783f6
KB
1509 pqi_raid_level_to_string(device->raid_level));
1510 } else {
1511 count += snprintf(buffer + count,
1512 PQI_DEV_INFO_BUFFER_LENGTH - count,
1513 "AIO%c", device->aio_enabled ? '+' : '-');
1514 if (device->devtype == TYPE_DISK ||
1515 device->devtype == TYPE_ZBC)
1516 count += snprintf(buffer + count,
1517 PQI_DEV_INFO_BUFFER_LENGTH - count,
1518 " qd=%-6d", device->queue_depth);
1519 }
1520
1521 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
6c223761
KB
1522}
1523
1524/* Assumes the SCSI device list lock is held. */
1525
1526static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1527 struct pqi_scsi_dev *new_device)
1528{
1529 existing_device->devtype = new_device->devtype;
1530 existing_device->device_type = new_device->device_type;
1531 existing_device->bus = new_device->bus;
1532 if (new_device->target_lun_valid) {
1533 existing_device->target = new_device->target;
1534 existing_device->lun = new_device->lun;
1535 existing_device->target_lun_valid = true;
1536 }
1537
1538 /* By definition, the scsi3addr and wwid fields are already the same. */
1539
1540 existing_device->is_physical_device = new_device->is_physical_device;
bd10cf0b
KB
1541 existing_device->is_external_raid_device =
1542 new_device->is_external_raid_device;
6c223761
KB
1543 existing_device->aio_enabled = new_device->aio_enabled;
1544 memcpy(existing_device->vendor, new_device->vendor,
1545 sizeof(existing_device->vendor));
1546 memcpy(existing_device->model, new_device->model,
1547 sizeof(existing_device->model));
1548 existing_device->sas_address = new_device->sas_address;
1549 existing_device->raid_level = new_device->raid_level;
1550 existing_device->queue_depth = new_device->queue_depth;
1551 existing_device->aio_handle = new_device->aio_handle;
1552 existing_device->volume_status = new_device->volume_status;
1553 existing_device->active_path_index = new_device->active_path_index;
1554 existing_device->path_map = new_device->path_map;
1555 existing_device->bay = new_device->bay;
1556 memcpy(existing_device->box, new_device->box,
1557 sizeof(existing_device->box));
1558 memcpy(existing_device->phys_connector, new_device->phys_connector,
1559 sizeof(existing_device->phys_connector));
6c223761
KB
1560 existing_device->offload_to_mirror = 0;
1561 kfree(existing_device->raid_map);
1562 existing_device->raid_map = new_device->raid_map;
588a63fe
KB
1563 existing_device->raid_bypass_configured =
1564 new_device->raid_bypass_configured;
1565 existing_device->raid_bypass_enabled =
1566 new_device->raid_bypass_enabled;
6c223761
KB
1567
1568 /* To prevent this from being freed later. */
1569 new_device->raid_map = NULL;
1570}
1571
1572static inline void pqi_free_device(struct pqi_scsi_dev *device)
1573{
1574 if (device) {
1575 kfree(device->raid_map);
1576 kfree(device);
1577 }
1578}
1579
1580/*
1581 * Called when exposing a new device to the OS fails in order to re-adjust
1582 * our internal SCSI device list to match the SCSI ML's view.
1583 */
1584
1585static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1586 struct pqi_scsi_dev *device)
1587{
1588 unsigned long flags;
1589
1590 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1591 list_del(&device->scsi_device_list_entry);
1592 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1593
1594 /* Allow the device structure to be freed later. */
1595 device->keep_device = false;
1596}
1597
1598static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1599 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1600{
1601 int rc;
1602 unsigned int i;
1603 unsigned long flags;
1604 enum pqi_find_result find_result;
1605 struct pqi_scsi_dev *device;
1606 struct pqi_scsi_dev *next;
1607 struct pqi_scsi_dev *matching_device;
8a994a04
KB
1608 LIST_HEAD(add_list);
1609 LIST_HEAD(delete_list);
6c223761
KB
1610
1611 /*
1612 * The idea here is to do as little work as possible while holding the
1613 * spinlock. That's why we go to great pains to defer anything other
1614 * than updating the internal device list until after we release the
1615 * spinlock.
1616 */
1617
1618 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1619
1620 /* Assume that all devices in the existing list have gone away. */
1621 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1622 scsi_device_list_entry)
1623 device->device_gone = true;
1624
1625 for (i = 0; i < num_new_devices; i++) {
1626 device = new_device_list[i];
1627
1628 find_result = pqi_scsi_find_entry(ctrl_info, device,
1629 &matching_device);
1630
1631 switch (find_result) {
1632 case DEVICE_SAME:
1633 /*
1634 * The newly found device is already in the existing
1635 * device list.
1636 */
1637 device->new_device = false;
1638 matching_device->device_gone = false;
1639 pqi_scsi_update_device(matching_device, device);
1640 break;
1641 case DEVICE_NOT_FOUND:
1642 /*
1643 * The newly found device is NOT in the existing device
1644 * list.
1645 */
1646 device->new_device = true;
1647 break;
1648 case DEVICE_CHANGED:
1649 /*
1650 * The original device has gone away and we need to add
1651 * the new device.
1652 */
1653 device->new_device = true;
1654 break;
6c223761
KB
1655 }
1656 }
1657
1658 /* Process all devices that have gone away. */
1659 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1660 scsi_device_list_entry) {
1661 if (device->device_gone) {
1662 list_del(&device->scsi_device_list_entry);
1663 list_add_tail(&device->delete_list_entry, &delete_list);
1664 }
1665 }
1666
1667 /* Process all new devices. */
1668 for (i = 0; i < num_new_devices; i++) {
1669 device = new_device_list[i];
1670 if (!device->new_device)
1671 continue;
1672 if (device->volume_offline)
1673 continue;
1674 list_add_tail(&device->scsi_device_list_entry,
1675 &ctrl_info->scsi_device_list);
1676 list_add_tail(&device->add_list_entry, &add_list);
1677 /* To prevent this device structure from being freed later. */
1678 device->keep_device = true;
1679 }
1680
6c223761
KB
1681 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1682
1683 /* Remove all devices that have gone away. */
1684 list_for_each_entry_safe(device, next, &delete_list,
1685 delete_list_entry) {
6c223761
KB
1686 if (device->volume_offline) {
1687 pqi_dev_info(ctrl_info, "offline", device);
1688 pqi_show_volume_status(ctrl_info, device);
1689 } else {
1690 pqi_dev_info(ctrl_info, "removed", device);
1691 }
6de783f6
KB
1692 if (device->sdev)
1693 pqi_remove_device(ctrl_info, device);
6c223761
KB
1694 list_del(&device->delete_list_entry);
1695 pqi_free_device(device);
1696 }
1697
1698 /*
1699 * Notify the SCSI ML if the queue depth of any existing device has
1700 * changed.
1701 */
1702 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1703 scsi_device_list_entry) {
1704 if (device->sdev && device->queue_depth !=
1705 device->advertised_queue_depth) {
1706 device->advertised_queue_depth = device->queue_depth;
1707 scsi_change_queue_depth(device->sdev,
1708 device->advertised_queue_depth);
1709 }
1710 }
1711
1712 /* Expose any new devices. */
1713 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
94086f5b 1714 if (!device->sdev) {
6de783f6 1715 pqi_dev_info(ctrl_info, "added", device);
6c223761
KB
1716 rc = pqi_add_device(ctrl_info, device);
1717 if (rc) {
1718 dev_warn(&ctrl_info->pci_dev->dev,
1719 "scsi %d:%d:%d:%d addition failed, device not added\n",
1720 ctrl_info->scsi_host->host_no,
1721 device->bus, device->target,
1722 device->lun);
1723 pqi_fixup_botched_add(ctrl_info, device);
6c223761
KB
1724 }
1725 }
6c223761
KB
1726 }
1727}
1728
1729static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1730{
1731 bool is_supported = false;
1732
1733 switch (device->devtype) {
1734 case TYPE_DISK:
1735 case TYPE_ZBC:
1736 case TYPE_TAPE:
1737 case TYPE_MEDIUM_CHANGER:
1738 case TYPE_ENCLOSURE:
1739 is_supported = true;
1740 break;
1741 case TYPE_RAID:
1742 /*
1743 * Only support the HBA controller itself as a RAID
1744 * controller. If it's a RAID controller other than
376fb880
KB
1745 * the HBA itself (an external RAID controller, for
1746 * example), we don't support it.
6c223761
KB
1747 */
1748 if (pqi_is_hba_lunid(device->scsi3addr))
1749 is_supported = true;
1750 break;
1751 }
1752
1753 return is_supported;
1754}
1755
94086f5b 1756static inline bool pqi_skip_device(u8 *scsi3addr)
6c223761 1757{
94086f5b
KB
1758 /* Ignore all masked devices. */
1759 if (MASKED_DEVICE(scsi3addr))
6c223761 1760 return true;
6c223761
KB
1761
1762 return false;
1763}
1764
6c223761
KB
1765static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1766{
1767 int i;
1768 int rc;
8a994a04 1769 LIST_HEAD(new_device_list_head);
6c223761
KB
1770 struct report_phys_lun_extended *physdev_list = NULL;
1771 struct report_log_lun_extended *logdev_list = NULL;
1772 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1773 struct report_log_lun_extended_entry *log_lun_ext_entry;
1774 struct bmic_identify_physical_device *id_phys = NULL;
1775 u32 num_physicals;
1776 u32 num_logicals;
1777 struct pqi_scsi_dev **new_device_list = NULL;
1778 struct pqi_scsi_dev *device;
1779 struct pqi_scsi_dev *next;
1780 unsigned int num_new_devices;
1781 unsigned int num_valid_devices;
1782 bool is_physical_device;
1783 u8 *scsi3addr;
1784 static char *out_of_memory_msg =
6de783f6 1785 "failed to allocate memory, device discovery stopped";
6c223761 1786
6c223761
KB
1787 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1788 if (rc)
1789 goto out;
1790
1791 if (physdev_list)
1792 num_physicals =
1793 get_unaligned_be32(&physdev_list->header.list_length)
1794 / sizeof(physdev_list->lun_entries[0]);
1795 else
1796 num_physicals = 0;
1797
1798 if (logdev_list)
1799 num_logicals =
1800 get_unaligned_be32(&logdev_list->header.list_length)
1801 / sizeof(logdev_list->lun_entries[0]);
1802 else
1803 num_logicals = 0;
1804
1805 if (num_physicals) {
1806 /*
1807 * We need this buffer for calls to pqi_get_physical_disk_info()
1808 * below. We allocate it here instead of inside
1809 * pqi_get_physical_disk_info() because it's a fairly large
1810 * buffer.
1811 */
1812 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1813 if (!id_phys) {
1814 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1815 out_of_memory_msg);
1816 rc = -ENOMEM;
1817 goto out;
1818 }
1819 }
1820
1821 num_new_devices = num_physicals + num_logicals;
1822
1823 new_device_list = kmalloc(sizeof(*new_device_list) *
1824 num_new_devices, GFP_KERNEL);
1825 if (!new_device_list) {
1826 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1827 rc = -ENOMEM;
1828 goto out;
1829 }
1830
1831 for (i = 0; i < num_new_devices; i++) {
1832 device = kzalloc(sizeof(*device), GFP_KERNEL);
1833 if (!device) {
1834 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1835 out_of_memory_msg);
1836 rc = -ENOMEM;
1837 goto out;
1838 }
1839 list_add_tail(&device->new_device_list_entry,
1840 &new_device_list_head);
1841 }
1842
1843 device = NULL;
1844 num_valid_devices = 0;
1845
1846 for (i = 0; i < num_new_devices; i++) {
1847
1848 if (i < num_physicals) {
1849 is_physical_device = true;
1850 phys_lun_ext_entry = &physdev_list->lun_entries[i];
1851 log_lun_ext_entry = NULL;
1852 scsi3addr = phys_lun_ext_entry->lunid;
1853 } else {
1854 is_physical_device = false;
1855 phys_lun_ext_entry = NULL;
1856 log_lun_ext_entry =
1857 &logdev_list->lun_entries[i - num_physicals];
1858 scsi3addr = log_lun_ext_entry->lunid;
1859 }
1860
94086f5b 1861 if (is_physical_device && pqi_skip_device(scsi3addr))
6c223761
KB
1862 continue;
1863
1864 if (device)
1865 device = list_next_entry(device, new_device_list_entry);
1866 else
1867 device = list_first_entry(&new_device_list_head,
1868 struct pqi_scsi_dev, new_device_list_entry);
1869
1870 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1871 device->is_physical_device = is_physical_device;
bd10cf0b
KB
1872 if (!is_physical_device)
1873 device->is_external_raid_device =
1874 pqi_is_external_raid_addr(scsi3addr);
6c223761
KB
1875
1876 /* Gather information about the device. */
1877 rc = pqi_get_device_info(ctrl_info, device);
1878 if (rc == -ENOMEM) {
1879 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1880 out_of_memory_msg);
1881 goto out;
1882 }
1883 if (rc) {
6de783f6
KB
1884 if (device->is_physical_device)
1885 dev_warn(&ctrl_info->pci_dev->dev,
1886 "obtaining device info failed, skipping physical device %016llx\n",
1887 get_unaligned_be64(
1888 &phys_lun_ext_entry->wwid));
1889 else
1890 dev_warn(&ctrl_info->pci_dev->dev,
1891 "obtaining device info failed, skipping logical device %08x%08x\n",
1892 *((u32 *)&device->scsi3addr),
1893 *((u32 *)&device->scsi3addr[4]));
6c223761
KB
1894 rc = 0;
1895 continue;
1896 }
1897
1898 if (!pqi_is_supported_device(device))
1899 continue;
1900
1901 pqi_assign_bus_target_lun(device);
1902
6c223761
KB
1903 if (device->is_physical_device) {
1904 device->wwid = phys_lun_ext_entry->wwid;
1905 if ((phys_lun_ext_entry->device_flags &
1906 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1907 phys_lun_ext_entry->aio_handle)
1908 device->aio_enabled = true;
1909 } else {
1910 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1911 sizeof(device->volume_id));
1912 }
1913
1914 switch (device->devtype) {
1915 case TYPE_DISK:
1916 case TYPE_ZBC:
1917 case TYPE_ENCLOSURE:
1918 if (device->is_physical_device) {
1919 device->sas_address =
1920 get_unaligned_be64(&device->wwid);
1921 if (device->devtype == TYPE_DISK ||
1922 device->devtype == TYPE_ZBC) {
1923 device->aio_handle =
1924 phys_lun_ext_entry->aio_handle;
1925 pqi_get_physical_disk_info(ctrl_info,
1926 device, id_phys);
1927 }
1928 }
1929 break;
1930 }
1931
1932 new_device_list[num_valid_devices++] = device;
1933 }
1934
1935 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1936
1937out:
1938 list_for_each_entry_safe(device, next, &new_device_list_head,
1939 new_device_list_entry) {
1940 if (device->keep_device)
1941 continue;
1942 list_del(&device->new_device_list_entry);
1943 pqi_free_device(device);
1944 }
1945
1946 kfree(new_device_list);
1947 kfree(physdev_list);
1948 kfree(logdev_list);
1949 kfree(id_phys);
1950
1951 return rc;
1952}
1953
1954static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1955{
1956 unsigned long flags;
1957 struct pqi_scsi_dev *device;
6c223761 1958
a37ef745
KB
1959 while (1) {
1960 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1961
1962 device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
1963 struct pqi_scsi_dev, scsi_device_list_entry);
1964 if (device)
1965 list_del(&device->scsi_device_list_entry);
1966
1967 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
1968 flags);
1969
1970 if (!device)
1971 break;
6c223761 1972
6c223761
KB
1973 if (device->sdev)
1974 pqi_remove_device(ctrl_info, device);
6c223761
KB
1975 pqi_free_device(device);
1976 }
6c223761
KB
1977}
1978
1979static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1980{
1981 int rc;
1982
1983 if (pqi_ctrl_offline(ctrl_info))
1984 return -ENXIO;
1985
1986 mutex_lock(&ctrl_info->scan_mutex);
1987
1988 rc = pqi_update_scsi_devices(ctrl_info);
1989 if (rc)
5f310425 1990 pqi_schedule_rescan_worker_delayed(ctrl_info);
6c223761
KB
1991
1992 mutex_unlock(&ctrl_info->scan_mutex);
1993
1994 return rc;
1995}
1996
1997static void pqi_scan_start(struct Scsi_Host *shost)
1998{
1999 pqi_scan_scsi_devices(shost_to_hba(shost));
2000}
2001
2002/* Returns TRUE if scan is finished. */
2003
2004static int pqi_scan_finished(struct Scsi_Host *shost,
2005 unsigned long elapsed_time)
2006{
2007 struct pqi_ctrl_info *ctrl_info;
2008
2009 ctrl_info = shost_priv(shost);
2010
2011 return !mutex_is_locked(&ctrl_info->scan_mutex);
2012}
2013
061ef06a
KB
2014static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
2015{
2016 mutex_lock(&ctrl_info->scan_mutex);
2017 mutex_unlock(&ctrl_info->scan_mutex);
2018}
2019
2020static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
2021{
2022 mutex_lock(&ctrl_info->lun_reset_mutex);
2023 mutex_unlock(&ctrl_info->lun_reset_mutex);
2024}
2025
6c223761
KB
2026static inline void pqi_set_encryption_info(
2027 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
2028 u64 first_block)
2029{
2030 u32 volume_blk_size;
2031
2032 /*
2033 * Set the encryption tweak values based on logical block address.
2034 * If the block size is 512, the tweak value is equal to the LBA.
2035 * For other block sizes, tweak value is (LBA * block size) / 512.
2036 */
2037 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2038 if (volume_blk_size != 512)
2039 first_block = (first_block * volume_blk_size) / 512;
2040
2041 encryption_info->data_encryption_key_index =
2042 get_unaligned_le16(&raid_map->data_encryption_key_index);
2043 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2044 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2045}
2046
2047/*
588a63fe 2048 * Attempt to perform RAID bypass mapping for a logical volume I/O.
6c223761
KB
2049 */
2050
2051#define PQI_RAID_BYPASS_INELIGIBLE 1
2052
2053static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2054 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2055 struct pqi_queue_group *queue_group)
2056{
2057 struct raid_map *raid_map;
2058 bool is_write = false;
2059 u32 map_index;
2060 u64 first_block;
2061 u64 last_block;
2062 u32 block_cnt;
2063 u32 blocks_per_row;
2064 u64 first_row;
2065 u64 last_row;
2066 u32 first_row_offset;
2067 u32 last_row_offset;
2068 u32 first_column;
2069 u32 last_column;
2070 u64 r0_first_row;
2071 u64 r0_last_row;
2072 u32 r5or6_blocks_per_row;
2073 u64 r5or6_first_row;
2074 u64 r5or6_last_row;
2075 u32 r5or6_first_row_offset;
2076 u32 r5or6_last_row_offset;
2077 u32 r5or6_first_column;
2078 u32 r5or6_last_column;
2079 u16 data_disks_per_row;
2080 u32 total_disks_per_row;
2081 u16 layout_map_count;
2082 u32 stripesize;
2083 u16 strip_size;
2084 u32 first_group;
2085 u32 last_group;
2086 u32 current_group;
2087 u32 map_row;
2088 u32 aio_handle;
2089 u64 disk_block;
2090 u32 disk_block_cnt;
2091 u8 cdb[16];
2092 u8 cdb_length;
2093 int offload_to_mirror;
2094 struct pqi_encryption_info *encryption_info_ptr;
2095 struct pqi_encryption_info encryption_info;
2096#if BITS_PER_LONG == 32
2097 u64 tmpdiv;
2098#endif
2099
2100 /* Check for valid opcode, get LBA and block count. */
2101 switch (scmd->cmnd[0]) {
2102 case WRITE_6:
2103 is_write = true;
2104 /* fall through */
2105 case READ_6:
e018ef57
B
2106 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2107 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
6c223761
KB
2108 block_cnt = (u32)scmd->cmnd[4];
2109 if (block_cnt == 0)
2110 block_cnt = 256;
2111 break;
2112 case WRITE_10:
2113 is_write = true;
2114 /* fall through */
2115 case READ_10:
2116 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2117 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2118 break;
2119 case WRITE_12:
2120 is_write = true;
2121 /* fall through */
2122 case READ_12:
2123 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2124 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2125 break;
2126 case WRITE_16:
2127 is_write = true;
2128 /* fall through */
2129 case READ_16:
2130 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2131 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2132 break;
2133 default:
2134 /* Process via normal I/O path. */
2135 return PQI_RAID_BYPASS_INELIGIBLE;
2136 }
2137
2138 /* Check for write to non-RAID-0. */
2139 if (is_write && device->raid_level != SA_RAID_0)
2140 return PQI_RAID_BYPASS_INELIGIBLE;
2141
2142 if (unlikely(block_cnt == 0))
2143 return PQI_RAID_BYPASS_INELIGIBLE;
2144
2145 last_block = first_block + block_cnt - 1;
2146 raid_map = device->raid_map;
2147
2148 /* Check for invalid block or wraparound. */
2149 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2150 last_block < first_block)
2151 return PQI_RAID_BYPASS_INELIGIBLE;
2152
2153 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2154 strip_size = get_unaligned_le16(&raid_map->strip_size);
2155 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2156
2157 /* Calculate stripe information for the request. */
2158 blocks_per_row = data_disks_per_row * strip_size;
2159#if BITS_PER_LONG == 32
2160 tmpdiv = first_block;
2161 do_div(tmpdiv, blocks_per_row);
2162 first_row = tmpdiv;
2163 tmpdiv = last_block;
2164 do_div(tmpdiv, blocks_per_row);
2165 last_row = tmpdiv;
2166 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2167 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2168 tmpdiv = first_row_offset;
2169 do_div(tmpdiv, strip_size);
2170 first_column = tmpdiv;
2171 tmpdiv = last_row_offset;
2172 do_div(tmpdiv, strip_size);
2173 last_column = tmpdiv;
2174#else
2175 first_row = first_block / blocks_per_row;
2176 last_row = last_block / blocks_per_row;
2177 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2178 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2179 first_column = first_row_offset / strip_size;
2180 last_column = last_row_offset / strip_size;
2181#endif
2182
2183 /* If this isn't a single row/column then give to the controller. */
2184 if (first_row != last_row || first_column != last_column)
2185 return PQI_RAID_BYPASS_INELIGIBLE;
2186
2187 /* Proceeding with driver mapping. */
2188 total_disks_per_row = data_disks_per_row +
2189 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2190 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2191 get_unaligned_le16(&raid_map->row_cnt);
2192 map_index = (map_row * total_disks_per_row) + first_column;
2193
2194 /* RAID 1 */
2195 if (device->raid_level == SA_RAID_1) {
2196 if (device->offload_to_mirror)
2197 map_index += data_disks_per_row;
2198 device->offload_to_mirror = !device->offload_to_mirror;
2199 } else if (device->raid_level == SA_RAID_ADM) {
2200 /* RAID ADM */
2201 /*
2202 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2203 * divisible by 3.
2204 */
2205 offload_to_mirror = device->offload_to_mirror;
2206 if (offload_to_mirror == 0) {
2207 /* use physical disk in the first mirrored group. */
2208 map_index %= data_disks_per_row;
2209 } else {
2210 do {
2211 /*
2212 * Determine mirror group that map_index
2213 * indicates.
2214 */
2215 current_group = map_index / data_disks_per_row;
2216
2217 if (offload_to_mirror != current_group) {
2218 if (current_group <
2219 layout_map_count - 1) {
2220 /*
2221 * Select raid index from
2222 * next group.
2223 */
2224 map_index += data_disks_per_row;
2225 current_group++;
2226 } else {
2227 /*
2228 * Select raid index from first
2229 * group.
2230 */
2231 map_index %= data_disks_per_row;
2232 current_group = 0;
2233 }
2234 }
2235 } while (offload_to_mirror != current_group);
2236 }
2237
2238 /* Set mirror group to use next time. */
2239 offload_to_mirror =
2240 (offload_to_mirror >= layout_map_count - 1) ?
2241 0 : offload_to_mirror + 1;
2242 WARN_ON(offload_to_mirror >= layout_map_count);
2243 device->offload_to_mirror = offload_to_mirror;
2244 /*
2245 * Avoid direct use of device->offload_to_mirror within this
2246 * function since multiple threads might simultaneously
2247 * increment it beyond the range of device->layout_map_count -1.
2248 */
2249 } else if ((device->raid_level == SA_RAID_5 ||
2250 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2251 /* RAID 50/60 */
2252 /* Verify first and last block are in same RAID group */
2253 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2254 stripesize = r5or6_blocks_per_row * layout_map_count;
2255#if BITS_PER_LONG == 32
2256 tmpdiv = first_block;
2257 first_group = do_div(tmpdiv, stripesize);
2258 tmpdiv = first_group;
2259 do_div(tmpdiv, r5or6_blocks_per_row);
2260 first_group = tmpdiv;
2261 tmpdiv = last_block;
2262 last_group = do_div(tmpdiv, stripesize);
2263 tmpdiv = last_group;
2264 do_div(tmpdiv, r5or6_blocks_per_row);
2265 last_group = tmpdiv;
2266#else
2267 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2268 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2269#endif
2270 if (first_group != last_group)
2271 return PQI_RAID_BYPASS_INELIGIBLE;
2272
2273 /* Verify request is in a single row of RAID 5/6 */
2274#if BITS_PER_LONG == 32
2275 tmpdiv = first_block;
2276 do_div(tmpdiv, stripesize);
2277 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2278 tmpdiv = last_block;
2279 do_div(tmpdiv, stripesize);
2280 r5or6_last_row = r0_last_row = tmpdiv;
2281#else
2282 first_row = r5or6_first_row = r0_first_row =
2283 first_block / stripesize;
2284 r5or6_last_row = r0_last_row = last_block / stripesize;
2285#endif
2286 if (r5or6_first_row != r5or6_last_row)
2287 return PQI_RAID_BYPASS_INELIGIBLE;
2288
2289 /* Verify request is in a single column */
2290#if BITS_PER_LONG == 32
2291 tmpdiv = first_block;
2292 first_row_offset = do_div(tmpdiv, stripesize);
2293 tmpdiv = first_row_offset;
2294 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2295 r5or6_first_row_offset = first_row_offset;
2296 tmpdiv = last_block;
2297 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2298 tmpdiv = r5or6_last_row_offset;
2299 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2300 tmpdiv = r5or6_first_row_offset;
2301 do_div(tmpdiv, strip_size);
2302 first_column = r5or6_first_column = tmpdiv;
2303 tmpdiv = r5or6_last_row_offset;
2304 do_div(tmpdiv, strip_size);
2305 r5or6_last_column = tmpdiv;
2306#else
2307 first_row_offset = r5or6_first_row_offset =
2308 (u32)((first_block % stripesize) %
2309 r5or6_blocks_per_row);
2310
2311 r5or6_last_row_offset =
2312 (u32)((last_block % stripesize) %
2313 r5or6_blocks_per_row);
2314
2315 first_column = r5or6_first_row_offset / strip_size;
2316 r5or6_first_column = first_column;
2317 r5or6_last_column = r5or6_last_row_offset / strip_size;
2318#endif
2319 if (r5or6_first_column != r5or6_last_column)
2320 return PQI_RAID_BYPASS_INELIGIBLE;
2321
2322 /* Request is eligible */
2323 map_row =
2324 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2325 get_unaligned_le16(&raid_map->row_cnt);
2326
2327 map_index = (first_group *
2328 (get_unaligned_le16(&raid_map->row_cnt) *
2329 total_disks_per_row)) +
2330 (map_row * total_disks_per_row) + first_column;
2331 }
2332
2333 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2334 return PQI_RAID_BYPASS_INELIGIBLE;
2335
2336 aio_handle = raid_map->disk_data[map_index].aio_handle;
2337 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2338 first_row * strip_size +
2339 (first_row_offset - first_column * strip_size);
2340 disk_block_cnt = block_cnt;
2341
2342 /* Handle differing logical/physical block sizes. */
2343 if (raid_map->phys_blk_shift) {
2344 disk_block <<= raid_map->phys_blk_shift;
2345 disk_block_cnt <<= raid_map->phys_blk_shift;
2346 }
2347
2348 if (unlikely(disk_block_cnt > 0xffff))
2349 return PQI_RAID_BYPASS_INELIGIBLE;
2350
2351 /* Build the new CDB for the physical disk I/O. */
2352 if (disk_block > 0xffffffff) {
2353 cdb[0] = is_write ? WRITE_16 : READ_16;
2354 cdb[1] = 0;
2355 put_unaligned_be64(disk_block, &cdb[2]);
2356 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2357 cdb[14] = 0;
2358 cdb[15] = 0;
2359 cdb_length = 16;
2360 } else {
2361 cdb[0] = is_write ? WRITE_10 : READ_10;
2362 cdb[1] = 0;
2363 put_unaligned_be32((u32)disk_block, &cdb[2]);
2364 cdb[6] = 0;
2365 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2366 cdb[9] = 0;
2367 cdb_length = 10;
2368 }
2369
2370 if (get_unaligned_le16(&raid_map->flags) &
2371 RAID_MAP_ENCRYPTION_ENABLED) {
2372 pqi_set_encryption_info(&encryption_info, raid_map,
2373 first_block);
2374 encryption_info_ptr = &encryption_info;
2375 } else {
2376 encryption_info_ptr = NULL;
2377 }
2378
2379 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
376fb880 2380 cdb, cdb_length, queue_group, encryption_info_ptr, true);
6c223761
KB
2381}
2382
2383#define PQI_STATUS_IDLE 0x0
2384
2385#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2386#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2387
2388#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2389#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2390#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2391#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2392#define PQI_DEVICE_STATE_ERROR 0x4
2393
2394#define PQI_MODE_READY_TIMEOUT_SECS 30
2395#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2396
2397static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2398{
2399 struct pqi_device_registers __iomem *pqi_registers;
2400 unsigned long timeout;
2401 u64 signature;
2402 u8 status;
2403
2404 pqi_registers = ctrl_info->pqi_registers;
2405 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2406
2407 while (1) {
2408 signature = readq(&pqi_registers->signature);
2409 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2410 sizeof(signature)) == 0)
2411 break;
2412 if (time_after(jiffies, timeout)) {
2413 dev_err(&ctrl_info->pci_dev->dev,
2414 "timed out waiting for PQI signature\n");
2415 return -ETIMEDOUT;
2416 }
2417 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2418 }
2419
2420 while (1) {
2421 status = readb(&pqi_registers->function_and_status_code);
2422 if (status == PQI_STATUS_IDLE)
2423 break;
2424 if (time_after(jiffies, timeout)) {
2425 dev_err(&ctrl_info->pci_dev->dev,
2426 "timed out waiting for PQI IDLE\n");
2427 return -ETIMEDOUT;
2428 }
2429 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2430 }
2431
2432 while (1) {
2433 if (readl(&pqi_registers->device_status) ==
2434 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2435 break;
2436 if (time_after(jiffies, timeout)) {
2437 dev_err(&ctrl_info->pci_dev->dev,
2438 "timed out waiting for PQI all registers ready\n");
2439 return -ETIMEDOUT;
2440 }
2441 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2442 }
2443
2444 return 0;
2445}
2446
2447static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2448{
2449 struct pqi_scsi_dev *device;
2450
2451 device = io_request->scmd->device->hostdata;
588a63fe 2452 device->raid_bypass_enabled = false;
376fb880 2453 device->aio_enabled = false;
6c223761
KB
2454}
2455
d87d5474 2456static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
6c223761
KB
2457{
2458 struct pqi_ctrl_info *ctrl_info;
e58081a7 2459 struct pqi_scsi_dev *device;
6c223761 2460
03b288cf
KB
2461 device = sdev->hostdata;
2462 if (device->device_offline)
2463 return;
2464
2465 device->device_offline = true;
2466 scsi_device_set_state(sdev, SDEV_OFFLINE);
2467 ctrl_info = shost_to_hba(sdev->host);
2468 pqi_schedule_rescan_worker(ctrl_info);
2469 dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n",
2470 path, ctrl_info->scsi_host->host_no, device->bus,
2471 device->target, device->lun);
6c223761
KB
2472}
2473
2474static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2475{
2476 u8 scsi_status;
2477 u8 host_byte;
2478 struct scsi_cmnd *scmd;
2479 struct pqi_raid_error_info *error_info;
2480 size_t sense_data_length;
2481 int residual_count;
2482 int xfer_count;
2483 struct scsi_sense_hdr sshdr;
2484
2485 scmd = io_request->scmd;
2486 if (!scmd)
2487 return;
2488
2489 error_info = io_request->error_info;
2490 scsi_status = error_info->status;
2491 host_byte = DID_OK;
2492
f5b63206
KB
2493 switch (error_info->data_out_result) {
2494 case PQI_DATA_IN_OUT_GOOD:
2495 break;
2496 case PQI_DATA_IN_OUT_UNDERFLOW:
6c223761
KB
2497 xfer_count =
2498 get_unaligned_le32(&error_info->data_out_transferred);
2499 residual_count = scsi_bufflen(scmd) - xfer_count;
2500 scsi_set_resid(scmd, residual_count);
2501 if (xfer_count < scmd->underflow)
2502 host_byte = DID_SOFT_ERROR;
f5b63206
KB
2503 break;
2504 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
2505 case PQI_DATA_IN_OUT_ABORTED:
2506 host_byte = DID_ABORT;
2507 break;
2508 case PQI_DATA_IN_OUT_TIMEOUT:
2509 host_byte = DID_TIME_OUT;
2510 break;
2511 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
2512 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
2513 case PQI_DATA_IN_OUT_BUFFER_ERROR:
2514 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
2515 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
2516 case PQI_DATA_IN_OUT_ERROR:
2517 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
2518 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
2519 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
2520 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
2521 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
2522 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
2523 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
2524 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
2525 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
2526 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
2527 default:
2528 host_byte = DID_ERROR;
2529 break;
6c223761
KB
2530 }
2531
2532 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2533 if (sense_data_length == 0)
2534 sense_data_length =
2535 get_unaligned_le16(&error_info->response_data_length);
2536 if (sense_data_length) {
2537 if (sense_data_length > sizeof(error_info->data))
2538 sense_data_length = sizeof(error_info->data);
2539
2540 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2541 scsi_normalize_sense(error_info->data,
2542 sense_data_length, &sshdr) &&
2543 sshdr.sense_key == HARDWARE_ERROR &&
2544 sshdr.asc == 0x3e &&
2545 sshdr.ascq == 0x1) {
d87d5474 2546 pqi_take_device_offline(scmd->device, "RAID");
6c223761
KB
2547 host_byte = DID_NO_CONNECT;
2548 }
2549
2550 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2551 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2552 memcpy(scmd->sense_buffer, error_info->data,
2553 sense_data_length);
2554 }
2555
2556 scmd->result = scsi_status;
2557 set_host_byte(scmd, host_byte);
2558}
2559
2560static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2561{
2562 u8 scsi_status;
2563 u8 host_byte;
2564 struct scsi_cmnd *scmd;
2565 struct pqi_aio_error_info *error_info;
2566 size_t sense_data_length;
2567 int residual_count;
2568 int xfer_count;
2569 bool device_offline;
2570
2571 scmd = io_request->scmd;
2572 error_info = io_request->error_info;
2573 host_byte = DID_OK;
2574 sense_data_length = 0;
2575 device_offline = false;
2576
2577 switch (error_info->service_response) {
2578 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2579 scsi_status = error_info->status;
2580 break;
2581 case PQI_AIO_SERV_RESPONSE_FAILURE:
2582 switch (error_info->status) {
2583 case PQI_AIO_STATUS_IO_ABORTED:
2584 scsi_status = SAM_STAT_TASK_ABORTED;
2585 break;
2586 case PQI_AIO_STATUS_UNDERRUN:
2587 scsi_status = SAM_STAT_GOOD;
2588 residual_count = get_unaligned_le32(
2589 &error_info->residual_count);
2590 scsi_set_resid(scmd, residual_count);
2591 xfer_count = scsi_bufflen(scmd) - residual_count;
2592 if (xfer_count < scmd->underflow)
2593 host_byte = DID_SOFT_ERROR;
2594 break;
2595 case PQI_AIO_STATUS_OVERRUN:
2596 scsi_status = SAM_STAT_GOOD;
2597 break;
2598 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2599 pqi_aio_path_disabled(io_request);
2600 scsi_status = SAM_STAT_GOOD;
2601 io_request->status = -EAGAIN;
2602 break;
2603 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2604 case PQI_AIO_STATUS_INVALID_DEVICE:
376fb880
KB
2605 if (!io_request->raid_bypass) {
2606 device_offline = true;
2607 pqi_take_device_offline(scmd->device, "AIO");
2608 host_byte = DID_NO_CONNECT;
2609 }
6c223761
KB
2610 scsi_status = SAM_STAT_CHECK_CONDITION;
2611 break;
2612 case PQI_AIO_STATUS_IO_ERROR:
2613 default:
2614 scsi_status = SAM_STAT_CHECK_CONDITION;
2615 break;
2616 }
2617 break;
2618 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2619 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2620 scsi_status = SAM_STAT_GOOD;
2621 break;
2622 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2623 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2624 default:
2625 scsi_status = SAM_STAT_CHECK_CONDITION;
2626 break;
2627 }
2628
2629 if (error_info->data_present) {
2630 sense_data_length =
2631 get_unaligned_le16(&error_info->data_length);
2632 if (sense_data_length) {
2633 if (sense_data_length > sizeof(error_info->data))
2634 sense_data_length = sizeof(error_info->data);
2635 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2636 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2637 memcpy(scmd->sense_buffer, error_info->data,
2638 sense_data_length);
2639 }
2640 }
2641
2642 if (device_offline && sense_data_length == 0)
2643 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2644 0x3e, 0x1);
2645
2646 scmd->result = scsi_status;
2647 set_host_byte(scmd, host_byte);
2648}
2649
2650static void pqi_process_io_error(unsigned int iu_type,
2651 struct pqi_io_request *io_request)
2652{
2653 switch (iu_type) {
2654 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2655 pqi_process_raid_io_error(io_request);
2656 break;
2657 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2658 pqi_process_aio_io_error(io_request);
2659 break;
2660 }
2661}
2662
2663static int pqi_interpret_task_management_response(
2664 struct pqi_task_management_response *response)
2665{
2666 int rc;
2667
2668 switch (response->response_code) {
b17f0486
KB
2669 case SOP_TMF_COMPLETE:
2670 case SOP_TMF_FUNCTION_SUCCEEDED:
6c223761
KB
2671 rc = 0;
2672 break;
2673 default:
2674 rc = -EIO;
2675 break;
2676 }
2677
2678 return rc;
2679}
2680
2681static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2682 struct pqi_queue_group *queue_group)
2683{
2684 unsigned int num_responses;
2685 pqi_index_t oq_pi;
2686 pqi_index_t oq_ci;
2687 struct pqi_io_request *io_request;
2688 struct pqi_io_response *response;
2689 u16 request_id;
2690
2691 num_responses = 0;
2692 oq_ci = queue_group->oq_ci_copy;
2693
2694 while (1) {
2695 oq_pi = *queue_group->oq_pi;
2696 if (oq_pi == oq_ci)
2697 break;
2698
2699 num_responses++;
2700 response = queue_group->oq_element_array +
2701 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2702
2703 request_id = get_unaligned_le16(&response->request_id);
2704 WARN_ON(request_id >= ctrl_info->max_io_slots);
2705
2706 io_request = &ctrl_info->io_request_pool[request_id];
2707 WARN_ON(atomic_read(&io_request->refcount) == 0);
2708
2709 switch (response->header.iu_type) {
2710 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2711 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2712 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2713 break;
2714 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2715 io_request->status =
2716 pqi_interpret_task_management_response(
2717 (void *)response);
2718 break;
2719 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2720 pqi_aio_path_disabled(io_request);
2721 io_request->status = -EAGAIN;
2722 break;
2723 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2724 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2725 io_request->error_info = ctrl_info->error_buffer +
2726 (get_unaligned_le16(&response->error_index) *
2727 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2728 pqi_process_io_error(response->header.iu_type,
2729 io_request);
2730 break;
2731 default:
2732 dev_err(&ctrl_info->pci_dev->dev,
2733 "unexpected IU type: 0x%x\n",
2734 response->header.iu_type);
6c223761
KB
2735 break;
2736 }
2737
2738 io_request->io_complete_callback(io_request,
2739 io_request->context);
2740
2741 /*
2742 * Note that the I/O request structure CANNOT BE TOUCHED after
2743 * returning from the I/O completion callback!
2744 */
2745
2746 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2747 }
2748
2749 if (num_responses) {
2750 queue_group->oq_ci_copy = oq_ci;
2751 writel(oq_ci, queue_group->oq_ci);
2752 }
2753
2754 return num_responses;
2755}
2756
2757static inline unsigned int pqi_num_elements_free(unsigned int pi,
df7a1fcf 2758 unsigned int ci, unsigned int elements_in_queue)
6c223761
KB
2759{
2760 unsigned int num_elements_used;
2761
2762 if (pi >= ci)
2763 num_elements_used = pi - ci;
2764 else
2765 num_elements_used = elements_in_queue - ci + pi;
2766
2767 return elements_in_queue - num_elements_used - 1;
2768}
2769
98f87667 2770static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
2771 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2772{
2773 pqi_index_t iq_pi;
2774 pqi_index_t iq_ci;
2775 unsigned long flags;
2776 void *next_element;
6c223761
KB
2777 struct pqi_queue_group *queue_group;
2778
2779 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2780 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2781
6c223761
KB
2782 while (1) {
2783 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2784
2785 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2786 iq_ci = *queue_group->iq_ci[RAID_PATH];
2787
2788 if (pqi_num_elements_free(iq_pi, iq_ci,
2789 ctrl_info->num_elements_per_iq))
2790 break;
2791
2792 spin_unlock_irqrestore(
2793 &queue_group->submit_lock[RAID_PATH], flags);
2794
98f87667 2795 if (pqi_ctrl_offline(ctrl_info))
6c223761 2796 return;
6c223761
KB
2797 }
2798
2799 next_element = queue_group->iq_element_array[RAID_PATH] +
2800 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2801
2802 memcpy(next_element, iu, iu_length);
2803
2804 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
6c223761
KB
2805 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2806
2807 /*
2808 * This write notifies the controller that an IU is available to be
2809 * processed.
2810 */
2811 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2812
2813 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
6c223761
KB
2814}
2815
2816static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2817 struct pqi_event *event)
2818{
2819 struct pqi_event_acknowledge_request request;
2820
2821 memset(&request, 0, sizeof(request));
2822
2823 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2824 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2825 &request.header.iu_length);
2826 request.event_type = event->event_type;
2827 request.event_id = event->event_id;
2828 request.additional_event_id = event->additional_event_id;
2829
98f87667 2830 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
6c223761
KB
2831}
2832
2833static void pqi_event_worker(struct work_struct *work)
2834{
2835 unsigned int i;
2836 struct pqi_ctrl_info *ctrl_info;
6a50d6ad 2837 struct pqi_event *event;
6c223761
KB
2838
2839 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2840
7561a7e4
KB
2841 pqi_ctrl_busy(ctrl_info);
2842 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
5f310425
KB
2843 if (pqi_ctrl_offline(ctrl_info))
2844 goto out;
2845
2846 pqi_schedule_rescan_worker_delayed(ctrl_info);
7561a7e4 2847
6a50d6ad 2848 event = ctrl_info->events;
6c223761 2849 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
6a50d6ad
KB
2850 if (event->pending) {
2851 event->pending = false;
2852 pqi_acknowledge_event(ctrl_info, event);
6c223761 2853 }
6a50d6ad 2854 event++;
6c223761
KB
2855 }
2856
5f310425 2857out:
7561a7e4 2858 pqi_ctrl_unbusy(ctrl_info);
6c223761
KB
2859}
2860
98f87667 2861#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
6c223761
KB
2862
2863static void pqi_heartbeat_timer_handler(unsigned long data)
2864{
2865 int num_interrupts;
98f87667 2866 u32 heartbeat_count;
6c223761
KB
2867 struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2868
98f87667
KB
2869 pqi_check_ctrl_health(ctrl_info);
2870 if (pqi_ctrl_offline(ctrl_info))
061ef06a
KB
2871 return;
2872
6c223761 2873 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
98f87667 2874 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
6c223761
KB
2875
2876 if (num_interrupts == ctrl_info->previous_num_interrupts) {
98f87667
KB
2877 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
2878 dev_err(&ctrl_info->pci_dev->dev,
2879 "no heartbeat detected - last heartbeat count: %u\n",
2880 heartbeat_count);
6c223761
KB
2881 pqi_take_ctrl_offline(ctrl_info);
2882 return;
2883 }
6c223761 2884 } else {
98f87667 2885 ctrl_info->previous_num_interrupts = num_interrupts;
6c223761
KB
2886 }
2887
98f87667 2888 ctrl_info->previous_heartbeat_count = heartbeat_count;
6c223761
KB
2889 mod_timer(&ctrl_info->heartbeat_timer,
2890 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2891}
2892
2893static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2894{
98f87667
KB
2895 if (!ctrl_info->heartbeat_counter)
2896 return;
2897
6c223761
KB
2898 ctrl_info->previous_num_interrupts =
2899 atomic_read(&ctrl_info->num_interrupts);
98f87667
KB
2900 ctrl_info->previous_heartbeat_count =
2901 pqi_read_heartbeat_counter(ctrl_info);
6c223761 2902
6c223761
KB
2903 ctrl_info->heartbeat_timer.expires =
2904 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2905 ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2906 ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
061ef06a 2907 add_timer(&ctrl_info->heartbeat_timer);
6c223761
KB
2908}
2909
2910static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2911{
98f87667 2912 del_timer_sync(&ctrl_info->heartbeat_timer);
6c223761
KB
2913}
2914
6a50d6ad 2915static inline int pqi_event_type_to_event_index(unsigned int event_type)
6c223761
KB
2916{
2917 int index;
2918
6a50d6ad
KB
2919 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
2920 if (event_type == pqi_supported_event_types[index])
2921 return index;
6c223761 2922
6a50d6ad
KB
2923 return -1;
2924}
2925
2926static inline bool pqi_is_supported_event(unsigned int event_type)
2927{
2928 return pqi_event_type_to_event_index(event_type) != -1;
6c223761
KB
2929}
2930
2931static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2932{
2933 unsigned int num_events;
2934 pqi_index_t oq_pi;
2935 pqi_index_t oq_ci;
2936 struct pqi_event_queue *event_queue;
2937 struct pqi_event_response *response;
6a50d6ad 2938 struct pqi_event *event;
6c223761
KB
2939 int event_index;
2940
2941 event_queue = &ctrl_info->event_queue;
2942 num_events = 0;
6c223761
KB
2943 oq_ci = event_queue->oq_ci_copy;
2944
2945 while (1) {
2946 oq_pi = *event_queue->oq_pi;
2947 if (oq_pi == oq_ci)
2948 break;
2949
2950 num_events++;
2951 response = event_queue->oq_element_array +
2952 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2953
2954 event_index =
2955 pqi_event_type_to_event_index(response->event_type);
2956
2957 if (event_index >= 0) {
2958 if (response->request_acknowlege) {
6a50d6ad
KB
2959 event = &ctrl_info->events[event_index];
2960 event->pending = true;
2961 event->event_type = response->event_type;
2962 event->event_id = response->event_id;
2963 event->additional_event_id =
6c223761 2964 response->additional_event_id;
6c223761
KB
2965 }
2966 }
2967
2968 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2969 }
2970
2971 if (num_events) {
2972 event_queue->oq_ci_copy = oq_ci;
2973 writel(oq_ci, event_queue->oq_ci);
98f87667 2974 schedule_work(&ctrl_info->event_work);
6c223761
KB
2975 }
2976
2977 return num_events;
2978}
2979
061ef06a
KB
2980#define PQI_LEGACY_INTX_MASK 0x1
2981
2982static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
2983 bool enable_intx)
2984{
2985 u32 intx_mask;
2986 struct pqi_device_registers __iomem *pqi_registers;
2987 volatile void __iomem *register_addr;
2988
2989 pqi_registers = ctrl_info->pqi_registers;
2990
2991 if (enable_intx)
2992 register_addr = &pqi_registers->legacy_intx_mask_clear;
2993 else
2994 register_addr = &pqi_registers->legacy_intx_mask_set;
2995
2996 intx_mask = readl(register_addr);
2997 intx_mask |= PQI_LEGACY_INTX_MASK;
2998 writel(intx_mask, register_addr);
2999}
3000
3001static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3002 enum pqi_irq_mode new_mode)
3003{
3004 switch (ctrl_info->irq_mode) {
3005 case IRQ_MODE_MSIX:
3006 switch (new_mode) {
3007 case IRQ_MODE_MSIX:
3008 break;
3009 case IRQ_MODE_INTX:
3010 pqi_configure_legacy_intx(ctrl_info, true);
3011 sis_disable_msix(ctrl_info);
3012 sis_enable_intx(ctrl_info);
3013 break;
3014 case IRQ_MODE_NONE:
3015 sis_disable_msix(ctrl_info);
3016 break;
3017 }
3018 break;
3019 case IRQ_MODE_INTX:
3020 switch (new_mode) {
3021 case IRQ_MODE_MSIX:
3022 pqi_configure_legacy_intx(ctrl_info, false);
3023 sis_disable_intx(ctrl_info);
3024 sis_enable_msix(ctrl_info);
3025 break;
3026 case IRQ_MODE_INTX:
3027 break;
3028 case IRQ_MODE_NONE:
3029 pqi_configure_legacy_intx(ctrl_info, false);
3030 sis_disable_intx(ctrl_info);
3031 break;
3032 }
3033 break;
3034 case IRQ_MODE_NONE:
3035 switch (new_mode) {
3036 case IRQ_MODE_MSIX:
3037 sis_enable_msix(ctrl_info);
3038 break;
3039 case IRQ_MODE_INTX:
3040 pqi_configure_legacy_intx(ctrl_info, true);
3041 sis_enable_intx(ctrl_info);
3042 break;
3043 case IRQ_MODE_NONE:
3044 break;
3045 }
3046 break;
3047 }
3048
3049 ctrl_info->irq_mode = new_mode;
3050}
3051
3052#define PQI_LEGACY_INTX_PENDING 0x1
3053
3054static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3055{
3056 bool valid_irq;
3057 u32 intx_status;
3058
3059 switch (ctrl_info->irq_mode) {
3060 case IRQ_MODE_MSIX:
3061 valid_irq = true;
3062 break;
3063 case IRQ_MODE_INTX:
3064 intx_status =
3065 readl(&ctrl_info->pqi_registers->legacy_intx_status);
3066 if (intx_status & PQI_LEGACY_INTX_PENDING)
3067 valid_irq = true;
3068 else
3069 valid_irq = false;
3070 break;
3071 case IRQ_MODE_NONE:
3072 default:
3073 valid_irq = false;
3074 break;
3075 }
3076
3077 return valid_irq;
3078}
3079
6c223761
KB
3080static irqreturn_t pqi_irq_handler(int irq, void *data)
3081{
3082 struct pqi_ctrl_info *ctrl_info;
3083 struct pqi_queue_group *queue_group;
3084 unsigned int num_responses_handled;
3085
3086 queue_group = data;
3087 ctrl_info = queue_group->ctrl_info;
3088
061ef06a 3089 if (!pqi_is_valid_irq(ctrl_info))
6c223761
KB
3090 return IRQ_NONE;
3091
3092 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3093
3094 if (irq == ctrl_info->event_irq)
3095 num_responses_handled += pqi_process_event_intr(ctrl_info);
3096
3097 if (num_responses_handled)
3098 atomic_inc(&ctrl_info->num_interrupts);
3099
3100 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3101 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3102
3103 return IRQ_HANDLED;
3104}
3105
3106static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3107{
d91d7820 3108 struct pci_dev *pci_dev = ctrl_info->pci_dev;
6c223761
KB
3109 int i;
3110 int rc;
3111
d91d7820 3112 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
6c223761
KB
3113
3114 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
d91d7820 3115 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
52198226 3116 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
6c223761 3117 if (rc) {
d91d7820 3118 dev_err(&pci_dev->dev,
6c223761 3119 "irq %u init failed with error %d\n",
d91d7820 3120 pci_irq_vector(pci_dev, i), rc);
6c223761
KB
3121 return rc;
3122 }
3123 ctrl_info->num_msix_vectors_initialized++;
3124 }
3125
3126 return 0;
3127}
3128
98bf061b
KB
3129static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3130{
3131 int i;
3132
3133 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3134 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3135 &ctrl_info->queue_groups[i]);
3136
3137 ctrl_info->num_msix_vectors_initialized = 0;
3138}
3139
6c223761
KB
3140static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3141{
98bf061b 3142 int num_vectors_enabled;
6c223761 3143
98bf061b 3144 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
52198226
CH
3145 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3146 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
98bf061b 3147 if (num_vectors_enabled < 0) {
6c223761 3148 dev_err(&ctrl_info->pci_dev->dev,
98bf061b
KB
3149 "MSI-X init failed with error %d\n",
3150 num_vectors_enabled);
3151 return num_vectors_enabled;
6c223761
KB
3152 }
3153
98bf061b 3154 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
061ef06a 3155 ctrl_info->irq_mode = IRQ_MODE_MSIX;
6c223761
KB
3156 return 0;
3157}
3158
98bf061b
KB
3159static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3160{
3161 if (ctrl_info->num_msix_vectors_enabled) {
3162 pci_free_irq_vectors(ctrl_info->pci_dev);
3163 ctrl_info->num_msix_vectors_enabled = 0;
3164 }
3165}
3166
6c223761
KB
3167static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3168{
3169 unsigned int i;
3170 size_t alloc_length;
3171 size_t element_array_length_per_iq;
3172 size_t element_array_length_per_oq;
3173 void *element_array;
3174 void *next_queue_index;
3175 void *aligned_pointer;
3176 unsigned int num_inbound_queues;
3177 unsigned int num_outbound_queues;
3178 unsigned int num_queue_indexes;
3179 struct pqi_queue_group *queue_group;
3180
3181 element_array_length_per_iq =
3182 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3183 ctrl_info->num_elements_per_iq;
3184 element_array_length_per_oq =
3185 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3186 ctrl_info->num_elements_per_oq;
3187 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3188 num_outbound_queues = ctrl_info->num_queue_groups;
3189 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3190
3191 aligned_pointer = NULL;
3192
3193 for (i = 0; i < num_inbound_queues; i++) {
3194 aligned_pointer = PTR_ALIGN(aligned_pointer,
3195 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3196 aligned_pointer += element_array_length_per_iq;
3197 }
3198
3199 for (i = 0; i < num_outbound_queues; i++) {
3200 aligned_pointer = PTR_ALIGN(aligned_pointer,
3201 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3202 aligned_pointer += element_array_length_per_oq;
3203 }
3204
3205 aligned_pointer = PTR_ALIGN(aligned_pointer,
3206 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3207 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3208 PQI_EVENT_OQ_ELEMENT_LENGTH;
3209
3210 for (i = 0; i < num_queue_indexes; i++) {
3211 aligned_pointer = PTR_ALIGN(aligned_pointer,
3212 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3213 aligned_pointer += sizeof(pqi_index_t);
3214 }
3215
3216 alloc_length = (size_t)aligned_pointer +
3217 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3218
e1d213bd
KB
3219 alloc_length += PQI_EXTRA_SGL_MEMORY;
3220
6c223761
KB
3221 ctrl_info->queue_memory_base =
3222 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3223 alloc_length,
3224 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3225
d87d5474 3226 if (!ctrl_info->queue_memory_base)
6c223761 3227 return -ENOMEM;
6c223761
KB
3228
3229 ctrl_info->queue_memory_length = alloc_length;
3230
3231 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3232 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3233
3234 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3235 queue_group = &ctrl_info->queue_groups[i];
3236 queue_group->iq_element_array[RAID_PATH] = element_array;
3237 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3238 ctrl_info->queue_memory_base_dma_handle +
3239 (element_array - ctrl_info->queue_memory_base);
3240 element_array += element_array_length_per_iq;
3241 element_array = PTR_ALIGN(element_array,
3242 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3243 queue_group->iq_element_array[AIO_PATH] = element_array;
3244 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3245 ctrl_info->queue_memory_base_dma_handle +
3246 (element_array - ctrl_info->queue_memory_base);
3247 element_array += element_array_length_per_iq;
3248 element_array = PTR_ALIGN(element_array,
3249 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3250 }
3251
3252 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3253 queue_group = &ctrl_info->queue_groups[i];
3254 queue_group->oq_element_array = element_array;
3255 queue_group->oq_element_array_bus_addr =
3256 ctrl_info->queue_memory_base_dma_handle +
3257 (element_array - ctrl_info->queue_memory_base);
3258 element_array += element_array_length_per_oq;
3259 element_array = PTR_ALIGN(element_array,
3260 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3261 }
3262
3263 ctrl_info->event_queue.oq_element_array = element_array;
3264 ctrl_info->event_queue.oq_element_array_bus_addr =
3265 ctrl_info->queue_memory_base_dma_handle +
3266 (element_array - ctrl_info->queue_memory_base);
3267 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3268 PQI_EVENT_OQ_ELEMENT_LENGTH;
3269
3270 next_queue_index = PTR_ALIGN(element_array,
3271 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3272
3273 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3274 queue_group = &ctrl_info->queue_groups[i];
3275 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3276 queue_group->iq_ci_bus_addr[RAID_PATH] =
3277 ctrl_info->queue_memory_base_dma_handle +
3278 (next_queue_index - ctrl_info->queue_memory_base);
3279 next_queue_index += sizeof(pqi_index_t);
3280 next_queue_index = PTR_ALIGN(next_queue_index,
3281 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3282 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3283 queue_group->iq_ci_bus_addr[AIO_PATH] =
3284 ctrl_info->queue_memory_base_dma_handle +
3285 (next_queue_index - ctrl_info->queue_memory_base);
3286 next_queue_index += sizeof(pqi_index_t);
3287 next_queue_index = PTR_ALIGN(next_queue_index,
3288 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3289 queue_group->oq_pi = next_queue_index;
3290 queue_group->oq_pi_bus_addr =
3291 ctrl_info->queue_memory_base_dma_handle +
3292 (next_queue_index - ctrl_info->queue_memory_base);
3293 next_queue_index += sizeof(pqi_index_t);
3294 next_queue_index = PTR_ALIGN(next_queue_index,
3295 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3296 }
3297
3298 ctrl_info->event_queue.oq_pi = next_queue_index;
3299 ctrl_info->event_queue.oq_pi_bus_addr =
3300 ctrl_info->queue_memory_base_dma_handle +
3301 (next_queue_index - ctrl_info->queue_memory_base);
3302
3303 return 0;
3304}
3305
3306static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3307{
3308 unsigned int i;
3309 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3310 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3311
3312 /*
3313 * Initialize the backpointers to the controller structure in
3314 * each operational queue group structure.
3315 */
3316 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3317 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3318
3319 /*
3320 * Assign IDs to all operational queues. Note that the IDs
3321 * assigned to operational IQs are independent of the IDs
3322 * assigned to operational OQs.
3323 */
3324 ctrl_info->event_queue.oq_id = next_oq_id++;
3325 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3326 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3327 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3328 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3329 }
3330
3331 /*
3332 * Assign MSI-X table entry indexes to all queues. Note that the
3333 * interrupt for the event queue is shared with the first queue group.
3334 */
3335 ctrl_info->event_queue.int_msg_num = 0;
3336 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3337 ctrl_info->queue_groups[i].int_msg_num = i;
3338
3339 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3340 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3341 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3342 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3343 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3344 }
3345}
3346
3347static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3348{
3349 size_t alloc_length;
3350 struct pqi_admin_queues_aligned *admin_queues_aligned;
3351 struct pqi_admin_queues *admin_queues;
3352
3353 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3354 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3355
3356 ctrl_info->admin_queue_memory_base =
3357 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3358 alloc_length,
3359 &ctrl_info->admin_queue_memory_base_dma_handle,
3360 GFP_KERNEL);
3361
3362 if (!ctrl_info->admin_queue_memory_base)
3363 return -ENOMEM;
3364
3365 ctrl_info->admin_queue_memory_length = alloc_length;
3366
3367 admin_queues = &ctrl_info->admin_queues;
3368 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3369 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3370 admin_queues->iq_element_array =
3371 &admin_queues_aligned->iq_element_array;
3372 admin_queues->oq_element_array =
3373 &admin_queues_aligned->oq_element_array;
3374 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3375 admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3376
3377 admin_queues->iq_element_array_bus_addr =
3378 ctrl_info->admin_queue_memory_base_dma_handle +
3379 (admin_queues->iq_element_array -
3380 ctrl_info->admin_queue_memory_base);
3381 admin_queues->oq_element_array_bus_addr =
3382 ctrl_info->admin_queue_memory_base_dma_handle +
3383 (admin_queues->oq_element_array -
3384 ctrl_info->admin_queue_memory_base);
3385 admin_queues->iq_ci_bus_addr =
3386 ctrl_info->admin_queue_memory_base_dma_handle +
3387 ((void *)admin_queues->iq_ci -
3388 ctrl_info->admin_queue_memory_base);
3389 admin_queues->oq_pi_bus_addr =
3390 ctrl_info->admin_queue_memory_base_dma_handle +
3391 ((void *)admin_queues->oq_pi -
3392 ctrl_info->admin_queue_memory_base);
3393
3394 return 0;
3395}
3396
3397#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3398#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3399
3400static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3401{
3402 struct pqi_device_registers __iomem *pqi_registers;
3403 struct pqi_admin_queues *admin_queues;
3404 unsigned long timeout;
3405 u8 status;
3406 u32 reg;
3407
3408 pqi_registers = ctrl_info->pqi_registers;
3409 admin_queues = &ctrl_info->admin_queues;
3410
3411 writeq((u64)admin_queues->iq_element_array_bus_addr,
3412 &pqi_registers->admin_iq_element_array_addr);
3413 writeq((u64)admin_queues->oq_element_array_bus_addr,
3414 &pqi_registers->admin_oq_element_array_addr);
3415 writeq((u64)admin_queues->iq_ci_bus_addr,
3416 &pqi_registers->admin_iq_ci_addr);
3417 writeq((u64)admin_queues->oq_pi_bus_addr,
3418 &pqi_registers->admin_oq_pi_addr);
3419
3420 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3421 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3422 (admin_queues->int_msg_num << 16);
3423 writel(reg, &pqi_registers->admin_iq_num_elements);
3424 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3425 &pqi_registers->function_and_status_code);
3426
3427 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3428 while (1) {
3429 status = readb(&pqi_registers->function_and_status_code);
3430 if (status == PQI_STATUS_IDLE)
3431 break;
3432 if (time_after(jiffies, timeout))
3433 return -ETIMEDOUT;
3434 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3435 }
3436
3437 /*
3438 * The offset registers are not initialized to the correct
3439 * offsets until *after* the create admin queue pair command
3440 * completes successfully.
3441 */
3442 admin_queues->iq_pi = ctrl_info->iomem_base +
3443 PQI_DEVICE_REGISTERS_OFFSET +
3444 readq(&pqi_registers->admin_iq_pi_offset);
3445 admin_queues->oq_ci = ctrl_info->iomem_base +
3446 PQI_DEVICE_REGISTERS_OFFSET +
3447 readq(&pqi_registers->admin_oq_ci_offset);
3448
3449 return 0;
3450}
3451
3452static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3453 struct pqi_general_admin_request *request)
3454{
3455 struct pqi_admin_queues *admin_queues;
3456 void *next_element;
3457 pqi_index_t iq_pi;
3458
3459 admin_queues = &ctrl_info->admin_queues;
3460 iq_pi = admin_queues->iq_pi_copy;
3461
3462 next_element = admin_queues->iq_element_array +
3463 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3464
3465 memcpy(next_element, request, sizeof(*request));
3466
3467 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3468 admin_queues->iq_pi_copy = iq_pi;
3469
3470 /*
3471 * This write notifies the controller that an IU is available to be
3472 * processed.
3473 */
3474 writel(iq_pi, admin_queues->iq_pi);
3475}
3476
13bede67
KB
3477#define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
3478
6c223761
KB
3479static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3480 struct pqi_general_admin_response *response)
3481{
3482 struct pqi_admin_queues *admin_queues;
3483 pqi_index_t oq_pi;
3484 pqi_index_t oq_ci;
3485 unsigned long timeout;
3486
3487 admin_queues = &ctrl_info->admin_queues;
3488 oq_ci = admin_queues->oq_ci_copy;
3489
13bede67 3490 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
6c223761
KB
3491
3492 while (1) {
3493 oq_pi = *admin_queues->oq_pi;
3494 if (oq_pi != oq_ci)
3495 break;
3496 if (time_after(jiffies, timeout)) {
3497 dev_err(&ctrl_info->pci_dev->dev,
3498 "timed out waiting for admin response\n");
3499 return -ETIMEDOUT;
3500 }
13bede67
KB
3501 if (!sis_is_firmware_running(ctrl_info))
3502 return -ENXIO;
6c223761
KB
3503 usleep_range(1000, 2000);
3504 }
3505
3506 memcpy(response, admin_queues->oq_element_array +
3507 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3508
3509 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3510 admin_queues->oq_ci_copy = oq_ci;
3511 writel(oq_ci, admin_queues->oq_ci);
3512
3513 return 0;
3514}
3515
3516static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3517 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3518 struct pqi_io_request *io_request)
3519{
3520 struct pqi_io_request *next;
3521 void *next_element;
3522 pqi_index_t iq_pi;
3523 pqi_index_t iq_ci;
3524 size_t iu_length;
3525 unsigned long flags;
3526 unsigned int num_elements_needed;
3527 unsigned int num_elements_to_end_of_queue;
3528 size_t copy_count;
3529 struct pqi_iu_header *request;
3530
3531 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3532
376fb880
KB
3533 if (io_request) {
3534 io_request->queue_group = queue_group;
6c223761
KB
3535 list_add_tail(&io_request->request_list_entry,
3536 &queue_group->request_list[path]);
376fb880 3537 }
6c223761
KB
3538
3539 iq_pi = queue_group->iq_pi_copy[path];
3540
3541 list_for_each_entry_safe(io_request, next,
3542 &queue_group->request_list[path], request_list_entry) {
3543
3544 request = io_request->iu;
3545
3546 iu_length = get_unaligned_le16(&request->iu_length) +
3547 PQI_REQUEST_HEADER_LENGTH;
3548 num_elements_needed =
3549 DIV_ROUND_UP(iu_length,
3550 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3551
3552 iq_ci = *queue_group->iq_ci[path];
3553
3554 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3555 ctrl_info->num_elements_per_iq))
3556 break;
3557
3558 put_unaligned_le16(queue_group->oq_id,
3559 &request->response_queue_id);
3560
3561 next_element = queue_group->iq_element_array[path] +
3562 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3563
3564 num_elements_to_end_of_queue =
3565 ctrl_info->num_elements_per_iq - iq_pi;
3566
3567 if (num_elements_needed <= num_elements_to_end_of_queue) {
3568 memcpy(next_element, request, iu_length);
3569 } else {
3570 copy_count = num_elements_to_end_of_queue *
3571 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3572 memcpy(next_element, request, copy_count);
3573 memcpy(queue_group->iq_element_array[path],
3574 (u8 *)request + copy_count,
3575 iu_length - copy_count);
3576 }
3577
3578 iq_pi = (iq_pi + num_elements_needed) %
3579 ctrl_info->num_elements_per_iq;
3580
3581 list_del(&io_request->request_list_entry);
3582 }
3583
3584 if (iq_pi != queue_group->iq_pi_copy[path]) {
3585 queue_group->iq_pi_copy[path] = iq_pi;
3586 /*
3587 * This write notifies the controller that one or more IUs are
3588 * available to be processed.
3589 */
3590 writel(iq_pi, queue_group->iq_pi[path]);
3591 }
3592
3593 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3594}
3595
1f37e992
KB
3596#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
3597
3598static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3599 struct completion *wait)
3600{
3601 int rc;
1f37e992
KB
3602
3603 while (1) {
3604 if (wait_for_completion_io_timeout(wait,
3605 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
3606 rc = 0;
3607 break;
3608 }
3609
3610 pqi_check_ctrl_health(ctrl_info);
3611 if (pqi_ctrl_offline(ctrl_info)) {
3612 rc = -ENXIO;
3613 break;
3614 }
1f37e992
KB
3615 }
3616
3617 return rc;
3618}
3619
6c223761
KB
3620static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3621 void *context)
3622{
3623 struct completion *waiting = context;
3624
3625 complete(waiting);
3626}
3627
3628static int pqi_submit_raid_request_synchronous_with_io_request(
3629 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3630 unsigned long timeout_msecs)
3631{
3632 int rc = 0;
3633 DECLARE_COMPLETION_ONSTACK(wait);
3634
3635 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3636 io_request->context = &wait;
3637
3638 pqi_start_io(ctrl_info,
3639 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3640 io_request);
3641
3642 if (timeout_msecs == NO_TIMEOUT) {
1f37e992 3643 pqi_wait_for_completion_io(ctrl_info, &wait);
6c223761
KB
3644 } else {
3645 if (!wait_for_completion_io_timeout(&wait,
3646 msecs_to_jiffies(timeout_msecs))) {
3647 dev_warn(&ctrl_info->pci_dev->dev,
3648 "command timed out\n");
3649 rc = -ETIMEDOUT;
3650 }
3651 }
3652
3653 return rc;
3654}
3655
3656static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3657 struct pqi_iu_header *request, unsigned int flags,
3658 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3659{
3660 int rc;
3661 struct pqi_io_request *io_request;
3662 unsigned long start_jiffies;
3663 unsigned long msecs_blocked;
3664 size_t iu_length;
3665
3666 /*
3667 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3668 * are mutually exclusive.
3669 */
3670
3671 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3672 if (down_interruptible(&ctrl_info->sync_request_sem))
3673 return -ERESTARTSYS;
3674 } else {
3675 if (timeout_msecs == NO_TIMEOUT) {
3676 down(&ctrl_info->sync_request_sem);
3677 } else {
3678 start_jiffies = jiffies;
3679 if (down_timeout(&ctrl_info->sync_request_sem,
3680 msecs_to_jiffies(timeout_msecs)))
3681 return -ETIMEDOUT;
3682 msecs_blocked =
3683 jiffies_to_msecs(jiffies - start_jiffies);
3684 if (msecs_blocked >= timeout_msecs)
3685 return -ETIMEDOUT;
3686 timeout_msecs -= msecs_blocked;
3687 }
3688 }
3689
7561a7e4
KB
3690 pqi_ctrl_busy(ctrl_info);
3691 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
3692 if (timeout_msecs == 0) {
3693 rc = -ETIMEDOUT;
3694 goto out;
3695 }
3696
376fb880
KB
3697 if (pqi_ctrl_offline(ctrl_info)) {
3698 rc = -ENXIO;
3699 goto out;
3700 }
3701
6c223761
KB
3702 io_request = pqi_alloc_io_request(ctrl_info);
3703
3704 put_unaligned_le16(io_request->index,
3705 &(((struct pqi_raid_path_request *)request)->request_id));
3706
3707 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3708 ((struct pqi_raid_path_request *)request)->error_index =
3709 ((struct pqi_raid_path_request *)request)->request_id;
3710
3711 iu_length = get_unaligned_le16(&request->iu_length) +
3712 PQI_REQUEST_HEADER_LENGTH;
3713 memcpy(io_request->iu, request, iu_length);
3714
3715 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3716 io_request, timeout_msecs);
3717
3718 if (error_info) {
3719 if (io_request->error_info)
3720 memcpy(error_info, io_request->error_info,
3721 sizeof(*error_info));
3722 else
3723 memset(error_info, 0, sizeof(*error_info));
3724 } else if (rc == 0 && io_request->error_info) {
3725 u8 scsi_status;
3726 struct pqi_raid_error_info *raid_error_info;
3727
3728 raid_error_info = io_request->error_info;
3729 scsi_status = raid_error_info->status;
3730
3731 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3732 raid_error_info->data_out_result ==
3733 PQI_DATA_IN_OUT_UNDERFLOW)
3734 scsi_status = SAM_STAT_GOOD;
3735
3736 if (scsi_status != SAM_STAT_GOOD)
3737 rc = -EIO;
3738 }
3739
3740 pqi_free_io_request(io_request);
3741
7561a7e4
KB
3742out:
3743 pqi_ctrl_unbusy(ctrl_info);
6c223761
KB
3744 up(&ctrl_info->sync_request_sem);
3745
3746 return rc;
3747}
3748
3749static int pqi_validate_admin_response(
3750 struct pqi_general_admin_response *response, u8 expected_function_code)
3751{
3752 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3753 return -EINVAL;
3754
3755 if (get_unaligned_le16(&response->header.iu_length) !=
3756 PQI_GENERAL_ADMIN_IU_LENGTH)
3757 return -EINVAL;
3758
3759 if (response->function_code != expected_function_code)
3760 return -EINVAL;
3761
3762 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3763 return -EINVAL;
3764
3765 return 0;
3766}
3767
3768static int pqi_submit_admin_request_synchronous(
3769 struct pqi_ctrl_info *ctrl_info,
3770 struct pqi_general_admin_request *request,
3771 struct pqi_general_admin_response *response)
3772{
3773 int rc;
3774
3775 pqi_submit_admin_request(ctrl_info, request);
3776
3777 rc = pqi_poll_for_admin_response(ctrl_info, response);
3778
3779 if (rc == 0)
3780 rc = pqi_validate_admin_response(response,
3781 request->function_code);
3782
3783 return rc;
3784}
3785
3786static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3787{
3788 int rc;
3789 struct pqi_general_admin_request request;
3790 struct pqi_general_admin_response response;
3791 struct pqi_device_capability *capability;
3792 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3793
3794 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3795 if (!capability)
3796 return -ENOMEM;
3797
3798 memset(&request, 0, sizeof(request));
3799
3800 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3801 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3802 &request.header.iu_length);
3803 request.function_code =
3804 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3805 put_unaligned_le32(sizeof(*capability),
3806 &request.data.report_device_capability.buffer_length);
3807
3808 rc = pqi_map_single(ctrl_info->pci_dev,
3809 &request.data.report_device_capability.sg_descriptor,
3810 capability, sizeof(*capability),
3811 PCI_DMA_FROMDEVICE);
3812 if (rc)
3813 goto out;
3814
3815 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3816 &response);
3817
3818 pqi_pci_unmap(ctrl_info->pci_dev,
3819 &request.data.report_device_capability.sg_descriptor, 1,
3820 PCI_DMA_FROMDEVICE);
3821
3822 if (rc)
3823 goto out;
3824
3825 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3826 rc = -EIO;
3827 goto out;
3828 }
3829
3830 ctrl_info->max_inbound_queues =
3831 get_unaligned_le16(&capability->max_inbound_queues);
3832 ctrl_info->max_elements_per_iq =
3833 get_unaligned_le16(&capability->max_elements_per_iq);
3834 ctrl_info->max_iq_element_length =
3835 get_unaligned_le16(&capability->max_iq_element_length)
3836 * 16;
3837 ctrl_info->max_outbound_queues =
3838 get_unaligned_le16(&capability->max_outbound_queues);
3839 ctrl_info->max_elements_per_oq =
3840 get_unaligned_le16(&capability->max_elements_per_oq);
3841 ctrl_info->max_oq_element_length =
3842 get_unaligned_le16(&capability->max_oq_element_length)
3843 * 16;
3844
3845 sop_iu_layer_descriptor =
3846 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3847
3848 ctrl_info->max_inbound_iu_length_per_firmware =
3849 get_unaligned_le16(
3850 &sop_iu_layer_descriptor->max_inbound_iu_length);
3851 ctrl_info->inbound_spanning_supported =
3852 sop_iu_layer_descriptor->inbound_spanning_supported;
3853 ctrl_info->outbound_spanning_supported =
3854 sop_iu_layer_descriptor->outbound_spanning_supported;
3855
3856out:
3857 kfree(capability);
3858
3859 return rc;
3860}
3861
3862static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3863{
3864 if (ctrl_info->max_iq_element_length <
3865 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3866 dev_err(&ctrl_info->pci_dev->dev,
3867 "max. inbound queue element length of %d is less than the required length of %d\n",
3868 ctrl_info->max_iq_element_length,
3869 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3870 return -EINVAL;
3871 }
3872
3873 if (ctrl_info->max_oq_element_length <
3874 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3875 dev_err(&ctrl_info->pci_dev->dev,
3876 "max. outbound queue element length of %d is less than the required length of %d\n",
3877 ctrl_info->max_oq_element_length,
3878 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3879 return -EINVAL;
3880 }
3881
3882 if (ctrl_info->max_inbound_iu_length_per_firmware <
3883 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3884 dev_err(&ctrl_info->pci_dev->dev,
3885 "max. inbound IU length of %u is less than the min. required length of %d\n",
3886 ctrl_info->max_inbound_iu_length_per_firmware,
3887 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3888 return -EINVAL;
3889 }
3890
77668f41
KB
3891 if (!ctrl_info->inbound_spanning_supported) {
3892 dev_err(&ctrl_info->pci_dev->dev,
3893 "the controller does not support inbound spanning\n");
3894 return -EINVAL;
3895 }
3896
3897 if (ctrl_info->outbound_spanning_supported) {
3898 dev_err(&ctrl_info->pci_dev->dev,
3899 "the controller supports outbound spanning but this driver does not\n");
3900 return -EINVAL;
3901 }
3902
6c223761
KB
3903 return 0;
3904}
3905
3906static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3907 bool inbound_queue, u16 queue_id)
3908{
3909 struct pqi_general_admin_request request;
3910 struct pqi_general_admin_response response;
3911
3912 memset(&request, 0, sizeof(request));
3913 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3914 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3915 &request.header.iu_length);
3916 if (inbound_queue)
3917 request.function_code =
3918 PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3919 else
3920 request.function_code =
3921 PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3922 put_unaligned_le16(queue_id,
3923 &request.data.delete_operational_queue.queue_id);
3924
3925 return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3926 &response);
3927}
3928
3929static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3930{
3931 int rc;
3932 struct pqi_event_queue *event_queue;
3933 struct pqi_general_admin_request request;
3934 struct pqi_general_admin_response response;
3935
3936 event_queue = &ctrl_info->event_queue;
3937
3938 /*
3939 * Create OQ (Outbound Queue - device to host queue) to dedicate
3940 * to events.
3941 */
3942 memset(&request, 0, sizeof(request));
3943 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3944 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3945 &request.header.iu_length);
3946 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3947 put_unaligned_le16(event_queue->oq_id,
3948 &request.data.create_operational_oq.queue_id);
3949 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3950 &request.data.create_operational_oq.element_array_addr);
3951 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3952 &request.data.create_operational_oq.pi_addr);
3953 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3954 &request.data.create_operational_oq.num_elements);
3955 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3956 &request.data.create_operational_oq.element_length);
3957 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3958 put_unaligned_le16(event_queue->int_msg_num,
3959 &request.data.create_operational_oq.int_msg_num);
3960
3961 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3962 &response);
3963 if (rc)
3964 return rc;
3965
3966 event_queue->oq_ci = ctrl_info->iomem_base +
3967 PQI_DEVICE_REGISTERS_OFFSET +
3968 get_unaligned_le64(
3969 &response.data.create_operational_oq.oq_ci_offset);
3970
3971 return 0;
3972}
3973
061ef06a
KB
3974static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
3975 unsigned int group_number)
6c223761 3976{
6c223761
KB
3977 int rc;
3978 struct pqi_queue_group *queue_group;
3979 struct pqi_general_admin_request request;
3980 struct pqi_general_admin_response response;
3981
061ef06a 3982 queue_group = &ctrl_info->queue_groups[group_number];
6c223761
KB
3983
3984 /*
3985 * Create IQ (Inbound Queue - host to device queue) for
3986 * RAID path.
3987 */
3988 memset(&request, 0, sizeof(request));
3989 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3990 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3991 &request.header.iu_length);
3992 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3993 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3994 &request.data.create_operational_iq.queue_id);
3995 put_unaligned_le64(
3996 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3997 &request.data.create_operational_iq.element_array_addr);
3998 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3999 &request.data.create_operational_iq.ci_addr);
4000 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4001 &request.data.create_operational_iq.num_elements);
4002 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4003 &request.data.create_operational_iq.element_length);
4004 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4005
4006 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4007 &response);
4008 if (rc) {
4009 dev_err(&ctrl_info->pci_dev->dev,
4010 "error creating inbound RAID queue\n");
4011 return rc;
4012 }
4013
4014 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4015 PQI_DEVICE_REGISTERS_OFFSET +
4016 get_unaligned_le64(
4017 &response.data.create_operational_iq.iq_pi_offset);
4018
4019 /*
4020 * Create IQ (Inbound Queue - host to device queue) for
4021 * Advanced I/O (AIO) path.
4022 */
4023 memset(&request, 0, sizeof(request));
4024 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4025 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4026 &request.header.iu_length);
4027 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4028 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4029 &request.data.create_operational_iq.queue_id);
4030 put_unaligned_le64((u64)queue_group->
4031 iq_element_array_bus_addr[AIO_PATH],
4032 &request.data.create_operational_iq.element_array_addr);
4033 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4034 &request.data.create_operational_iq.ci_addr);
4035 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4036 &request.data.create_operational_iq.num_elements);
4037 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4038 &request.data.create_operational_iq.element_length);
4039 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4040
4041 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4042 &response);
4043 if (rc) {
4044 dev_err(&ctrl_info->pci_dev->dev,
4045 "error creating inbound AIO queue\n");
4046 goto delete_inbound_queue_raid;
4047 }
4048
4049 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4050 PQI_DEVICE_REGISTERS_OFFSET +
4051 get_unaligned_le64(
4052 &response.data.create_operational_iq.iq_pi_offset);
4053
4054 /*
4055 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4056 * assumed to be for RAID path I/O unless we change the queue's
4057 * property.
4058 */
4059 memset(&request, 0, sizeof(request));
4060 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4061 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4062 &request.header.iu_length);
4063 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4064 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4065 &request.data.change_operational_iq_properties.queue_id);
4066 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4067 &request.data.change_operational_iq_properties.vendor_specific);
4068
4069 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4070 &response);
4071 if (rc) {
4072 dev_err(&ctrl_info->pci_dev->dev,
4073 "error changing queue property\n");
4074 goto delete_inbound_queue_aio;
4075 }
4076
4077 /*
4078 * Create OQ (Outbound Queue - device to host queue).
4079 */
4080 memset(&request, 0, sizeof(request));
4081 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4082 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4083 &request.header.iu_length);
4084 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4085 put_unaligned_le16(queue_group->oq_id,
4086 &request.data.create_operational_oq.queue_id);
4087 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4088 &request.data.create_operational_oq.element_array_addr);
4089 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4090 &request.data.create_operational_oq.pi_addr);
4091 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4092 &request.data.create_operational_oq.num_elements);
4093 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4094 &request.data.create_operational_oq.element_length);
4095 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4096 put_unaligned_le16(queue_group->int_msg_num,
4097 &request.data.create_operational_oq.int_msg_num);
4098
4099 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4100 &response);
4101 if (rc) {
4102 dev_err(&ctrl_info->pci_dev->dev,
4103 "error creating outbound queue\n");
4104 goto delete_inbound_queue_aio;
4105 }
4106
4107 queue_group->oq_ci = ctrl_info->iomem_base +
4108 PQI_DEVICE_REGISTERS_OFFSET +
4109 get_unaligned_le64(
4110 &response.data.create_operational_oq.oq_ci_offset);
4111
6c223761
KB
4112 return 0;
4113
4114delete_inbound_queue_aio:
4115 pqi_delete_operational_queue(ctrl_info, true,
4116 queue_group->iq_id[AIO_PATH]);
4117
4118delete_inbound_queue_raid:
4119 pqi_delete_operational_queue(ctrl_info, true,
4120 queue_group->iq_id[RAID_PATH]);
4121
4122 return rc;
4123}
4124
4125static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4126{
4127 int rc;
4128 unsigned int i;
4129
4130 rc = pqi_create_event_queue(ctrl_info);
4131 if (rc) {
4132 dev_err(&ctrl_info->pci_dev->dev,
4133 "error creating event queue\n");
4134 return rc;
4135 }
4136
4137 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
061ef06a 4138 rc = pqi_create_queue_group(ctrl_info, i);
6c223761
KB
4139 if (rc) {
4140 dev_err(&ctrl_info->pci_dev->dev,
4141 "error creating queue group number %u/%u\n",
4142 i, ctrl_info->num_queue_groups);
4143 return rc;
4144 }
4145 }
4146
4147 return 0;
4148}
4149
4150#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4151 (offsetof(struct pqi_event_config, descriptors) + \
4152 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4153
6a50d6ad
KB
4154static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4155 bool enable_events)
6c223761
KB
4156{
4157 int rc;
4158 unsigned int i;
4159 struct pqi_event_config *event_config;
6a50d6ad 4160 struct pqi_event_descriptor *event_descriptor;
6c223761
KB
4161 struct pqi_general_management_request request;
4162
4163 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4164 GFP_KERNEL);
4165 if (!event_config)
4166 return -ENOMEM;
4167
4168 memset(&request, 0, sizeof(request));
4169
4170 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4171 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4172 data.report_event_configuration.sg_descriptors[1]) -
4173 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4174 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4175 &request.data.report_event_configuration.buffer_length);
4176
4177 rc = pqi_map_single(ctrl_info->pci_dev,
4178 request.data.report_event_configuration.sg_descriptors,
4179 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4180 PCI_DMA_FROMDEVICE);
4181 if (rc)
4182 goto out;
4183
4184 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4185 0, NULL, NO_TIMEOUT);
4186
4187 pqi_pci_unmap(ctrl_info->pci_dev,
4188 request.data.report_event_configuration.sg_descriptors, 1,
4189 PCI_DMA_FROMDEVICE);
4190
4191 if (rc)
4192 goto out;
4193
6a50d6ad
KB
4194 for (i = 0; i < event_config->num_event_descriptors; i++) {
4195 event_descriptor = &event_config->descriptors[i];
4196 if (enable_events &&
4197 pqi_is_supported_event(event_descriptor->event_type))
4198 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4199 &event_descriptor->oq_id);
4200 else
4201 put_unaligned_le16(0, &event_descriptor->oq_id);
4202 }
6c223761
KB
4203
4204 memset(&request, 0, sizeof(request));
4205
4206 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4207 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4208 data.report_event_configuration.sg_descriptors[1]) -
4209 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4210 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4211 &request.data.report_event_configuration.buffer_length);
4212
4213 rc = pqi_map_single(ctrl_info->pci_dev,
4214 request.data.report_event_configuration.sg_descriptors,
4215 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4216 PCI_DMA_TODEVICE);
4217 if (rc)
4218 goto out;
4219
4220 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4221 NULL, NO_TIMEOUT);
4222
4223 pqi_pci_unmap(ctrl_info->pci_dev,
4224 request.data.report_event_configuration.sg_descriptors, 1,
4225 PCI_DMA_TODEVICE);
4226
4227out:
4228 kfree(event_config);
4229
4230 return rc;
4231}
4232
6a50d6ad
KB
4233static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4234{
4235 return pqi_configure_events(ctrl_info, true);
4236}
4237
4238static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4239{
4240 return pqi_configure_events(ctrl_info, false);
4241}
4242
6c223761
KB
4243static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4244{
4245 unsigned int i;
4246 struct device *dev;
4247 size_t sg_chain_buffer_length;
4248 struct pqi_io_request *io_request;
4249
4250 if (!ctrl_info->io_request_pool)
4251 return;
4252
4253 dev = &ctrl_info->pci_dev->dev;
4254 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4255 io_request = ctrl_info->io_request_pool;
4256
4257 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4258 kfree(io_request->iu);
4259 if (!io_request->sg_chain_buffer)
4260 break;
4261 dma_free_coherent(dev, sg_chain_buffer_length,
4262 io_request->sg_chain_buffer,
4263 io_request->sg_chain_buffer_dma_handle);
4264 io_request++;
4265 }
4266
4267 kfree(ctrl_info->io_request_pool);
4268 ctrl_info->io_request_pool = NULL;
4269}
4270
4271static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4272{
4273 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4274 ctrl_info->error_buffer_length,
4275 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4276
4277 if (!ctrl_info->error_buffer)
4278 return -ENOMEM;
4279
4280 return 0;
4281}
4282
4283static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4284{
4285 unsigned int i;
4286 void *sg_chain_buffer;
4287 size_t sg_chain_buffer_length;
4288 dma_addr_t sg_chain_buffer_dma_handle;
4289 struct device *dev;
4290 struct pqi_io_request *io_request;
4291
4292 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
4293 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4294
4295 if (!ctrl_info->io_request_pool) {
4296 dev_err(&ctrl_info->pci_dev->dev,
4297 "failed to allocate I/O request pool\n");
4298 goto error;
4299 }
4300
4301 dev = &ctrl_info->pci_dev->dev;
4302 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4303 io_request = ctrl_info->io_request_pool;
4304
4305 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4306 io_request->iu =
4307 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4308
4309 if (!io_request->iu) {
4310 dev_err(&ctrl_info->pci_dev->dev,
4311 "failed to allocate IU buffers\n");
4312 goto error;
4313 }
4314
4315 sg_chain_buffer = dma_alloc_coherent(dev,
4316 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4317 GFP_KERNEL);
4318
4319 if (!sg_chain_buffer) {
4320 dev_err(&ctrl_info->pci_dev->dev,
4321 "failed to allocate PQI scatter-gather chain buffers\n");
4322 goto error;
4323 }
4324
4325 io_request->index = i;
4326 io_request->sg_chain_buffer = sg_chain_buffer;
4327 io_request->sg_chain_buffer_dma_handle =
4328 sg_chain_buffer_dma_handle;
4329 io_request++;
4330 }
4331
4332 return 0;
4333
4334error:
4335 pqi_free_all_io_requests(ctrl_info);
4336
4337 return -ENOMEM;
4338}
4339
4340/*
4341 * Calculate required resources that are sized based on max. outstanding
4342 * requests and max. transfer size.
4343 */
4344
4345static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4346{
4347 u32 max_transfer_size;
4348 u32 max_sg_entries;
4349
4350 ctrl_info->scsi_ml_can_queue =
4351 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4352 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4353
4354 ctrl_info->error_buffer_length =
4355 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4356
d727a776
KB
4357 if (reset_devices)
4358 max_transfer_size = min(ctrl_info->max_transfer_size,
4359 PQI_MAX_TRANSFER_SIZE_KDUMP);
4360 else
4361 max_transfer_size = min(ctrl_info->max_transfer_size,
4362 PQI_MAX_TRANSFER_SIZE);
6c223761
KB
4363
4364 max_sg_entries = max_transfer_size / PAGE_SIZE;
4365
4366 /* +1 to cover when the buffer is not page-aligned. */
4367 max_sg_entries++;
4368
4369 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4370
4371 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4372
4373 ctrl_info->sg_chain_buffer_length =
e1d213bd
KB
4374 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4375 PQI_EXTRA_SGL_MEMORY;
6c223761
KB
4376 ctrl_info->sg_tablesize = max_sg_entries;
4377 ctrl_info->max_sectors = max_transfer_size / 512;
4378}
4379
4380static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4381{
6c223761
KB
4382 int num_queue_groups;
4383 u16 num_elements_per_iq;
4384 u16 num_elements_per_oq;
4385
d727a776
KB
4386 if (reset_devices) {
4387 num_queue_groups = 1;
4388 } else {
4389 int num_cpus;
4390 int max_queue_groups;
4391
4392 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4393 ctrl_info->max_outbound_queues - 1);
4394 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
6c223761 4395
d727a776
KB
4396 num_cpus = num_online_cpus();
4397 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4398 num_queue_groups = min(num_queue_groups, max_queue_groups);
4399 }
6c223761
KB
4400
4401 ctrl_info->num_queue_groups = num_queue_groups;
061ef06a 4402 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
6c223761 4403
77668f41
KB
4404 /*
4405 * Make sure that the max. inbound IU length is an even multiple
4406 * of our inbound element length.
4407 */
4408 ctrl_info->max_inbound_iu_length =
4409 (ctrl_info->max_inbound_iu_length_per_firmware /
4410 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4411 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
6c223761
KB
4412
4413 num_elements_per_iq =
4414 (ctrl_info->max_inbound_iu_length /
4415 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4416
4417 /* Add one because one element in each queue is unusable. */
4418 num_elements_per_iq++;
4419
4420 num_elements_per_iq = min(num_elements_per_iq,
4421 ctrl_info->max_elements_per_iq);
4422
4423 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4424 num_elements_per_oq = min(num_elements_per_oq,
4425 ctrl_info->max_elements_per_oq);
4426
4427 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4428 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4429
4430 ctrl_info->max_sg_per_iu =
4431 ((ctrl_info->max_inbound_iu_length -
4432 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4433 sizeof(struct pqi_sg_descriptor)) +
4434 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4435}
4436
4437static inline void pqi_set_sg_descriptor(
4438 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4439{
4440 u64 address = (u64)sg_dma_address(sg);
4441 unsigned int length = sg_dma_len(sg);
4442
4443 put_unaligned_le64(address, &sg_descriptor->address);
4444 put_unaligned_le32(length, &sg_descriptor->length);
4445 put_unaligned_le32(0, &sg_descriptor->flags);
4446}
4447
4448static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4449 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4450 struct pqi_io_request *io_request)
4451{
4452 int i;
4453 u16 iu_length;
4454 int sg_count;
4455 bool chained;
4456 unsigned int num_sg_in_iu;
4457 unsigned int max_sg_per_iu;
4458 struct scatterlist *sg;
4459 struct pqi_sg_descriptor *sg_descriptor;
4460
4461 sg_count = scsi_dma_map(scmd);
4462 if (sg_count < 0)
4463 return sg_count;
4464
4465 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4466 PQI_REQUEST_HEADER_LENGTH;
4467
4468 if (sg_count == 0)
4469 goto out;
4470
4471 sg = scsi_sglist(scmd);
4472 sg_descriptor = request->sg_descriptors;
4473 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4474 chained = false;
4475 num_sg_in_iu = 0;
4476 i = 0;
4477
4478 while (1) {
4479 pqi_set_sg_descriptor(sg_descriptor, sg);
4480 if (!chained)
4481 num_sg_in_iu++;
4482 i++;
4483 if (i == sg_count)
4484 break;
4485 sg_descriptor++;
4486 if (i == max_sg_per_iu) {
4487 put_unaligned_le64(
4488 (u64)io_request->sg_chain_buffer_dma_handle,
4489 &sg_descriptor->address);
4490 put_unaligned_le32((sg_count - num_sg_in_iu)
4491 * sizeof(*sg_descriptor),
4492 &sg_descriptor->length);
4493 put_unaligned_le32(CISS_SG_CHAIN,
4494 &sg_descriptor->flags);
4495 chained = true;
4496 num_sg_in_iu++;
4497 sg_descriptor = io_request->sg_chain_buffer;
4498 }
4499 sg = sg_next(sg);
4500 }
4501
4502 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4503 request->partial = chained;
4504 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4505
4506out:
4507 put_unaligned_le16(iu_length, &request->header.iu_length);
4508
4509 return 0;
4510}
4511
4512static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4513 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4514 struct pqi_io_request *io_request)
4515{
4516 int i;
4517 u16 iu_length;
4518 int sg_count;
a60eec02
KB
4519 bool chained;
4520 unsigned int num_sg_in_iu;
4521 unsigned int max_sg_per_iu;
6c223761
KB
4522 struct scatterlist *sg;
4523 struct pqi_sg_descriptor *sg_descriptor;
4524
4525 sg_count = scsi_dma_map(scmd);
4526 if (sg_count < 0)
4527 return sg_count;
a60eec02
KB
4528
4529 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4530 PQI_REQUEST_HEADER_LENGTH;
4531 num_sg_in_iu = 0;
4532
6c223761
KB
4533 if (sg_count == 0)
4534 goto out;
4535
a60eec02
KB
4536 sg = scsi_sglist(scmd);
4537 sg_descriptor = request->sg_descriptors;
4538 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4539 chained = false;
4540 i = 0;
4541
4542 while (1) {
4543 pqi_set_sg_descriptor(sg_descriptor, sg);
4544 if (!chained)
4545 num_sg_in_iu++;
4546 i++;
4547 if (i == sg_count)
4548 break;
4549 sg_descriptor++;
4550 if (i == max_sg_per_iu) {
4551 put_unaligned_le64(
4552 (u64)io_request->sg_chain_buffer_dma_handle,
4553 &sg_descriptor->address);
4554 put_unaligned_le32((sg_count - num_sg_in_iu)
4555 * sizeof(*sg_descriptor),
4556 &sg_descriptor->length);
4557 put_unaligned_le32(CISS_SG_CHAIN,
4558 &sg_descriptor->flags);
4559 chained = true;
4560 num_sg_in_iu++;
4561 sg_descriptor = io_request->sg_chain_buffer;
6c223761 4562 }
a60eec02 4563 sg = sg_next(sg);
6c223761
KB
4564 }
4565
a60eec02
KB
4566 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4567 request->partial = chained;
6c223761 4568 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
a60eec02
KB
4569
4570out:
6c223761
KB
4571 put_unaligned_le16(iu_length, &request->header.iu_length);
4572 request->num_sg_descriptors = num_sg_in_iu;
4573
4574 return 0;
4575}
4576
4577static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4578 void *context)
4579{
4580 struct scsi_cmnd *scmd;
4581
4582 scmd = io_request->scmd;
4583 pqi_free_io_request(io_request);
4584 scsi_dma_unmap(scmd);
4585 pqi_scsi_done(scmd);
4586}
4587
376fb880
KB
4588static int pqi_raid_submit_scsi_cmd_with_io_request(
4589 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
6c223761
KB
4590 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4591 struct pqi_queue_group *queue_group)
4592{
4593 int rc;
4594 size_t cdb_length;
6c223761
KB
4595 struct pqi_raid_path_request *request;
4596
6c223761
KB
4597 io_request->io_complete_callback = pqi_raid_io_complete;
4598 io_request->scmd = scmd;
4599
6c223761
KB
4600 request = io_request->iu;
4601 memset(request, 0,
4602 offsetof(struct pqi_raid_path_request, sg_descriptors));
4603
4604 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4605 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4606 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4607 put_unaligned_le16(io_request->index, &request->request_id);
4608 request->error_index = request->request_id;
4609 memcpy(request->lun_number, device->scsi3addr,
4610 sizeof(request->lun_number));
4611
4612 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4613 memcpy(request->cdb, scmd->cmnd, cdb_length);
4614
4615 switch (cdb_length) {
4616 case 6:
4617 case 10:
4618 case 12:
4619 case 16:
4620 /* No bytes in the Additional CDB bytes field */
4621 request->additional_cdb_bytes_usage =
4622 SOP_ADDITIONAL_CDB_BYTES_0;
4623 break;
4624 case 20:
4625 /* 4 bytes in the Additional cdb field */
4626 request->additional_cdb_bytes_usage =
4627 SOP_ADDITIONAL_CDB_BYTES_4;
4628 break;
4629 case 24:
4630 /* 8 bytes in the Additional cdb field */
4631 request->additional_cdb_bytes_usage =
4632 SOP_ADDITIONAL_CDB_BYTES_8;
4633 break;
4634 case 28:
4635 /* 12 bytes in the Additional cdb field */
4636 request->additional_cdb_bytes_usage =
4637 SOP_ADDITIONAL_CDB_BYTES_12;
4638 break;
4639 case 32:
4640 default:
4641 /* 16 bytes in the Additional cdb field */
4642 request->additional_cdb_bytes_usage =
4643 SOP_ADDITIONAL_CDB_BYTES_16;
4644 break;
4645 }
4646
4647 switch (scmd->sc_data_direction) {
4648 case DMA_TO_DEVICE:
4649 request->data_direction = SOP_READ_FLAG;
4650 break;
4651 case DMA_FROM_DEVICE:
4652 request->data_direction = SOP_WRITE_FLAG;
4653 break;
4654 case DMA_NONE:
4655 request->data_direction = SOP_NO_DIRECTION_FLAG;
4656 break;
4657 case DMA_BIDIRECTIONAL:
4658 request->data_direction = SOP_BIDIRECTIONAL;
4659 break;
4660 default:
4661 dev_err(&ctrl_info->pci_dev->dev,
4662 "unknown data direction: %d\n",
4663 scmd->sc_data_direction);
6c223761
KB
4664 break;
4665 }
4666
4667 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4668 if (rc) {
4669 pqi_free_io_request(io_request);
4670 return SCSI_MLQUEUE_HOST_BUSY;
4671 }
4672
4673 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4674
4675 return 0;
4676}
4677
376fb880
KB
4678static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4679 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4680 struct pqi_queue_group *queue_group)
4681{
4682 struct pqi_io_request *io_request;
4683
4684 io_request = pqi_alloc_io_request(ctrl_info);
4685
4686 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
4687 device, scmd, queue_group);
4688}
4689
4690static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
4691{
4692 if (!pqi_ctrl_blocked(ctrl_info))
4693 schedule_work(&ctrl_info->raid_bypass_retry_work);
4694}
4695
4696static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
4697{
4698 struct scsi_cmnd *scmd;
03b288cf 4699 struct pqi_scsi_dev *device;
376fb880
KB
4700 struct pqi_ctrl_info *ctrl_info;
4701
4702 if (!io_request->raid_bypass)
4703 return false;
4704
4705 scmd = io_request->scmd;
4706 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
4707 return false;
4708 if (host_byte(scmd->result) == DID_NO_CONNECT)
4709 return false;
4710
03b288cf
KB
4711 device = scmd->device->hostdata;
4712 if (pqi_device_offline(device))
4713 return false;
4714
376fb880
KB
4715 ctrl_info = shost_to_hba(scmd->device->host);
4716 if (pqi_ctrl_offline(ctrl_info))
4717 return false;
4718
4719 return true;
4720}
4721
4722static inline void pqi_add_to_raid_bypass_retry_list(
4723 struct pqi_ctrl_info *ctrl_info,
4724 struct pqi_io_request *io_request, bool at_head)
4725{
4726 unsigned long flags;
4727
4728 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4729 if (at_head)
4730 list_add(&io_request->request_list_entry,
4731 &ctrl_info->raid_bypass_retry_list);
4732 else
4733 list_add_tail(&io_request->request_list_entry,
4734 &ctrl_info->raid_bypass_retry_list);
4735 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4736}
4737
4738static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
4739 void *context)
4740{
4741 struct scsi_cmnd *scmd;
4742
4743 scmd = io_request->scmd;
4744 pqi_free_io_request(io_request);
4745 pqi_scsi_done(scmd);
4746}
4747
4748static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
4749{
4750 struct scsi_cmnd *scmd;
4751 struct pqi_ctrl_info *ctrl_info;
4752
4753 io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
4754 scmd = io_request->scmd;
4755 scmd->result = 0;
4756 ctrl_info = shost_to_hba(scmd->device->host);
4757
4758 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
4759 pqi_schedule_bypass_retry(ctrl_info);
4760}
4761
4762static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
4763{
4764 struct scsi_cmnd *scmd;
4765 struct pqi_scsi_dev *device;
4766 struct pqi_ctrl_info *ctrl_info;
4767 struct pqi_queue_group *queue_group;
4768
4769 scmd = io_request->scmd;
4770 device = scmd->device->hostdata;
4771 if (pqi_device_in_reset(device)) {
4772 pqi_free_io_request(io_request);
4773 set_host_byte(scmd, DID_RESET);
4774 pqi_scsi_done(scmd);
4775 return 0;
4776 }
4777
4778 ctrl_info = shost_to_hba(scmd->device->host);
4779 queue_group = io_request->queue_group;
4780
4781 pqi_reinit_io_request(io_request);
4782
4783 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
4784 device, scmd, queue_group);
4785}
4786
4787static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
4788 struct pqi_ctrl_info *ctrl_info)
4789{
4790 unsigned long flags;
4791 struct pqi_io_request *io_request;
4792
4793 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
4794 io_request = list_first_entry_or_null(
4795 &ctrl_info->raid_bypass_retry_list,
4796 struct pqi_io_request, request_list_entry);
4797 if (io_request)
4798 list_del(&io_request->request_list_entry);
4799 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4800
4801 return io_request;
4802}
4803
4804static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
4805{
4806 int rc;
4807 struct pqi_io_request *io_request;
4808
4809 pqi_ctrl_busy(ctrl_info);
4810
4811 while (1) {
4812 if (pqi_ctrl_blocked(ctrl_info))
4813 break;
4814 io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
4815 if (!io_request)
4816 break;
4817 rc = pqi_retry_raid_bypass(io_request);
4818 if (rc) {
4819 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
4820 true);
4821 pqi_schedule_bypass_retry(ctrl_info);
4822 break;
4823 }
4824 }
4825
4826 pqi_ctrl_unbusy(ctrl_info);
4827}
4828
4829static void pqi_raid_bypass_retry_worker(struct work_struct *work)
4830{
4831 struct pqi_ctrl_info *ctrl_info;
4832
4833 ctrl_info = container_of(work, struct pqi_ctrl_info,
4834 raid_bypass_retry_work);
4835 pqi_retry_raid_bypass_requests(ctrl_info);
4836}
4837
5f310425
KB
4838static void pqi_clear_all_queued_raid_bypass_retries(
4839 struct pqi_ctrl_info *ctrl_info)
376fb880
KB
4840{
4841 unsigned long flags;
376fb880
KB
4842
4843 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
5f310425 4844 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
376fb880
KB
4845 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
4846}
4847
6c223761
KB
4848static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4849 void *context)
4850{
4851 struct scsi_cmnd *scmd;
4852
4853 scmd = io_request->scmd;
4854 scsi_dma_unmap(scmd);
4855 if (io_request->status == -EAGAIN)
4856 set_host_byte(scmd, DID_IMM_RETRY);
376fb880
KB
4857 else if (pqi_raid_bypass_retry_needed(io_request)) {
4858 pqi_queue_raid_bypass_retry(io_request);
4859 return;
4860 }
6c223761
KB
4861 pqi_free_io_request(io_request);
4862 pqi_scsi_done(scmd);
4863}
4864
4865static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4866 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4867 struct pqi_queue_group *queue_group)
4868{
4869 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
376fb880 4870 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
6c223761
KB
4871}
4872
4873static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4874 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4875 unsigned int cdb_length, struct pqi_queue_group *queue_group,
376fb880 4876 struct pqi_encryption_info *encryption_info, bool raid_bypass)
6c223761
KB
4877{
4878 int rc;
4879 struct pqi_io_request *io_request;
4880 struct pqi_aio_path_request *request;
4881
4882 io_request = pqi_alloc_io_request(ctrl_info);
4883 io_request->io_complete_callback = pqi_aio_io_complete;
4884 io_request->scmd = scmd;
376fb880 4885 io_request->raid_bypass = raid_bypass;
6c223761
KB
4886
4887 request = io_request->iu;
4888 memset(request, 0,
4889 offsetof(struct pqi_raid_path_request, sg_descriptors));
4890
4891 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4892 put_unaligned_le32(aio_handle, &request->nexus_id);
4893 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4894 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4895 put_unaligned_le16(io_request->index, &request->request_id);
4896 request->error_index = request->request_id;
4897 if (cdb_length > sizeof(request->cdb))
4898 cdb_length = sizeof(request->cdb);
4899 request->cdb_length = cdb_length;
4900 memcpy(request->cdb, cdb, cdb_length);
4901
4902 switch (scmd->sc_data_direction) {
4903 case DMA_TO_DEVICE:
4904 request->data_direction = SOP_READ_FLAG;
4905 break;
4906 case DMA_FROM_DEVICE:
4907 request->data_direction = SOP_WRITE_FLAG;
4908 break;
4909 case DMA_NONE:
4910 request->data_direction = SOP_NO_DIRECTION_FLAG;
4911 break;
4912 case DMA_BIDIRECTIONAL:
4913 request->data_direction = SOP_BIDIRECTIONAL;
4914 break;
4915 default:
4916 dev_err(&ctrl_info->pci_dev->dev,
4917 "unknown data direction: %d\n",
4918 scmd->sc_data_direction);
6c223761
KB
4919 break;
4920 }
4921
4922 if (encryption_info) {
4923 request->encryption_enable = true;
4924 put_unaligned_le16(encryption_info->data_encryption_key_index,
4925 &request->data_encryption_key_index);
4926 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4927 &request->encrypt_tweak_lower);
4928 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4929 &request->encrypt_tweak_upper);
4930 }
4931
4932 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4933 if (rc) {
4934 pqi_free_io_request(io_request);
4935 return SCSI_MLQUEUE_HOST_BUSY;
4936 }
4937
4938 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4939
4940 return 0;
4941}
4942
061ef06a
KB
4943static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
4944 struct scsi_cmnd *scmd)
4945{
4946 u16 hw_queue;
4947
4948 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4949 if (hw_queue > ctrl_info->max_hw_queue_index)
4950 hw_queue = 0;
4951
4952 return hw_queue;
4953}
4954
7561a7e4
KB
4955/*
4956 * This function gets called just before we hand the completed SCSI request
4957 * back to the SML.
4958 */
4959
4960void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
4961{
4962 struct pqi_scsi_dev *device;
4963
4964 device = scmd->device->hostdata;
4965 atomic_dec(&device->scsi_cmds_outstanding);
4966}
4967
6c223761 4968static int pqi_scsi_queue_command(struct Scsi_Host *shost,
7d81d2b8 4969 struct scsi_cmnd *scmd)
6c223761
KB
4970{
4971 int rc;
4972 struct pqi_ctrl_info *ctrl_info;
4973 struct pqi_scsi_dev *device;
061ef06a 4974 u16 hw_queue;
6c223761
KB
4975 struct pqi_queue_group *queue_group;
4976 bool raid_bypassed;
4977
4978 device = scmd->device->hostdata;
6c223761
KB
4979 ctrl_info = shost_to_hba(shost);
4980
7561a7e4
KB
4981 atomic_inc(&device->scsi_cmds_outstanding);
4982
6c223761
KB
4983 if (pqi_ctrl_offline(ctrl_info)) {
4984 set_host_byte(scmd, DID_NO_CONNECT);
4985 pqi_scsi_done(scmd);
4986 return 0;
4987 }
4988
7561a7e4
KB
4989 pqi_ctrl_busy(ctrl_info);
4990 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
4991 rc = SCSI_MLQUEUE_HOST_BUSY;
4992 goto out;
4993 }
4994
7d81d2b8
KB
4995 /*
4996 * This is necessary because the SML doesn't zero out this field during
4997 * error recovery.
4998 */
4999 scmd->result = 0;
5000
061ef06a
KB
5001 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
5002 queue_group = &ctrl_info->queue_groups[hw_queue];
6c223761
KB
5003
5004 if (pqi_is_logical_device(device)) {
5005 raid_bypassed = false;
588a63fe 5006 if (device->raid_bypass_enabled &&
57292b58 5007 !blk_rq_is_passthrough(scmd->request)) {
6c223761
KB
5008 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
5009 scmd, queue_group);
376fb880
KB
5010 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
5011 raid_bypassed = true;
6c223761
KB
5012 }
5013 if (!raid_bypassed)
5014 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5015 queue_group);
5016 } else {
5017 if (device->aio_enabled)
5018 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
5019 queue_group);
5020 else
5021 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
5022 queue_group);
5023 }
5024
7561a7e4
KB
5025out:
5026 pqi_ctrl_unbusy(ctrl_info);
5027 if (rc)
5028 atomic_dec(&device->scsi_cmds_outstanding);
5029
6c223761
KB
5030 return rc;
5031}
5032
7561a7e4
KB
5033static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
5034 struct pqi_queue_group *queue_group)
5035{
5036 unsigned int path;
5037 unsigned long flags;
5038 bool list_is_empty;
5039
5040 for (path = 0; path < 2; path++) {
5041 while (1) {
5042 spin_lock_irqsave(
5043 &queue_group->submit_lock[path], flags);
5044 list_is_empty =
5045 list_empty(&queue_group->request_list[path]);
5046 spin_unlock_irqrestore(
5047 &queue_group->submit_lock[path], flags);
5048 if (list_is_empty)
5049 break;
5050 pqi_check_ctrl_health(ctrl_info);
5051 if (pqi_ctrl_offline(ctrl_info))
5052 return -ENXIO;
5053 usleep_range(1000, 2000);
5054 }
5055 }
5056
5057 return 0;
5058}
5059
5060static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
5061{
5062 int rc;
5063 unsigned int i;
5064 unsigned int path;
5065 struct pqi_queue_group *queue_group;
5066 pqi_index_t iq_pi;
5067 pqi_index_t iq_ci;
5068
5069 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5070 queue_group = &ctrl_info->queue_groups[i];
5071
5072 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
5073 if (rc)
5074 return rc;
5075
5076 for (path = 0; path < 2; path++) {
5077 iq_pi = queue_group->iq_pi_copy[path];
5078
5079 while (1) {
5080 iq_ci = *queue_group->iq_ci[path];
5081 if (iq_ci == iq_pi)
5082 break;
5083 pqi_check_ctrl_health(ctrl_info);
5084 if (pqi_ctrl_offline(ctrl_info))
5085 return -ENXIO;
5086 usleep_range(1000, 2000);
5087 }
5088 }
5089 }
5090
5091 return 0;
5092}
5093
5094static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
5095 struct pqi_scsi_dev *device)
5096{
5097 unsigned int i;
5098 unsigned int path;
5099 struct pqi_queue_group *queue_group;
5100 unsigned long flags;
5101 struct pqi_io_request *io_request;
5102 struct pqi_io_request *next;
5103 struct scsi_cmnd *scmd;
5104 struct pqi_scsi_dev *scsi_device;
5105
5106 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5107 queue_group = &ctrl_info->queue_groups[i];
5108
5109 for (path = 0; path < 2; path++) {
5110 spin_lock_irqsave(
5111 &queue_group->submit_lock[path], flags);
5112
5113 list_for_each_entry_safe(io_request, next,
5114 &queue_group->request_list[path],
5115 request_list_entry) {
5116 scmd = io_request->scmd;
5117 if (!scmd)
5118 continue;
5119
5120 scsi_device = scmd->device->hostdata;
5121 if (scsi_device != device)
5122 continue;
5123
5124 list_del(&io_request->request_list_entry);
5125 set_host_byte(scmd, DID_RESET);
5126 pqi_scsi_done(scmd);
5127 }
5128
5129 spin_unlock_irqrestore(
5130 &queue_group->submit_lock[path], flags);
5131 }
5132 }
5133}
5134
061ef06a
KB
5135static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
5136 struct pqi_scsi_dev *device)
5137{
5138 while (atomic_read(&device->scsi_cmds_outstanding)) {
5139 pqi_check_ctrl_health(ctrl_info);
5140 if (pqi_ctrl_offline(ctrl_info))
5141 return -ENXIO;
5142 usleep_range(1000, 2000);
5143 }
5144
5145 return 0;
5146}
5147
5148static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
5149{
5150 bool io_pending;
5151 unsigned long flags;
5152 struct pqi_scsi_dev *device;
5153
5154 while (1) {
5155 io_pending = false;
5156
5157 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5158 list_for_each_entry(device, &ctrl_info->scsi_device_list,
5159 scsi_device_list_entry) {
5160 if (atomic_read(&device->scsi_cmds_outstanding)) {
5161 io_pending = true;
5162 break;
5163 }
5164 }
5165 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5166 flags);
5167
5168 if (!io_pending)
5169 break;
5170
5171 pqi_check_ctrl_health(ctrl_info);
5172 if (pqi_ctrl_offline(ctrl_info))
5173 return -ENXIO;
5174
5175 usleep_range(1000, 2000);
5176 }
5177
5178 return 0;
5179}
5180
14bb215d
KB
5181static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
5182 void *context)
6c223761 5183{
14bb215d 5184 struct completion *waiting = context;
6c223761 5185
14bb215d
KB
5186 complete(waiting);
5187}
6c223761 5188
14bb215d
KB
5189#define PQI_LUN_RESET_TIMEOUT_SECS 10
5190
5191static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
5192 struct pqi_scsi_dev *device, struct completion *wait)
5193{
5194 int rc;
14bb215d
KB
5195
5196 while (1) {
5197 if (wait_for_completion_io_timeout(wait,
5198 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
5199 rc = 0;
5200 break;
6c223761
KB
5201 }
5202
14bb215d
KB
5203 pqi_check_ctrl_health(ctrl_info);
5204 if (pqi_ctrl_offline(ctrl_info)) {
4e8415e3 5205 rc = -ENXIO;
14bb215d
KB
5206 break;
5207 }
6c223761 5208 }
6c223761 5209
14bb215d 5210 return rc;
6c223761
KB
5211}
5212
14bb215d 5213static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
6c223761
KB
5214 struct pqi_scsi_dev *device)
5215{
5216 int rc;
5217 struct pqi_io_request *io_request;
5218 DECLARE_COMPLETION_ONSTACK(wait);
5219 struct pqi_task_management_request *request;
5220
6c223761 5221 io_request = pqi_alloc_io_request(ctrl_info);
14bb215d 5222 io_request->io_complete_callback = pqi_lun_reset_complete;
6c223761
KB
5223 io_request->context = &wait;
5224
5225 request = io_request->iu;
5226 memset(request, 0, sizeof(*request));
5227
5228 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5229 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5230 &request->header.iu_length);
5231 put_unaligned_le16(io_request->index, &request->request_id);
5232 memcpy(request->lun_number, device->scsi3addr,
5233 sizeof(request->lun_number));
5234 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5235
5236 pqi_start_io(ctrl_info,
5237 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5238 io_request);
5239
14bb215d
KB
5240 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5241 if (rc == 0)
6c223761 5242 rc = io_request->status;
6c223761
KB
5243
5244 pqi_free_io_request(io_request);
6c223761
KB
5245
5246 return rc;
5247}
5248
5249/* Performs a reset at the LUN level. */
5250
5251static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5252 struct pqi_scsi_dev *device)
5253{
5254 int rc;
5255
14bb215d 5256 rc = pqi_lun_reset(ctrl_info, device);
061ef06a
KB
5257 if (rc == 0)
5258 rc = pqi_device_wait_for_pending_io(ctrl_info, device);
6c223761 5259
14bb215d 5260 return rc == 0 ? SUCCESS : FAILED;
6c223761
KB
5261}
5262
5263static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5264{
5265 int rc;
7561a7e4 5266 struct Scsi_Host *shost;
6c223761
KB
5267 struct pqi_ctrl_info *ctrl_info;
5268 struct pqi_scsi_dev *device;
5269
7561a7e4
KB
5270 shost = scmd->device->host;
5271 ctrl_info = shost_to_hba(shost);
6c223761
KB
5272 device = scmd->device->hostdata;
5273
5274 dev_err(&ctrl_info->pci_dev->dev,
5275 "resetting scsi %d:%d:%d:%d\n",
7561a7e4 5276 shost->host_no, device->bus, device->target, device->lun);
6c223761 5277
7561a7e4
KB
5278 pqi_check_ctrl_health(ctrl_info);
5279 if (pqi_ctrl_offline(ctrl_info)) {
5280 rc = FAILED;
5281 goto out;
5282 }
6c223761 5283
7561a7e4
KB
5284 mutex_lock(&ctrl_info->lun_reset_mutex);
5285
5286 pqi_ctrl_block_requests(ctrl_info);
5287 pqi_ctrl_wait_until_quiesced(ctrl_info);
5288 pqi_fail_io_queued_for_device(ctrl_info, device);
5289 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5290 pqi_device_reset_start(device);
5291 pqi_ctrl_unblock_requests(ctrl_info);
5292
5293 if (rc)
5294 rc = FAILED;
5295 else
5296 rc = pqi_device_reset(ctrl_info, device);
5297
5298 pqi_device_reset_done(device);
5299
5300 mutex_unlock(&ctrl_info->lun_reset_mutex);
5301
5302out:
6c223761
KB
5303 dev_err(&ctrl_info->pci_dev->dev,
5304 "reset of scsi %d:%d:%d:%d: %s\n",
7561a7e4 5305 shost->host_no, device->bus, device->target, device->lun,
6c223761
KB
5306 rc == SUCCESS ? "SUCCESS" : "FAILED");
5307
5308 return rc;
5309}
5310
5311static int pqi_slave_alloc(struct scsi_device *sdev)
5312{
5313 struct pqi_scsi_dev *device;
5314 unsigned long flags;
5315 struct pqi_ctrl_info *ctrl_info;
5316 struct scsi_target *starget;
5317 struct sas_rphy *rphy;
5318
5319 ctrl_info = shost_to_hba(sdev->host);
5320
5321 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5322
5323 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5324 starget = scsi_target(sdev);
5325 rphy = target_to_rphy(starget);
5326 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5327 if (device) {
5328 device->target = sdev_id(sdev);
5329 device->lun = sdev->lun;
5330 device->target_lun_valid = true;
5331 }
5332 } else {
5333 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5334 sdev_id(sdev), sdev->lun);
5335 }
5336
94086f5b 5337 if (device) {
6c223761
KB
5338 sdev->hostdata = device;
5339 device->sdev = sdev;
5340 if (device->queue_depth) {
5341 device->advertised_queue_depth = device->queue_depth;
5342 scsi_change_queue_depth(sdev,
5343 device->advertised_queue_depth);
5344 }
5345 }
5346
5347 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5348
5349 return 0;
5350}
5351
52198226
CH
5352static int pqi_map_queues(struct Scsi_Host *shost)
5353{
5354 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5355
5356 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
5357}
5358
6c223761
KB
5359static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5360 void __user *arg)
5361{
5362 struct pci_dev *pci_dev;
5363 u32 subsystem_vendor;
5364 u32 subsystem_device;
5365 cciss_pci_info_struct pciinfo;
5366
5367 if (!arg)
5368 return -EINVAL;
5369
5370 pci_dev = ctrl_info->pci_dev;
5371
5372 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5373 pciinfo.bus = pci_dev->bus->number;
5374 pciinfo.dev_fn = pci_dev->devfn;
5375 subsystem_vendor = pci_dev->subsystem_vendor;
5376 subsystem_device = pci_dev->subsystem_device;
5377 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5378 subsystem_vendor;
5379
5380 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5381 return -EFAULT;
5382
5383 return 0;
5384}
5385
5386static int pqi_getdrivver_ioctl(void __user *arg)
5387{
5388 u32 version;
5389
5390 if (!arg)
5391 return -EINVAL;
5392
5393 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5394 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5395
5396 if (copy_to_user(arg, &version, sizeof(version)))
5397 return -EFAULT;
5398
5399 return 0;
5400}
5401
5402struct ciss_error_info {
5403 u8 scsi_status;
5404 int command_status;
5405 size_t sense_data_length;
5406};
5407
5408static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5409 struct ciss_error_info *ciss_error_info)
5410{
5411 int ciss_cmd_status;
5412 size_t sense_data_length;
5413
5414 switch (pqi_error_info->data_out_result) {
5415 case PQI_DATA_IN_OUT_GOOD:
5416 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5417 break;
5418 case PQI_DATA_IN_OUT_UNDERFLOW:
5419 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5420 break;
5421 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5422 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5423 break;
5424 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5425 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5426 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5427 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5428 case PQI_DATA_IN_OUT_ERROR:
5429 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5430 break;
5431 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5432 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5433 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5434 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5435 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5436 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5437 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5438 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5439 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5440 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5441 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5442 break;
5443 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5444 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5445 break;
5446 case PQI_DATA_IN_OUT_ABORTED:
5447 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5448 break;
5449 case PQI_DATA_IN_OUT_TIMEOUT:
5450 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5451 break;
5452 default:
5453 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5454 break;
5455 }
5456
5457 sense_data_length =
5458 get_unaligned_le16(&pqi_error_info->sense_data_length);
5459 if (sense_data_length == 0)
5460 sense_data_length =
5461 get_unaligned_le16(&pqi_error_info->response_data_length);
5462 if (sense_data_length)
5463 if (sense_data_length > sizeof(pqi_error_info->data))
5464 sense_data_length = sizeof(pqi_error_info->data);
5465
5466 ciss_error_info->scsi_status = pqi_error_info->status;
5467 ciss_error_info->command_status = ciss_cmd_status;
5468 ciss_error_info->sense_data_length = sense_data_length;
5469}
5470
5471static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5472{
5473 int rc;
5474 char *kernel_buffer = NULL;
5475 u16 iu_length;
5476 size_t sense_data_length;
5477 IOCTL_Command_struct iocommand;
5478 struct pqi_raid_path_request request;
5479 struct pqi_raid_error_info pqi_error_info;
5480 struct ciss_error_info ciss_error_info;
5481
5482 if (pqi_ctrl_offline(ctrl_info))
5483 return -ENXIO;
5484 if (!arg)
5485 return -EINVAL;
5486 if (!capable(CAP_SYS_RAWIO))
5487 return -EPERM;
5488 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5489 return -EFAULT;
5490 if (iocommand.buf_size < 1 &&
5491 iocommand.Request.Type.Direction != XFER_NONE)
5492 return -EINVAL;
5493 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5494 return -EINVAL;
5495 if (iocommand.Request.Type.Type != TYPE_CMD)
5496 return -EINVAL;
5497
5498 switch (iocommand.Request.Type.Direction) {
5499 case XFER_NONE:
5500 case XFER_WRITE:
5501 case XFER_READ:
41555d54 5502 case XFER_READ | XFER_WRITE:
6c223761
KB
5503 break;
5504 default:
5505 return -EINVAL;
5506 }
5507
5508 if (iocommand.buf_size > 0) {
5509 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
5510 if (!kernel_buffer)
5511 return -ENOMEM;
5512 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5513 if (copy_from_user(kernel_buffer, iocommand.buf,
5514 iocommand.buf_size)) {
5515 rc = -EFAULT;
5516 goto out;
5517 }
5518 } else {
5519 memset(kernel_buffer, 0, iocommand.buf_size);
5520 }
5521 }
5522
5523 memset(&request, 0, sizeof(request));
5524
5525 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5526 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5527 PQI_REQUEST_HEADER_LENGTH;
5528 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
5529 sizeof(request.lun_number));
5530 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
5531 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5532
5533 switch (iocommand.Request.Type.Direction) {
5534 case XFER_NONE:
5535 request.data_direction = SOP_NO_DIRECTION_FLAG;
5536 break;
5537 case XFER_WRITE:
5538 request.data_direction = SOP_WRITE_FLAG;
5539 break;
5540 case XFER_READ:
5541 request.data_direction = SOP_READ_FLAG;
5542 break;
41555d54
KB
5543 case XFER_READ | XFER_WRITE:
5544 request.data_direction = SOP_BIDIRECTIONAL;
5545 break;
6c223761
KB
5546 }
5547
5548 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5549
5550 if (iocommand.buf_size > 0) {
5551 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
5552
5553 rc = pqi_map_single(ctrl_info->pci_dev,
5554 &request.sg_descriptors[0], kernel_buffer,
5555 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
5556 if (rc)
5557 goto out;
5558
5559 iu_length += sizeof(request.sg_descriptors[0]);
5560 }
5561
5562 put_unaligned_le16(iu_length, &request.header.iu_length);
5563
5564 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
5565 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
5566
5567 if (iocommand.buf_size > 0)
5568 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
5569 PCI_DMA_BIDIRECTIONAL);
5570
5571 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
5572
5573 if (rc == 0) {
5574 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
5575 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
5576 iocommand.error_info.CommandStatus =
5577 ciss_error_info.command_status;
5578 sense_data_length = ciss_error_info.sense_data_length;
5579 if (sense_data_length) {
5580 if (sense_data_length >
5581 sizeof(iocommand.error_info.SenseInfo))
5582 sense_data_length =
5583 sizeof(iocommand.error_info.SenseInfo);
5584 memcpy(iocommand.error_info.SenseInfo,
5585 pqi_error_info.data, sense_data_length);
5586 iocommand.error_info.SenseLen = sense_data_length;
5587 }
5588 }
5589
5590 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
5591 rc = -EFAULT;
5592 goto out;
5593 }
5594
5595 if (rc == 0 && iocommand.buf_size > 0 &&
5596 (iocommand.Request.Type.Direction & XFER_READ)) {
5597 if (copy_to_user(iocommand.buf, kernel_buffer,
5598 iocommand.buf_size)) {
5599 rc = -EFAULT;
5600 }
5601 }
5602
5603out:
5604 kfree(kernel_buffer);
5605
5606 return rc;
5607}
5608
5609static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5610{
5611 int rc;
5612 struct pqi_ctrl_info *ctrl_info;
5613
5614 ctrl_info = shost_to_hba(sdev->host);
5615
5616 switch (cmd) {
5617 case CCISS_DEREGDISK:
5618 case CCISS_REGNEWDISK:
5619 case CCISS_REGNEWD:
5620 rc = pqi_scan_scsi_devices(ctrl_info);
5621 break;
5622 case CCISS_GETPCIINFO:
5623 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5624 break;
5625 case CCISS_GETDRIVVER:
5626 rc = pqi_getdrivver_ioctl(arg);
5627 break;
5628 case CCISS_PASSTHRU:
5629 rc = pqi_passthru_ioctl(ctrl_info, arg);
5630 break;
5631 default:
5632 rc = -EINVAL;
5633 break;
5634 }
5635
5636 return rc;
5637}
5638
5639static ssize_t pqi_version_show(struct device *dev,
5640 struct device_attribute *attr, char *buffer)
5641{
5642 ssize_t count = 0;
5643 struct Scsi_Host *shost;
5644 struct pqi_ctrl_info *ctrl_info;
5645
5646 shost = class_to_shost(dev);
5647 ctrl_info = shost_to_hba(shost);
5648
5649 count += snprintf(buffer + count, PAGE_SIZE - count,
5650 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5651
5652 count += snprintf(buffer + count, PAGE_SIZE - count,
5653 "firmware: %s\n", ctrl_info->firmware_version);
5654
5655 return count;
5656}
5657
5658static ssize_t pqi_host_rescan_store(struct device *dev,
5659 struct device_attribute *attr, const char *buffer, size_t count)
5660{
5661 struct Scsi_Host *shost = class_to_shost(dev);
5662
5663 pqi_scan_start(shost);
5664
5665 return count;
5666}
5667
3c50976f
KB
5668static ssize_t pqi_lockup_action_show(struct device *dev,
5669 struct device_attribute *attr, char *buffer)
5670{
5671 int count = 0;
5672 unsigned int i;
5673
5674 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
5675 if (pqi_lockup_actions[i].action == pqi_lockup_action)
5676 count += snprintf(buffer + count, PAGE_SIZE - count,
5677 "[%s] ", pqi_lockup_actions[i].name);
5678 else
5679 count += snprintf(buffer + count, PAGE_SIZE - count,
5680 "%s ", pqi_lockup_actions[i].name);
5681 }
5682
5683 count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
5684
5685 return count;
5686}
5687
5688static ssize_t pqi_lockup_action_store(struct device *dev,
5689 struct device_attribute *attr, const char *buffer, size_t count)
5690{
5691 unsigned int i;
5692 char *action_name;
5693 char action_name_buffer[32];
5694
5695 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
5696 action_name = strstrip(action_name_buffer);
5697
5698 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
5699 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
5700 pqi_lockup_action = pqi_lockup_actions[i].action;
5701 return count;
5702 }
5703 }
5704
5705 return -EINVAL;
5706}
5707
cbe0c7b1
KB
5708static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
5709static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
3c50976f
KB
5710static DEVICE_ATTR(lockup_action, 0644,
5711 pqi_lockup_action_show, pqi_lockup_action_store);
6c223761
KB
5712
5713static struct device_attribute *pqi_shost_attrs[] = {
5714 &dev_attr_version,
5715 &dev_attr_rescan,
3c50976f 5716 &dev_attr_lockup_action,
6c223761
KB
5717 NULL
5718};
5719
5720static ssize_t pqi_sas_address_show(struct device *dev,
5721 struct device_attribute *attr, char *buffer)
5722{
5723 struct pqi_ctrl_info *ctrl_info;
5724 struct scsi_device *sdev;
5725 struct pqi_scsi_dev *device;
5726 unsigned long flags;
5727 u64 sas_address;
5728
5729 sdev = to_scsi_device(dev);
5730 ctrl_info = shost_to_hba(sdev->host);
5731
5732 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5733
5734 device = sdev->hostdata;
5735 if (pqi_is_logical_device(device)) {
5736 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5737 flags);
5738 return -ENODEV;
5739 }
5740 sas_address = device->sas_address;
5741
5742 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5743
5744 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5745}
5746
5747static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5748 struct device_attribute *attr, char *buffer)
5749{
5750 struct pqi_ctrl_info *ctrl_info;
5751 struct scsi_device *sdev;
5752 struct pqi_scsi_dev *device;
5753 unsigned long flags;
5754
5755 sdev = to_scsi_device(dev);
5756 ctrl_info = shost_to_hba(sdev->host);
5757
5758 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5759
5760 device = sdev->hostdata;
588a63fe 5761 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
6c223761
KB
5762 buffer[1] = '\n';
5763 buffer[2] = '\0';
5764
5765 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5766
5767 return 2;
5768}
5769
a9f93392
KB
5770static ssize_t pqi_raid_level_show(struct device *dev,
5771 struct device_attribute *attr, char *buffer)
5772{
5773 struct pqi_ctrl_info *ctrl_info;
5774 struct scsi_device *sdev;
5775 struct pqi_scsi_dev *device;
5776 unsigned long flags;
5777 char *raid_level;
5778
5779 sdev = to_scsi_device(dev);
5780 ctrl_info = shost_to_hba(sdev->host);
5781
5782 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5783
5784 device = sdev->hostdata;
5785
5786 if (pqi_is_logical_device(device))
5787 raid_level = pqi_raid_level_to_string(device->raid_level);
5788 else
5789 raid_level = "N/A";
5790
5791 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5792
5793 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
5794}
5795
cbe0c7b1
KB
5796static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
5797static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
6c223761 5798 pqi_ssd_smart_path_enabled_show, NULL);
a9f93392 5799static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
6c223761
KB
5800
5801static struct device_attribute *pqi_sdev_attrs[] = {
5802 &dev_attr_sas_address,
5803 &dev_attr_ssd_smart_path_enabled,
a9f93392 5804 &dev_attr_raid_level,
6c223761
KB
5805 NULL
5806};
5807
5808static struct scsi_host_template pqi_driver_template = {
5809 .module = THIS_MODULE,
5810 .name = DRIVER_NAME_SHORT,
5811 .proc_name = DRIVER_NAME_SHORT,
5812 .queuecommand = pqi_scsi_queue_command,
5813 .scan_start = pqi_scan_start,
5814 .scan_finished = pqi_scan_finished,
5815 .this_id = -1,
5816 .use_clustering = ENABLE_CLUSTERING,
5817 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5818 .ioctl = pqi_ioctl,
5819 .slave_alloc = pqi_slave_alloc,
52198226 5820 .map_queues = pqi_map_queues,
6c223761
KB
5821 .sdev_attrs = pqi_sdev_attrs,
5822 .shost_attrs = pqi_shost_attrs,
5823};
5824
5825static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5826{
5827 int rc;
5828 struct Scsi_Host *shost;
5829
5830 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5831 if (!shost) {
5832 dev_err(&ctrl_info->pci_dev->dev,
5833 "scsi_host_alloc failed for controller %u\n",
5834 ctrl_info->ctrl_id);
5835 return -ENOMEM;
5836 }
5837
5838 shost->io_port = 0;
5839 shost->n_io_port = 0;
5840 shost->this_id = -1;
5841 shost->max_channel = PQI_MAX_BUS;
5842 shost->max_cmd_len = MAX_COMMAND_SIZE;
5843 shost->max_lun = ~0;
5844 shost->max_id = ~0;
5845 shost->max_sectors = ctrl_info->max_sectors;
5846 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5847 shost->cmd_per_lun = shost->can_queue;
5848 shost->sg_tablesize = ctrl_info->sg_tablesize;
5849 shost->transportt = pqi_sas_transport_template;
52198226 5850 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
6c223761
KB
5851 shost->unique_id = shost->irq;
5852 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5853 shost->hostdata[0] = (unsigned long)ctrl_info;
5854
5855 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5856 if (rc) {
5857 dev_err(&ctrl_info->pci_dev->dev,
5858 "scsi_add_host failed for controller %u\n",
5859 ctrl_info->ctrl_id);
5860 goto free_host;
5861 }
5862
5863 rc = pqi_add_sas_host(shost, ctrl_info);
5864 if (rc) {
5865 dev_err(&ctrl_info->pci_dev->dev,
5866 "add SAS host failed for controller %u\n",
5867 ctrl_info->ctrl_id);
5868 goto remove_host;
5869 }
5870
5871 ctrl_info->scsi_host = shost;
5872
5873 return 0;
5874
5875remove_host:
5876 scsi_remove_host(shost);
5877free_host:
5878 scsi_host_put(shost);
5879
5880 return rc;
5881}
5882
5883static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5884{
5885 struct Scsi_Host *shost;
5886
5887 pqi_delete_sas_host(ctrl_info);
5888
5889 shost = ctrl_info->scsi_host;
5890 if (!shost)
5891 return;
5892
5893 scsi_remove_host(shost);
5894 scsi_host_put(shost);
5895}
5896
336b6819
KB
5897static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
5898{
5899 int rc = 0;
5900 struct pqi_device_registers __iomem *pqi_registers;
5901 unsigned long timeout;
5902 unsigned int timeout_msecs;
5903 union pqi_reset_register reset_reg;
6c223761 5904
336b6819
KB
5905 pqi_registers = ctrl_info->pqi_registers;
5906 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
5907 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
5908
5909 while (1) {
5910 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
5911 reset_reg.all_bits = readl(&pqi_registers->device_reset);
5912 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
5913 break;
5914 pqi_check_ctrl_health(ctrl_info);
5915 if (pqi_ctrl_offline(ctrl_info)) {
5916 rc = -ENXIO;
5917 break;
5918 }
5919 if (time_after(jiffies, timeout)) {
5920 rc = -ETIMEDOUT;
5921 break;
5922 }
5923 }
5924
5925 return rc;
5926}
6c223761
KB
5927
5928static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5929{
5930 int rc;
336b6819
KB
5931 union pqi_reset_register reset_reg;
5932
5933 if (ctrl_info->pqi_reset_quiesce_supported) {
5934 rc = sis_pqi_reset_quiesce(ctrl_info);
5935 if (rc) {
5936 dev_err(&ctrl_info->pci_dev->dev,
5937 "PQI reset failed during quiesce with error %d\n",
5938 rc);
5939 return rc;
5940 }
5941 }
6c223761 5942
336b6819
KB
5943 reset_reg.all_bits = 0;
5944 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
5945 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
6c223761 5946
336b6819 5947 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
6c223761 5948
336b6819 5949 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
6c223761
KB
5950 if (rc)
5951 dev_err(&ctrl_info->pci_dev->dev,
336b6819 5952 "PQI reset failed with error %d\n", rc);
6c223761
KB
5953
5954 return rc;
5955}
5956
5957static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5958{
5959 int rc;
5960 struct bmic_identify_controller *identify;
5961
5962 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5963 if (!identify)
5964 return -ENOMEM;
5965
5966 rc = pqi_identify_controller(ctrl_info, identify);
5967 if (rc)
5968 goto out;
5969
5970 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5971 sizeof(identify->firmware_version));
5972 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5973 snprintf(ctrl_info->firmware_version +
5974 strlen(ctrl_info->firmware_version),
5975 sizeof(ctrl_info->firmware_version),
5976 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5977
5978out:
5979 kfree(identify);
5980
5981 return rc;
5982}
5983
98f87667
KB
5984static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
5985{
5986 u32 table_length;
5987 u32 section_offset;
5988 void __iomem *table_iomem_addr;
5989 struct pqi_config_table *config_table;
5990 struct pqi_config_table_section_header *section;
5991
5992 table_length = ctrl_info->config_table_length;
5993
5994 config_table = kmalloc(table_length, GFP_KERNEL);
5995 if (!config_table) {
5996 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 5997 "failed to allocate memory for PQI configuration table\n");
98f87667
KB
5998 return -ENOMEM;
5999 }
6000
6001 /*
6002 * Copy the config table contents from I/O memory space into the
6003 * temporary buffer.
6004 */
6005 table_iomem_addr = ctrl_info->iomem_base +
6006 ctrl_info->config_table_offset;
6007 memcpy_fromio(config_table, table_iomem_addr, table_length);
6008
6009 section_offset =
6010 get_unaligned_le32(&config_table->first_section_offset);
6011
6012 while (section_offset) {
6013 section = (void *)config_table + section_offset;
6014
6015 switch (get_unaligned_le16(&section->section_id)) {
6016 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
5a259e32
KB
6017 if (pqi_disable_heartbeat)
6018 dev_warn(&ctrl_info->pci_dev->dev,
6019 "heartbeat disabled by module parameter\n");
6020 else
6021 ctrl_info->heartbeat_counter =
6022 table_iomem_addr +
6023 section_offset +
6024 offsetof(
6025 struct pqi_config_table_heartbeat,
6026 heartbeat_counter);
98f87667
KB
6027 break;
6028 }
6029
6030 section_offset =
6031 get_unaligned_le16(&section->next_section_offset);
6032 }
6033
6034 kfree(config_table);
6035
6036 return 0;
6037}
6038
162d7753
KB
6039/* Switches the controller from PQI mode back into SIS mode. */
6040
6041static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
6042{
6043 int rc;
6044
061ef06a 6045 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
162d7753
KB
6046 rc = pqi_reset(ctrl_info);
6047 if (rc)
6048 return rc;
6049 sis_reenable_sis_mode(ctrl_info);
6050 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
6051
6052 return 0;
6053}
6054
6055/*
6056 * If the controller isn't already in SIS mode, this function forces it into
6057 * SIS mode.
6058 */
6059
6060static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
ff6abb73
KB
6061{
6062 if (!sis_is_firmware_running(ctrl_info))
6063 return -ENXIO;
6064
162d7753
KB
6065 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
6066 return 0;
6067
6068 if (sis_is_kernel_up(ctrl_info)) {
6069 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
6070 return 0;
ff6abb73
KB
6071 }
6072
162d7753 6073 return pqi_revert_to_sis_mode(ctrl_info);
ff6abb73
KB
6074}
6075
6c223761
KB
6076static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
6077{
6078 int rc;
6079
162d7753
KB
6080 rc = pqi_force_sis_mode(ctrl_info);
6081 if (rc)
6082 return rc;
6c223761
KB
6083
6084 /*
6085 * Wait until the controller is ready to start accepting SIS
6086 * commands.
6087 */
6088 rc = sis_wait_for_ctrl_ready(ctrl_info);
8845fdfa 6089 if (rc)
6c223761 6090 return rc;
6c223761
KB
6091
6092 /*
6093 * Get the controller properties. This allows us to determine
6094 * whether or not it supports PQI mode.
6095 */
6096 rc = sis_get_ctrl_properties(ctrl_info);
6097 if (rc) {
6098 dev_err(&ctrl_info->pci_dev->dev,
6099 "error obtaining controller properties\n");
6100 return rc;
6101 }
6102
6103 rc = sis_get_pqi_capabilities(ctrl_info);
6104 if (rc) {
6105 dev_err(&ctrl_info->pci_dev->dev,
6106 "error obtaining controller capabilities\n");
6107 return rc;
6108 }
6109
d727a776
KB
6110 if (reset_devices) {
6111 if (ctrl_info->max_outstanding_requests >
6112 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
6113 ctrl_info->max_outstanding_requests =
6114 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
6115 } else {
6116 if (ctrl_info->max_outstanding_requests >
6117 PQI_MAX_OUTSTANDING_REQUESTS)
6118 ctrl_info->max_outstanding_requests =
6119 PQI_MAX_OUTSTANDING_REQUESTS;
6120 }
6c223761
KB
6121
6122 pqi_calculate_io_resources(ctrl_info);
6123
6124 rc = pqi_alloc_error_buffer(ctrl_info);
6125 if (rc) {
6126 dev_err(&ctrl_info->pci_dev->dev,
6127 "failed to allocate PQI error buffer\n");
6128 return rc;
6129 }
6130
6131 /*
6132 * If the function we are about to call succeeds, the
6133 * controller will transition from legacy SIS mode
6134 * into PQI mode.
6135 */
6136 rc = sis_init_base_struct_addr(ctrl_info);
6137 if (rc) {
6138 dev_err(&ctrl_info->pci_dev->dev,
6139 "error initializing PQI mode\n");
6140 return rc;
6141 }
6142
6143 /* Wait for the controller to complete the SIS -> PQI transition. */
6144 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6145 if (rc) {
6146 dev_err(&ctrl_info->pci_dev->dev,
6147 "transition to PQI mode failed\n");
6148 return rc;
6149 }
6150
6151 /* From here on, we are running in PQI mode. */
6152 ctrl_info->pqi_mode_enabled = true;
ff6abb73 6153 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6c223761 6154
98f87667
KB
6155 rc = pqi_process_config_table(ctrl_info);
6156 if (rc)
6157 return rc;
6158
6c223761
KB
6159 rc = pqi_alloc_admin_queues(ctrl_info);
6160 if (rc) {
6161 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 6162 "failed to allocate admin queues\n");
6c223761
KB
6163 return rc;
6164 }
6165
6166 rc = pqi_create_admin_queues(ctrl_info);
6167 if (rc) {
6168 dev_err(&ctrl_info->pci_dev->dev,
6169 "error creating admin queues\n");
6170 return rc;
6171 }
6172
6173 rc = pqi_report_device_capability(ctrl_info);
6174 if (rc) {
6175 dev_err(&ctrl_info->pci_dev->dev,
6176 "obtaining device capability failed\n");
6177 return rc;
6178 }
6179
6180 rc = pqi_validate_device_capability(ctrl_info);
6181 if (rc)
6182 return rc;
6183
6184 pqi_calculate_queue_resources(ctrl_info);
6185
6186 rc = pqi_enable_msix_interrupts(ctrl_info);
6187 if (rc)
6188 return rc;
6189
6190 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
6191 ctrl_info->max_msix_vectors =
6192 ctrl_info->num_msix_vectors_enabled;
6193 pqi_calculate_queue_resources(ctrl_info);
6194 }
6195
6196 rc = pqi_alloc_io_resources(ctrl_info);
6197 if (rc)
6198 return rc;
6199
6200 rc = pqi_alloc_operational_queues(ctrl_info);
d87d5474
KB
6201 if (rc) {
6202 dev_err(&ctrl_info->pci_dev->dev,
6203 "failed to allocate operational queues\n");
6c223761 6204 return rc;
d87d5474 6205 }
6c223761
KB
6206
6207 pqi_init_operational_queues(ctrl_info);
6208
6209 rc = pqi_request_irqs(ctrl_info);
6210 if (rc)
6211 return rc;
6212
6c223761
KB
6213 rc = pqi_create_queues(ctrl_info);
6214 if (rc)
6215 return rc;
6216
061ef06a
KB
6217 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6218
6219 ctrl_info->controller_online = true;
6220 pqi_start_heartbeat_timer(ctrl_info);
6c223761 6221
6a50d6ad 6222 rc = pqi_enable_events(ctrl_info);
6c223761
KB
6223 if (rc) {
6224 dev_err(&ctrl_info->pci_dev->dev,
6a50d6ad 6225 "error enabling events\n");
6c223761
KB
6226 return rc;
6227 }
6228
6c223761
KB
6229 /* Register with the SCSI subsystem. */
6230 rc = pqi_register_scsi(ctrl_info);
6231 if (rc)
6232 return rc;
6233
6234 rc = pqi_get_ctrl_firmware_version(ctrl_info);
6235 if (rc) {
6236 dev_err(&ctrl_info->pci_dev->dev,
6237 "error obtaining firmware version\n");
6238 return rc;
6239 }
6240
6241 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6242 if (rc) {
6243 dev_err(&ctrl_info->pci_dev->dev,
6244 "error updating host wellness\n");
6245 return rc;
6246 }
6247
6248 pqi_schedule_update_time_worker(ctrl_info);
6249
6250 pqi_scan_scsi_devices(ctrl_info);
6251
6252 return 0;
6253}
6254
061ef06a
KB
6255static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
6256{
6257 unsigned int i;
6258 struct pqi_admin_queues *admin_queues;
6259 struct pqi_event_queue *event_queue;
6260
6261 admin_queues = &ctrl_info->admin_queues;
6262 admin_queues->iq_pi_copy = 0;
6263 admin_queues->oq_ci_copy = 0;
6264 *admin_queues->oq_pi = 0;
6265
6266 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6267 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
6268 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
6269 ctrl_info->queue_groups[i].oq_ci_copy = 0;
6270
6271 *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0;
6272 *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0;
6273 *ctrl_info->queue_groups[i].oq_pi = 0;
6274 }
6275
6276 event_queue = &ctrl_info->event_queue;
6277 *event_queue->oq_pi = 0;
6278 event_queue->oq_ci_copy = 0;
6279}
6280
6281static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
6282{
6283 int rc;
6284
6285 rc = pqi_force_sis_mode(ctrl_info);
6286 if (rc)
6287 return rc;
6288
6289 /*
6290 * Wait until the controller is ready to start accepting SIS
6291 * commands.
6292 */
6293 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
6294 if (rc)
6295 return rc;
6296
6297 /*
6298 * If the function we are about to call succeeds, the
6299 * controller will transition from legacy SIS mode
6300 * into PQI mode.
6301 */
6302 rc = sis_init_base_struct_addr(ctrl_info);
6303 if (rc) {
6304 dev_err(&ctrl_info->pci_dev->dev,
6305 "error initializing PQI mode\n");
6306 return rc;
6307 }
6308
6309 /* Wait for the controller to complete the SIS -> PQI transition. */
6310 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
6311 if (rc) {
6312 dev_err(&ctrl_info->pci_dev->dev,
6313 "transition to PQI mode failed\n");
6314 return rc;
6315 }
6316
6317 /* From here on, we are running in PQI mode. */
6318 ctrl_info->pqi_mode_enabled = true;
6319 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6320
6321 pqi_reinit_queues(ctrl_info);
6322
6323 rc = pqi_create_admin_queues(ctrl_info);
6324 if (rc) {
6325 dev_err(&ctrl_info->pci_dev->dev,
6326 "error creating admin queues\n");
6327 return rc;
6328 }
6329
6330 rc = pqi_create_queues(ctrl_info);
6331 if (rc)
6332 return rc;
6333
6334 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6335
6336 ctrl_info->controller_online = true;
6337 pqi_start_heartbeat_timer(ctrl_info);
6338 pqi_ctrl_unblock_requests(ctrl_info);
6339
6340 rc = pqi_enable_events(ctrl_info);
6341 if (rc) {
6342 dev_err(&ctrl_info->pci_dev->dev,
d87d5474 6343 "error enabling events\n");
061ef06a
KB
6344 return rc;
6345 }
6346
6347 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6348 if (rc) {
6349 dev_err(&ctrl_info->pci_dev->dev,
6350 "error updating host wellness\n");
6351 return rc;
6352 }
6353
6354 pqi_schedule_update_time_worker(ctrl_info);
6355
6356 pqi_scan_scsi_devices(ctrl_info);
6357
6358 return 0;
6359}
6360
a81ed5f3
KB
6361static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
6362 u16 timeout)
6363{
6364 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
6365 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
6366}
6367
6c223761
KB
6368static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
6369{
6370 int rc;
6371 u64 mask;
6372
6373 rc = pci_enable_device(ctrl_info->pci_dev);
6374 if (rc) {
6375 dev_err(&ctrl_info->pci_dev->dev,
6376 "failed to enable PCI device\n");
6377 return rc;
6378 }
6379
6380 if (sizeof(dma_addr_t) > 4)
6381 mask = DMA_BIT_MASK(64);
6382 else
6383 mask = DMA_BIT_MASK(32);
6384
6385 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
6386 if (rc) {
6387 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
6388 goto disable_device;
6389 }
6390
6391 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
6392 if (rc) {
6393 dev_err(&ctrl_info->pci_dev->dev,
6394 "failed to obtain PCI resources\n");
6395 goto disable_device;
6396 }
6397
6398 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
6399 ctrl_info->pci_dev, 0),
6400 sizeof(struct pqi_ctrl_registers));
6401 if (!ctrl_info->iomem_base) {
6402 dev_err(&ctrl_info->pci_dev->dev,
6403 "failed to map memory for controller registers\n");
6404 rc = -ENOMEM;
6405 goto release_regions;
6406 }
6407
a81ed5f3
KB
6408#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
6409
6410 /* Increase the PCIe completion timeout. */
6411 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
6412 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
6413 if (rc) {
6414 dev_err(&ctrl_info->pci_dev->dev,
6415 "failed to set PCIe completion timeout\n");
6416 goto release_regions;
6417 }
6418
6c223761
KB
6419 /* Enable bus mastering. */
6420 pci_set_master(ctrl_info->pci_dev);
6421
cbe0c7b1
KB
6422 ctrl_info->registers = ctrl_info->iomem_base;
6423 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
6424
6c223761
KB
6425 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
6426
6427 return 0;
6428
6429release_regions:
6430 pci_release_regions(ctrl_info->pci_dev);
6431disable_device:
6432 pci_disable_device(ctrl_info->pci_dev);
6433
6434 return rc;
6435}
6436
6437static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
6438{
6439 iounmap(ctrl_info->iomem_base);
6440 pci_release_regions(ctrl_info->pci_dev);
cbe0c7b1
KB
6441 if (pci_is_enabled(ctrl_info->pci_dev))
6442 pci_disable_device(ctrl_info->pci_dev);
6c223761
KB
6443 pci_set_drvdata(ctrl_info->pci_dev, NULL);
6444}
6445
6446static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
6447{
6448 struct pqi_ctrl_info *ctrl_info;
6449
6450 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
6451 GFP_KERNEL, numa_node);
6452 if (!ctrl_info)
6453 return NULL;
6454
6455 mutex_init(&ctrl_info->scan_mutex);
7561a7e4 6456 mutex_init(&ctrl_info->lun_reset_mutex);
6c223761
KB
6457
6458 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
6459 spin_lock_init(&ctrl_info->scsi_device_list_lock);
6460
6461 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
6462 atomic_set(&ctrl_info->num_interrupts, 0);
6463
6464 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
6465 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
6466
98f87667 6467 init_timer(&ctrl_info->heartbeat_timer);
5f310425 6468 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
98f87667 6469
6c223761
KB
6470 sema_init(&ctrl_info->sync_request_sem,
6471 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
7561a7e4 6472 init_waitqueue_head(&ctrl_info->block_requests_wait);
6c223761 6473
376fb880
KB
6474 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
6475 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
6476 INIT_WORK(&ctrl_info->raid_bypass_retry_work,
6477 pqi_raid_bypass_retry_worker);
6478
6c223761 6479 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
061ef06a 6480 ctrl_info->irq_mode = IRQ_MODE_NONE;
6c223761
KB
6481 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
6482
6483 return ctrl_info;
6484}
6485
6486static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
6487{
6488 kfree(ctrl_info);
6489}
6490
6491static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
6492{
98bf061b
KB
6493 pqi_free_irqs(ctrl_info);
6494 pqi_disable_msix_interrupts(ctrl_info);
6c223761
KB
6495}
6496
6497static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
6498{
6499 pqi_stop_heartbeat_timer(ctrl_info);
6500 pqi_free_interrupts(ctrl_info);
6501 if (ctrl_info->queue_memory_base)
6502 dma_free_coherent(&ctrl_info->pci_dev->dev,
6503 ctrl_info->queue_memory_length,
6504 ctrl_info->queue_memory_base,
6505 ctrl_info->queue_memory_base_dma_handle);
6506 if (ctrl_info->admin_queue_memory_base)
6507 dma_free_coherent(&ctrl_info->pci_dev->dev,
6508 ctrl_info->admin_queue_memory_length,
6509 ctrl_info->admin_queue_memory_base,
6510 ctrl_info->admin_queue_memory_base_dma_handle);
6511 pqi_free_all_io_requests(ctrl_info);
6512 if (ctrl_info->error_buffer)
6513 dma_free_coherent(&ctrl_info->pci_dev->dev,
6514 ctrl_info->error_buffer_length,
6515 ctrl_info->error_buffer,
6516 ctrl_info->error_buffer_dma_handle);
6517 if (ctrl_info->iomem_base)
6518 pqi_cleanup_pci_init(ctrl_info);
6519 pqi_free_ctrl_info(ctrl_info);
6520}
6521
6522static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
6523{
061ef06a
KB
6524 pqi_cancel_rescan_worker(ctrl_info);
6525 pqi_cancel_update_time_worker(ctrl_info);
e57a1f9b
KB
6526 pqi_remove_all_scsi_devices(ctrl_info);
6527 pqi_unregister_scsi(ctrl_info);
162d7753
KB
6528 if (ctrl_info->pqi_mode_enabled)
6529 pqi_revert_to_sis_mode(ctrl_info);
6c223761
KB
6530 pqi_free_ctrl_resources(ctrl_info);
6531}
6532
3c50976f
KB
6533static void pqi_perform_lockup_action(void)
6534{
6535 switch (pqi_lockup_action) {
6536 case PANIC:
6537 panic("FATAL: Smart Family Controller lockup detected");
6538 break;
6539 case REBOOT:
6540 emergency_restart();
6541 break;
6542 case NONE:
6543 default:
6544 break;
6545 }
6546}
6547
5f310425
KB
6548static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
6549 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
6550 .status = SAM_STAT_CHECK_CONDITION,
6551};
6552
6553static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
376fb880
KB
6554{
6555 unsigned int i;
376fb880 6556 struct pqi_io_request *io_request;
376fb880
KB
6557 struct scsi_cmnd *scmd;
6558
5f310425
KB
6559 for (i = 0; i < ctrl_info->max_io_slots; i++) {
6560 io_request = &ctrl_info->io_request_pool[i];
6561 if (atomic_read(&io_request->refcount) == 0)
6562 continue;
376fb880 6563
5f310425
KB
6564 scmd = io_request->scmd;
6565 if (scmd) {
6566 set_host_byte(scmd, DID_NO_CONNECT);
6567 } else {
6568 io_request->status = -ENXIO;
6569 io_request->error_info =
6570 &pqi_ctrl_offline_raid_error_info;
376fb880 6571 }
5f310425
KB
6572
6573 io_request->io_complete_callback(io_request,
6574 io_request->context);
376fb880
KB
6575 }
6576}
6577
5f310425 6578static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
376fb880 6579{
5f310425
KB
6580 pqi_perform_lockup_action();
6581 pqi_stop_heartbeat_timer(ctrl_info);
6582 pqi_free_interrupts(ctrl_info);
6583 pqi_cancel_rescan_worker(ctrl_info);
6584 pqi_cancel_update_time_worker(ctrl_info);
6585 pqi_ctrl_wait_until_quiesced(ctrl_info);
6586 pqi_fail_all_outstanding_requests(ctrl_info);
6587 pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
6588 pqi_ctrl_unblock_requests(ctrl_info);
6589}
6590
6591static void pqi_ctrl_offline_worker(struct work_struct *work)
6592{
6593 struct pqi_ctrl_info *ctrl_info;
6594
6595 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
6596 pqi_take_ctrl_offline_deferred(ctrl_info);
376fb880
KB
6597}
6598
6599static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
6600{
5f310425
KB
6601 if (!ctrl_info->controller_online)
6602 return;
6603
376fb880 6604 ctrl_info->controller_online = false;
5f310425
KB
6605 ctrl_info->pqi_mode_enabled = false;
6606 pqi_ctrl_block_requests(ctrl_info);
5a259e32
KB
6607 if (!pqi_disable_ctrl_shutdown)
6608 sis_shutdown_ctrl(ctrl_info);
376fb880
KB
6609 pci_disable_device(ctrl_info->pci_dev);
6610 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
5f310425 6611 schedule_work(&ctrl_info->ctrl_offline_work);
376fb880
KB
6612}
6613
d91d7820 6614static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
6c223761
KB
6615 const struct pci_device_id *id)
6616{
6617 char *ctrl_description;
6618
37b36847 6619 if (id->driver_data)
6c223761 6620 ctrl_description = (char *)id->driver_data;
37b36847
KB
6621 else
6622 ctrl_description = "Microsemi Smart Family Controller";
6c223761 6623
d91d7820 6624 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
6c223761
KB
6625}
6626
d91d7820
KB
6627static int pqi_pci_probe(struct pci_dev *pci_dev,
6628 const struct pci_device_id *id)
6c223761
KB
6629{
6630 int rc;
6631 int node;
6632 struct pqi_ctrl_info *ctrl_info;
6633
d91d7820 6634 pqi_print_ctrl_info(pci_dev, id);
6c223761
KB
6635
6636 if (pqi_disable_device_id_wildcards &&
6637 id->subvendor == PCI_ANY_ID &&
6638 id->subdevice == PCI_ANY_ID) {
d91d7820 6639 dev_warn(&pci_dev->dev,
6c223761
KB
6640 "controller not probed because device ID wildcards are disabled\n");
6641 return -ENODEV;
6642 }
6643
6644 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
d91d7820 6645 dev_warn(&pci_dev->dev,
6c223761
KB
6646 "controller device ID matched using wildcards\n");
6647
d91d7820 6648 node = dev_to_node(&pci_dev->dev);
6c223761 6649 if (node == NUMA_NO_NODE)
d91d7820 6650 set_dev_node(&pci_dev->dev, 0);
6c223761
KB
6651
6652 ctrl_info = pqi_alloc_ctrl_info(node);
6653 if (!ctrl_info) {
d91d7820 6654 dev_err(&pci_dev->dev,
6c223761
KB
6655 "failed to allocate controller info block\n");
6656 return -ENOMEM;
6657 }
6658
d91d7820 6659 ctrl_info->pci_dev = pci_dev;
6c223761
KB
6660
6661 rc = pqi_pci_init(ctrl_info);
6662 if (rc)
6663 goto error;
6664
6665 rc = pqi_ctrl_init(ctrl_info);
6666 if (rc)
6667 goto error;
6668
6669 return 0;
6670
6671error:
6672 pqi_remove_ctrl(ctrl_info);
6673
6674 return rc;
6675}
6676
d91d7820 6677static void pqi_pci_remove(struct pci_dev *pci_dev)
6c223761
KB
6678{
6679 struct pqi_ctrl_info *ctrl_info;
6680
d91d7820 6681 ctrl_info = pci_get_drvdata(pci_dev);
6c223761
KB
6682 if (!ctrl_info)
6683 return;
6684
6685 pqi_remove_ctrl(ctrl_info);
6686}
6687
d91d7820 6688static void pqi_shutdown(struct pci_dev *pci_dev)
6c223761
KB
6689{
6690 int rc;
6691 struct pqi_ctrl_info *ctrl_info;
6692
d91d7820 6693 ctrl_info = pci_get_drvdata(pci_dev);
6c223761
KB
6694 if (!ctrl_info)
6695 goto error;
6696
6697 /*
6698 * Write all data in the controller's battery-backed cache to
6699 * storage.
6700 */
58322fe0 6701 rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
6c223761
KB
6702 if (rc == 0)
6703 return;
6704
6705error:
d91d7820 6706 dev_warn(&pci_dev->dev,
6c223761
KB
6707 "unable to flush controller cache\n");
6708}
6709
3c50976f
KB
6710static void pqi_process_lockup_action_param(void)
6711{
6712 unsigned int i;
6713
6714 if (!pqi_lockup_action_param)
6715 return;
6716
6717 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6718 if (strcmp(pqi_lockup_action_param,
6719 pqi_lockup_actions[i].name) == 0) {
6720 pqi_lockup_action = pqi_lockup_actions[i].action;
6721 return;
6722 }
6723 }
6724
6725 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
6726 DRIVER_NAME_SHORT, pqi_lockup_action_param);
6727}
6728
6729static void pqi_process_module_params(void)
6730{
6731 pqi_process_lockup_action_param();
6732}
6733
5c146686 6734static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
061ef06a
KB
6735{
6736 struct pqi_ctrl_info *ctrl_info;
6737
6738 ctrl_info = pci_get_drvdata(pci_dev);
6739
6740 pqi_disable_events(ctrl_info);
6741 pqi_cancel_update_time_worker(ctrl_info);
6742 pqi_cancel_rescan_worker(ctrl_info);
6743 pqi_wait_until_scan_finished(ctrl_info);
6744 pqi_wait_until_lun_reset_finished(ctrl_info);
58322fe0 6745 pqi_flush_cache(ctrl_info, SUSPEND);
061ef06a
KB
6746 pqi_ctrl_block_requests(ctrl_info);
6747 pqi_ctrl_wait_until_quiesced(ctrl_info);
6748 pqi_wait_until_inbound_queues_empty(ctrl_info);
6749 pqi_ctrl_wait_for_pending_io(ctrl_info);
6750 pqi_stop_heartbeat_timer(ctrl_info);
6751
6752 if (state.event == PM_EVENT_FREEZE)
6753 return 0;
6754
6755 pci_save_state(pci_dev);
6756 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
6757
6758 ctrl_info->controller_online = false;
6759 ctrl_info->pqi_mode_enabled = false;
6760
6761 return 0;
6762}
6763
5c146686 6764static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
061ef06a
KB
6765{
6766 int rc;
6767 struct pqi_ctrl_info *ctrl_info;
6768
6769 ctrl_info = pci_get_drvdata(pci_dev);
6770
6771 if (pci_dev->current_state != PCI_D0) {
6772 ctrl_info->max_hw_queue_index = 0;
6773 pqi_free_interrupts(ctrl_info);
6774 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
6775 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
6776 IRQF_SHARED, DRIVER_NAME_SHORT,
6777 &ctrl_info->queue_groups[0]);
6778 if (rc) {
6779 dev_err(&ctrl_info->pci_dev->dev,
6780 "irq %u init failed with error %d\n",
6781 pci_dev->irq, rc);
6782 return rc;
6783 }
6784 pqi_start_heartbeat_timer(ctrl_info);
6785 pqi_ctrl_unblock_requests(ctrl_info);
6786 return 0;
6787 }
6788
6789 pci_set_power_state(pci_dev, PCI_D0);
6790 pci_restore_state(pci_dev);
6791
6792 return pqi_ctrl_init_resume(ctrl_info);
6793}
6794
6c223761
KB
6795/* Define the PCI IDs for the controllers that we support. */
6796static const struct pci_device_id pqi_pci_id_table[] = {
7eddabff
KB
6797 {
6798 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6799 0x152d, 0x8a22)
6800 },
6801 {
6802 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6803 0x152d, 0x8a23)
6804 },
6805 {
6806 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6807 0x152d, 0x8a24)
6808 },
6809 {
6810 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6811 0x152d, 0x8a36)
6812 },
6813 {
6814 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6815 0x152d, 0x8a37)
6816 },
6c223761
KB
6817 {
6818 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6819 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
6820 },
6821 {
6822 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6823 PCI_VENDOR_ID_ADAPTEC2, 0x0605)
6c223761
KB
6824 },
6825 {
6826 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6827 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
6c223761
KB
6828 },
6829 {
6830 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6831 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
6c223761
KB
6832 },
6833 {
6834 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6835 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
6c223761
KB
6836 },
6837 {
6838 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6839 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
6c223761
KB
6840 },
6841 {
6842 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6843 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
6c223761
KB
6844 },
6845 {
6846 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6847 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
6c223761
KB
6848 },
6849 {
6850 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6851 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
6c223761
KB
6852 },
6853 {
6854 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6855 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
6c223761
KB
6856 },
6857 {
6858 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6859 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
6c223761
KB
6860 },
6861 {
6862 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6863 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
6c223761
KB
6864 },
6865 {
6866 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6867 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
6c223761
KB
6868 },
6869 {
6870 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6871 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
6c223761
KB
6872 },
6873 {
6874 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6875 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
6c223761
KB
6876 },
6877 {
6878 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6879 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
6c223761
KB
6880 },
6881 {
6882 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6883 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
6c223761
KB
6884 },
6885 {
6886 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6887 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
6c223761
KB
6888 },
6889 {
6890 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6891 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
6c223761
KB
6892 },
6893 {
6894 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6895 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
6c223761
KB
6896 },
6897 {
6898 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6899 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
6c223761
KB
6900 },
6901 {
6902 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6903 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
6c223761
KB
6904 },
6905 {
6906 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6907 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
6c223761
KB
6908 },
6909 {
6910 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6911 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
6c223761
KB
6912 },
6913 {
6914 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff 6915 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
6c223761
KB
6916 },
6917 {
6918 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7eddabff
KB
6919 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
6920 },
6921 {
6922 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6923 PCI_VENDOR_ID_HP, 0x0600)
6924 },
6925 {
6926 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6927 PCI_VENDOR_ID_HP, 0x0601)
6928 },
6929 {
6930 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6931 PCI_VENDOR_ID_HP, 0x0602)
6932 },
6933 {
6934 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6935 PCI_VENDOR_ID_HP, 0x0603)
6936 },
6937 {
6938 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6939 PCI_VENDOR_ID_HP, 0x0604)
6940 },
6941 {
6942 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6943 PCI_VENDOR_ID_HP, 0x0606)
6944 },
6945 {
6946 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6947 PCI_VENDOR_ID_HP, 0x0650)
6948 },
6949 {
6950 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6951 PCI_VENDOR_ID_HP, 0x0651)
6952 },
6953 {
6954 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6955 PCI_VENDOR_ID_HP, 0x0652)
6956 },
6957 {
6958 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6959 PCI_VENDOR_ID_HP, 0x0653)
6960 },
6961 {
6962 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6963 PCI_VENDOR_ID_HP, 0x0654)
6964 },
6965 {
6966 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6967 PCI_VENDOR_ID_HP, 0x0655)
6968 },
6969 {
6970 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6971 PCI_VENDOR_ID_HP, 0x0656)
6972 },
6973 {
6974 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6975 PCI_VENDOR_ID_HP, 0x0657)
6976 },
6977 {
6978 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6979 PCI_VENDOR_ID_HP, 0x0700)
6980 },
6981 {
6982 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6983 PCI_VENDOR_ID_HP, 0x0701)
6c223761
KB
6984 },
6985 {
6986 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6987 PCI_VENDOR_ID_HP, 0x1001)
6988 },
6989 {
6990 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6991 PCI_VENDOR_ID_HP, 0x1100)
6992 },
6993 {
6994 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6995 PCI_VENDOR_ID_HP, 0x1101)
6996 },
6997 {
6998 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6999 PCI_VENDOR_ID_HP, 0x1102)
7000 },
7001 {
7002 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7003 PCI_VENDOR_ID_HP, 0x1150)
7004 },
7005 {
7006 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
7007 PCI_ANY_ID, PCI_ANY_ID)
7008 },
7009 { 0 }
7010};
7011
7012MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
7013
7014static struct pci_driver pqi_pci_driver = {
7015 .name = DRIVER_NAME_SHORT,
7016 .id_table = pqi_pci_id_table,
7017 .probe = pqi_pci_probe,
7018 .remove = pqi_pci_remove,
7019 .shutdown = pqi_shutdown,
061ef06a
KB
7020#if defined(CONFIG_PM)
7021 .suspend = pqi_suspend,
7022 .resume = pqi_resume,
7023#endif
6c223761
KB
7024};
7025
7026static int __init pqi_init(void)
7027{
7028 int rc;
7029
7030 pr_info(DRIVER_NAME "\n");
7031
7032 pqi_sas_transport_template =
7033 sas_attach_transport(&pqi_sas_transport_functions);
7034 if (!pqi_sas_transport_template)
7035 return -ENODEV;
7036
3c50976f
KB
7037 pqi_process_module_params();
7038
6c223761
KB
7039 rc = pci_register_driver(&pqi_pci_driver);
7040 if (rc)
7041 sas_release_transport(pqi_sas_transport_template);
7042
7043 return rc;
7044}
7045
7046static void __exit pqi_cleanup(void)
7047{
7048 pci_unregister_driver(&pqi_pci_driver);
7049 sas_release_transport(pqi_sas_transport_template);
7050}
7051
7052module_init(pqi_init);
7053module_exit(pqi_cleanup);
7054
7055static void __attribute__((unused)) verify_structures(void)
7056{
7057 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7058 sis_host_to_ctrl_doorbell) != 0x20);
7059 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7060 sis_interrupt_mask) != 0x34);
7061 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7062 sis_ctrl_to_host_doorbell) != 0x9c);
7063 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7064 sis_ctrl_to_host_doorbell_clear) != 0xa0);
ff6abb73
KB
7065 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7066 sis_driver_scratch) != 0xb0);
6c223761
KB
7067 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7068 sis_firmware_status) != 0xbc);
7069 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7070 sis_mailbox) != 0x1000);
7071 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
7072 pqi_registers) != 0x4000);
7073
7074 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7075 iu_type) != 0x0);
7076 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7077 iu_length) != 0x2);
7078 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7079 response_queue_id) != 0x4);
7080 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
7081 work_area) != 0x6);
7082 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
7083
7084 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7085 status) != 0x0);
7086 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7087 service_response) != 0x1);
7088 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7089 data_present) != 0x2);
7090 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7091 reserved) != 0x3);
7092 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7093 residual_count) != 0x4);
7094 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7095 data_length) != 0x8);
7096 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7097 reserved1) != 0xa);
7098 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
7099 data) != 0xc);
7100 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
7101
7102 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7103 data_in_result) != 0x0);
7104 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7105 data_out_result) != 0x1);
7106 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7107 reserved) != 0x2);
7108 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7109 status) != 0x5);
7110 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7111 status_qualifier) != 0x6);
7112 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7113 sense_data_length) != 0x8);
7114 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7115 response_data_length) != 0xa);
7116 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7117 data_in_transferred) != 0xc);
7118 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7119 data_out_transferred) != 0x10);
7120 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
7121 data) != 0x14);
7122 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
7123
7124 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7125 signature) != 0x0);
7126 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7127 function_and_status_code) != 0x8);
7128 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7129 max_admin_iq_elements) != 0x10);
7130 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7131 max_admin_oq_elements) != 0x11);
7132 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7133 admin_iq_element_length) != 0x12);
7134 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7135 admin_oq_element_length) != 0x13);
7136 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7137 max_reset_timeout) != 0x14);
7138 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7139 legacy_intx_status) != 0x18);
7140 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7141 legacy_intx_mask_set) != 0x1c);
7142 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7143 legacy_intx_mask_clear) != 0x20);
7144 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7145 device_status) != 0x40);
7146 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7147 admin_iq_pi_offset) != 0x48);
7148 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7149 admin_oq_ci_offset) != 0x50);
7150 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7151 admin_iq_element_array_addr) != 0x58);
7152 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7153 admin_oq_element_array_addr) != 0x60);
7154 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7155 admin_iq_ci_addr) != 0x68);
7156 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7157 admin_oq_pi_addr) != 0x70);
7158 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7159 admin_iq_num_elements) != 0x78);
7160 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7161 admin_oq_num_elements) != 0x79);
7162 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7163 admin_queue_int_msg_num) != 0x7a);
7164 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7165 device_error) != 0x80);
7166 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7167 error_details) != 0x88);
7168 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7169 device_reset) != 0x90);
7170 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
7171 power_action) != 0x94);
7172 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
7173
7174 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7175 header.iu_type) != 0);
7176 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7177 header.iu_length) != 2);
7178 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7179 header.work_area) != 6);
7180 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7181 request_id) != 8);
7182 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7183 function_code) != 10);
7184 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7185 data.report_device_capability.buffer_length) != 44);
7186 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7187 data.report_device_capability.sg_descriptor) != 48);
7188 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7189 data.create_operational_iq.queue_id) != 12);
7190 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7191 data.create_operational_iq.element_array_addr) != 16);
7192 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7193 data.create_operational_iq.ci_addr) != 24);
7194 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7195 data.create_operational_iq.num_elements) != 32);
7196 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7197 data.create_operational_iq.element_length) != 34);
7198 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7199 data.create_operational_iq.queue_protocol) != 36);
7200 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7201 data.create_operational_oq.queue_id) != 12);
7202 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7203 data.create_operational_oq.element_array_addr) != 16);
7204 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7205 data.create_operational_oq.pi_addr) != 24);
7206 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7207 data.create_operational_oq.num_elements) != 32);
7208 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7209 data.create_operational_oq.element_length) != 34);
7210 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7211 data.create_operational_oq.queue_protocol) != 36);
7212 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7213 data.create_operational_oq.int_msg_num) != 40);
7214 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7215 data.create_operational_oq.coalescing_count) != 42);
7216 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7217 data.create_operational_oq.min_coalescing_time) != 44);
7218 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7219 data.create_operational_oq.max_coalescing_time) != 48);
7220 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
7221 data.delete_operational_queue.queue_id) != 12);
7222 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
7223 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7224 data.create_operational_iq) != 64 - 11);
7225 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7226 data.create_operational_oq) != 64 - 11);
7227 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
7228 data.delete_operational_queue) != 64 - 11);
7229
7230 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7231 header.iu_type) != 0);
7232 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7233 header.iu_length) != 2);
7234 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7235 header.work_area) != 6);
7236 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7237 request_id) != 8);
7238 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7239 function_code) != 10);
7240 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7241 status) != 11);
7242 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7243 data.create_operational_iq.status_descriptor) != 12);
7244 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7245 data.create_operational_iq.iq_pi_offset) != 16);
7246 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7247 data.create_operational_oq.status_descriptor) != 12);
7248 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
7249 data.create_operational_oq.oq_ci_offset) != 16);
7250 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
7251
7252 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7253 header.iu_type) != 0);
7254 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7255 header.iu_length) != 2);
7256 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7257 header.response_queue_id) != 4);
7258 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7259 header.work_area) != 6);
7260 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7261 request_id) != 8);
7262 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7263 nexus_id) != 10);
7264 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7265 buffer_length) != 12);
7266 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7267 lun_number) != 16);
7268 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7269 protocol_specific) != 24);
7270 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7271 error_index) != 27);
7272 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7273 cdb) != 32);
7274 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
7275 sg_descriptors) != 64);
7276 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
7277 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
7278
7279 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7280 header.iu_type) != 0);
7281 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7282 header.iu_length) != 2);
7283 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7284 header.response_queue_id) != 4);
7285 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7286 header.work_area) != 6);
7287 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7288 request_id) != 8);
7289 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7290 nexus_id) != 12);
7291 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7292 buffer_length) != 16);
7293 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7294 data_encryption_key_index) != 22);
7295 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7296 encrypt_tweak_lower) != 24);
7297 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7298 encrypt_tweak_upper) != 28);
7299 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7300 cdb) != 32);
7301 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7302 error_index) != 48);
7303 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7304 num_sg_descriptors) != 50);
7305 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7306 cdb_length) != 51);
7307 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7308 lun_number) != 52);
7309 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
7310 sg_descriptors) != 64);
7311 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
7312 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
7313
7314 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7315 header.iu_type) != 0);
7316 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7317 header.iu_length) != 2);
7318 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7319 request_id) != 8);
7320 BUILD_BUG_ON(offsetof(struct pqi_io_response,
7321 error_index) != 10);
7322
7323 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7324 header.iu_type) != 0);
7325 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7326 header.iu_length) != 2);
7327 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7328 header.response_queue_id) != 4);
7329 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7330 request_id) != 8);
7331 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7332 data.report_event_configuration.buffer_length) != 12);
7333 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7334 data.report_event_configuration.sg_descriptors) != 16);
7335 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7336 data.set_event_configuration.global_event_oq_id) != 10);
7337 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7338 data.set_event_configuration.buffer_length) != 12);
7339 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
7340 data.set_event_configuration.sg_descriptors) != 16);
7341
7342 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
7343 max_inbound_iu_length) != 6);
7344 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
7345 max_outbound_iu_length) != 14);
7346 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
7347
7348 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7349 data_length) != 0);
7350 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7351 iq_arbitration_priority_support_bitmask) != 8);
7352 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7353 maximum_aw_a) != 9);
7354 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7355 maximum_aw_b) != 10);
7356 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7357 maximum_aw_c) != 11);
7358 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7359 max_inbound_queues) != 16);
7360 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7361 max_elements_per_iq) != 18);
7362 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7363 max_iq_element_length) != 24);
7364 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7365 min_iq_element_length) != 26);
7366 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7367 max_outbound_queues) != 30);
7368 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7369 max_elements_per_oq) != 32);
7370 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7371 intr_coalescing_time_granularity) != 34);
7372 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7373 max_oq_element_length) != 36);
7374 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7375 min_oq_element_length) != 38);
7376 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
7377 iu_layer_descriptors) != 64);
7378 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
7379
7380 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
7381 event_type) != 0);
7382 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
7383 oq_id) != 2);
7384 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
7385
7386 BUILD_BUG_ON(offsetof(struct pqi_event_config,
7387 num_event_descriptors) != 2);
7388 BUILD_BUG_ON(offsetof(struct pqi_event_config,
7389 descriptors) != 4);
7390
061ef06a
KB
7391 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
7392 ARRAY_SIZE(pqi_supported_event_types));
7393
6c223761
KB
7394 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7395 header.iu_type) != 0);
7396 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7397 header.iu_length) != 2);
7398 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7399 event_type) != 8);
7400 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7401 event_id) != 10);
7402 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7403 additional_event_id) != 12);
7404 BUILD_BUG_ON(offsetof(struct pqi_event_response,
7405 data) != 16);
7406 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
7407
7408 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7409 header.iu_type) != 0);
7410 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7411 header.iu_length) != 2);
7412 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7413 event_type) != 8);
7414 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7415 event_id) != 10);
7416 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7417 additional_event_id) != 12);
7418 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
7419
7420 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7421 header.iu_type) != 0);
7422 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7423 header.iu_length) != 2);
7424 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7425 request_id) != 8);
7426 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7427 nexus_id) != 10);
7428 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7429 lun_number) != 16);
7430 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7431 protocol_specific) != 24);
7432 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7433 outbound_queue_id_to_manage) != 26);
7434 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7435 request_id_to_manage) != 28);
7436 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7437 task_management_function) != 30);
7438 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
7439
7440 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7441 header.iu_type) != 0);
7442 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7443 header.iu_length) != 2);
7444 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7445 request_id) != 8);
7446 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7447 nexus_id) != 10);
7448 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7449 additional_response_info) != 12);
7450 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7451 response_code) != 15);
7452 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
7453
7454 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7455 configured_logical_drive_count) != 0);
7456 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7457 configuration_signature) != 1);
7458 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7459 firmware_version) != 5);
7460 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7461 extended_logical_unit_count) != 154);
7462 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7463 firmware_build_number) != 190);
7464 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7465 controller_mode) != 292);
7466
1be42f46
KB
7467 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7468 phys_bay_in_box) != 115);
7469 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7470 device_type) != 120);
7471 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7472 redundant_path_present_map) != 1736);
7473 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7474 active_path_number) != 1738);
7475 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7476 alternate_paths_phys_connector) != 1739);
7477 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7478 alternate_paths_phys_box_on_port) != 1755);
7479 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7480 current_queue_depth_limit) != 1796);
7481 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
7482
6c223761
KB
7483 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
7484 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
7485 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
7486 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7487 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
7488 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7489 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
7490 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
7491 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7492 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
7493 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
7494 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7495
7496 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
d727a776
KB
7497 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
7498 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
6c223761 7499}