scsi: smartpqi: remove qdepth calculations for logical volumes
[linux-2.6-block.git] / drivers / scsi / smartpqi / smartpqi_init.c
CommitLineData
6c223761
KB
1/*
2 * driver for Microsemi PQI-based storage controllers
b805dbfe 3 * Copyright (c) 2016-2017 Microsemi Corporation
6c223761
KB
4 * Copyright (c) 2016 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/rtc.h>
26#include <linux/bcd.h>
27#include <linux/cciss_ioctl.h>
52198226 28#include <linux/blk-mq-pci.h>
6c223761
KB
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_cmnd.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_eh.h>
33#include <scsi/scsi_transport_sas.h>
34#include <asm/unaligned.h>
35#include "smartpqi.h"
36#include "smartpqi_sis.h"
37
38#if !defined(BUILD_TIMESTAMP)
39#define BUILD_TIMESTAMP
40#endif
41
699bed75 42#define DRIVER_VERSION "0.9.13-370"
6c223761
KB
43#define DRIVER_MAJOR 0
44#define DRIVER_MINOR 9
699bed75
KB
45#define DRIVER_RELEASE 13
46#define DRIVER_REVISION 370
6c223761
KB
47
48#define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")"
49#define DRIVER_NAME_SHORT "smartpqi"
50
e1d213bd
KB
51#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))
52
6c223761
KB
53MODULE_AUTHOR("Microsemi");
54MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
55 DRIVER_VERSION);
56MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
57MODULE_VERSION(DRIVER_VERSION);
58MODULE_LICENSE("GPL");
59
6c223761
KB
60static char *hpe_branded_controller = "HPE Smart Array Controller";
61static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
62
63static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
64static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
65static void pqi_scan_start(struct Scsi_Host *shost);
66static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
67 struct pqi_queue_group *queue_group, enum pqi_io_path path,
68 struct pqi_io_request *io_request);
69static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
70 struct pqi_iu_header *request, unsigned int flags,
71 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
72static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
73 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
74 unsigned int cdb_length, struct pqi_queue_group *queue_group,
75 struct pqi_encryption_info *encryption_info);
76
77/* for flags argument to pqi_submit_raid_request_synchronous() */
78#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
79
80static struct scsi_transport_template *pqi_sas_transport_template;
81
82static atomic_t pqi_controller_count = ATOMIC_INIT(0);
83
6a50d6ad
KB
84static unsigned int pqi_supported_event_types[] = {
85 PQI_EVENT_TYPE_HOTPLUG,
86 PQI_EVENT_TYPE_HARDWARE,
87 PQI_EVENT_TYPE_PHYSICAL_DEVICE,
88 PQI_EVENT_TYPE_LOGICAL_DEVICE,
89 PQI_EVENT_TYPE_AIO_STATE_CHANGE,
90 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
91};
92
6c223761
KB
93static int pqi_disable_device_id_wildcards;
94module_param_named(disable_device_id_wildcards,
cbe0c7b1 95 pqi_disable_device_id_wildcards, int, 0644);
6c223761
KB
96MODULE_PARM_DESC(disable_device_id_wildcards,
97 "Disable device ID wildcards.");
98
99static char *raid_levels[] = {
100 "RAID-0",
101 "RAID-4",
102 "RAID-1(1+0)",
103 "RAID-5",
104 "RAID-5+1",
105 "RAID-ADG",
106 "RAID-1(ADM)",
107};
108
109static char *pqi_raid_level_to_string(u8 raid_level)
110{
111 if (raid_level < ARRAY_SIZE(raid_levels))
112 return raid_levels[raid_level];
113
114 return "";
115}
116
117#define SA_RAID_0 0
118#define SA_RAID_4 1
119#define SA_RAID_1 2 /* also used for RAID 10 */
120#define SA_RAID_5 3 /* also used for RAID 50 */
121#define SA_RAID_51 4
122#define SA_RAID_6 5 /* also used for RAID 60 */
123#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
124#define SA_RAID_MAX SA_RAID_ADM
125#define SA_RAID_UNKNOWN 0xff
126
127static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
128{
7561a7e4 129 pqi_prep_for_scsi_done(scmd);
6c223761
KB
130 scmd->scsi_done(scmd);
131}
132
133static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
134{
135 return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
136}
137
138static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
139{
140 void *hostdata = shost_priv(shost);
141
142 return *((struct pqi_ctrl_info **)hostdata);
143}
144
145static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
146{
147 return !device->is_physical_device;
148}
149
bd10cf0b
KB
150static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
151{
152 return scsi3addr[2] != 0;
153}
154
6c223761
KB
155static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
156{
157 return !ctrl_info->controller_online;
158}
159
160static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
161{
162 if (ctrl_info->controller_online)
163 if (!sis_is_firmware_running(ctrl_info))
164 pqi_take_ctrl_offline(ctrl_info);
165}
166
167static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
168{
169 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
170}
171
ff6abb73
KB
172static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
173 struct pqi_ctrl_info *ctrl_info)
174{
175 return sis_read_driver_scratch(ctrl_info);
176}
177
178static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
179 enum pqi_ctrl_mode mode)
180{
181 sis_write_driver_scratch(ctrl_info, mode);
182}
183
7561a7e4
KB
184#define PQI_RESCAN_WORK_INTERVAL (10 * HZ)
185static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
186{
187 ctrl_info->block_requests = true;
188 scsi_block_requests(ctrl_info->scsi_host);
189}
190
191static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
192{
193 ctrl_info->block_requests = false;
194 wake_up_all(&ctrl_info->block_requests_wait);
195 scsi_unblock_requests(ctrl_info->scsi_host);
196}
197
198static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
199{
200 return ctrl_info->block_requests;
201}
202
203static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
204 unsigned long timeout_msecs)
205{
206 unsigned long remaining_msecs;
207
208 if (!pqi_ctrl_blocked(ctrl_info))
209 return timeout_msecs;
210
211 atomic_inc(&ctrl_info->num_blocked_threads);
212
213 if (timeout_msecs == NO_TIMEOUT) {
214 wait_event(ctrl_info->block_requests_wait,
215 !pqi_ctrl_blocked(ctrl_info));
216 remaining_msecs = timeout_msecs;
217 } else {
218 unsigned long remaining_jiffies;
219
220 remaining_jiffies =
221 wait_event_timeout(ctrl_info->block_requests_wait,
222 !pqi_ctrl_blocked(ctrl_info),
223 msecs_to_jiffies(timeout_msecs));
224 remaining_msecs = jiffies_to_msecs(remaining_jiffies);
225 }
226
227 atomic_dec(&ctrl_info->num_blocked_threads);
228
229 return remaining_msecs;
230}
231
232static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
233{
234 atomic_inc(&ctrl_info->num_busy_threads);
235}
236
237static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
238{
239 atomic_dec(&ctrl_info->num_busy_threads);
240}
241
242static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
243{
244 while (atomic_read(&ctrl_info->num_busy_threads) >
245 atomic_read(&ctrl_info->num_blocked_threads))
246 usleep_range(1000, 2000);
247}
248
249static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
250{
251 device->in_reset = true;
252}
253
254static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
255{
256 device->in_reset = false;
257}
258
259static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
260{
261 return device->in_reset;
262}
6c223761
KB
263
264static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
265{
266 schedule_delayed_work(&ctrl_info->rescan_work,
267 PQI_RESCAN_WORK_INTERVAL);
268}
269
061ef06a
KB
270static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
271{
272 cancel_delayed_work_sync(&ctrl_info->rescan_work);
273}
274
98f87667
KB
275static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
276{
277 if (!ctrl_info->heartbeat_counter)
278 return 0;
279
280 return readl(ctrl_info->heartbeat_counter);
281}
282
6c223761
KB
283static int pqi_map_single(struct pci_dev *pci_dev,
284 struct pqi_sg_descriptor *sg_descriptor, void *buffer,
285 size_t buffer_length, int data_direction)
286{
287 dma_addr_t bus_address;
288
289 if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
290 return 0;
291
292 bus_address = pci_map_single(pci_dev, buffer, buffer_length,
293 data_direction);
294 if (pci_dma_mapping_error(pci_dev, bus_address))
295 return -ENOMEM;
296
297 put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
298 put_unaligned_le32(buffer_length, &sg_descriptor->length);
299 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
300
301 return 0;
302}
303
304static void pqi_pci_unmap(struct pci_dev *pci_dev,
305 struct pqi_sg_descriptor *descriptors, int num_descriptors,
306 int data_direction)
307{
308 int i;
309
310 if (data_direction == PCI_DMA_NONE)
311 return;
312
313 for (i = 0; i < num_descriptors; i++)
314 pci_unmap_single(pci_dev,
315 (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
316 get_unaligned_le32(&descriptors[i].length),
317 data_direction);
318}
319
320static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
321 struct pqi_raid_path_request *request, u8 cmd,
322 u8 *scsi3addr, void *buffer, size_t buffer_length,
323 u16 vpd_page, int *pci_direction)
324{
325 u8 *cdb;
326 int pci_dir;
327
328 memset(request, 0, sizeof(*request));
329
330 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
331 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
332 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
333 &request->header.iu_length);
334 put_unaligned_le32(buffer_length, &request->buffer_length);
335 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
336 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
337 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
338
339 cdb = request->cdb;
340
341 switch (cmd) {
342 case INQUIRY:
343 request->data_direction = SOP_READ_FLAG;
344 cdb[0] = INQUIRY;
345 if (vpd_page & VPD_PAGE) {
346 cdb[1] = 0x1;
347 cdb[2] = (u8)vpd_page;
348 }
349 cdb[4] = (u8)buffer_length;
350 break;
351 case CISS_REPORT_LOG:
352 case CISS_REPORT_PHYS:
353 request->data_direction = SOP_READ_FLAG;
354 cdb[0] = cmd;
355 if (cmd == CISS_REPORT_PHYS)
356 cdb[1] = CISS_REPORT_PHYS_EXTENDED;
357 else
358 cdb[1] = CISS_REPORT_LOG_EXTENDED;
359 put_unaligned_be32(buffer_length, &cdb[6]);
360 break;
361 case CISS_GET_RAID_MAP:
362 request->data_direction = SOP_READ_FLAG;
363 cdb[0] = CISS_READ;
364 cdb[1] = CISS_GET_RAID_MAP;
365 put_unaligned_be32(buffer_length, &cdb[6]);
366 break;
367 case SA_CACHE_FLUSH:
368 request->data_direction = SOP_WRITE_FLAG;
369 cdb[0] = BMIC_WRITE;
370 cdb[6] = BMIC_CACHE_FLUSH;
371 put_unaligned_be16(buffer_length, &cdb[7]);
372 break;
373 case BMIC_IDENTIFY_CONTROLLER:
374 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
375 request->data_direction = SOP_READ_FLAG;
376 cdb[0] = BMIC_READ;
377 cdb[6] = cmd;
378 put_unaligned_be16(buffer_length, &cdb[7]);
379 break;
380 case BMIC_WRITE_HOST_WELLNESS:
381 request->data_direction = SOP_WRITE_FLAG;
382 cdb[0] = BMIC_WRITE;
383 cdb[6] = cmd;
384 put_unaligned_be16(buffer_length, &cdb[7]);
385 break;
386 default:
387 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
388 cmd);
6c223761
KB
389 break;
390 }
391
392 switch (request->data_direction) {
393 case SOP_READ_FLAG:
394 pci_dir = PCI_DMA_FROMDEVICE;
395 break;
396 case SOP_WRITE_FLAG:
397 pci_dir = PCI_DMA_TODEVICE;
398 break;
399 case SOP_NO_DIRECTION_FLAG:
400 pci_dir = PCI_DMA_NONE;
401 break;
402 default:
403 pci_dir = PCI_DMA_BIDIRECTIONAL;
404 break;
405 }
406
407 *pci_direction = pci_dir;
408
409 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
410 buffer, buffer_length, pci_dir);
411}
412
413static struct pqi_io_request *pqi_alloc_io_request(
414 struct pqi_ctrl_info *ctrl_info)
415{
416 struct pqi_io_request *io_request;
417 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */
418
419 while (1) {
420 io_request = &ctrl_info->io_request_pool[i];
421 if (atomic_inc_return(&io_request->refcount) == 1)
422 break;
423 atomic_dec(&io_request->refcount);
424 i = (i + 1) % ctrl_info->max_io_slots;
425 }
426
427 /* benignly racy */
428 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
429
430 io_request->scmd = NULL;
431 io_request->status = 0;
432 io_request->error_info = NULL;
433
434 return io_request;
435}
436
437static void pqi_free_io_request(struct pqi_io_request *io_request)
438{
439 atomic_dec(&io_request->refcount);
440}
441
442static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
443 struct bmic_identify_controller *buffer)
444{
445 int rc;
446 int pci_direction;
447 struct pqi_raid_path_request request;
448
449 rc = pqi_build_raid_path_request(ctrl_info, &request,
450 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
451 sizeof(*buffer), 0, &pci_direction);
452 if (rc)
453 return rc;
454
455 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
456 NULL, NO_TIMEOUT);
457
458 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
459 pci_direction);
460
461 return rc;
462}
463
464static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
465 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
466{
467 int rc;
468 int pci_direction;
469 struct pqi_raid_path_request request;
470
471 rc = pqi_build_raid_path_request(ctrl_info, &request,
472 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
473 &pci_direction);
474 if (rc)
475 return rc;
476
477 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
478 NULL, NO_TIMEOUT);
479
480 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
481 pci_direction);
482
483 return rc;
484}
485
486static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
487 struct pqi_scsi_dev *device,
488 struct bmic_identify_physical_device *buffer,
489 size_t buffer_length)
490{
491 int rc;
492 int pci_direction;
493 u16 bmic_device_index;
494 struct pqi_raid_path_request request;
495
496 rc = pqi_build_raid_path_request(ctrl_info, &request,
497 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
498 buffer_length, 0, &pci_direction);
499 if (rc)
500 return rc;
501
502 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
503 request.cdb[2] = (u8)bmic_device_index;
504 request.cdb[9] = (u8)(bmic_device_index >> 8);
505
506 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
507 0, NULL, NO_TIMEOUT);
508
509 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
510 pci_direction);
511
512 return rc;
513}
514
515#define SA_CACHE_FLUSH_BUFFER_LENGTH 4
6c223761
KB
516
517static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
518{
519 int rc;
520 struct pqi_raid_path_request request;
521 int pci_direction;
522 u8 *buffer;
523
524 /*
525 * Don't bother trying to flush the cache if the controller is
526 * locked up.
527 */
528 if (pqi_ctrl_offline(ctrl_info))
529 return -ENXIO;
530
531 buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
532 if (!buffer)
533 return -ENOMEM;
534
535 rc = pqi_build_raid_path_request(ctrl_info, &request,
536 SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
537 SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
538 if (rc)
539 goto out;
540
541 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
d48f8fad 542 0, NULL, NO_TIMEOUT);
6c223761
KB
543
544 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
545 pci_direction);
546
547out:
548 kfree(buffer);
549
550 return rc;
551}
552
553static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
554 void *buffer, size_t buffer_length)
555{
556 int rc;
557 struct pqi_raid_path_request request;
558 int pci_direction;
559
560 rc = pqi_build_raid_path_request(ctrl_info, &request,
561 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
562 buffer_length, 0, &pci_direction);
563 if (rc)
564 return rc;
565
566 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
567 0, NULL, NO_TIMEOUT);
568
569 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
570 pci_direction);
571
572 return rc;
573}
574
575#pragma pack(1)
576
577struct bmic_host_wellness_driver_version {
578 u8 start_tag[4];
579 u8 driver_version_tag[2];
580 __le16 driver_version_length;
581 char driver_version[32];
582 u8 end_tag[2];
583};
584
585#pragma pack()
586
587static int pqi_write_driver_version_to_host_wellness(
588 struct pqi_ctrl_info *ctrl_info)
589{
590 int rc;
591 struct bmic_host_wellness_driver_version *buffer;
592 size_t buffer_length;
593
594 buffer_length = sizeof(*buffer);
595
596 buffer = kmalloc(buffer_length, GFP_KERNEL);
597 if (!buffer)
598 return -ENOMEM;
599
600 buffer->start_tag[0] = '<';
601 buffer->start_tag[1] = 'H';
602 buffer->start_tag[2] = 'W';
603 buffer->start_tag[3] = '>';
604 buffer->driver_version_tag[0] = 'D';
605 buffer->driver_version_tag[1] = 'V';
606 put_unaligned_le16(sizeof(buffer->driver_version),
607 &buffer->driver_version_length);
061ef06a 608 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
6c223761
KB
609 sizeof(buffer->driver_version) - 1);
610 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
611 buffer->end_tag[0] = 'Z';
612 buffer->end_tag[1] = 'Z';
613
614 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
615
616 kfree(buffer);
617
618 return rc;
619}
620
621#pragma pack(1)
622
623struct bmic_host_wellness_time {
624 u8 start_tag[4];
625 u8 time_tag[2];
626 __le16 time_length;
627 u8 time[8];
628 u8 dont_write_tag[2];
629 u8 end_tag[2];
630};
631
632#pragma pack()
633
634static int pqi_write_current_time_to_host_wellness(
635 struct pqi_ctrl_info *ctrl_info)
636{
637 int rc;
638 struct bmic_host_wellness_time *buffer;
639 size_t buffer_length;
640 time64_t local_time;
641 unsigned int year;
ed10858e 642 struct tm tm;
6c223761
KB
643
644 buffer_length = sizeof(*buffer);
645
646 buffer = kmalloc(buffer_length, GFP_KERNEL);
647 if (!buffer)
648 return -ENOMEM;
649
650 buffer->start_tag[0] = '<';
651 buffer->start_tag[1] = 'H';
652 buffer->start_tag[2] = 'W';
653 buffer->start_tag[3] = '>';
654 buffer->time_tag[0] = 'T';
655 buffer->time_tag[1] = 'D';
656 put_unaligned_le16(sizeof(buffer->time),
657 &buffer->time_length);
658
ed10858e
AB
659 local_time = ktime_get_real_seconds();
660 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
6c223761
KB
661 year = tm.tm_year + 1900;
662
663 buffer->time[0] = bin2bcd(tm.tm_hour);
664 buffer->time[1] = bin2bcd(tm.tm_min);
665 buffer->time[2] = bin2bcd(tm.tm_sec);
666 buffer->time[3] = 0;
667 buffer->time[4] = bin2bcd(tm.tm_mon + 1);
668 buffer->time[5] = bin2bcd(tm.tm_mday);
669 buffer->time[6] = bin2bcd(year / 100);
670 buffer->time[7] = bin2bcd(year % 100);
671
672 buffer->dont_write_tag[0] = 'D';
673 buffer->dont_write_tag[1] = 'W';
674 buffer->end_tag[0] = 'Z';
675 buffer->end_tag[1] = 'Z';
676
677 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
678
679 kfree(buffer);
680
681 return rc;
682}
683
684#define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ)
685
686static void pqi_update_time_worker(struct work_struct *work)
687{
688 int rc;
689 struct pqi_ctrl_info *ctrl_info;
690
691 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
692 update_time_work);
693
6c223761
KB
694 rc = pqi_write_current_time_to_host_wellness(ctrl_info);
695 if (rc)
696 dev_warn(&ctrl_info->pci_dev->dev,
697 "error updating time on controller\n");
698
699 schedule_delayed_work(&ctrl_info->update_time_work,
700 PQI_UPDATE_TIME_WORK_INTERVAL);
701}
702
703static inline void pqi_schedule_update_time_worker(
4fbebf1a 704 struct pqi_ctrl_info *ctrl_info)
6c223761 705{
061ef06a
KB
706 if (ctrl_info->update_time_worker_scheduled)
707 return;
708
4fbebf1a 709 schedule_delayed_work(&ctrl_info->update_time_work, 0);
061ef06a
KB
710 ctrl_info->update_time_worker_scheduled = true;
711}
712
713static inline void pqi_cancel_update_time_worker(
714 struct pqi_ctrl_info *ctrl_info)
715{
716 if (!ctrl_info->update_time_worker_scheduled)
717 return;
718
719 cancel_delayed_work_sync(&ctrl_info->update_time_work);
720 ctrl_info->update_time_worker_scheduled = false;
6c223761
KB
721}
722
723static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
724 void *buffer, size_t buffer_length)
725{
726 int rc;
727 int pci_direction;
728 struct pqi_raid_path_request request;
729
730 rc = pqi_build_raid_path_request(ctrl_info, &request,
731 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
732 if (rc)
733 return rc;
734
735 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
736 NULL, NO_TIMEOUT);
737
738 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
739 pci_direction);
740
741 return rc;
742}
743
744static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
745 void **buffer)
746{
747 int rc;
748 size_t lun_list_length;
749 size_t lun_data_length;
750 size_t new_lun_list_length;
751 void *lun_data = NULL;
752 struct report_lun_header *report_lun_header;
753
754 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
755 if (!report_lun_header) {
756 rc = -ENOMEM;
757 goto out;
758 }
759
760 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
761 sizeof(*report_lun_header));
762 if (rc)
763 goto out;
764
765 lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
766
767again:
768 lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
769
770 lun_data = kmalloc(lun_data_length, GFP_KERNEL);
771 if (!lun_data) {
772 rc = -ENOMEM;
773 goto out;
774 }
775
776 if (lun_list_length == 0) {
777 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
778 goto out;
779 }
780
781 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
782 if (rc)
783 goto out;
784
785 new_lun_list_length = get_unaligned_be32(
786 &((struct report_lun_header *)lun_data)->list_length);
787
788 if (new_lun_list_length > lun_list_length) {
789 lun_list_length = new_lun_list_length;
790 kfree(lun_data);
791 goto again;
792 }
793
794out:
795 kfree(report_lun_header);
796
797 if (rc) {
798 kfree(lun_data);
799 lun_data = NULL;
800 }
801
802 *buffer = lun_data;
803
804 return rc;
805}
806
807static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
808 void **buffer)
809{
810 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
811 buffer);
812}
813
814static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
815 void **buffer)
816{
817 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
818}
819
820static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
821 struct report_phys_lun_extended **physdev_list,
822 struct report_log_lun_extended **logdev_list)
823{
824 int rc;
825 size_t logdev_list_length;
826 size_t logdev_data_length;
827 struct report_log_lun_extended *internal_logdev_list;
828 struct report_log_lun_extended *logdev_data;
829 struct report_lun_header report_lun_header;
830
831 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
832 if (rc)
833 dev_err(&ctrl_info->pci_dev->dev,
834 "report physical LUNs failed\n");
835
836 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
837 if (rc)
838 dev_err(&ctrl_info->pci_dev->dev,
839 "report logical LUNs failed\n");
840
841 /*
842 * Tack the controller itself onto the end of the logical device list.
843 */
844
845 logdev_data = *logdev_list;
846
847 if (logdev_data) {
848 logdev_list_length =
849 get_unaligned_be32(&logdev_data->header.list_length);
850 } else {
851 memset(&report_lun_header, 0, sizeof(report_lun_header));
852 logdev_data =
853 (struct report_log_lun_extended *)&report_lun_header;
854 logdev_list_length = 0;
855 }
856
857 logdev_data_length = sizeof(struct report_lun_header) +
858 logdev_list_length;
859
860 internal_logdev_list = kmalloc(logdev_data_length +
861 sizeof(struct report_log_lun_extended), GFP_KERNEL);
862 if (!internal_logdev_list) {
863 kfree(*logdev_list);
864 *logdev_list = NULL;
865 return -ENOMEM;
866 }
867
868 memcpy(internal_logdev_list, logdev_data, logdev_data_length);
869 memset((u8 *)internal_logdev_list + logdev_data_length, 0,
870 sizeof(struct report_log_lun_extended_entry));
871 put_unaligned_be32(logdev_list_length +
872 sizeof(struct report_log_lun_extended_entry),
873 &internal_logdev_list->header.list_length);
874
875 kfree(*logdev_list);
876 *logdev_list = internal_logdev_list;
877
878 return 0;
879}
880
881static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
882 int bus, int target, int lun)
883{
884 device->bus = bus;
885 device->target = target;
886 device->lun = lun;
887}
888
889static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
890{
891 u8 *scsi3addr;
892 u32 lunid;
bd10cf0b
KB
893 int bus;
894 int target;
895 int lun;
6c223761
KB
896
897 scsi3addr = device->scsi3addr;
898 lunid = get_unaligned_le32(scsi3addr);
899
900 if (pqi_is_hba_lunid(scsi3addr)) {
901 /* The specified device is the controller. */
902 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
903 device->target_lun_valid = true;
904 return;
905 }
906
907 if (pqi_is_logical_device(device)) {
bd10cf0b
KB
908 if (device->is_external_raid_device) {
909 bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
910 target = (lunid >> 16) & 0x3fff;
911 lun = lunid & 0xff;
912 } else {
913 bus = PQI_RAID_VOLUME_BUS;
914 target = 0;
915 lun = lunid & 0x3fff;
916 }
917 pqi_set_bus_target_lun(device, bus, target, lun);
6c223761
KB
918 device->target_lun_valid = true;
919 return;
920 }
921
922 /*
923 * Defer target and LUN assignment for non-controller physical devices
924 * because the SAS transport layer will make these assignments later.
925 */
926 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
927}
928
929static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
930 struct pqi_scsi_dev *device)
931{
932 int rc;
933 u8 raid_level;
934 u8 *buffer;
935
936 raid_level = SA_RAID_UNKNOWN;
937
938 buffer = kmalloc(64, GFP_KERNEL);
939 if (buffer) {
940 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
941 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
942 if (rc == 0) {
943 raid_level = buffer[8];
944 if (raid_level > SA_RAID_MAX)
945 raid_level = SA_RAID_UNKNOWN;
946 }
947 kfree(buffer);
948 }
949
950 device->raid_level = raid_level;
951}
952
953static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
954 struct pqi_scsi_dev *device, struct raid_map *raid_map)
955{
956 char *err_msg;
957 u32 raid_map_size;
958 u32 r5or6_blocks_per_row;
959 unsigned int num_phys_disks;
960 unsigned int num_raid_map_entries;
961
962 raid_map_size = get_unaligned_le32(&raid_map->structure_size);
963
964 if (raid_map_size < offsetof(struct raid_map, disk_data)) {
965 err_msg = "RAID map too small";
966 goto bad_raid_map;
967 }
968
969 if (raid_map_size > sizeof(*raid_map)) {
970 err_msg = "RAID map too large";
971 goto bad_raid_map;
972 }
973
974 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
975 (get_unaligned_le16(&raid_map->data_disks_per_row) +
976 get_unaligned_le16(&raid_map->metadata_disks_per_row));
977 num_raid_map_entries = num_phys_disks *
978 get_unaligned_le16(&raid_map->row_cnt);
979
980 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
981 err_msg = "invalid number of map entries in RAID map";
982 goto bad_raid_map;
983 }
984
985 if (device->raid_level == SA_RAID_1) {
986 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
987 err_msg = "invalid RAID-1 map";
988 goto bad_raid_map;
989 }
990 } else if (device->raid_level == SA_RAID_ADM) {
991 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
992 err_msg = "invalid RAID-1(ADM) map";
993 goto bad_raid_map;
994 }
995 } else if ((device->raid_level == SA_RAID_5 ||
996 device->raid_level == SA_RAID_6) &&
997 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
998 /* RAID 50/60 */
999 r5or6_blocks_per_row =
1000 get_unaligned_le16(&raid_map->strip_size) *
1001 get_unaligned_le16(&raid_map->data_disks_per_row);
1002 if (r5or6_blocks_per_row == 0) {
1003 err_msg = "invalid RAID-5 or RAID-6 map";
1004 goto bad_raid_map;
1005 }
1006 }
1007
1008 return 0;
1009
1010bad_raid_map:
d87d5474
KB
1011 dev_warn(&ctrl_info->pci_dev->dev,
1012 "scsi %d:%d:%d:%d %s\n",
1013 ctrl_info->scsi_host->host_no,
1014 device->bus, device->target, device->lun, err_msg);
6c223761
KB
1015
1016 return -EINVAL;
1017}
1018
1019static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1020 struct pqi_scsi_dev *device)
1021{
1022 int rc;
1023 int pci_direction;
1024 struct pqi_raid_path_request request;
1025 struct raid_map *raid_map;
1026
1027 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1028 if (!raid_map)
1029 return -ENOMEM;
1030
1031 rc = pqi_build_raid_path_request(ctrl_info, &request,
1032 CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
1033 sizeof(*raid_map), 0, &pci_direction);
1034 if (rc)
1035 goto error;
1036
1037 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
1038 NULL, NO_TIMEOUT);
1039
1040 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
1041 pci_direction);
1042
1043 if (rc)
1044 goto error;
1045
1046 rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1047 if (rc)
1048 goto error;
1049
1050 device->raid_map = raid_map;
1051
1052 return 0;
1053
1054error:
1055 kfree(raid_map);
1056
1057 return rc;
1058}
1059
1060static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
1061 struct pqi_scsi_dev *device)
1062{
1063 int rc;
1064 u8 *buffer;
1065 u8 offload_status;
1066
1067 buffer = kmalloc(64, GFP_KERNEL);
1068 if (!buffer)
1069 return;
1070
1071 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1072 VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
1073 if (rc)
1074 goto out;
1075
1076#define OFFLOAD_STATUS_BYTE 4
1077#define OFFLOAD_CONFIGURED_BIT 0x1
1078#define OFFLOAD_ENABLED_BIT 0x2
1079
1080 offload_status = buffer[OFFLOAD_STATUS_BYTE];
1081 device->offload_configured =
1082 !!(offload_status & OFFLOAD_CONFIGURED_BIT);
1083 if (device->offload_configured) {
1084 device->offload_enabled_pending =
1085 !!(offload_status & OFFLOAD_ENABLED_BIT);
1086 if (pqi_get_raid_map(ctrl_info, device))
1087 device->offload_enabled_pending = false;
1088 }
1089
1090out:
1091 kfree(buffer);
1092}
1093
1094/*
1095 * Use vendor-specific VPD to determine online/offline status of a volume.
1096 */
1097
1098static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1099 struct pqi_scsi_dev *device)
1100{
1101 int rc;
1102 size_t page_length;
1103 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1104 bool volume_offline = true;
1105 u32 volume_flags;
1106 struct ciss_vpd_logical_volume_status *vpd;
1107
1108 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1109 if (!vpd)
1110 goto no_buffer;
1111
1112 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1113 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1114 if (rc)
1115 goto out;
1116
1117 page_length = offsetof(struct ciss_vpd_logical_volume_status,
1118 volume_status) + vpd->page_length;
1119 if (page_length < sizeof(*vpd))
1120 goto out;
1121
1122 volume_status = vpd->volume_status;
1123 volume_flags = get_unaligned_be32(&vpd->flags);
1124 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1125
1126out:
1127 kfree(vpd);
1128no_buffer:
1129 device->volume_status = volume_status;
1130 device->volume_offline = volume_offline;
1131}
1132
1133static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1134 struct pqi_scsi_dev *device)
1135{
1136 int rc;
1137 u8 *buffer;
1138
1139 buffer = kmalloc(64, GFP_KERNEL);
1140 if (!buffer)
1141 return -ENOMEM;
1142
1143 /* Send an inquiry to the device to see what it is. */
1144 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1145 if (rc)
1146 goto out;
1147
1148 scsi_sanitize_inquiry_string(&buffer[8], 8);
1149 scsi_sanitize_inquiry_string(&buffer[16], 16);
1150
1151 device->devtype = buffer[0] & 0x1f;
cbe0c7b1
KB
1152 memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1153 memcpy(device->model, &buffer[16], sizeof(device->model));
6c223761
KB
1154
1155 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
bd10cf0b
KB
1156 if (device->is_external_raid_device) {
1157 device->raid_level = SA_RAID_UNKNOWN;
1158 device->volume_status = CISS_LV_OK;
1159 device->volume_offline = false;
1160 } else {
1161 pqi_get_raid_level(ctrl_info, device);
1162 pqi_get_offload_status(ctrl_info, device);
1163 pqi_get_volume_status(ctrl_info, device);
1164 }
6c223761
KB
1165 }
1166
1167out:
1168 kfree(buffer);
1169
1170 return rc;
1171}
1172
1173static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
1174 struct pqi_scsi_dev *device,
1175 struct bmic_identify_physical_device *id_phys)
1176{
1177 int rc;
1178
1179 memset(id_phys, 0, sizeof(*id_phys));
1180
1181 rc = pqi_identify_physical_device(ctrl_info, device,
1182 id_phys, sizeof(*id_phys));
1183 if (rc) {
1184 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1185 return;
1186 }
1187
1188 device->queue_depth =
1189 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1190 device->device_type = id_phys->device_type;
1191 device->active_path_index = id_phys->active_path_number;
1192 device->path_map = id_phys->redundant_path_present_map;
1193 memcpy(&device->box,
1194 &id_phys->alternate_paths_phys_box_on_port,
1195 sizeof(device->box));
1196 memcpy(&device->phys_connector,
1197 &id_phys->alternate_paths_phys_connector,
1198 sizeof(device->phys_connector));
1199 device->bay = id_phys->phys_bay_in_box;
1200}
1201
1202static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1203 struct pqi_scsi_dev *device)
1204{
1205 char *status;
1206 static const char unknown_state_str[] =
1207 "Volume is in an unknown state (%u)";
1208 char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1209
1210 switch (device->volume_status) {
1211 case CISS_LV_OK:
1212 status = "Volume online";
1213 break;
1214 case CISS_LV_FAILED:
1215 status = "Volume failed";
1216 break;
1217 case CISS_LV_NOT_CONFIGURED:
1218 status = "Volume not configured";
1219 break;
1220 case CISS_LV_DEGRADED:
1221 status = "Volume degraded";
1222 break;
1223 case CISS_LV_READY_FOR_RECOVERY:
1224 status = "Volume ready for recovery operation";
1225 break;
1226 case CISS_LV_UNDERGOING_RECOVERY:
1227 status = "Volume undergoing recovery";
1228 break;
1229 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1230 status = "Wrong physical drive was replaced";
1231 break;
1232 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1233 status = "A physical drive not properly connected";
1234 break;
1235 case CISS_LV_HARDWARE_OVERHEATING:
1236 status = "Hardware is overheating";
1237 break;
1238 case CISS_LV_HARDWARE_HAS_OVERHEATED:
1239 status = "Hardware has overheated";
1240 break;
1241 case CISS_LV_UNDERGOING_EXPANSION:
1242 status = "Volume undergoing expansion";
1243 break;
1244 case CISS_LV_NOT_AVAILABLE:
1245 status = "Volume waiting for transforming volume";
1246 break;
1247 case CISS_LV_QUEUED_FOR_EXPANSION:
1248 status = "Volume queued for expansion";
1249 break;
1250 case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1251 status = "Volume disabled due to SCSI ID conflict";
1252 break;
1253 case CISS_LV_EJECTED:
1254 status = "Volume has been ejected";
1255 break;
1256 case CISS_LV_UNDERGOING_ERASE:
1257 status = "Volume undergoing background erase";
1258 break;
1259 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1260 status = "Volume ready for predictive spare rebuild";
1261 break;
1262 case CISS_LV_UNDERGOING_RPI:
1263 status = "Volume undergoing rapid parity initialization";
1264 break;
1265 case CISS_LV_PENDING_RPI:
1266 status = "Volume queued for rapid parity initialization";
1267 break;
1268 case CISS_LV_ENCRYPTED_NO_KEY:
1269 status = "Encrypted volume inaccessible - key not present";
1270 break;
1271 case CISS_LV_UNDERGOING_ENCRYPTION:
1272 status = "Volume undergoing encryption process";
1273 break;
1274 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1275 status = "Volume undergoing encryption re-keying process";
1276 break;
1277 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
d87d5474 1278 status = "Volume encrypted but encryption is disabled";
6c223761
KB
1279 break;
1280 case CISS_LV_PENDING_ENCRYPTION:
1281 status = "Volume pending migration to encrypted state";
1282 break;
1283 case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1284 status = "Volume pending encryption rekeying";
1285 break;
1286 case CISS_LV_NOT_SUPPORTED:
1287 status = "Volume not supported on this controller";
1288 break;
1289 case CISS_LV_STATUS_UNAVAILABLE:
1290 status = "Volume status not available";
1291 break;
1292 default:
1293 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1294 unknown_state_str, device->volume_status);
1295 status = unknown_state_buffer;
1296 break;
1297 }
1298
1299 dev_info(&ctrl_info->pci_dev->dev,
1300 "scsi %d:%d:%d:%d %s\n",
1301 ctrl_info->scsi_host->host_no,
1302 device->bus, device->target, device->lun, status);
1303}
1304
6c223761
KB
1305static void pqi_rescan_worker(struct work_struct *work)
1306{
1307 struct pqi_ctrl_info *ctrl_info;
1308
1309 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1310 rescan_work);
1311
1312 pqi_scan_scsi_devices(ctrl_info);
1313}
1314
1315static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1316 struct pqi_scsi_dev *device)
1317{
1318 int rc;
1319
1320 if (pqi_is_logical_device(device))
1321 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1322 device->target, device->lun);
1323 else
1324 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1325
1326 return rc;
1327}
1328
1329static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1330 struct pqi_scsi_dev *device)
1331{
1332 if (pqi_is_logical_device(device))
1333 scsi_remove_device(device->sdev);
1334 else
1335 pqi_remove_sas_device(device);
1336}
1337
1338/* Assumes the SCSI device list lock is held. */
1339
1340static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1341 int bus, int target, int lun)
1342{
1343 struct pqi_scsi_dev *device;
1344
1345 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1346 scsi_device_list_entry)
1347 if (device->bus == bus && device->target == target &&
1348 device->lun == lun)
1349 return device;
1350
1351 return NULL;
1352}
1353
1354static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1355 struct pqi_scsi_dev *dev2)
1356{
1357 if (dev1->is_physical_device != dev2->is_physical_device)
1358 return false;
1359
1360 if (dev1->is_physical_device)
1361 return dev1->wwid == dev2->wwid;
1362
1363 return memcmp(dev1->volume_id, dev2->volume_id,
1364 sizeof(dev1->volume_id)) == 0;
1365}
1366
1367enum pqi_find_result {
1368 DEVICE_NOT_FOUND,
1369 DEVICE_CHANGED,
1370 DEVICE_SAME,
1371};
1372
1373static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1374 struct pqi_scsi_dev *device_to_find,
1375 struct pqi_scsi_dev **matching_device)
1376{
1377 struct pqi_scsi_dev *device;
1378
1379 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1380 scsi_device_list_entry) {
1381 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1382 device->scsi3addr)) {
1383 *matching_device = device;
1384 if (pqi_device_equal(device_to_find, device)) {
1385 if (device_to_find->volume_offline)
1386 return DEVICE_CHANGED;
1387 return DEVICE_SAME;
1388 }
1389 return DEVICE_CHANGED;
1390 }
1391 }
1392
1393 return DEVICE_NOT_FOUND;
1394}
1395
1396static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1397 char *action, struct pqi_scsi_dev *device)
1398{
1399 dev_info(&ctrl_info->pci_dev->dev,
94086f5b 1400 "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c qd=%d\n",
6c223761
KB
1401 action,
1402 ctrl_info->scsi_host->host_no,
1403 device->bus,
1404 device->target,
1405 device->lun,
1406 scsi_device_type(device->devtype),
1407 device->vendor,
1408 device->model,
bd10cf0b
KB
1409 pqi_is_logical_device(device) ?
1410 pqi_raid_level_to_string(device->raid_level) : "",
6c223761
KB
1411 device->offload_configured ? '+' : '-',
1412 device->offload_enabled_pending ? '+' : '-',
6c223761
KB
1413 device->queue_depth);
1414}
1415
1416/* Assumes the SCSI device list lock is held. */
1417
1418static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
1419 struct pqi_scsi_dev *new_device)
1420{
1421 existing_device->devtype = new_device->devtype;
1422 existing_device->device_type = new_device->device_type;
1423 existing_device->bus = new_device->bus;
1424 if (new_device->target_lun_valid) {
1425 existing_device->target = new_device->target;
1426 existing_device->lun = new_device->lun;
1427 existing_device->target_lun_valid = true;
1428 }
1429
1430 /* By definition, the scsi3addr and wwid fields are already the same. */
1431
1432 existing_device->is_physical_device = new_device->is_physical_device;
bd10cf0b
KB
1433 existing_device->is_external_raid_device =
1434 new_device->is_external_raid_device;
6c223761
KB
1435 existing_device->aio_enabled = new_device->aio_enabled;
1436 memcpy(existing_device->vendor, new_device->vendor,
1437 sizeof(existing_device->vendor));
1438 memcpy(existing_device->model, new_device->model,
1439 sizeof(existing_device->model));
1440 existing_device->sas_address = new_device->sas_address;
1441 existing_device->raid_level = new_device->raid_level;
1442 existing_device->queue_depth = new_device->queue_depth;
1443 existing_device->aio_handle = new_device->aio_handle;
1444 existing_device->volume_status = new_device->volume_status;
1445 existing_device->active_path_index = new_device->active_path_index;
1446 existing_device->path_map = new_device->path_map;
1447 existing_device->bay = new_device->bay;
1448 memcpy(existing_device->box, new_device->box,
1449 sizeof(existing_device->box));
1450 memcpy(existing_device->phys_connector, new_device->phys_connector,
1451 sizeof(existing_device->phys_connector));
1452 existing_device->offload_configured = new_device->offload_configured;
1453 existing_device->offload_enabled = false;
1454 existing_device->offload_enabled_pending =
1455 new_device->offload_enabled_pending;
1456 existing_device->offload_to_mirror = 0;
1457 kfree(existing_device->raid_map);
1458 existing_device->raid_map = new_device->raid_map;
1459
1460 /* To prevent this from being freed later. */
1461 new_device->raid_map = NULL;
1462}
1463
1464static inline void pqi_free_device(struct pqi_scsi_dev *device)
1465{
1466 if (device) {
1467 kfree(device->raid_map);
1468 kfree(device);
1469 }
1470}
1471
1472/*
1473 * Called when exposing a new device to the OS fails in order to re-adjust
1474 * our internal SCSI device list to match the SCSI ML's view.
1475 */
1476
1477static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1478 struct pqi_scsi_dev *device)
1479{
1480 unsigned long flags;
1481
1482 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1483 list_del(&device->scsi_device_list_entry);
1484 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1485
1486 /* Allow the device structure to be freed later. */
1487 device->keep_device = false;
1488}
1489
1490static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
1491 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
1492{
1493 int rc;
1494 unsigned int i;
1495 unsigned long flags;
1496 enum pqi_find_result find_result;
1497 struct pqi_scsi_dev *device;
1498 struct pqi_scsi_dev *next;
1499 struct pqi_scsi_dev *matching_device;
1500 struct list_head add_list;
1501 struct list_head delete_list;
1502
1503 INIT_LIST_HEAD(&add_list);
1504 INIT_LIST_HEAD(&delete_list);
1505
1506 /*
1507 * The idea here is to do as little work as possible while holding the
1508 * spinlock. That's why we go to great pains to defer anything other
1509 * than updating the internal device list until after we release the
1510 * spinlock.
1511 */
1512
1513 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1514
1515 /* Assume that all devices in the existing list have gone away. */
1516 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1517 scsi_device_list_entry)
1518 device->device_gone = true;
1519
1520 for (i = 0; i < num_new_devices; i++) {
1521 device = new_device_list[i];
1522
1523 find_result = pqi_scsi_find_entry(ctrl_info, device,
1524 &matching_device);
1525
1526 switch (find_result) {
1527 case DEVICE_SAME:
1528 /*
1529 * The newly found device is already in the existing
1530 * device list.
1531 */
1532 device->new_device = false;
1533 matching_device->device_gone = false;
1534 pqi_scsi_update_device(matching_device, device);
1535 break;
1536 case DEVICE_NOT_FOUND:
1537 /*
1538 * The newly found device is NOT in the existing device
1539 * list.
1540 */
1541 device->new_device = true;
1542 break;
1543 case DEVICE_CHANGED:
1544 /*
1545 * The original device has gone away and we need to add
1546 * the new device.
1547 */
1548 device->new_device = true;
1549 break;
6c223761
KB
1550 }
1551 }
1552
1553 /* Process all devices that have gone away. */
1554 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
1555 scsi_device_list_entry) {
1556 if (device->device_gone) {
1557 list_del(&device->scsi_device_list_entry);
1558 list_add_tail(&device->delete_list_entry, &delete_list);
1559 }
1560 }
1561
1562 /* Process all new devices. */
1563 for (i = 0; i < num_new_devices; i++) {
1564 device = new_device_list[i];
1565 if (!device->new_device)
1566 continue;
1567 if (device->volume_offline)
1568 continue;
1569 list_add_tail(&device->scsi_device_list_entry,
1570 &ctrl_info->scsi_device_list);
1571 list_add_tail(&device->add_list_entry, &add_list);
1572 /* To prevent this device structure from being freed later. */
1573 device->keep_device = true;
1574 }
1575
6c223761
KB
1576 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1577 scsi_device_list_entry)
1578 device->offload_enabled =
1579 device->offload_enabled_pending;
1580
1581 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1582
1583 /* Remove all devices that have gone away. */
1584 list_for_each_entry_safe(device, next, &delete_list,
1585 delete_list_entry) {
1586 if (device->sdev)
1587 pqi_remove_device(ctrl_info, device);
1588 if (device->volume_offline) {
1589 pqi_dev_info(ctrl_info, "offline", device);
1590 pqi_show_volume_status(ctrl_info, device);
1591 } else {
1592 pqi_dev_info(ctrl_info, "removed", device);
1593 }
1594 list_del(&device->delete_list_entry);
1595 pqi_free_device(device);
1596 }
1597
1598 /*
1599 * Notify the SCSI ML if the queue depth of any existing device has
1600 * changed.
1601 */
1602 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1603 scsi_device_list_entry) {
1604 if (device->sdev && device->queue_depth !=
1605 device->advertised_queue_depth) {
1606 device->advertised_queue_depth = device->queue_depth;
1607 scsi_change_queue_depth(device->sdev,
1608 device->advertised_queue_depth);
1609 }
1610 }
1611
1612 /* Expose any new devices. */
1613 list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
94086f5b 1614 if (!device->sdev) {
6c223761
KB
1615 rc = pqi_add_device(ctrl_info, device);
1616 if (rc) {
1617 dev_warn(&ctrl_info->pci_dev->dev,
1618 "scsi %d:%d:%d:%d addition failed, device not added\n",
1619 ctrl_info->scsi_host->host_no,
1620 device->bus, device->target,
1621 device->lun);
1622 pqi_fixup_botched_add(ctrl_info, device);
1623 continue;
1624 }
1625 }
1626 pqi_dev_info(ctrl_info, "added", device);
1627 }
1628}
1629
1630static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1631{
1632 bool is_supported = false;
1633
1634 switch (device->devtype) {
1635 case TYPE_DISK:
1636 case TYPE_ZBC:
1637 case TYPE_TAPE:
1638 case TYPE_MEDIUM_CHANGER:
1639 case TYPE_ENCLOSURE:
1640 is_supported = true;
1641 break;
1642 case TYPE_RAID:
1643 /*
1644 * Only support the HBA controller itself as a RAID
1645 * controller. If it's a RAID controller other than
1646 * the HBA itself (an external RAID controller, MSA500
1647 * or similar), we don't support it.
1648 */
1649 if (pqi_is_hba_lunid(device->scsi3addr))
1650 is_supported = true;
1651 break;
1652 }
1653
1654 return is_supported;
1655}
1656
94086f5b 1657static inline bool pqi_skip_device(u8 *scsi3addr)
6c223761 1658{
94086f5b
KB
1659 /* Ignore all masked devices. */
1660 if (MASKED_DEVICE(scsi3addr))
6c223761 1661 return true;
6c223761
KB
1662
1663 return false;
1664}
1665
6c223761
KB
1666static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1667{
1668 int i;
1669 int rc;
1670 struct list_head new_device_list_head;
1671 struct report_phys_lun_extended *physdev_list = NULL;
1672 struct report_log_lun_extended *logdev_list = NULL;
1673 struct report_phys_lun_extended_entry *phys_lun_ext_entry;
1674 struct report_log_lun_extended_entry *log_lun_ext_entry;
1675 struct bmic_identify_physical_device *id_phys = NULL;
1676 u32 num_physicals;
1677 u32 num_logicals;
1678 struct pqi_scsi_dev **new_device_list = NULL;
1679 struct pqi_scsi_dev *device;
1680 struct pqi_scsi_dev *next;
1681 unsigned int num_new_devices;
1682 unsigned int num_valid_devices;
1683 bool is_physical_device;
1684 u8 *scsi3addr;
1685 static char *out_of_memory_msg =
1686 "out of memory, device discovery stopped";
1687
1688 INIT_LIST_HEAD(&new_device_list_head);
1689
1690 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
1691 if (rc)
1692 goto out;
1693
1694 if (physdev_list)
1695 num_physicals =
1696 get_unaligned_be32(&physdev_list->header.list_length)
1697 / sizeof(physdev_list->lun_entries[0]);
1698 else
1699 num_physicals = 0;
1700
1701 if (logdev_list)
1702 num_logicals =
1703 get_unaligned_be32(&logdev_list->header.list_length)
1704 / sizeof(logdev_list->lun_entries[0]);
1705 else
1706 num_logicals = 0;
1707
1708 if (num_physicals) {
1709 /*
1710 * We need this buffer for calls to pqi_get_physical_disk_info()
1711 * below. We allocate it here instead of inside
1712 * pqi_get_physical_disk_info() because it's a fairly large
1713 * buffer.
1714 */
1715 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
1716 if (!id_phys) {
1717 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1718 out_of_memory_msg);
1719 rc = -ENOMEM;
1720 goto out;
1721 }
1722 }
1723
1724 num_new_devices = num_physicals + num_logicals;
1725
1726 new_device_list = kmalloc(sizeof(*new_device_list) *
1727 num_new_devices, GFP_KERNEL);
1728 if (!new_device_list) {
1729 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
1730 rc = -ENOMEM;
1731 goto out;
1732 }
1733
1734 for (i = 0; i < num_new_devices; i++) {
1735 device = kzalloc(sizeof(*device), GFP_KERNEL);
1736 if (!device) {
1737 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1738 out_of_memory_msg);
1739 rc = -ENOMEM;
1740 goto out;
1741 }
1742 list_add_tail(&device->new_device_list_entry,
1743 &new_device_list_head);
1744 }
1745
1746 device = NULL;
1747 num_valid_devices = 0;
1748
1749 for (i = 0; i < num_new_devices; i++) {
1750
1751 if (i < num_physicals) {
1752 is_physical_device = true;
1753 phys_lun_ext_entry = &physdev_list->lun_entries[i];
1754 log_lun_ext_entry = NULL;
1755 scsi3addr = phys_lun_ext_entry->lunid;
1756 } else {
1757 is_physical_device = false;
1758 phys_lun_ext_entry = NULL;
1759 log_lun_ext_entry =
1760 &logdev_list->lun_entries[i - num_physicals];
1761 scsi3addr = log_lun_ext_entry->lunid;
1762 }
1763
94086f5b 1764 if (is_physical_device && pqi_skip_device(scsi3addr))
6c223761
KB
1765 continue;
1766
1767 if (device)
1768 device = list_next_entry(device, new_device_list_entry);
1769 else
1770 device = list_first_entry(&new_device_list_head,
1771 struct pqi_scsi_dev, new_device_list_entry);
1772
1773 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1774 device->is_physical_device = is_physical_device;
bd10cf0b
KB
1775 if (!is_physical_device)
1776 device->is_external_raid_device =
1777 pqi_is_external_raid_addr(scsi3addr);
6c223761
KB
1778
1779 /* Gather information about the device. */
1780 rc = pqi_get_device_info(ctrl_info, device);
1781 if (rc == -ENOMEM) {
1782 dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
1783 out_of_memory_msg);
1784 goto out;
1785 }
1786 if (rc) {
1787 dev_warn(&ctrl_info->pci_dev->dev,
1788 "obtaining device info failed, skipping device %016llx\n",
1789 get_unaligned_be64(device->scsi3addr));
1790 rc = 0;
1791 continue;
1792 }
1793
1794 if (!pqi_is_supported_device(device))
1795 continue;
1796
1797 pqi_assign_bus_target_lun(device);
1798
6c223761
KB
1799 if (device->is_physical_device) {
1800 device->wwid = phys_lun_ext_entry->wwid;
1801 if ((phys_lun_ext_entry->device_flags &
1802 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1803 phys_lun_ext_entry->aio_handle)
1804 device->aio_enabled = true;
1805 } else {
1806 memcpy(device->volume_id, log_lun_ext_entry->volume_id,
1807 sizeof(device->volume_id));
1808 }
1809
1810 switch (device->devtype) {
1811 case TYPE_DISK:
1812 case TYPE_ZBC:
1813 case TYPE_ENCLOSURE:
1814 if (device->is_physical_device) {
1815 device->sas_address =
1816 get_unaligned_be64(&device->wwid);
1817 if (device->devtype == TYPE_DISK ||
1818 device->devtype == TYPE_ZBC) {
1819 device->aio_handle =
1820 phys_lun_ext_entry->aio_handle;
1821 pqi_get_physical_disk_info(ctrl_info,
1822 device, id_phys);
1823 }
1824 }
1825 break;
1826 }
1827
1828 new_device_list[num_valid_devices++] = device;
1829 }
1830
1831 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
1832
1833out:
1834 list_for_each_entry_safe(device, next, &new_device_list_head,
1835 new_device_list_entry) {
1836 if (device->keep_device)
1837 continue;
1838 list_del(&device->new_device_list_entry);
1839 pqi_free_device(device);
1840 }
1841
1842 kfree(new_device_list);
1843 kfree(physdev_list);
1844 kfree(logdev_list);
1845 kfree(id_phys);
1846
1847 return rc;
1848}
1849
1850static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1851{
1852 unsigned long flags;
1853 struct pqi_scsi_dev *device;
6c223761 1854
a37ef745
KB
1855 while (1) {
1856 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1857
1858 device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
1859 struct pqi_scsi_dev, scsi_device_list_entry);
1860 if (device)
1861 list_del(&device->scsi_device_list_entry);
1862
1863 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
1864 flags);
1865
1866 if (!device)
1867 break;
6c223761 1868
6c223761
KB
1869 if (device->sdev)
1870 pqi_remove_device(ctrl_info, device);
6c223761
KB
1871 pqi_free_device(device);
1872 }
6c223761
KB
1873}
1874
1875static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
1876{
1877 int rc;
1878
1879 if (pqi_ctrl_offline(ctrl_info))
1880 return -ENXIO;
1881
1882 mutex_lock(&ctrl_info->scan_mutex);
1883
1884 rc = pqi_update_scsi_devices(ctrl_info);
1885 if (rc)
1886 pqi_schedule_rescan_worker(ctrl_info);
1887
1888 mutex_unlock(&ctrl_info->scan_mutex);
1889
1890 return rc;
1891}
1892
1893static void pqi_scan_start(struct Scsi_Host *shost)
1894{
1895 pqi_scan_scsi_devices(shost_to_hba(shost));
1896}
1897
1898/* Returns TRUE if scan is finished. */
1899
1900static int pqi_scan_finished(struct Scsi_Host *shost,
1901 unsigned long elapsed_time)
1902{
1903 struct pqi_ctrl_info *ctrl_info;
1904
1905 ctrl_info = shost_priv(shost);
1906
1907 return !mutex_is_locked(&ctrl_info->scan_mutex);
1908}
1909
061ef06a
KB
1910static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
1911{
1912 mutex_lock(&ctrl_info->scan_mutex);
1913 mutex_unlock(&ctrl_info->scan_mutex);
1914}
1915
1916static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
1917{
1918 mutex_lock(&ctrl_info->lun_reset_mutex);
1919 mutex_unlock(&ctrl_info->lun_reset_mutex);
1920}
1921
6c223761
KB
1922static inline void pqi_set_encryption_info(
1923 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
1924 u64 first_block)
1925{
1926 u32 volume_blk_size;
1927
1928 /*
1929 * Set the encryption tweak values based on logical block address.
1930 * If the block size is 512, the tweak value is equal to the LBA.
1931 * For other block sizes, tweak value is (LBA * block size) / 512.
1932 */
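	/*
	 * Worked example (added commentary, not from the original source):
	 * with a 4096-byte volume block size, first_block = 100 becomes
	 * (100 * 4096) / 512 = 800 before being split into the lower and
	 * upper 32-bit tweak halves below.
	 */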
1933 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
1934 if (volume_blk_size != 512)
1935 first_block = (first_block * volume_blk_size) / 512;
1936
1937 encryption_info->data_encryption_key_index =
1938 get_unaligned_le16(&raid_map->data_encryption_key_index);
1939 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
1940 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
1941}
1942
1943/*
1944 * Attempt to perform offload RAID mapping for a logical volume I/O.
1945 */
1946
1947#define PQI_RAID_BYPASS_INELIGIBLE 1
1948
1949static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
1950 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
1951 struct pqi_queue_group *queue_group)
1952{
1953 struct raid_map *raid_map;
1954 bool is_write = false;
1955 u32 map_index;
1956 u64 first_block;
1957 u64 last_block;
1958 u32 block_cnt;
1959 u32 blocks_per_row;
1960 u64 first_row;
1961 u64 last_row;
1962 u32 first_row_offset;
1963 u32 last_row_offset;
1964 u32 first_column;
1965 u32 last_column;
1966 u64 r0_first_row;
1967 u64 r0_last_row;
1968 u32 r5or6_blocks_per_row;
1969 u64 r5or6_first_row;
1970 u64 r5or6_last_row;
1971 u32 r5or6_first_row_offset;
1972 u32 r5or6_last_row_offset;
1973 u32 r5or6_first_column;
1974 u32 r5or6_last_column;
1975 u16 data_disks_per_row;
1976 u32 total_disks_per_row;
1977 u16 layout_map_count;
1978 u32 stripesize;
1979 u16 strip_size;
1980 u32 first_group;
1981 u32 last_group;
1982 u32 current_group;
1983 u32 map_row;
1984 u32 aio_handle;
1985 u64 disk_block;
1986 u32 disk_block_cnt;
1987 u8 cdb[16];
1988 u8 cdb_length;
1989 int offload_to_mirror;
1990 struct pqi_encryption_info *encryption_info_ptr;
1991 struct pqi_encryption_info encryption_info;
1992#if BITS_PER_LONG == 32
1993 u64 tmpdiv;
1994#endif
1995
1996 /* Check for valid opcode, get LBA and block count. */
1997 switch (scmd->cmnd[0]) {
1998 case WRITE_6:
1999 is_write = true;
2000 /* fall through */
2001 case READ_6:
2002 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2003 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2004 block_cnt = (u32)scmd->cmnd[4];
2005 if (block_cnt == 0)
2006 block_cnt = 256;
2007 break;
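	/*
	 * Added note: READ(6)/WRITE(6) CDBs carry only a 21-bit LBA in
	 * bytes 1-3 and an 8-bit transfer length in byte 4, where a length
	 * of 0 means 256 blocks - hence the special case above.
	 */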
2008 case WRITE_10:
2009 is_write = true;
2010 /* fall through */
2011 case READ_10:
2012 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2013 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2014 break;
2015 case WRITE_12:
2016 is_write = true;
2017 /* fall through */
2018 case READ_12:
2019 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2020 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2021 break;
2022 case WRITE_16:
2023 is_write = true;
2024 /* fall through */
2025 case READ_16:
2026 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2027 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2028 break;
2029 default:
2030 /* Process via normal I/O path. */
2031 return PQI_RAID_BYPASS_INELIGIBLE;
2032 }
2033
2034 /* Check for write to non-RAID-0. */
2035 if (is_write && device->raid_level != SA_RAID_0)
2036 return PQI_RAID_BYPASS_INELIGIBLE;
2037
2038 if (unlikely(block_cnt == 0))
2039 return PQI_RAID_BYPASS_INELIGIBLE;
2040
2041 last_block = first_block + block_cnt - 1;
2042 raid_map = device->raid_map;
2043
2044 /* Check for invalid block or wraparound. */
2045 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2046 last_block < first_block)
2047 return PQI_RAID_BYPASS_INELIGIBLE;
2048
2049 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2050 strip_size = get_unaligned_le16(&raid_map->strip_size);
2051 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2052
2053 /* Calculate stripe information for the request. */
2054 blocks_per_row = data_disks_per_row * strip_size;
2055#if BITS_PER_LONG == 32
2056 tmpdiv = first_block;
2057 do_div(tmpdiv, blocks_per_row);
2058 first_row = tmpdiv;
2059 tmpdiv = last_block;
2060 do_div(tmpdiv, blocks_per_row);
2061 last_row = tmpdiv;
2062 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2063 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2064 tmpdiv = first_row_offset;
2065 do_div(tmpdiv, strip_size);
2066 first_column = tmpdiv;
2067 tmpdiv = last_row_offset;
2068 do_div(tmpdiv, strip_size);
2069 last_column = tmpdiv;
2070#else
2071 first_row = first_block / blocks_per_row;
2072 last_row = last_block / blocks_per_row;
2073 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2074 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2075 first_column = first_row_offset / strip_size;
2076 last_column = last_row_offset / strip_size;
2077#endif
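	/*
	 * Illustrative example (added commentary): with strip_size = 128
	 * blocks and data_disks_per_row = 3, blocks_per_row = 384; an I/O
	 * starting at block 1000 falls in first_row = 2 with
	 * first_row_offset = 1000 - (2 * 384) = 232 and
	 * first_column = 232 / 128 = 1.
	 */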
2078
2079 /* If this isn't a single row/column then give to the controller. */
2080 if (first_row != last_row || first_column != last_column)
2081 return PQI_RAID_BYPASS_INELIGIBLE;
2082
2083 /* Proceeding with driver mapping. */
2084 total_disks_per_row = data_disks_per_row +
2085 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2086 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2087 get_unaligned_le16(&raid_map->row_cnt);
2088 map_index = (map_row * total_disks_per_row) + first_column;
2089
2090 /* RAID 1 */
2091 if (device->raid_level == SA_RAID_1) {
2092 if (device->offload_to_mirror)
2093 map_index += data_disks_per_row;
2094 device->offload_to_mirror = !device->offload_to_mirror;
2095 } else if (device->raid_level == SA_RAID_ADM) {
2096 /* RAID ADM */
2097 /*
2098 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2099 * divisible by 3.
2100 */
2101 offload_to_mirror = device->offload_to_mirror;
2102 if (offload_to_mirror == 0) {
2103 /* use physical disk in the first mirrored group. */
2104 map_index %= data_disks_per_row;
2105 } else {
2106 do {
2107 /*
2108 * Determine mirror group that map_index
2109 * indicates.
2110 */
2111 current_group = map_index / data_disks_per_row;
2112
2113 if (offload_to_mirror != current_group) {
2114 if (current_group <
2115 layout_map_count - 1) {
2116 /*
2117 * Select raid index from
2118 * next group.
2119 */
2120 map_index += data_disks_per_row;
2121 current_group++;
2122 } else {
2123 /*
2124 * Select raid index from first
2125 * group.
2126 */
2127 map_index %= data_disks_per_row;
2128 current_group = 0;
2129 }
2130 }
2131 } while (offload_to_mirror != current_group);
2132 }
2133
2134 /* Set mirror group to use next time. */
2135 offload_to_mirror =
2136 (offload_to_mirror >= layout_map_count - 1) ?
2137 0 : offload_to_mirror + 1;
2138 WARN_ON(offload_to_mirror >= layout_map_count);
2139 device->offload_to_mirror = offload_to_mirror;
2140 /*
2141 * Avoid direct use of device->offload_to_mirror within this
2142 * function since multiple threads might simultaneously
2143 * increment it beyond the range of device->layout_map_count -1.
2144 */
2145 } else if ((device->raid_level == SA_RAID_5 ||
2146 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2147 /* RAID 50/60 */
2148 /* Verify first and last block are in same RAID group */
2149 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2150 stripesize = r5or6_blocks_per_row * layout_map_count;
2151#if BITS_PER_LONG == 32
2152 tmpdiv = first_block;
2153 first_group = do_div(tmpdiv, stripesize);
2154 tmpdiv = first_group;
2155 do_div(tmpdiv, r5or6_blocks_per_row);
2156 first_group = tmpdiv;
2157 tmpdiv = last_block;
2158 last_group = do_div(tmpdiv, stripesize);
2159 tmpdiv = last_group;
2160 do_div(tmpdiv, r5or6_blocks_per_row);
2161 last_group = tmpdiv;
2162#else
2163 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2164 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2165#endif
2166 if (first_group != last_group)
2167 return PQI_RAID_BYPASS_INELIGIBLE;
2168
2169 /* Verify request is in a single row of RAID 5/6 */
2170#if BITS_PER_LONG == 32
2171 tmpdiv = first_block;
2172 do_div(tmpdiv, stripesize);
2173 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2174 tmpdiv = last_block;
2175 do_div(tmpdiv, stripesize);
2176 r5or6_last_row = r0_last_row = tmpdiv;
2177#else
2178 first_row = r5or6_first_row = r0_first_row =
2179 first_block / stripesize;
2180 r5or6_last_row = r0_last_row = last_block / stripesize;
2181#endif
2182 if (r5or6_first_row != r5or6_last_row)
2183 return PQI_RAID_BYPASS_INELIGIBLE;
2184
2185 /* Verify request is in a single column */
2186#if BITS_PER_LONG == 32
2187 tmpdiv = first_block;
2188 first_row_offset = do_div(tmpdiv, stripesize);
2189 tmpdiv = first_row_offset;
2190 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2191 r5or6_first_row_offset = first_row_offset;
2192 tmpdiv = last_block;
2193 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2194 tmpdiv = r5or6_last_row_offset;
2195 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2196 tmpdiv = r5or6_first_row_offset;
2197 do_div(tmpdiv, strip_size);
2198 first_column = r5or6_first_column = tmpdiv;
2199 tmpdiv = r5or6_last_row_offset;
2200 do_div(tmpdiv, strip_size);
2201 r5or6_last_column = tmpdiv;
2202#else
2203 first_row_offset = r5or6_first_row_offset =
2204 (u32)((first_block % stripesize) %
2205 r5or6_blocks_per_row);
2206
2207 r5or6_last_row_offset =
2208 (u32)((last_block % stripesize) %
2209 r5or6_blocks_per_row);
2210
2211 first_column = r5or6_first_row_offset / strip_size;
2212 r5or6_first_column = first_column;
2213 r5or6_last_column = r5or6_last_row_offset / strip_size;
2214#endif
2215 if (r5or6_first_column != r5or6_last_column)
2216 return PQI_RAID_BYPASS_INELIGIBLE;
2217
2218 /* Request is eligible */
2219 map_row =
2220 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2221 get_unaligned_le16(&raid_map->row_cnt);
2222
2223 map_index = (first_group *
2224 (get_unaligned_le16(&raid_map->row_cnt) *
2225 total_disks_per_row)) +
2226 (map_row * total_disks_per_row) + first_column;
2227 }
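	/*
	 * Added note: for RAID 50/60 (layout_map_count > 1) the checks
	 * above only allow the bypass when the request stays within a
	 * single parity group, a single row, and a single strip; anything
	 * that crosses one of those boundaries is sent down the normal
	 * RAID path instead.
	 */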
2228
2229 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2230 return PQI_RAID_BYPASS_INELIGIBLE;
2231
2232 aio_handle = raid_map->disk_data[map_index].aio_handle;
2233 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2234 first_row * strip_size +
2235 (first_row_offset - first_column * strip_size);
2236 disk_block_cnt = block_cnt;
2237
2238 /* Handle differing logical/physical block sizes. */
2239 if (raid_map->phys_blk_shift) {
2240 disk_block <<= raid_map->phys_blk_shift;
2241 disk_block_cnt <<= raid_map->phys_blk_shift;
2242 }
2243
2244 if (unlikely(disk_block_cnt > 0xffff))
2245 return PQI_RAID_BYPASS_INELIGIBLE;
2246
2247 /* Build the new CDB for the physical disk I/O. */
2248 if (disk_block > 0xffffffff) {
2249 cdb[0] = is_write ? WRITE_16 : READ_16;
2250 cdb[1] = 0;
2251 put_unaligned_be64(disk_block, &cdb[2]);
2252 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2253 cdb[14] = 0;
2254 cdb[15] = 0;
2255 cdb_length = 16;
2256 } else {
2257 cdb[0] = is_write ? WRITE_10 : READ_10;
2258 cdb[1] = 0;
2259 put_unaligned_be32((u32)disk_block, &cdb[2]);
2260 cdb[6] = 0;
2261 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2262 cdb[9] = 0;
2263 cdb_length = 10;
2264 }
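	/*
	 * Added note: READ(10)/WRITE(10) can address at most a 32-bit
	 * starting LBA and a 16-bit block count, so larger requests use
	 * the 16-byte CDB built in the branch above.
	 */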
2265
2266 if (get_unaligned_le16(&raid_map->flags) &
2267 RAID_MAP_ENCRYPTION_ENABLED) {
2268 pqi_set_encryption_info(&encryption_info, raid_map,
2269 first_block);
2270 encryption_info_ptr = &encryption_info;
2271 } else {
2272 encryption_info_ptr = NULL;
2273 }
2274
2275 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2276 cdb, cdb_length, queue_group, encryption_info_ptr);
2277}
2278
2279#define PQI_STATUS_IDLE 0x0
2280
2281#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2282#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2283
2284#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2285#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2286#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2287#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2288#define PQI_DEVICE_STATE_ERROR 0x4
2289
2290#define PQI_MODE_READY_TIMEOUT_SECS 30
2291#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2292
2293static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2294{
2295 struct pqi_device_registers __iomem *pqi_registers;
2296 unsigned long timeout;
2297 u64 signature;
2298 u8 status;
2299
2300 pqi_registers = ctrl_info->pqi_registers;
2301 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2302
2303 while (1) {
2304 signature = readq(&pqi_registers->signature);
2305 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2306 sizeof(signature)) == 0)
2307 break;
2308 if (time_after(jiffies, timeout)) {
2309 dev_err(&ctrl_info->pci_dev->dev,
2310 "timed out waiting for PQI signature\n");
2311 return -ETIMEDOUT;
2312 }
2313 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2314 }
2315
2316 while (1) {
2317 status = readb(&pqi_registers->function_and_status_code);
2318 if (status == PQI_STATUS_IDLE)
2319 break;
2320 if (time_after(jiffies, timeout)) {
2321 dev_err(&ctrl_info->pci_dev->dev,
2322 "timed out waiting for PQI IDLE\n");
2323 return -ETIMEDOUT;
2324 }
2325 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2326 }
2327
2328 while (1) {
2329 if (readl(&pqi_registers->device_status) ==
2330 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2331 break;
2332 if (time_after(jiffies, timeout)) {
2333 dev_err(&ctrl_info->pci_dev->dev,
2334 "timed out waiting for PQI all registers ready\n");
2335 return -ETIMEDOUT;
2336 }
2337 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2338 }
2339
2340 return 0;
2341}
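/*
 * Added note: the three polling loops above share one 30-second deadline -
 * the PQI signature, the IDLE function/status code, and the
 * ALL_REGISTERS_READY device state must all be observed before it expires.
 */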
2342
2343static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2344{
2345 struct pqi_scsi_dev *device;
2346
2347 device = io_request->scmd->device->hostdata;
2348 device->offload_enabled = false;
2349}
2350
d87d5474 2351static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
2352{
2353 struct pqi_ctrl_info *ctrl_info;
e58081a7 2354 struct pqi_scsi_dev *device;
2355
2356 if (scsi_device_online(sdev)) {
2357 scsi_device_set_state(sdev, SDEV_OFFLINE);
2358 ctrl_info = shost_to_hba(sdev->host);
2359 schedule_delayed_work(&ctrl_info->rescan_work, 0);
e58081a7 2360 device = sdev->hostdata;
2361 dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n",
2362 path, ctrl_info->scsi_host->host_no, device->bus,
e58081a7 2363 device->target, device->lun);
2364 }
2365}
2366
2367static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2368{
2369 u8 scsi_status;
2370 u8 host_byte;
2371 struct scsi_cmnd *scmd;
2372 struct pqi_raid_error_info *error_info;
2373 size_t sense_data_length;
2374 int residual_count;
2375 int xfer_count;
2376 struct scsi_sense_hdr sshdr;
2377
2378 scmd = io_request->scmd;
2379 if (!scmd)
2380 return;
2381
2382 error_info = io_request->error_info;
2383 scsi_status = error_info->status;
2384 host_byte = DID_OK;
2385
2386 if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
2387 xfer_count =
2388 get_unaligned_le32(&error_info->data_out_transferred);
2389 residual_count = scsi_bufflen(scmd) - xfer_count;
2390 scsi_set_resid(scmd, residual_count);
2391 if (xfer_count < scmd->underflow)
2392 host_byte = DID_SOFT_ERROR;
2393 }
2394
2395 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2396 if (sense_data_length == 0)
2397 sense_data_length =
2398 get_unaligned_le16(&error_info->response_data_length);
2399 if (sense_data_length) {
2400 if (sense_data_length > sizeof(error_info->data))
2401 sense_data_length = sizeof(error_info->data);
2402
2403 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2404 scsi_normalize_sense(error_info->data,
2405 sense_data_length, &sshdr) &&
2406 sshdr.sense_key == HARDWARE_ERROR &&
2407 sshdr.asc == 0x3e &&
2408 sshdr.ascq == 0x1) {
d87d5474 2409 pqi_take_device_offline(scmd->device, "RAID");
2410 host_byte = DID_NO_CONNECT;
2411 }
2412
2413 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2414 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2415 memcpy(scmd->sense_buffer, error_info->data,
2416 sense_data_length);
2417 }
2418
2419 scmd->result = scsi_status;
2420 set_host_byte(scmd, host_byte);
2421}
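/*
 * Added note: a CHECK CONDITION with sense key HARDWARE ERROR and
 * ASC/ASCQ 3Eh/01h (logical unit failure) is treated above as a dead
 * device: it is taken offline and the command completes with
 * DID_NO_CONNECT.
 */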
2422
2423static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2424{
2425 u8 scsi_status;
2426 u8 host_byte;
2427 struct scsi_cmnd *scmd;
2428 struct pqi_aio_error_info *error_info;
2429 size_t sense_data_length;
2430 int residual_count;
2431 int xfer_count;
2432 bool device_offline;
2433
2434 scmd = io_request->scmd;
2435 error_info = io_request->error_info;
2436 host_byte = DID_OK;
2437 sense_data_length = 0;
2438 device_offline = false;
2439
2440 switch (error_info->service_response) {
2441 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2442 scsi_status = error_info->status;
2443 break;
2444 case PQI_AIO_SERV_RESPONSE_FAILURE:
2445 switch (error_info->status) {
2446 case PQI_AIO_STATUS_IO_ABORTED:
2447 scsi_status = SAM_STAT_TASK_ABORTED;
2448 break;
2449 case PQI_AIO_STATUS_UNDERRUN:
2450 scsi_status = SAM_STAT_GOOD;
2451 residual_count = get_unaligned_le32(
2452 &error_info->residual_count);
2453 scsi_set_resid(scmd, residual_count);
2454 xfer_count = scsi_bufflen(scmd) - residual_count;
2455 if (xfer_count < scmd->underflow)
2456 host_byte = DID_SOFT_ERROR;
2457 break;
2458 case PQI_AIO_STATUS_OVERRUN:
2459 scsi_status = SAM_STAT_GOOD;
2460 break;
2461 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2462 pqi_aio_path_disabled(io_request);
2463 scsi_status = SAM_STAT_GOOD;
2464 io_request->status = -EAGAIN;
2465 break;
2466 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2467 case PQI_AIO_STATUS_INVALID_DEVICE:
2468 device_offline = true;
d87d5474 2469 pqi_take_device_offline(scmd->device, "AIO");
2470 host_byte = DID_NO_CONNECT;
2471 scsi_status = SAM_STAT_CHECK_CONDITION;
2472 break;
2473 case PQI_AIO_STATUS_IO_ERROR:
2474 default:
2475 scsi_status = SAM_STAT_CHECK_CONDITION;
2476 break;
2477 }
2478 break;
2479 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2480 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2481 scsi_status = SAM_STAT_GOOD;
2482 break;
2483 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2484 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2485 default:
2486 scsi_status = SAM_STAT_CHECK_CONDITION;
2487 break;
2488 }
2489
2490 if (error_info->data_present) {
2491 sense_data_length =
2492 get_unaligned_le16(&error_info->data_length);
2493 if (sense_data_length) {
2494 if (sense_data_length > sizeof(error_info->data))
2495 sense_data_length = sizeof(error_info->data);
2496 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2497 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2498 memcpy(scmd->sense_buffer, error_info->data,
2499 sense_data_length);
2500 }
2501 }
2502
2503 if (device_offline && sense_data_length == 0)
2504 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2505 0x3e, 0x1);
2506
2507 scmd->result = scsi_status;
2508 set_host_byte(scmd, host_byte);
2509}
2510
2511static void pqi_process_io_error(unsigned int iu_type,
2512 struct pqi_io_request *io_request)
2513{
2514 switch (iu_type) {
2515 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2516 pqi_process_raid_io_error(io_request);
2517 break;
2518 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2519 pqi_process_aio_io_error(io_request);
2520 break;
2521 }
2522}
2523
2524static int pqi_interpret_task_management_response(
2525 struct pqi_task_management_response *response)
2526{
2527 int rc;
2528
2529 switch (response->response_code) {
2530 case SOP_TMF_COMPLETE:
2531 case SOP_TMF_FUNCTION_SUCCEEDED:
2532 rc = 0;
2533 break;
2534 default:
2535 rc = -EIO;
2536 break;
2537 }
2538
2539 return rc;
2540}
2541
2542static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2543 struct pqi_queue_group *queue_group)
2544{
2545 unsigned int num_responses;
2546 pqi_index_t oq_pi;
2547 pqi_index_t oq_ci;
2548 struct pqi_io_request *io_request;
2549 struct pqi_io_response *response;
2550 u16 request_id;
2551
2552 num_responses = 0;
2553 oq_ci = queue_group->oq_ci_copy;
2554
2555 while (1) {
2556 oq_pi = *queue_group->oq_pi;
2557 if (oq_pi == oq_ci)
2558 break;
2559
2560 num_responses++;
2561 response = queue_group->oq_element_array +
2562 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2563
2564 request_id = get_unaligned_le16(&response->request_id);
2565 WARN_ON(request_id >= ctrl_info->max_io_slots);
2566
2567 io_request = &ctrl_info->io_request_pool[request_id];
2568 WARN_ON(atomic_read(&io_request->refcount) == 0);
2569
2570 switch (response->header.iu_type) {
2571 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2572 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2573 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2574 break;
2575 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2576 io_request->status =
2577 pqi_interpret_task_management_response(
2578 (void *)response);
2579 break;
2580 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2581 pqi_aio_path_disabled(io_request);
2582 io_request->status = -EAGAIN;
2583 break;
2584 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2585 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2586 io_request->error_info = ctrl_info->error_buffer +
2587 (get_unaligned_le16(&response->error_index) *
2588 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2589 pqi_process_io_error(response->header.iu_type,
2590 io_request);
2591 break;
2592 default:
2593 dev_err(&ctrl_info->pci_dev->dev,
2594 "unexpected IU type: 0x%x\n",
2595 response->header.iu_type);
2596 break;
2597 }
2598
2599 io_request->io_complete_callback(io_request,
2600 io_request->context);
2601
2602 /*
2603 * Note that the I/O request structure CANNOT BE TOUCHED after
2604 * returning from the I/O completion callback!
2605 */
2606
2607 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2608 }
2609
2610 if (num_responses) {
2611 queue_group->oq_ci_copy = oq_ci;
2612 writel(oq_ci, queue_group->oq_ci);
2613 }
2614
2615 return num_responses;
2616}
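/*
 * Added note: the request_id in each response is the index of the
 * originating entry in ctrl_info->io_request_pool, and error_index in an
 * error response selects the matching element of the controller error
 * buffer; once the completion callback has been invoked the io_request
 * must no longer be touched, as noted above.
 */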
2617
2618static inline unsigned int pqi_num_elements_free(unsigned int pi,
df7a1fcf 2619 unsigned int ci, unsigned int elements_in_queue)
2620{
2621 unsigned int num_elements_used;
2622
2623 if (pi >= ci)
2624 num_elements_used = pi - ci;
2625 else
2626 num_elements_used = elements_in_queue - ci + pi;
2627
2628 return elements_in_queue - num_elements_used - 1;
2629}
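/*
 * Worked example (added commentary): for a queue of 8 elements with
 * pi = 2 and ci = 6, 8 - 6 + 2 = 4 elements are in use and
 * 8 - 4 - 1 = 3 are free; one slot is always left empty so that a full
 * queue can be distinguished from an empty one.
 */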
2630
98f87667 2631static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
2632 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2633{
2634 pqi_index_t iq_pi;
2635 pqi_index_t iq_ci;
2636 unsigned long flags;
2637 void *next_element;
2638 struct pqi_queue_group *queue_group;
2639
2640 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2641 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2642
2643 while (1) {
2644 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2645
2646 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2647 iq_ci = *queue_group->iq_ci[RAID_PATH];
2648
2649 if (pqi_num_elements_free(iq_pi, iq_ci,
2650 ctrl_info->num_elements_per_iq))
2651 break;
2652
2653 spin_unlock_irqrestore(
2654 &queue_group->submit_lock[RAID_PATH], flags);
2655
98f87667 2656 if (pqi_ctrl_offline(ctrl_info))
6c223761 2657 return;
2658 }
2659
2660 next_element = queue_group->iq_element_array[RAID_PATH] +
2661 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2662
2663 memcpy(next_element, iu, iu_length);
2664
2665 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
2666 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2667
2668 /*
2669 * This write notifies the controller that an IU is available to be
2670 * processed.
2671 */
2672 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2673
2674 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
2675}
2676
2677static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2678 struct pqi_event *event)
2679{
2680 struct pqi_event_acknowledge_request request;
2681
2682 memset(&request, 0, sizeof(request));
2683
2684 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2685 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2686 &request.header.iu_length);
2687 request.event_type = event->event_type;
2688 request.event_id = event->event_id;
2689 request.additional_event_id = event->additional_event_id;
2690
98f87667 2691 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
2692}
2693
2694static void pqi_event_worker(struct work_struct *work)
2695{
2696 unsigned int i;
2697 struct pqi_ctrl_info *ctrl_info;
6a50d6ad 2698 struct pqi_event *event;
2699
2700 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2701
2702 pqi_ctrl_busy(ctrl_info);
2703 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
2704
6a50d6ad 2705 event = ctrl_info->events;
6c223761 2706 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
2707 if (event->pending) {
2708 event->pending = false;
2709 pqi_acknowledge_event(ctrl_info, event);
6c223761 2710 }
6a50d6ad 2711 event++;
2712 }
2713
2714 pqi_ctrl_unbusy(ctrl_info);
2715
2716 pqi_schedule_rescan_worker(ctrl_info);
2717}
2718
2719static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
2720{
2721 unsigned int i;
2722 unsigned int path;
2723 struct pqi_queue_group *queue_group;
2724 unsigned long flags;
2725 struct pqi_io_request *io_request;
2726 struct pqi_io_request *next;
2727 struct scsi_cmnd *scmd;
2728
2729 ctrl_info->controller_online = false;
2730 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
5b0fba0f 2731 sis_shutdown_ctrl(ctrl_info);
2732
2733 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2734 queue_group = &ctrl_info->queue_groups[i];
2735
2736 for (path = 0; path < 2; path++) {
2737 spin_lock_irqsave(
2738 &queue_group->submit_lock[path], flags);
2739
2740 list_for_each_entry_safe(io_request, next,
2741 &queue_group->request_list[path],
2742 request_list_entry) {
2743
2744 scmd = io_request->scmd;
2745 if (scmd) {
2746 set_host_byte(scmd, DID_NO_CONNECT);
2747 pqi_scsi_done(scmd);
2748 }
2749
2750 list_del(&io_request->request_list_entry);
2751 }
2752
2753 spin_unlock_irqrestore(
2754 &queue_group->submit_lock[path], flags);
2755 }
2756 }
2757}
2758
98f87667 2759#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
2760
2761static void pqi_heartbeat_timer_handler(unsigned long data)
2762{
2763 int num_interrupts;
98f87667 2764 u32 heartbeat_count;
2765 struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2766
2767 pqi_check_ctrl_health(ctrl_info);
2768 if (pqi_ctrl_offline(ctrl_info))
2769 return;
2770
6c223761 2771 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
98f87667 2772 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
2773
2774 if (num_interrupts == ctrl_info->previous_num_interrupts) {
2775 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
2776 dev_err(&ctrl_info->pci_dev->dev,
2777 "no heartbeat detected - last heartbeat count: %u\n",
2778 heartbeat_count);
2779 pqi_take_ctrl_offline(ctrl_info);
2780 return;
2781 }
6c223761 2782 } else {
98f87667 2783 ctrl_info->previous_num_interrupts = num_interrupts;
2784 }
2785
98f87667 2786 ctrl_info->previous_heartbeat_count = heartbeat_count;
2787 mod_timer(&ctrl_info->heartbeat_timer,
2788 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2789}
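/*
 * Added note: the handler above consults the firmware heartbeat counter
 * only when no interrupts have arrived since the previous timer tick, and
 * the controller is taken offline only if both the interrupt count and
 * the heartbeat count are unchanged across the 10-second interval.
 */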
2790
2791static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2792{
2793 if (!ctrl_info->heartbeat_counter)
2794 return;
2795
2796 ctrl_info->previous_num_interrupts =
2797 atomic_read(&ctrl_info->num_interrupts);
2798 ctrl_info->previous_heartbeat_count =
2799 pqi_read_heartbeat_counter(ctrl_info);
2800
2801 ctrl_info->heartbeat_timer.expires =
2802 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2803 ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2804 ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
061ef06a 2805 add_timer(&ctrl_info->heartbeat_timer);
2806}
2807
2808static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2809{
98f87667 2810 del_timer_sync(&ctrl_info->heartbeat_timer);
2811}
2812
6a50d6ad 2813static inline int pqi_event_type_to_event_index(unsigned int event_type)
2814{
2815 int index;
2816
2817 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
2818 if (event_type == pqi_supported_event_types[index])
2819 return index;
2820
2821 return -1;
2822}
2823
2824static inline bool pqi_is_supported_event(unsigned int event_type)
2825{
2826 return pqi_event_type_to_event_index(event_type) != -1;
2827}
2828
2829static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2830{
2831 unsigned int num_events;
2832 pqi_index_t oq_pi;
2833 pqi_index_t oq_ci;
2834 struct pqi_event_queue *event_queue;
2835 struct pqi_event_response *response;
6a50d6ad 2836 struct pqi_event *event;
2837 int event_index;
2838
2839 event_queue = &ctrl_info->event_queue;
2840 num_events = 0;
2841 oq_ci = event_queue->oq_ci_copy;
2842
2843 while (1) {
2844 oq_pi = *event_queue->oq_pi;
2845 if (oq_pi == oq_ci)
2846 break;
2847
2848 num_events++;
2849 response = event_queue->oq_element_array +
2850 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2851
2852 event_index =
2853 pqi_event_type_to_event_index(response->event_type);
2854
2855 if (event_index >= 0) {
2856 if (response->request_acknowlege) {
2857 event = &ctrl_info->events[event_index];
2858 event->pending = true;
2859 event->event_type = response->event_type;
2860 event->event_id = response->event_id;
2861 event->additional_event_id =
6c223761 2862 response->additional_event_id;
2863 }
2864 }
2865
2866 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2867 }
2868
2869 if (num_events) {
2870 event_queue->oq_ci_copy = oq_ci;
2871 writel(oq_ci, event_queue->oq_ci);
98f87667 2872 schedule_work(&ctrl_info->event_work);
2873 }
2874
2875 return num_events;
2876}
2877
2878#define PQI_LEGACY_INTX_MASK 0x1
2879
2880static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
2881 bool enable_intx)
2882{
2883 u32 intx_mask;
2884 struct pqi_device_registers __iomem *pqi_registers;
2885 volatile void __iomem *register_addr;
2886
2887 pqi_registers = ctrl_info->pqi_registers;
2888
2889 if (enable_intx)
2890 register_addr = &pqi_registers->legacy_intx_mask_clear;
2891 else
2892 register_addr = &pqi_registers->legacy_intx_mask_set;
2893
2894 intx_mask = readl(register_addr);
2895 intx_mask |= PQI_LEGACY_INTX_MASK;
2896 writel(intx_mask, register_addr);
2897}
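/*
 * Added note: the same PQI_LEGACY_INTX_MASK bit is written either to the
 * mask-clear register (to enable legacy INTx) or to the mask-set register
 * (to disable it); only the destination register differs.
 */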
2898
2899static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
2900 enum pqi_irq_mode new_mode)
2901{
2902 switch (ctrl_info->irq_mode) {
2903 case IRQ_MODE_MSIX:
2904 switch (new_mode) {
2905 case IRQ_MODE_MSIX:
2906 break;
2907 case IRQ_MODE_INTX:
2908 pqi_configure_legacy_intx(ctrl_info, true);
2909 sis_disable_msix(ctrl_info);
2910 sis_enable_intx(ctrl_info);
2911 break;
2912 case IRQ_MODE_NONE:
2913 sis_disable_msix(ctrl_info);
2914 break;
2915 }
2916 break;
2917 case IRQ_MODE_INTX:
2918 switch (new_mode) {
2919 case IRQ_MODE_MSIX:
2920 pqi_configure_legacy_intx(ctrl_info, false);
2921 sis_disable_intx(ctrl_info);
2922 sis_enable_msix(ctrl_info);
2923 break;
2924 case IRQ_MODE_INTX:
2925 break;
2926 case IRQ_MODE_NONE:
2927 pqi_configure_legacy_intx(ctrl_info, false);
2928 sis_disable_intx(ctrl_info);
2929 break;
2930 }
2931 break;
2932 case IRQ_MODE_NONE:
2933 switch (new_mode) {
2934 case IRQ_MODE_MSIX:
2935 sis_enable_msix(ctrl_info);
2936 break;
2937 case IRQ_MODE_INTX:
2938 pqi_configure_legacy_intx(ctrl_info, true);
2939 sis_enable_intx(ctrl_info);
2940 break;
2941 case IRQ_MODE_NONE:
2942 break;
2943 }
2944 break;
2945 }
2946
2947 ctrl_info->irq_mode = new_mode;
2948}
2949
2950#define PQI_LEGACY_INTX_PENDING 0x1
2951
2952static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
2953{
2954 bool valid_irq;
2955 u32 intx_status;
2956
2957 switch (ctrl_info->irq_mode) {
2958 case IRQ_MODE_MSIX:
2959 valid_irq = true;
2960 break;
2961 case IRQ_MODE_INTX:
2962 intx_status =
2963 readl(&ctrl_info->pqi_registers->legacy_intx_status);
2964 if (intx_status & PQI_LEGACY_INTX_PENDING)
2965 valid_irq = true;
2966 else
2967 valid_irq = false;
2968 break;
2969 case IRQ_MODE_NONE:
2970 default:
2971 valid_irq = false;
2972 break;
2973 }
2974
2975 return valid_irq;
2976}
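/*
 * Added note (assumption): in INTx mode the interrupt line may be shared
 * with other devices, so the pending bit in legacy_intx_status is checked
 * before the interrupt is claimed; MSI-X vectors are exclusive, so every
 * MSI-X interrupt is treated as valid.
 */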
2977
2978static irqreturn_t pqi_irq_handler(int irq, void *data)
2979{
2980 struct pqi_ctrl_info *ctrl_info;
2981 struct pqi_queue_group *queue_group;
2982 unsigned int num_responses_handled;
2983
2984 queue_group = data;
2985 ctrl_info = queue_group->ctrl_info;
2986
061ef06a 2987 if (!pqi_is_valid_irq(ctrl_info))
2988 return IRQ_NONE;
2989
2990 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
2991
2992 if (irq == ctrl_info->event_irq)
2993 num_responses_handled += pqi_process_event_intr(ctrl_info);
2994
2995 if (num_responses_handled)
2996 atomic_inc(&ctrl_info->num_interrupts);
2997
2998 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
2999 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3000
3001 return IRQ_HANDLED;
3002}
3003
3004static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3005{
d91d7820 3006 struct pci_dev *pci_dev = ctrl_info->pci_dev;
3007 int i;
3008 int rc;
3009
d91d7820 3010 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
3011
3012 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
d91d7820 3013 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
52198226 3014 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
6c223761 3015 if (rc) {
d91d7820 3016 dev_err(&pci_dev->dev,
6c223761 3017 "irq %u init failed with error %d\n",
d91d7820 3018 pci_irq_vector(pci_dev, i), rc);
3019 return rc;
3020 }
3021 ctrl_info->num_msix_vectors_initialized++;
3022 }
3023
3024 return 0;
3025}
3026
3027static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3028{
3029 int i;
3030
3031 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3032 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3033 &ctrl_info->queue_groups[i]);
3034
3035 ctrl_info->num_msix_vectors_initialized = 0;
3036}
3037
3038static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3039{
98bf061b 3040 int num_vectors_enabled;
3041
98bf061b 3042 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
3043 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3044 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
98bf061b 3045 if (num_vectors_enabled < 0) {
6c223761 3046 dev_err(&ctrl_info->pci_dev->dev,
3047 "MSI-X init failed with error %d\n",
3048 num_vectors_enabled);
3049 return num_vectors_enabled;
3050 }
3051
98bf061b 3052 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
061ef06a 3053 ctrl_info->irq_mode = IRQ_MODE_MSIX;
3054 return 0;
3055}
3056
3057static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3058{
3059 if (ctrl_info->num_msix_vectors_enabled) {
3060 pci_free_irq_vectors(ctrl_info->pci_dev);
3061 ctrl_info->num_msix_vectors_enabled = 0;
3062 }
3063}
3064
3065static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3066{
3067 unsigned int i;
3068 size_t alloc_length;
3069 size_t element_array_length_per_iq;
3070 size_t element_array_length_per_oq;
3071 void *element_array;
3072 void *next_queue_index;
3073 void *aligned_pointer;
3074 unsigned int num_inbound_queues;
3075 unsigned int num_outbound_queues;
3076 unsigned int num_queue_indexes;
3077 struct pqi_queue_group *queue_group;
3078
3079 element_array_length_per_iq =
3080 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3081 ctrl_info->num_elements_per_iq;
3082 element_array_length_per_oq =
3083 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3084 ctrl_info->num_elements_per_oq;
3085 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3086 num_outbound_queues = ctrl_info->num_queue_groups;
3087 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3088
3089 aligned_pointer = NULL;
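	/*
	 * Added note: the loops below walk the queue layout starting from a
	 * NULL pointer purely to compute the total aligned size
	 * (alloc_length); the same walk is then repeated over the real DMA
	 * allocation to carve out the element arrays and queue indexes.
	 */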
3090
3091 for (i = 0; i < num_inbound_queues; i++) {
3092 aligned_pointer = PTR_ALIGN(aligned_pointer,
3093 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3094 aligned_pointer += element_array_length_per_iq;
3095 }
3096
3097 for (i = 0; i < num_outbound_queues; i++) {
3098 aligned_pointer = PTR_ALIGN(aligned_pointer,
3099 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3100 aligned_pointer += element_array_length_per_oq;
3101 }
3102
3103 aligned_pointer = PTR_ALIGN(aligned_pointer,
3104 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3105 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3106 PQI_EVENT_OQ_ELEMENT_LENGTH;
3107
3108 for (i = 0; i < num_queue_indexes; i++) {
3109 aligned_pointer = PTR_ALIGN(aligned_pointer,
3110 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3111 aligned_pointer += sizeof(pqi_index_t);
3112 }
3113
3114 alloc_length = (size_t)aligned_pointer +
3115 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3116
3117 alloc_length += PQI_EXTRA_SGL_MEMORY;
3118
3119 ctrl_info->queue_memory_base =
3120 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3121 alloc_length,
3122 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3123
d87d5474 3124 if (!ctrl_info->queue_memory_base)
6c223761 3125 return -ENOMEM;
3126
3127 ctrl_info->queue_memory_length = alloc_length;
3128
3129 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3130 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3131
3132 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3133 queue_group = &ctrl_info->queue_groups[i];
3134 queue_group->iq_element_array[RAID_PATH] = element_array;
3135 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3136 ctrl_info->queue_memory_base_dma_handle +
3137 (element_array - ctrl_info->queue_memory_base);
3138 element_array += element_array_length_per_iq;
3139 element_array = PTR_ALIGN(element_array,
3140 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3141 queue_group->iq_element_array[AIO_PATH] = element_array;
3142 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3143 ctrl_info->queue_memory_base_dma_handle +
3144 (element_array - ctrl_info->queue_memory_base);
3145 element_array += element_array_length_per_iq;
3146 element_array = PTR_ALIGN(element_array,
3147 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3148 }
3149
3150 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3151 queue_group = &ctrl_info->queue_groups[i];
3152 queue_group->oq_element_array = element_array;
3153 queue_group->oq_element_array_bus_addr =
3154 ctrl_info->queue_memory_base_dma_handle +
3155 (element_array - ctrl_info->queue_memory_base);
3156 element_array += element_array_length_per_oq;
3157 element_array = PTR_ALIGN(element_array,
3158 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3159 }
3160
3161 ctrl_info->event_queue.oq_element_array = element_array;
3162 ctrl_info->event_queue.oq_element_array_bus_addr =
3163 ctrl_info->queue_memory_base_dma_handle +
3164 (element_array - ctrl_info->queue_memory_base);
3165 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3166 PQI_EVENT_OQ_ELEMENT_LENGTH;
3167
3168 next_queue_index = PTR_ALIGN(element_array,
3169 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3170
3171 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3172 queue_group = &ctrl_info->queue_groups[i];
3173 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3174 queue_group->iq_ci_bus_addr[RAID_PATH] =
3175 ctrl_info->queue_memory_base_dma_handle +
3176 (next_queue_index - ctrl_info->queue_memory_base);
3177 next_queue_index += sizeof(pqi_index_t);
3178 next_queue_index = PTR_ALIGN(next_queue_index,
3179 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3180 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3181 queue_group->iq_ci_bus_addr[AIO_PATH] =
3182 ctrl_info->queue_memory_base_dma_handle +
3183 (next_queue_index - ctrl_info->queue_memory_base);
3184 next_queue_index += sizeof(pqi_index_t);
3185 next_queue_index = PTR_ALIGN(next_queue_index,
3186 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3187 queue_group->oq_pi = next_queue_index;
3188 queue_group->oq_pi_bus_addr =
3189 ctrl_info->queue_memory_base_dma_handle +
3190 (next_queue_index - ctrl_info->queue_memory_base);
3191 next_queue_index += sizeof(pqi_index_t);
3192 next_queue_index = PTR_ALIGN(next_queue_index,
3193 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3194 }
3195
3196 ctrl_info->event_queue.oq_pi = next_queue_index;
3197 ctrl_info->event_queue.oq_pi_bus_addr =
3198 ctrl_info->queue_memory_base_dma_handle +
3199 (next_queue_index - ctrl_info->queue_memory_base);
3200
3201 return 0;
3202}
3203
3204static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3205{
3206 unsigned int i;
3207 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3208 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3209
3210 /*
3211 * Initialize the backpointers to the controller structure in
3212 * each operational queue group structure.
3213 */
3214 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3215 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3216
3217 /*
3218 * Assign IDs to all operational queues. Note that the IDs
3219 * assigned to operational IQs are independent of the IDs
3220 * assigned to operational OQs.
3221 */
3222 ctrl_info->event_queue.oq_id = next_oq_id++;
3223 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3224 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3225 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3226 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3227 }
3228
3229 /*
3230 * Assign MSI-X table entry indexes to all queues. Note that the
3231 * interrupt for the event queue is shared with the first queue group.
3232 */
3233 ctrl_info->event_queue.int_msg_num = 0;
3234 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3235 ctrl_info->queue_groups[i].int_msg_num = i;
3236
3237 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3238 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3239 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3240 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3241 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3242 }
3243}
3244
3245static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3246{
3247 size_t alloc_length;
3248 struct pqi_admin_queues_aligned *admin_queues_aligned;
3249 struct pqi_admin_queues *admin_queues;
3250
3251 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3252 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3253
3254 ctrl_info->admin_queue_memory_base =
3255 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3256 alloc_length,
3257 &ctrl_info->admin_queue_memory_base_dma_handle,
3258 GFP_KERNEL);
3259
3260 if (!ctrl_info->admin_queue_memory_base)
3261 return -ENOMEM;
3262
3263 ctrl_info->admin_queue_memory_length = alloc_length;
3264
3265 admin_queues = &ctrl_info->admin_queues;
3266 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3267 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3268 admin_queues->iq_element_array =
3269 &admin_queues_aligned->iq_element_array;
3270 admin_queues->oq_element_array =
3271 &admin_queues_aligned->oq_element_array;
3272 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3273 admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3274
3275 admin_queues->iq_element_array_bus_addr =
3276 ctrl_info->admin_queue_memory_base_dma_handle +
3277 (admin_queues->iq_element_array -
3278 ctrl_info->admin_queue_memory_base);
3279 admin_queues->oq_element_array_bus_addr =
3280 ctrl_info->admin_queue_memory_base_dma_handle +
3281 (admin_queues->oq_element_array -
3282 ctrl_info->admin_queue_memory_base);
3283 admin_queues->iq_ci_bus_addr =
3284 ctrl_info->admin_queue_memory_base_dma_handle +
3285 ((void *)admin_queues->iq_ci -
3286 ctrl_info->admin_queue_memory_base);
3287 admin_queues->oq_pi_bus_addr =
3288 ctrl_info->admin_queue_memory_base_dma_handle +
3289 ((void *)admin_queues->oq_pi -
3290 ctrl_info->admin_queue_memory_base);
3291
3292 return 0;
3293}
3294
3295#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3296#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3297
3298static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3299{
3300 struct pqi_device_registers __iomem *pqi_registers;
3301 struct pqi_admin_queues *admin_queues;
3302 unsigned long timeout;
3303 u8 status;
3304 u32 reg;
3305
3306 pqi_registers = ctrl_info->pqi_registers;
3307 admin_queues = &ctrl_info->admin_queues;
3308
3309 writeq((u64)admin_queues->iq_element_array_bus_addr,
3310 &pqi_registers->admin_iq_element_array_addr);
3311 writeq((u64)admin_queues->oq_element_array_bus_addr,
3312 &pqi_registers->admin_oq_element_array_addr);
3313 writeq((u64)admin_queues->iq_ci_bus_addr,
3314 &pqi_registers->admin_iq_ci_addr);
3315 writeq((u64)admin_queues->oq_pi_bus_addr,
3316 &pqi_registers->admin_oq_pi_addr);
3317
3318 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3319 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3320 (admin_queues->int_msg_num << 16);
3321 writel(reg, &pqi_registers->admin_iq_num_elements);
3322 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3323 &pqi_registers->function_and_status_code);
3324
3325 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3326 while (1) {
3327 status = readb(&pqi_registers->function_and_status_code);
3328 if (status == PQI_STATUS_IDLE)
3329 break;
3330 if (time_after(jiffies, timeout))
3331 return -ETIMEDOUT;
3332 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3333 }
3334
3335 /*
3336 * The offset registers are not initialized to the correct
3337 * offsets until *after* the create admin queue pair command
3338 * completes successfully.
3339 */
3340 admin_queues->iq_pi = ctrl_info->iomem_base +
3341 PQI_DEVICE_REGISTERS_OFFSET +
3342 readq(&pqi_registers->admin_iq_pi_offset);
3343 admin_queues->oq_ci = ctrl_info->iomem_base +
3344 PQI_DEVICE_REGISTERS_OFFSET +
3345 readq(&pqi_registers->admin_oq_ci_offset);
3346
3347 return 0;
3348}
3349
3350static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3351 struct pqi_general_admin_request *request)
3352{
3353 struct pqi_admin_queues *admin_queues;
3354 void *next_element;
3355 pqi_index_t iq_pi;
3356
3357 admin_queues = &ctrl_info->admin_queues;
3358 iq_pi = admin_queues->iq_pi_copy;
3359
3360 next_element = admin_queues->iq_element_array +
3361 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3362
3363 memcpy(next_element, request, sizeof(*request));
3364
3365 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3366 admin_queues->iq_pi_copy = iq_pi;
3367
3368 /*
3369 * This write notifies the controller that an IU is available to be
3370 * processed.
3371 */
3372 writel(iq_pi, admin_queues->iq_pi);
3373}
3374
3375static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3376 struct pqi_general_admin_response *response)
3377{
3378 struct pqi_admin_queues *admin_queues;
3379 pqi_index_t oq_pi;
3380 pqi_index_t oq_ci;
3381 unsigned long timeout;
3382
3383 admin_queues = &ctrl_info->admin_queues;
3384 oq_ci = admin_queues->oq_ci_copy;
3385
3386 timeout = (3 * HZ) + jiffies;
3387
3388 while (1) {
3389 oq_pi = *admin_queues->oq_pi;
3390 if (oq_pi != oq_ci)
3391 break;
3392 if (time_after(jiffies, timeout)) {
3393 dev_err(&ctrl_info->pci_dev->dev,
3394 "timed out waiting for admin response\n");
3395 return -ETIMEDOUT;
3396 }
3397 usleep_range(1000, 2000);
3398 }
3399
3400 memcpy(response, admin_queues->oq_element_array +
3401 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3402
3403 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3404 admin_queues->oq_ci_copy = oq_ci;
3405 writel(oq_ci, admin_queues->oq_ci);
3406
3407 return 0;
3408}
3409
3410static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3411 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3412 struct pqi_io_request *io_request)
3413{
3414 struct pqi_io_request *next;
3415 void *next_element;
3416 pqi_index_t iq_pi;
3417 pqi_index_t iq_ci;
3418 size_t iu_length;
3419 unsigned long flags;
3420 unsigned int num_elements_needed;
3421 unsigned int num_elements_to_end_of_queue;
3422 size_t copy_count;
3423 struct pqi_iu_header *request;
3424
3425 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3426
3427 if (io_request)
3428 list_add_tail(&io_request->request_list_entry,
3429 &queue_group->request_list[path]);
3430
3431 iq_pi = queue_group->iq_pi_copy[path];
3432
3433 list_for_each_entry_safe(io_request, next,
3434 &queue_group->request_list[path], request_list_entry) {
3435
3436 request = io_request->iu;
3437
3438 iu_length = get_unaligned_le16(&request->iu_length) +
3439 PQI_REQUEST_HEADER_LENGTH;
3440 num_elements_needed =
3441 DIV_ROUND_UP(iu_length,
3442 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3443
3444 iq_ci = *queue_group->iq_ci[path];
3445
3446 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3447 ctrl_info->num_elements_per_iq))
3448 break;
3449
3450 put_unaligned_le16(queue_group->oq_id,
3451 &request->response_queue_id);
3452
3453 next_element = queue_group->iq_element_array[path] +
3454 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3455
3456 num_elements_to_end_of_queue =
3457 ctrl_info->num_elements_per_iq - iq_pi;
3458
3459 if (num_elements_needed <= num_elements_to_end_of_queue) {
3460 memcpy(next_element, request, iu_length);
3461 } else {
3462 copy_count = num_elements_to_end_of_queue *
3463 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3464 memcpy(next_element, request, copy_count);
3465 memcpy(queue_group->iq_element_array[path],
3466 (u8 *)request + copy_count,
3467 iu_length - copy_count);
3468 }
3469
3470 iq_pi = (iq_pi + num_elements_needed) %
3471 ctrl_info->num_elements_per_iq;
3472
3473 list_del(&io_request->request_list_entry);
3474 }
3475
3476 if (iq_pi != queue_group->iq_pi_copy[path]) {
3477 queue_group->iq_pi_copy[path] = iq_pi;
3478 /*
3479 * This write notifies the controller that one or more IUs are
3480 * available to be processed.
3481 */
3482 writel(iq_pi, queue_group->iq_pi[path]);
3483 }
3484
3485 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3486}
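/*
 * Added note: when an IU needs more elements than remain before the end
 * of the element array, pqi_start_io() copies it in two pieces so that it
 * wraps to the start of the array; this relies on the inbound spanning
 * capability that pqi_validate_device_capability() requires.
 */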
3487
3488#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
3489
3490static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3491 struct completion *wait)
3492{
3493 int rc;
3494
3495 while (1) {
3496 if (wait_for_completion_io_timeout(wait,
3497 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
3498 rc = 0;
3499 break;
3500 }
3501
3502 pqi_check_ctrl_health(ctrl_info);
3503 if (pqi_ctrl_offline(ctrl_info)) {
3504 rc = -ENXIO;
3505 break;
3506 }
3507 }
3508
3509 return rc;
3510}
3511
3512static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3513 void *context)
3514{
3515 struct completion *waiting = context;
3516
3517 complete(waiting);
3518}
3519
3520static int pqi_submit_raid_request_synchronous_with_io_request(
3521 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3522 unsigned long timeout_msecs)
3523{
3524 int rc = 0;
3525 DECLARE_COMPLETION_ONSTACK(wait);
3526
3527 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3528 io_request->context = &wait;
3529
3530 pqi_start_io(ctrl_info,
3531 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3532 io_request);
3533
3534 if (timeout_msecs == NO_TIMEOUT) {
1f37e992 3535 pqi_wait_for_completion_io(ctrl_info, &wait);
3536 } else {
3537 if (!wait_for_completion_io_timeout(&wait,
3538 msecs_to_jiffies(timeout_msecs))) {
3539 dev_warn(&ctrl_info->pci_dev->dev,
3540 "command timed out\n");
3541 rc = -ETIMEDOUT;
3542 }
3543 }
3544
3545 return rc;
3546}
3547
3548static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3549 struct pqi_iu_header *request, unsigned int flags,
3550 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3551{
3552 int rc;
3553 struct pqi_io_request *io_request;
3554 unsigned long start_jiffies;
3555 unsigned long msecs_blocked;
3556 size_t iu_length;
3557
3558 /*
3559	 * Note that PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3560	 * are mutually exclusive.
3561 */
3562
3563 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3564 if (down_interruptible(&ctrl_info->sync_request_sem))
3565 return -ERESTARTSYS;
3566 } else {
3567 if (timeout_msecs == NO_TIMEOUT) {
3568 down(&ctrl_info->sync_request_sem);
3569 } else {
3570 start_jiffies = jiffies;
3571 if (down_timeout(&ctrl_info->sync_request_sem,
3572 msecs_to_jiffies(timeout_msecs)))
3573 return -ETIMEDOUT;
3574 msecs_blocked =
3575 jiffies_to_msecs(jiffies - start_jiffies);
3576 if (msecs_blocked >= timeout_msecs)
3577 return -ETIMEDOUT;
3578 timeout_msecs -= msecs_blocked;
3579 }
3580 }
3581
3582 pqi_ctrl_busy(ctrl_info);
3583 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
3584 if (timeout_msecs == 0) {
3585 rc = -ETIMEDOUT;
3586 goto out;
3587 }
3588
3589 io_request = pqi_alloc_io_request(ctrl_info);
3590
3591 put_unaligned_le16(io_request->index,
3592 &(((struct pqi_raid_path_request *)request)->request_id));
3593
3594 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3595 ((struct pqi_raid_path_request *)request)->error_index =
3596 ((struct pqi_raid_path_request *)request)->request_id;
3597
3598 iu_length = get_unaligned_le16(&request->iu_length) +
3599 PQI_REQUEST_HEADER_LENGTH;
3600 memcpy(io_request->iu, request, iu_length);
3601
3602 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3603 io_request, timeout_msecs);
3604
3605 if (error_info) {
3606 if (io_request->error_info)
3607 memcpy(error_info, io_request->error_info,
3608 sizeof(*error_info));
3609 else
3610 memset(error_info, 0, sizeof(*error_info));
3611 } else if (rc == 0 && io_request->error_info) {
3612 u8 scsi_status;
3613 struct pqi_raid_error_info *raid_error_info;
3614
3615 raid_error_info = io_request->error_info;
3616 scsi_status = raid_error_info->status;
3617
3618 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3619 raid_error_info->data_out_result ==
3620 PQI_DATA_IN_OUT_UNDERFLOW)
3621 scsi_status = SAM_STAT_GOOD;
3622
3623 if (scsi_status != SAM_STAT_GOOD)
3624 rc = -EIO;
3625 }
3626
3627 pqi_free_io_request(io_request);
3628
3629out:
3630 pqi_ctrl_unbusy(ctrl_info);
3631 up(&ctrl_info->sync_request_sem);
3632
3633 return rc;
3634}
3635
3636static int pqi_validate_admin_response(
3637 struct pqi_general_admin_response *response, u8 expected_function_code)
3638{
3639 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3640 return -EINVAL;
3641
3642 if (get_unaligned_le16(&response->header.iu_length) !=
3643 PQI_GENERAL_ADMIN_IU_LENGTH)
3644 return -EINVAL;
3645
3646 if (response->function_code != expected_function_code)
3647 return -EINVAL;
3648
3649 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3650 return -EINVAL;
3651
3652 return 0;
3653}
3654
3655static int pqi_submit_admin_request_synchronous(
3656 struct pqi_ctrl_info *ctrl_info,
3657 struct pqi_general_admin_request *request,
3658 struct pqi_general_admin_response *response)
3659{
3660 int rc;
3661
3662 pqi_submit_admin_request(ctrl_info, request);
3663
3664 rc = pqi_poll_for_admin_response(ctrl_info, response);
3665
3666 if (rc == 0)
3667 rc = pqi_validate_admin_response(response,
3668 request->function_code);
3669
3670 return rc;
3671}
3672
3673static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3674{
3675 int rc;
3676 struct pqi_general_admin_request request;
3677 struct pqi_general_admin_response response;
3678 struct pqi_device_capability *capability;
3679 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3680
3681 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3682 if (!capability)
3683 return -ENOMEM;
3684
3685 memset(&request, 0, sizeof(request));
3686
3687 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3688 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3689 &request.header.iu_length);
3690 request.function_code =
3691 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3692 put_unaligned_le32(sizeof(*capability),
3693 &request.data.report_device_capability.buffer_length);
3694
3695 rc = pqi_map_single(ctrl_info->pci_dev,
3696 &request.data.report_device_capability.sg_descriptor,
3697 capability, sizeof(*capability),
3698 PCI_DMA_FROMDEVICE);
3699 if (rc)
3700 goto out;
3701
3702 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3703 &response);
3704
3705 pqi_pci_unmap(ctrl_info->pci_dev,
3706 &request.data.report_device_capability.sg_descriptor, 1,
3707 PCI_DMA_FROMDEVICE);
3708
3709 if (rc)
3710 goto out;
3711
3712 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3713 rc = -EIO;
3714 goto out;
3715 }
3716
3717 ctrl_info->max_inbound_queues =
3718 get_unaligned_le16(&capability->max_inbound_queues);
3719 ctrl_info->max_elements_per_iq =
3720 get_unaligned_le16(&capability->max_elements_per_iq);
3721 ctrl_info->max_iq_element_length =
3722 get_unaligned_le16(&capability->max_iq_element_length)
3723 * 16;
3724 ctrl_info->max_outbound_queues =
3725 get_unaligned_le16(&capability->max_outbound_queues);
3726 ctrl_info->max_elements_per_oq =
3727 get_unaligned_le16(&capability->max_elements_per_oq);
3728 ctrl_info->max_oq_element_length =
3729 get_unaligned_le16(&capability->max_oq_element_length)
3730 * 16;
3731
3732 sop_iu_layer_descriptor =
3733 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3734
3735 ctrl_info->max_inbound_iu_length_per_firmware =
3736 get_unaligned_le16(
3737 &sop_iu_layer_descriptor->max_inbound_iu_length);
3738 ctrl_info->inbound_spanning_supported =
3739 sop_iu_layer_descriptor->inbound_spanning_supported;
3740 ctrl_info->outbound_spanning_supported =
3741 sop_iu_layer_descriptor->outbound_spanning_supported;
3742
3743out:
3744 kfree(capability);
3745
3746 return rc;
3747}
3748
3749static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3750{
3751 if (ctrl_info->max_iq_element_length <
3752 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3753 dev_err(&ctrl_info->pci_dev->dev,
3754 "max. inbound queue element length of %d is less than the required length of %d\n",
3755 ctrl_info->max_iq_element_length,
3756 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3757 return -EINVAL;
3758 }
3759
3760 if (ctrl_info->max_oq_element_length <
3761 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3762 dev_err(&ctrl_info->pci_dev->dev,
3763 "max. outbound queue element length of %d is less than the required length of %d\n",
3764 ctrl_info->max_oq_element_length,
3765 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3766 return -EINVAL;
3767 }
3768
3769 if (ctrl_info->max_inbound_iu_length_per_firmware <
3770 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3771 dev_err(&ctrl_info->pci_dev->dev,
3772 "max. inbound IU length of %u is less than the min. required length of %d\n",
3773 ctrl_info->max_inbound_iu_length_per_firmware,
3774 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3775 return -EINVAL;
3776 }
3777
3778 if (!ctrl_info->inbound_spanning_supported) {
3779 dev_err(&ctrl_info->pci_dev->dev,
3780 "the controller does not support inbound spanning\n");
3781 return -EINVAL;
3782 }
3783
3784 if (ctrl_info->outbound_spanning_supported) {
3785 dev_err(&ctrl_info->pci_dev->dev,
3786 "the controller supports outbound spanning but this driver does not\n");
3787 return -EINVAL;
3788 }
3789
3790 return 0;
3791}
3792
3793static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3794 bool inbound_queue, u16 queue_id)
3795{
3796 struct pqi_general_admin_request request;
3797 struct pqi_general_admin_response response;
3798
3799 memset(&request, 0, sizeof(request));
3800 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3801 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3802 &request.header.iu_length);
3803 if (inbound_queue)
3804 request.function_code =
3805 PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3806 else
3807 request.function_code =
3808 PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3809 put_unaligned_le16(queue_id,
3810 &request.data.delete_operational_queue.queue_id);
3811
3812 return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3813 &response);
3814}
3815
3816static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3817{
3818 int rc;
3819 struct pqi_event_queue *event_queue;
3820 struct pqi_general_admin_request request;
3821 struct pqi_general_admin_response response;
3822
3823 event_queue = &ctrl_info->event_queue;
3824
3825 /*
3826 * Create OQ (Outbound Queue - device to host queue) to dedicate
3827 * to events.
3828 */
3829 memset(&request, 0, sizeof(request));
3830 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3831 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3832 &request.header.iu_length);
3833 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3834 put_unaligned_le16(event_queue->oq_id,
3835 &request.data.create_operational_oq.queue_id);
3836 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3837 &request.data.create_operational_oq.element_array_addr);
3838 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3839 &request.data.create_operational_oq.pi_addr);
3840 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3841 &request.data.create_operational_oq.num_elements);
3842 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3843 &request.data.create_operational_oq.element_length);
3844 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3845 put_unaligned_le16(event_queue->int_msg_num,
3846 &request.data.create_operational_oq.int_msg_num);
3847
3848 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3849 &response);
3850 if (rc)
3851 return rc;
3852
3853 event_queue->oq_ci = ctrl_info->iomem_base +
3854 PQI_DEVICE_REGISTERS_OFFSET +
3855 get_unaligned_le64(
3856 &response.data.create_operational_oq.oq_ci_offset);
3857
3858 return 0;
3859}
3860
3861static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
3862 unsigned int group_number)
3863{
3864 int rc;
3865 struct pqi_queue_group *queue_group;
3866 struct pqi_general_admin_request request;
3867 struct pqi_general_admin_response response;
3868
3869	queue_group = &ctrl_info->queue_groups[group_number];
3870
3871 /*
3872 * Create IQ (Inbound Queue - host to device queue) for
3873 * RAID path.
3874 */
3875 memset(&request, 0, sizeof(request));
3876 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3877 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3878 &request.header.iu_length);
3879 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3880 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3881 &request.data.create_operational_iq.queue_id);
3882 put_unaligned_le64(
3883 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3884 &request.data.create_operational_iq.element_array_addr);
3885 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3886 &request.data.create_operational_iq.ci_addr);
3887 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3888 &request.data.create_operational_iq.num_elements);
3889 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3890 &request.data.create_operational_iq.element_length);
3891 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3892
3893 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3894 &response);
3895 if (rc) {
3896 dev_err(&ctrl_info->pci_dev->dev,
3897 "error creating inbound RAID queue\n");
3898 return rc;
3899 }
3900
3901 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3902 PQI_DEVICE_REGISTERS_OFFSET +
3903 get_unaligned_le64(
3904 &response.data.create_operational_iq.iq_pi_offset);
3905
3906 /*
3907 * Create IQ (Inbound Queue - host to device queue) for
3908 * Advanced I/O (AIO) path.
3909 */
3910 memset(&request, 0, sizeof(request));
3911 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3912 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3913 &request.header.iu_length);
3914 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3915 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3916 &request.data.create_operational_iq.queue_id);
3917 put_unaligned_le64((u64)queue_group->
3918 iq_element_array_bus_addr[AIO_PATH],
3919 &request.data.create_operational_iq.element_array_addr);
3920 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
3921 &request.data.create_operational_iq.ci_addr);
3922 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3923 &request.data.create_operational_iq.num_elements);
3924 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3925 &request.data.create_operational_iq.element_length);
3926 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3927
3928 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3929 &response);
3930 if (rc) {
3931 dev_err(&ctrl_info->pci_dev->dev,
3932 "error creating inbound AIO queue\n");
3933 goto delete_inbound_queue_raid;
3934 }
3935
3936 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
3937 PQI_DEVICE_REGISTERS_OFFSET +
3938 get_unaligned_le64(
3939 &response.data.create_operational_iq.iq_pi_offset);
3940
3941 /*
3942 * Designate the 2nd IQ as the AIO path. By default, all IQs are
3943 * assumed to be for RAID path I/O unless we change the queue's
3944 * property.
3945 */
3946 memset(&request, 0, sizeof(request));
3947 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3948 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3949 &request.header.iu_length);
3950 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
3951 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3952 &request.data.change_operational_iq_properties.queue_id);
3953 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
3954 &request.data.change_operational_iq_properties.vendor_specific);
3955
3956 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3957 &response);
3958 if (rc) {
3959 dev_err(&ctrl_info->pci_dev->dev,
3960 "error changing queue property\n");
3961 goto delete_inbound_queue_aio;
3962 }
3963
3964 /*
3965 * Create OQ (Outbound Queue - device to host queue).
3966 */
3967 memset(&request, 0, sizeof(request));
3968 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3969 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3970 &request.header.iu_length);
3971 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3972 put_unaligned_le16(queue_group->oq_id,
3973 &request.data.create_operational_oq.queue_id);
3974 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
3975 &request.data.create_operational_oq.element_array_addr);
3976 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
3977 &request.data.create_operational_oq.pi_addr);
3978 put_unaligned_le16(ctrl_info->num_elements_per_oq,
3979 &request.data.create_operational_oq.num_elements);
3980 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
3981 &request.data.create_operational_oq.element_length);
3982 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3983 put_unaligned_le16(queue_group->int_msg_num,
3984 &request.data.create_operational_oq.int_msg_num);
3985
3986 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3987 &response);
3988 if (rc) {
3989 dev_err(&ctrl_info->pci_dev->dev,
3990 "error creating outbound queue\n");
3991 goto delete_inbound_queue_aio;
3992 }
3993
3994 queue_group->oq_ci = ctrl_info->iomem_base +
3995 PQI_DEVICE_REGISTERS_OFFSET +
3996 get_unaligned_le64(
3997 &response.data.create_operational_oq.oq_ci_offset);
3998
3999 return 0;
4000
4001delete_inbound_queue_aio:
4002 pqi_delete_operational_queue(ctrl_info, true,
4003 queue_group->iq_id[AIO_PATH]);
4004
4005delete_inbound_queue_raid:
4006 pqi_delete_operational_queue(ctrl_info, true,
4007 queue_group->iq_id[RAID_PATH]);
4008
4009 return rc;
4010}
4011
4012static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4013{
4014 int rc;
4015 unsigned int i;
4016
4017 rc = pqi_create_event_queue(ctrl_info);
4018 if (rc) {
4019 dev_err(&ctrl_info->pci_dev->dev,
4020 "error creating event queue\n");
4021 return rc;
4022 }
4023
4024 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4025		rc = pqi_create_queue_group(ctrl_info, i);
4026 if (rc) {
4027 dev_err(&ctrl_info->pci_dev->dev,
4028 "error creating queue group number %u/%u\n",
4029 i, ctrl_info->num_queue_groups);
4030 return rc;
4031 }
4032 }
4033
4034 return 0;
4035}
4036
4037#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4038 (offsetof(struct pqi_event_config, descriptors) + \
4039 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4040
4041static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4042 bool enable_events)
4043{
4044 int rc;
4045 unsigned int i;
4046 struct pqi_event_config *event_config;
4047	struct pqi_event_descriptor *event_descriptor;
4048 struct pqi_general_management_request request;
4049
4050 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4051 GFP_KERNEL);
4052 if (!event_config)
4053 return -ENOMEM;
4054
4055 memset(&request, 0, sizeof(request));
4056
4057 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4058 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4059 data.report_event_configuration.sg_descriptors[1]) -
4060 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4061 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4062 &request.data.report_event_configuration.buffer_length);
4063
4064 rc = pqi_map_single(ctrl_info->pci_dev,
4065 request.data.report_event_configuration.sg_descriptors,
4066 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4067 PCI_DMA_FROMDEVICE);
4068 if (rc)
4069 goto out;
4070
4071 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4072 0, NULL, NO_TIMEOUT);
4073
4074 pqi_pci_unmap(ctrl_info->pci_dev,
4075 request.data.report_event_configuration.sg_descriptors, 1,
4076 PCI_DMA_FROMDEVICE);
4077
4078 if (rc)
4079 goto out;
4080
4081 for (i = 0; i < event_config->num_event_descriptors; i++) {
4082 event_descriptor = &event_config->descriptors[i];
4083 if (enable_events &&
4084 pqi_is_supported_event(event_descriptor->event_type))
4085 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4086 &event_descriptor->oq_id);
4087 else
4088 put_unaligned_le16(0, &event_descriptor->oq_id);
4089 }
4090
4091 memset(&request, 0, sizeof(request));
4092
4093 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4094 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4095 data.report_event_configuration.sg_descriptors[1]) -
4096 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4097 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4098 &request.data.report_event_configuration.buffer_length);
4099
4100 rc = pqi_map_single(ctrl_info->pci_dev,
4101 request.data.report_event_configuration.sg_descriptors,
4102 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4103 PCI_DMA_TODEVICE);
4104 if (rc)
4105 goto out;
4106
4107 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4108 NULL, NO_TIMEOUT);
4109
4110 pqi_pci_unmap(ctrl_info->pci_dev,
4111 request.data.report_event_configuration.sg_descriptors, 1,
4112 PCI_DMA_TODEVICE);
4113
4114out:
4115 kfree(event_config);
4116
4117 return rc;
4118}
4119
4120static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4121{
4122 return pqi_configure_events(ctrl_info, true);
4123}
4124
4125static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4126{
4127 return pqi_configure_events(ctrl_info, false);
4128}
4129
4130static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4131{
4132 unsigned int i;
4133 struct device *dev;
4134 size_t sg_chain_buffer_length;
4135 struct pqi_io_request *io_request;
4136
4137 if (!ctrl_info->io_request_pool)
4138 return;
4139
4140 dev = &ctrl_info->pci_dev->dev;
4141 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4142 io_request = ctrl_info->io_request_pool;
4143
4144 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4145 kfree(io_request->iu);
4146 if (!io_request->sg_chain_buffer)
4147 break;
4148 dma_free_coherent(dev, sg_chain_buffer_length,
4149 io_request->sg_chain_buffer,
4150 io_request->sg_chain_buffer_dma_handle);
4151 io_request++;
4152 }
4153
4154 kfree(ctrl_info->io_request_pool);
4155 ctrl_info->io_request_pool = NULL;
4156}
4157
4158static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4159{
4160 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4161 ctrl_info->error_buffer_length,
4162 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4163
4164 if (!ctrl_info->error_buffer)
4165 return -ENOMEM;
4166
4167 return 0;
4168}
4169
4170static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4171{
4172 unsigned int i;
4173 void *sg_chain_buffer;
4174 size_t sg_chain_buffer_length;
4175 dma_addr_t sg_chain_buffer_dma_handle;
4176 struct device *dev;
4177 struct pqi_io_request *io_request;
4178
4179 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
4180 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4181
4182 if (!ctrl_info->io_request_pool) {
4183 dev_err(&ctrl_info->pci_dev->dev,
4184 "failed to allocate I/O request pool\n");
4185 goto error;
4186 }
4187
4188 dev = &ctrl_info->pci_dev->dev;
4189 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4190 io_request = ctrl_info->io_request_pool;
4191
4192 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4193 io_request->iu =
4194 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4195
4196 if (!io_request->iu) {
4197 dev_err(&ctrl_info->pci_dev->dev,
4198 "failed to allocate IU buffers\n");
4199 goto error;
4200 }
4201
4202 sg_chain_buffer = dma_alloc_coherent(dev,
4203 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4204 GFP_KERNEL);
4205
4206 if (!sg_chain_buffer) {
4207 dev_err(&ctrl_info->pci_dev->dev,
4208 "failed to allocate PQI scatter-gather chain buffers\n");
4209 goto error;
4210 }
4211
4212 io_request->index = i;
4213 io_request->sg_chain_buffer = sg_chain_buffer;
4214 io_request->sg_chain_buffer_dma_handle =
4215 sg_chain_buffer_dma_handle;
4216 io_request++;
4217 }
4218
4219 return 0;
4220
4221error:
4222 pqi_free_all_io_requests(ctrl_info);
4223
4224 return -ENOMEM;
4225}
4226
4227/*
4228 * Calculate required resources that are sized based on max. outstanding
4229 * requests and max. transfer size.
4230 */
4231
4232static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4233{
4234 u32 max_transfer_size;
4235 u32 max_sg_entries;
4236
4237 ctrl_info->scsi_ml_can_queue =
4238 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4239 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4240
4241 ctrl_info->error_buffer_length =
4242 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4243
4244 if (reset_devices)
4245 max_transfer_size = min(ctrl_info->max_transfer_size,
4246 PQI_MAX_TRANSFER_SIZE_KDUMP);
4247 else
4248 max_transfer_size = min(ctrl_info->max_transfer_size,
4249 PQI_MAX_TRANSFER_SIZE);
4250
4251 max_sg_entries = max_transfer_size / PAGE_SIZE;
4252
4253 /* +1 to cover when the buffer is not page-aligned. */
4254 max_sg_entries++;
4255
4256 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4257
4258 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
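	/*
	 * Worked example (for illustration, assuming a 4 KiB PAGE_SIZE and a
	 * 1 MiB transfer limit): 1 MiB / 4 KiB = 256 SG entries, plus one for
	 * a non-page-aligned buffer = 257; after clamping to the controller's
	 * SG limit, the usable transfer size is rounded back down to
	 * (max_sg_entries - 1) whole pages.
	 */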
4259
4260 ctrl_info->sg_chain_buffer_length =
4261 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4262 PQI_EXTRA_SGL_MEMORY;
4263 ctrl_info->sg_tablesize = max_sg_entries;
4264 ctrl_info->max_sectors = max_transfer_size / 512;
4265}
4266
4267static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4268{
4269 int num_queue_groups;
4270 u16 num_elements_per_iq;
4271 u16 num_elements_per_oq;
4272
4273 if (reset_devices) {
4274 num_queue_groups = 1;
4275 } else {
4276 int num_cpus;
4277 int max_queue_groups;
4278
4279 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4280 ctrl_info->max_outbound_queues - 1);
4281 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4282
4283 num_cpus = num_online_cpus();
4284 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4285 num_queue_groups = min(num_queue_groups, max_queue_groups);
4286 }
4287
4288 ctrl_info->num_queue_groups = num_queue_groups;
4289	ctrl_info->max_hw_queue_index = num_queue_groups - 1;
4290
4291 /*
4292 * Make sure that the max. inbound IU length is an even multiple
4293 * of our inbound element length.
4294 */
4295 ctrl_info->max_inbound_iu_length =
4296 (ctrl_info->max_inbound_iu_length_per_firmware /
4297 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4298 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4299
4300 num_elements_per_iq =
4301 (ctrl_info->max_inbound_iu_length /
4302 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4303
4304 /* Add one because one element in each queue is unusable. */
4305 num_elements_per_iq++;
4306
4307 num_elements_per_iq = min(num_elements_per_iq,
4308 ctrl_info->max_elements_per_iq);
4309
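	/*
	 * Each queue group has two inbound queues (RAID and AIO paths) that
	 * complete to a single outbound queue, so size the OQ to hold the
	 * completions of both IQs, again reserving one unusable element.
	 */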
4310 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4311 num_elements_per_oq = min(num_elements_per_oq,
4312 ctrl_info->max_elements_per_oq);
4313
4314 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4315 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4316
4317 ctrl_info->max_sg_per_iu =
4318 ((ctrl_info->max_inbound_iu_length -
4319 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4320 sizeof(struct pqi_sg_descriptor)) +
4321 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4322}
4323
4324static inline void pqi_set_sg_descriptor(
4325 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4326{
4327 u64 address = (u64)sg_dma_address(sg);
4328 unsigned int length = sg_dma_len(sg);
4329
4330 put_unaligned_le64(address, &sg_descriptor->address);
4331 put_unaligned_le32(length, &sg_descriptor->length);
4332 put_unaligned_le32(0, &sg_descriptor->flags);
4333}
4334
4335static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4336 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4337 struct pqi_io_request *io_request)
4338{
4339 int i;
4340 u16 iu_length;
4341 int sg_count;
4342 bool chained;
4343 unsigned int num_sg_in_iu;
4344 unsigned int max_sg_per_iu;
4345 struct scatterlist *sg;
4346 struct pqi_sg_descriptor *sg_descriptor;
4347
4348 sg_count = scsi_dma_map(scmd);
4349 if (sg_count < 0)
4350 return sg_count;
4351
4352 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4353 PQI_REQUEST_HEADER_LENGTH;
4354
4355 if (sg_count == 0)
4356 goto out;
4357
4358 sg = scsi_sglist(scmd);
4359 sg_descriptor = request->sg_descriptors;
4360 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4361 chained = false;
4362 num_sg_in_iu = 0;
4363 i = 0;
4364
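	/*
	 * Fill the embedded SG descriptors; if the command needs more than
	 * will fit in the IU, turn the last embedded slot into a
	 * CISS_SG_CHAIN descriptor pointing at this request's preallocated
	 * chain buffer and continue filling there.
	 */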
4365 while (1) {
4366 pqi_set_sg_descriptor(sg_descriptor, sg);
4367 if (!chained)
4368 num_sg_in_iu++;
4369 i++;
4370 if (i == sg_count)
4371 break;
4372 sg_descriptor++;
4373 if (i == max_sg_per_iu) {
4374 put_unaligned_le64(
4375 (u64)io_request->sg_chain_buffer_dma_handle,
4376 &sg_descriptor->address);
4377 put_unaligned_le32((sg_count - num_sg_in_iu)
4378 * sizeof(*sg_descriptor),
4379 &sg_descriptor->length);
4380 put_unaligned_le32(CISS_SG_CHAIN,
4381 &sg_descriptor->flags);
4382 chained = true;
4383 num_sg_in_iu++;
4384 sg_descriptor = io_request->sg_chain_buffer;
4385 }
4386 sg = sg_next(sg);
4387 }
4388
4389 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4390 request->partial = chained;
4391 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4392
4393out:
4394 put_unaligned_le16(iu_length, &request->header.iu_length);
4395
4396 return 0;
4397}
4398
4399static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4400 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4401 struct pqi_io_request *io_request)
4402{
4403 int i;
4404 u16 iu_length;
4405 int sg_count;
4406 bool chained;
4407 unsigned int num_sg_in_iu;
4408 unsigned int max_sg_per_iu;
4409 struct scatterlist *sg;
4410 struct pqi_sg_descriptor *sg_descriptor;
4411
4412 sg_count = scsi_dma_map(scmd);
4413 if (sg_count < 0)
4414 return sg_count;
4415
4416 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4417 PQI_REQUEST_HEADER_LENGTH;
4418 num_sg_in_iu = 0;
4419
4420 if (sg_count == 0)
4421 goto out;
4422
4423 sg = scsi_sglist(scmd);
4424 sg_descriptor = request->sg_descriptors;
4425 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4426 chained = false;
4427 i = 0;
4428
4429 while (1) {
4430 pqi_set_sg_descriptor(sg_descriptor, sg);
4431 if (!chained)
4432 num_sg_in_iu++;
4433 i++;
4434 if (i == sg_count)
4435 break;
4436 sg_descriptor++;
4437 if (i == max_sg_per_iu) {
4438 put_unaligned_le64(
4439 (u64)io_request->sg_chain_buffer_dma_handle,
4440 &sg_descriptor->address);
4441 put_unaligned_le32((sg_count - num_sg_in_iu)
4442 * sizeof(*sg_descriptor),
4443 &sg_descriptor->length);
4444 put_unaligned_le32(CISS_SG_CHAIN,
4445 &sg_descriptor->flags);
4446 chained = true;
4447 num_sg_in_iu++;
4448 sg_descriptor = io_request->sg_chain_buffer;
4449		}
4450		sg = sg_next(sg);
4451 }
4452
4453 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4454 request->partial = chained;
4455	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4456
4457out:
4458 put_unaligned_le16(iu_length, &request->header.iu_length);
4459 request->num_sg_descriptors = num_sg_in_iu;
4460
4461 return 0;
4462}
4463
4464static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4465 void *context)
4466{
4467 struct scsi_cmnd *scmd;
4468
4469 scmd = io_request->scmd;
4470 pqi_free_io_request(io_request);
4471 scsi_dma_unmap(scmd);
4472 pqi_scsi_done(scmd);
4473}
4474
4475static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4476 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4477 struct pqi_queue_group *queue_group)
4478{
4479 int rc;
4480 size_t cdb_length;
4481 struct pqi_io_request *io_request;
4482 struct pqi_raid_path_request *request;
4483
4484 io_request = pqi_alloc_io_request(ctrl_info);
4485 io_request->io_complete_callback = pqi_raid_io_complete;
4486 io_request->scmd = scmd;
4487
4488 scmd->host_scribble = (unsigned char *)io_request;
4489
4490 request = io_request->iu;
4491 memset(request, 0,
4492 offsetof(struct pqi_raid_path_request, sg_descriptors));
4493
4494 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4495 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4496 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4497 put_unaligned_le16(io_request->index, &request->request_id);
4498 request->error_index = request->request_id;
4499 memcpy(request->lun_number, device->scsi3addr,
4500 sizeof(request->lun_number));
4501
4502 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4503 memcpy(request->cdb, scmd->cmnd, cdb_length);
4504
4505 switch (cdb_length) {
4506 case 6:
4507 case 10:
4508 case 12:
4509 case 16:
4510 /* No bytes in the Additional CDB bytes field */
4511 request->additional_cdb_bytes_usage =
4512 SOP_ADDITIONAL_CDB_BYTES_0;
4513 break;
4514 case 20:
4515 /* 4 bytes in the Additional cdb field */
4516 request->additional_cdb_bytes_usage =
4517 SOP_ADDITIONAL_CDB_BYTES_4;
4518 break;
4519 case 24:
4520 /* 8 bytes in the Additional cdb field */
4521 request->additional_cdb_bytes_usage =
4522 SOP_ADDITIONAL_CDB_BYTES_8;
4523 break;
4524 case 28:
4525 /* 12 bytes in the Additional cdb field */
4526 request->additional_cdb_bytes_usage =
4527 SOP_ADDITIONAL_CDB_BYTES_12;
4528 break;
4529 case 32:
4530 default:
4531 /* 16 bytes in the Additional cdb field */
4532 request->additional_cdb_bytes_usage =
4533 SOP_ADDITIONAL_CDB_BYTES_16;
4534 break;
4535 }
4536
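	/*
	 * The mapping below looks inverted relative to the host's DMA
	 * direction because the SOP flag describes the transfer from the
	 * controller's side: a host write (DMA_TO_DEVICE) is data the
	 * controller reads from host memory, and vice versa.
	 */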
4537 switch (scmd->sc_data_direction) {
4538 case DMA_TO_DEVICE:
4539 request->data_direction = SOP_READ_FLAG;
4540 break;
4541 case DMA_FROM_DEVICE:
4542 request->data_direction = SOP_WRITE_FLAG;
4543 break;
4544 case DMA_NONE:
4545 request->data_direction = SOP_NO_DIRECTION_FLAG;
4546 break;
4547 case DMA_BIDIRECTIONAL:
4548 request->data_direction = SOP_BIDIRECTIONAL;
4549 break;
4550 default:
4551 dev_err(&ctrl_info->pci_dev->dev,
4552 "unknown data direction: %d\n",
4553 scmd->sc_data_direction);
4554 break;
4555 }
4556
4557 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4558 if (rc) {
4559 pqi_free_io_request(io_request);
4560 return SCSI_MLQUEUE_HOST_BUSY;
4561 }
4562
4563 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4564
4565 return 0;
4566}
4567
4568static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4569 void *context)
4570{
4571 struct scsi_cmnd *scmd;
4572
4573 scmd = io_request->scmd;
4574 scsi_dma_unmap(scmd);
4575 if (io_request->status == -EAGAIN)
4576 set_host_byte(scmd, DID_IMM_RETRY);
4577 pqi_free_io_request(io_request);
4578 pqi_scsi_done(scmd);
4579}
4580
4581static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4582 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4583 struct pqi_queue_group *queue_group)
4584{
4585 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
4586 scmd->cmnd, scmd->cmd_len, queue_group, NULL);
4587}
4588
4589static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4590 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4591 unsigned int cdb_length, struct pqi_queue_group *queue_group,
4592 struct pqi_encryption_info *encryption_info)
4593{
4594 int rc;
4595 struct pqi_io_request *io_request;
4596 struct pqi_aio_path_request *request;
4597
4598 io_request = pqi_alloc_io_request(ctrl_info);
4599 io_request->io_complete_callback = pqi_aio_io_complete;
4600 io_request->scmd = scmd;
4601
4602 scmd->host_scribble = (unsigned char *)io_request;
4603
4604 request = io_request->iu;
4605 memset(request, 0,
4606 offsetof(struct pqi_raid_path_request, sg_descriptors));
4607
4608 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4609 put_unaligned_le32(aio_handle, &request->nexus_id);
4610 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4611 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4612 put_unaligned_le16(io_request->index, &request->request_id);
4613 request->error_index = request->request_id;
4614 if (cdb_length > sizeof(request->cdb))
4615 cdb_length = sizeof(request->cdb);
4616 request->cdb_length = cdb_length;
4617 memcpy(request->cdb, cdb, cdb_length);
4618
4619 switch (scmd->sc_data_direction) {
4620 case DMA_TO_DEVICE:
4621 request->data_direction = SOP_READ_FLAG;
4622 break;
4623 case DMA_FROM_DEVICE:
4624 request->data_direction = SOP_WRITE_FLAG;
4625 break;
4626 case DMA_NONE:
4627 request->data_direction = SOP_NO_DIRECTION_FLAG;
4628 break;
4629 case DMA_BIDIRECTIONAL:
4630 request->data_direction = SOP_BIDIRECTIONAL;
4631 break;
4632 default:
4633 dev_err(&ctrl_info->pci_dev->dev,
4634 "unknown data direction: %d\n",
4635 scmd->sc_data_direction);
4636 break;
4637 }
4638
4639 if (encryption_info) {
4640 request->encryption_enable = true;
4641 put_unaligned_le16(encryption_info->data_encryption_key_index,
4642 &request->data_encryption_key_index);
4643 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4644 &request->encrypt_tweak_lower);
4645 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4646 &request->encrypt_tweak_upper);
4647 }
4648
4649 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4650 if (rc) {
4651 pqi_free_io_request(io_request);
4652 return SCSI_MLQUEUE_HOST_BUSY;
4653 }
4654
4655 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4656
4657 return 0;
4658}
4659
4660static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
4661 struct scsi_cmnd *scmd)
4662{
4663 u16 hw_queue;
4664
4665 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4666 if (hw_queue > ctrl_info->max_hw_queue_index)
4667 hw_queue = 0;
4668
4669 return hw_queue;
4670}
4671
4672/*
4673 * This function gets called just before we hand the completed SCSI request
4674 * back to the SML.
4675 */
4676
4677void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
4678{
4679 struct pqi_scsi_dev *device;
4680
4681 device = scmd->device->hostdata;
4682 atomic_dec(&device->scsi_cmds_outstanding);
4683}
4684
4685static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4686	struct scsi_cmnd *scmd)
4687{
4688 int rc;
4689 struct pqi_ctrl_info *ctrl_info;
4690 struct pqi_scsi_dev *device;
4691	u16 hw_queue;
4692 struct pqi_queue_group *queue_group;
4693 bool raid_bypassed;
4694
4695 device = scmd->device->hostdata;
4696 ctrl_info = shost_to_hba(shost);
4697
4698 atomic_inc(&device->scsi_cmds_outstanding);
4699
4700 if (pqi_ctrl_offline(ctrl_info)) {
4701 set_host_byte(scmd, DID_NO_CONNECT);
4702 pqi_scsi_done(scmd);
4703 return 0;
4704 }
4705
4706 pqi_ctrl_busy(ctrl_info);
4707 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
4708 rc = SCSI_MLQUEUE_HOST_BUSY;
4709 goto out;
4710 }
4711
4712 /*
4713 * This is necessary because the SML doesn't zero out this field during
4714 * error recovery.
4715 */
4716 scmd->result = 0;
4717
4718 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
4719 queue_group = &ctrl_info->queue_groups[hw_queue];
4720
4721 if (pqi_is_logical_device(device)) {
4722 raid_bypassed = false;
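		/*
		 * Try the AIO (RAID bypass) path first for non-passthrough
		 * requests; if the bypass attempt is not accepted, fall back
		 * to the normal RAID path below.
		 */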
4723 if (device->offload_enabled &&
4724			!blk_rq_is_passthrough(scmd->request)) {
4725 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4726 scmd, queue_group);
4727 if (rc == 0 ||
4728 rc == SCSI_MLQUEUE_HOST_BUSY ||
4729 rc == SAM_STAT_CHECK_CONDITION ||
4730 rc == SAM_STAT_RESERVATION_CONFLICT)
4731				raid_bypassed = true;
4732 }
4733 if (!raid_bypassed)
4734 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4735 queue_group);
4736 } else {
4737 if (device->aio_enabled)
4738 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4739 queue_group);
4740 else
4741 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4742 queue_group);
4743 }
4744
4745out:
4746 pqi_ctrl_unbusy(ctrl_info);
4747 if (rc)
4748 atomic_dec(&device->scsi_cmds_outstanding);
4749
4750 return rc;
4751}
4752
4753static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
4754 struct pqi_queue_group *queue_group)
4755{
4756 unsigned int path;
4757 unsigned long flags;
4758 bool list_is_empty;
4759
4760 for (path = 0; path < 2; path++) {
4761 while (1) {
4762 spin_lock_irqsave(
4763 &queue_group->submit_lock[path], flags);
4764 list_is_empty =
4765 list_empty(&queue_group->request_list[path]);
4766 spin_unlock_irqrestore(
4767 &queue_group->submit_lock[path], flags);
4768 if (list_is_empty)
4769 break;
4770 pqi_check_ctrl_health(ctrl_info);
4771 if (pqi_ctrl_offline(ctrl_info))
4772 return -ENXIO;
4773 usleep_range(1000, 2000);
4774 }
4775 }
4776
4777 return 0;
4778}
4779
4780static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
4781{
4782 int rc;
4783 unsigned int i;
4784 unsigned int path;
4785 struct pqi_queue_group *queue_group;
4786 pqi_index_t iq_pi;
4787 pqi_index_t iq_ci;
4788
4789 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4790 queue_group = &ctrl_info->queue_groups[i];
4791
4792 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
4793 if (rc)
4794 return rc;
4795
4796 for (path = 0; path < 2; path++) {
4797 iq_pi = queue_group->iq_pi_copy[path];
4798
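			/*
			 * The inbound queue is drained once the controller's
			 * consumer index catches up to the driver's cached
			 * producer index.
			 */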
4799 while (1) {
4800 iq_ci = *queue_group->iq_ci[path];
4801 if (iq_ci == iq_pi)
4802 break;
4803 pqi_check_ctrl_health(ctrl_info);
4804 if (pqi_ctrl_offline(ctrl_info))
4805 return -ENXIO;
4806 usleep_range(1000, 2000);
4807 }
4808 }
4809 }
4810
4811 return 0;
4812}
4813
4814static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
4815 struct pqi_scsi_dev *device)
4816{
4817 unsigned int i;
4818 unsigned int path;
4819 struct pqi_queue_group *queue_group;
4820 unsigned long flags;
4821 struct pqi_io_request *io_request;
4822 struct pqi_io_request *next;
4823 struct scsi_cmnd *scmd;
4824 struct pqi_scsi_dev *scsi_device;
4825
4826 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4827 queue_group = &ctrl_info->queue_groups[i];
4828
4829 for (path = 0; path < 2; path++) {
4830 spin_lock_irqsave(
4831 &queue_group->submit_lock[path], flags);
4832
4833 list_for_each_entry_safe(io_request, next,
4834 &queue_group->request_list[path],
4835 request_list_entry) {
4836 scmd = io_request->scmd;
4837 if (!scmd)
4838 continue;
4839
4840 scsi_device = scmd->device->hostdata;
4841 if (scsi_device != device)
4842 continue;
4843
4844 list_del(&io_request->request_list_entry);
4845 set_host_byte(scmd, DID_RESET);
4846 pqi_scsi_done(scmd);
4847 }
4848
4849 spin_unlock_irqrestore(
4850 &queue_group->submit_lock[path], flags);
4851 }
4852 }
4853}
4854
4855static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
4856 struct pqi_scsi_dev *device)
4857{
4858 while (atomic_read(&device->scsi_cmds_outstanding)) {
4859 pqi_check_ctrl_health(ctrl_info);
4860 if (pqi_ctrl_offline(ctrl_info))
4861 return -ENXIO;
4862 usleep_range(1000, 2000);
4863 }
4864
4865 return 0;
4866}
4867
4868static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
4869{
4870 bool io_pending;
4871 unsigned long flags;
4872 struct pqi_scsi_dev *device;
4873
4874 while (1) {
4875 io_pending = false;
4876
4877 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
4878 list_for_each_entry(device, &ctrl_info->scsi_device_list,
4879 scsi_device_list_entry) {
4880 if (atomic_read(&device->scsi_cmds_outstanding)) {
4881 io_pending = true;
4882 break;
4883 }
4884 }
4885 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
4886 flags);
4887
4888 if (!io_pending)
4889 break;
4890
4891 pqi_check_ctrl_health(ctrl_info);
4892 if (pqi_ctrl_offline(ctrl_info))
4893 return -ENXIO;
4894
4895 usleep_range(1000, 2000);
4896 }
4897
4898 return 0;
4899}
4900
4901static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
4902 void *context)
4903{
4904	struct completion *waiting = context;
4905
4906 complete(waiting);
4907}
4908
4909#define PQI_LUN_RESET_TIMEOUT_SECS 10
4910
4911static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
4912 struct pqi_scsi_dev *device, struct completion *wait)
4913{
4914 int rc;
4915
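	/*
	 * Wait in PQI_LUN_RESET_TIMEOUT_SECS slices so a hung reset cannot
	 * block forever: after each timeout the controller's health is
	 * re-checked and the wait is abandoned if the controller has gone
	 * offline.
	 */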
4916 while (1) {
4917 if (wait_for_completion_io_timeout(wait,
4918 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
4919 rc = 0;
4920 break;
4921 }
4922
4923 pqi_check_ctrl_health(ctrl_info);
4924 if (pqi_ctrl_offline(ctrl_info)) {
4925			rc = -ENXIO;
4926 break;
4927 }
4928	}
4929
4930	return rc;
4931}
4932
4933static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
4934 struct pqi_scsi_dev *device)
4935{
4936 int rc;
4937 struct pqi_io_request *io_request;
4938 DECLARE_COMPLETION_ONSTACK(wait);
4939 struct pqi_task_management_request *request;
4940
4941	io_request = pqi_alloc_io_request(ctrl_info);
4942	io_request->io_complete_callback = pqi_lun_reset_complete;
4943 io_request->context = &wait;
4944
4945 request = io_request->iu;
4946 memset(request, 0, sizeof(*request));
4947
4948 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
4949 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
4950 &request->header.iu_length);
4951 put_unaligned_le16(io_request->index, &request->request_id);
4952 memcpy(request->lun_number, device->scsi3addr,
4953 sizeof(request->lun_number));
4954 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
4955
4956 pqi_start_io(ctrl_info,
4957 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4958 io_request);
4959
4960 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
4961 if (rc == 0)
4962		rc = io_request->status;
4963
4964 pqi_free_io_request(io_request);
4965
4966 return rc;
4967}
4968
4969/* Performs a reset at the LUN level. */
4970
4971static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
4972 struct pqi_scsi_dev *device)
4973{
4974 int rc;
4975
4976	rc = pqi_lun_reset(ctrl_info, device);
4977 if (rc == 0)
4978 rc = pqi_device_wait_for_pending_io(ctrl_info, device);
4979
4980	return rc == 0 ? SUCCESS : FAILED;
4981}
4982
4983static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
4984{
4985 int rc;
4986	struct Scsi_Host *shost;
4987 struct pqi_ctrl_info *ctrl_info;
4988 struct pqi_scsi_dev *device;
4989
4990 shost = scmd->device->host;
4991 ctrl_info = shost_to_hba(shost);
4992 device = scmd->device->hostdata;
4993
4994 dev_err(&ctrl_info->pci_dev->dev,
4995 "resetting scsi %d:%d:%d:%d\n",
4996		shost->host_no, device->bus, device->target, device->lun);
4997
4998 pqi_check_ctrl_health(ctrl_info);
4999 if (pqi_ctrl_offline(ctrl_info)) {
5000 rc = FAILED;
5001 goto out;
5002 }
5003
5004 mutex_lock(&ctrl_info->lun_reset_mutex);
5005
5006 pqi_ctrl_block_requests(ctrl_info);
5007 pqi_ctrl_wait_until_quiesced(ctrl_info);
5008 pqi_fail_io_queued_for_device(ctrl_info, device);
5009 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5010 pqi_device_reset_start(device);
5011 pqi_ctrl_unblock_requests(ctrl_info);
5012
5013 if (rc)
5014 rc = FAILED;
5015 else
5016 rc = pqi_device_reset(ctrl_info, device);
5017
5018 pqi_device_reset_done(device);
5019
5020 mutex_unlock(&ctrl_info->lun_reset_mutex);
5021
5022out:
5023 dev_err(&ctrl_info->pci_dev->dev,
5024 "reset of scsi %d:%d:%d:%d: %s\n",
5025		shost->host_no, device->bus, device->target, device->lun,
5026 rc == SUCCESS ? "SUCCESS" : "FAILED");
5027
5028 return rc;
5029}
5030
5031static int pqi_slave_alloc(struct scsi_device *sdev)
5032{
5033 struct pqi_scsi_dev *device;
5034 unsigned long flags;
5035 struct pqi_ctrl_info *ctrl_info;
5036 struct scsi_target *starget;
5037 struct sas_rphy *rphy;
5038
5039 ctrl_info = shost_to_hba(sdev->host);
5040
5041 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5042
5043 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5044 starget = scsi_target(sdev);
5045 rphy = target_to_rphy(starget);
5046 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5047 if (device) {
5048 device->target = sdev_id(sdev);
5049 device->lun = sdev->lun;
5050 device->target_lun_valid = true;
5051 }
5052 } else {
5053 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5054 sdev_id(sdev), sdev->lun);
5055 }
5056
5057	if (device) {
5058 sdev->hostdata = device;
5059 device->sdev = sdev;
5060 if (device->queue_depth) {
5061 device->advertised_queue_depth = device->queue_depth;
5062 scsi_change_queue_depth(sdev,
5063 device->advertised_queue_depth);
5064 }
5065 }
5066
5067 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5068
5069 return 0;
5070}
5071
5072static int pqi_map_queues(struct Scsi_Host *shost)
5073{
5074 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5075
5076 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
5077}
5078
5079static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5080 void __user *arg)
5081{
5082 struct pci_dev *pci_dev;
5083 u32 subsystem_vendor;
5084 u32 subsystem_device;
5085 cciss_pci_info_struct pciinfo;
5086
5087 if (!arg)
5088 return -EINVAL;
5089
5090 pci_dev = ctrl_info->pci_dev;
5091
5092 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5093 pciinfo.bus = pci_dev->bus->number;
5094 pciinfo.dev_fn = pci_dev->devfn;
5095 subsystem_vendor = pci_dev->subsystem_vendor;
5096 subsystem_device = pci_dev->subsystem_device;
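	/*
	 * board_id packs the subsystem device ID into the upper 16 bits and
	 * the subsystem vendor ID into the lower 16 bits, the encoding
	 * expected by legacy cciss-based tools.
	 */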
5097 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5098 subsystem_vendor;
5099
5100 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5101 return -EFAULT;
5102
5103 return 0;
5104}
5105
5106static int pqi_getdrivver_ioctl(void __user *arg)
5107{
5108 u32 version;
5109
5110 if (!arg)
5111 return -EINVAL;
5112
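	/*
	 * Pack the driver version into one 32-bit word: major in bits 31:28,
	 * minor in bits 27:24, release in bits 23:16 and revision in bits
	 * 15:0; e.g. 0.9.13-370 encodes as 0x090d0172.
	 */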
5113 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5114 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5115
5116 if (copy_to_user(arg, &version, sizeof(version)))
5117 return -EFAULT;
5118
5119 return 0;
5120}
5121
5122struct ciss_error_info {
5123 u8 scsi_status;
5124 int command_status;
5125 size_t sense_data_length;
5126};
5127
5128static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5129 struct ciss_error_info *ciss_error_info)
5130{
5131 int ciss_cmd_status;
5132 size_t sense_data_length;
5133
5134 switch (pqi_error_info->data_out_result) {
5135 case PQI_DATA_IN_OUT_GOOD:
5136 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5137 break;
5138 case PQI_DATA_IN_OUT_UNDERFLOW:
5139 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5140 break;
5141 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5142 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5143 break;
5144 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5145 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5146 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5147 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5148 case PQI_DATA_IN_OUT_ERROR:
5149 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5150 break;
5151 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5152 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5153 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5154 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5155 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5156 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5157 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5158 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5159 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5160 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5161 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5162 break;
5163 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5164 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5165 break;
5166 case PQI_DATA_IN_OUT_ABORTED:
5167 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5168 break;
5169 case PQI_DATA_IN_OUT_TIMEOUT:
5170 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5171 break;
5172 default:
5173 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5174 break;
5175 }
5176
5177 sense_data_length =
5178 get_unaligned_le16(&pqi_error_info->sense_data_length);
5179 if (sense_data_length == 0)
5180 sense_data_length =
5181 get_unaligned_le16(&pqi_error_info->response_data_length);
5182 if (sense_data_length)
5183 if (sense_data_length > sizeof(pqi_error_info->data))
5184 sense_data_length = sizeof(pqi_error_info->data);
5185
5186 ciss_error_info->scsi_status = pqi_error_info->status;
5187 ciss_error_info->command_status = ciss_cmd_status;
5188 ciss_error_info->sense_data_length = sense_data_length;
5189}
5190
5191static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5192{
5193 int rc;
5194 char *kernel_buffer = NULL;
5195 u16 iu_length;
5196 size_t sense_data_length;
5197 IOCTL_Command_struct iocommand;
5198 struct pqi_raid_path_request request;
5199 struct pqi_raid_error_info pqi_error_info;
5200 struct ciss_error_info ciss_error_info;
5201
5202 if (pqi_ctrl_offline(ctrl_info))
5203 return -ENXIO;
5204 if (!arg)
5205 return -EINVAL;
5206 if (!capable(CAP_SYS_RAWIO))
5207 return -EPERM;
5208 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5209 return -EFAULT;
5210 if (iocommand.buf_size < 1 &&
5211 iocommand.Request.Type.Direction != XFER_NONE)
5212 return -EINVAL;
5213 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5214 return -EINVAL;
5215 if (iocommand.Request.Type.Type != TYPE_CMD)
5216 return -EINVAL;
5217
5218 switch (iocommand.Request.Type.Direction) {
5219 case XFER_NONE:
5220 case XFER_WRITE:
5221 case XFER_READ:
5222 break;
5223 default:
5224 return -EINVAL;
5225 }
5226
5227 if (iocommand.buf_size > 0) {
5228 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
5229 if (!kernel_buffer)
5230 return -ENOMEM;
5231 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5232 if (copy_from_user(kernel_buffer, iocommand.buf,
5233 iocommand.buf_size)) {
5234 rc = -EFAULT;
5235 goto out;
5236 }
5237 } else {
5238 memset(kernel_buffer, 0, iocommand.buf_size);
5239 }
5240 }
5241
5242 memset(&request, 0, sizeof(request));
5243
5244 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5245 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5246 PQI_REQUEST_HEADER_LENGTH;
5247 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
5248 sizeof(request.lun_number));
5249 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
5250 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5251
5252 switch (iocommand.Request.Type.Direction) {
5253 case XFER_NONE:
5254 request.data_direction = SOP_NO_DIRECTION_FLAG;
5255 break;
5256 case XFER_WRITE:
5257 request.data_direction = SOP_WRITE_FLAG;
5258 break;
5259 case XFER_READ:
5260 request.data_direction = SOP_READ_FLAG;
5261 break;
5262 }
5263
5264 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5265
5266 if (iocommand.buf_size > 0) {
5267 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
5268
5269 rc = pqi_map_single(ctrl_info->pci_dev,
5270 &request.sg_descriptors[0], kernel_buffer,
5271 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
5272 if (rc)
5273 goto out;
5274
5275 iu_length += sizeof(request.sg_descriptors[0]);
5276 }
5277
5278 put_unaligned_le16(iu_length, &request.header.iu_length);
5279
5280 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
5281 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
5282
5283 if (iocommand.buf_size > 0)
5284 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
5285 PCI_DMA_BIDIRECTIONAL);
5286
5287 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
5288
5289 if (rc == 0) {
5290 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
5291 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
5292 iocommand.error_info.CommandStatus =
5293 ciss_error_info.command_status;
5294 sense_data_length = ciss_error_info.sense_data_length;
5295 if (sense_data_length) {
5296 if (sense_data_length >
5297 sizeof(iocommand.error_info.SenseInfo))
5298 sense_data_length =
5299 sizeof(iocommand.error_info.SenseInfo);
5300 memcpy(iocommand.error_info.SenseInfo,
5301 pqi_error_info.data, sense_data_length);
5302 iocommand.error_info.SenseLen = sense_data_length;
5303 }
5304 }
5305
5306 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
5307 rc = -EFAULT;
5308 goto out;
5309 }
5310
5311 if (rc == 0 && iocommand.buf_size > 0 &&
5312 (iocommand.Request.Type.Direction & XFER_READ)) {
5313 if (copy_to_user(iocommand.buf, kernel_buffer,
5314 iocommand.buf_size)) {
5315 rc = -EFAULT;
5316 }
5317 }
5318
5319out:
5320 kfree(kernel_buffer);
5321
5322 return rc;
5323}
5324
5325static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5326{
5327 int rc;
5328 struct pqi_ctrl_info *ctrl_info;
5329
5330 ctrl_info = shost_to_hba(sdev->host);
5331
5332 switch (cmd) {
5333 case CCISS_DEREGDISK:
5334 case CCISS_REGNEWDISK:
5335 case CCISS_REGNEWD:
5336 rc = pqi_scan_scsi_devices(ctrl_info);
5337 break;
5338 case CCISS_GETPCIINFO:
5339 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5340 break;
5341 case CCISS_GETDRIVVER:
5342 rc = pqi_getdrivver_ioctl(arg);
5343 break;
5344 case CCISS_PASSTHRU:
5345 rc = pqi_passthru_ioctl(ctrl_info, arg);
5346 break;
5347 default:
5348 rc = -EINVAL;
5349 break;
5350 }
5351
5352 return rc;
5353}
5354
5355static ssize_t pqi_version_show(struct device *dev,
5356 struct device_attribute *attr, char *buffer)
5357{
5358 ssize_t count = 0;
5359 struct Scsi_Host *shost;
5360 struct pqi_ctrl_info *ctrl_info;
5361
5362 shost = class_to_shost(dev);
5363 ctrl_info = shost_to_hba(shost);
5364
5365 count += snprintf(buffer + count, PAGE_SIZE - count,
5366 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5367
5368 count += snprintf(buffer + count, PAGE_SIZE - count,
5369 "firmware: %s\n", ctrl_info->firmware_version);
5370
5371 return count;
5372}
5373
5374static ssize_t pqi_host_rescan_store(struct device *dev,
5375 struct device_attribute *attr, const char *buffer, size_t count)
5376{
5377 struct Scsi_Host *shost = class_to_shost(dev);
5378
5379 pqi_scan_start(shost);
5380
5381 return count;
5382}
5383
5384static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
5385static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
5386
5387static struct device_attribute *pqi_shost_attrs[] = {
5388 &dev_attr_version,
5389 &dev_attr_rescan,
5390 NULL
5391};
5392
5393static ssize_t pqi_sas_address_show(struct device *dev,
5394 struct device_attribute *attr, char *buffer)
5395{
5396 struct pqi_ctrl_info *ctrl_info;
5397 struct scsi_device *sdev;
5398 struct pqi_scsi_dev *device;
5399 unsigned long flags;
5400 u64 sas_address;
5401
5402 sdev = to_scsi_device(dev);
5403 ctrl_info = shost_to_hba(sdev->host);
5404
5405 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5406
5407 device = sdev->hostdata;
5408 if (pqi_is_logical_device(device)) {
5409 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5410 flags);
5411 return -ENODEV;
5412 }
5413 sas_address = device->sas_address;
5414
5415 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5416
5417 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5418}
5419
5420static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5421 struct device_attribute *attr, char *buffer)
5422{
5423 struct pqi_ctrl_info *ctrl_info;
5424 struct scsi_device *sdev;
5425 struct pqi_scsi_dev *device;
5426 unsigned long flags;
5427
5428 sdev = to_scsi_device(dev);
5429 ctrl_info = shost_to_hba(sdev->host);
5430
5431 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5432
5433 device = sdev->hostdata;
5434 buffer[0] = device->offload_enabled ? '1' : '0';
5435 buffer[1] = '\n';
5436 buffer[2] = '\0';
5437
5438 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5439
5440 return 2;
5441}
5442
5443static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
5444static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
5445 pqi_ssd_smart_path_enabled_show, NULL);
5446
5447static struct device_attribute *pqi_sdev_attrs[] = {
5448 &dev_attr_sas_address,
5449 &dev_attr_ssd_smart_path_enabled,
5450 NULL
5451};
5452
5453static struct scsi_host_template pqi_driver_template = {
5454 .module = THIS_MODULE,
5455 .name = DRIVER_NAME_SHORT,
5456 .proc_name = DRIVER_NAME_SHORT,
5457 .queuecommand = pqi_scsi_queue_command,
5458 .scan_start = pqi_scan_start,
5459 .scan_finished = pqi_scan_finished,
5460 .this_id = -1,
5461 .use_clustering = ENABLE_CLUSTERING,
5462 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5463 .ioctl = pqi_ioctl,
5464 .slave_alloc = pqi_slave_alloc,
5465	.map_queues = pqi_map_queues,
5466 .sdev_attrs = pqi_sdev_attrs,
5467 .shost_attrs = pqi_shost_attrs,
5468};
5469
5470static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5471{
5472 int rc;
5473 struct Scsi_Host *shost;
5474
5475 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5476 if (!shost) {
5477 dev_err(&ctrl_info->pci_dev->dev,
5478 "scsi_host_alloc failed for controller %u\n",
5479 ctrl_info->ctrl_id);
5480 return -ENOMEM;
5481 }
5482
5483 shost->io_port = 0;
5484 shost->n_io_port = 0;
5485 shost->this_id = -1;
5486 shost->max_channel = PQI_MAX_BUS;
5487 shost->max_cmd_len = MAX_COMMAND_SIZE;
5488 shost->max_lun = ~0;
5489 shost->max_id = ~0;
5490 shost->max_sectors = ctrl_info->max_sectors;
5491 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5492 shost->cmd_per_lun = shost->can_queue;
5493 shost->sg_tablesize = ctrl_info->sg_tablesize;
5494 shost->transportt = pqi_sas_transport_template;
5495	shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
5496 shost->unique_id = shost->irq;
5497 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5498 shost->hostdata[0] = (unsigned long)ctrl_info;
5499
5500 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5501 if (rc) {
5502 dev_err(&ctrl_info->pci_dev->dev,
5503 "scsi_add_host failed for controller %u\n",
5504 ctrl_info->ctrl_id);
5505 goto free_host;
5506 }
5507
5508 rc = pqi_add_sas_host(shost, ctrl_info);
5509 if (rc) {
5510 dev_err(&ctrl_info->pci_dev->dev,
5511 "add SAS host failed for controller %u\n",
5512 ctrl_info->ctrl_id);
5513 goto remove_host;
5514 }
5515
5516 ctrl_info->scsi_host = shost;
5517
5518 return 0;
5519
5520remove_host:
5521 scsi_remove_host(shost);
5522free_host:
5523 scsi_host_put(shost);
5524
5525 return rc;
5526}
5527
5528static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5529{
5530 struct Scsi_Host *shost;
5531
5532 pqi_delete_sas_host(ctrl_info);
5533
5534 shost = ctrl_info->scsi_host;
5535 if (!shost)
5536 return;
5537
5538 scsi_remove_host(shost);
5539 scsi_host_put(shost);
5540}
5541
5542#define PQI_RESET_ACTION_RESET 0x1
5543
5544#define PQI_RESET_TYPE_NO_RESET 0x0
5545#define PQI_RESET_TYPE_SOFT_RESET 0x1
5546#define PQI_RESET_TYPE_FIRM_RESET 0x2
5547#define PQI_RESET_TYPE_HARD_RESET 0x3
5548
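/*
 * Request a PQI hard reset: the reset action (shifted left by 5 bits)
 * is OR'd with the reset type and written to the device_reset register,
 * then the driver waits for the controller to report PQI mode ready.
 */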
5549static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5550{
5551 int rc;
5552 u32 reset_params;
5553
5554 reset_params = (PQI_RESET_ACTION_RESET << 5) |
5555 PQI_RESET_TYPE_HARD_RESET;
5556
5557 writel(reset_params,
5558 &ctrl_info->pqi_registers->device_reset);
5559
5560 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5561 if (rc)
5562 dev_err(&ctrl_info->pci_dev->dev,
5563 "PQI reset failed\n");
5564
5565 return rc;
5566}
5567
5568static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5569{
5570 int rc;
5571 struct bmic_identify_controller *identify;
5572
5573 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5574 if (!identify)
5575 return -ENOMEM;
5576
5577 rc = pqi_identify_controller(ctrl_info, identify);
5578 if (rc)
5579 goto out;
5580
5581 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5582 sizeof(identify->firmware_version));
5583 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5584 snprintf(ctrl_info->firmware_version +
5585 strlen(ctrl_info->firmware_version),
5586 sizeof(ctrl_info->firmware_version),
5587 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5588
5589out:
5590 kfree(identify);
5591
5592 return rc;
5593}
5594
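/*
 * Copy the PQI configuration table out of BAR memory and walk its
 * section headers via next_section_offset (0 terminates the list),
 * recording the I/O-mapped address of the heartbeat counter if a
 * heartbeat section is present.
 */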
5595static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
5596{
5597 u32 table_length;
5598 u32 section_offset;
5599 void __iomem *table_iomem_addr;
5600 struct pqi_config_table *config_table;
5601 struct pqi_config_table_section_header *section;
5602
5603 table_length = ctrl_info->config_table_length;
5604
5605 config_table = kmalloc(table_length, GFP_KERNEL);
5606 if (!config_table) {
5607 dev_err(&ctrl_info->pci_dev->dev,
5608 "failed to allocate memory for PQI configuration table\n");
5609 return -ENOMEM;
5610 }
5611
5612 /*
5613 * Copy the config table contents from I/O memory space into the
5614 * temporary buffer.
5615 */
5616 table_iomem_addr = ctrl_info->iomem_base +
5617 ctrl_info->config_table_offset;
5618 memcpy_fromio(config_table, table_iomem_addr, table_length);
5619
5620 section_offset =
5621 get_unaligned_le32(&config_table->first_section_offset);
5622
5623 while (section_offset) {
5624 section = (void *)config_table + section_offset;
5625
5626 switch (get_unaligned_le16(&section->section_id)) {
5627 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
5628 ctrl_info->heartbeat_counter = table_iomem_addr +
5629 section_offset +
5630 offsetof(struct pqi_config_table_heartbeat,
5631 heartbeat_counter);
5632 break;
5633 }
5634
5635 section_offset =
5636 get_unaligned_le16(&section->next_section_offset);
5637 }
5638
5639 kfree(config_table);
5640
5641 return 0;
5642}
5643
5644/* Switches the controller from PQI mode back into SIS mode. */
5645
5646static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
5647{
5648 int rc;
5649
5650 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
5651 rc = pqi_reset(ctrl_info);
5652 if (rc)
5653 return rc;
5654 sis_reenable_sis_mode(ctrl_info);
5655 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
5656
5657 return 0;
5658}
5659
5660/*
5661 * If the controller isn't already in SIS mode, this function forces it into
5662 * SIS mode.
5663 */
5664
5665static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
5666{
5667 if (!sis_is_firmware_running(ctrl_info))
5668 return -ENXIO;
5669
5670 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
5671 return 0;
5672
5673 if (sis_is_kernel_up(ctrl_info)) {
5674 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
5675 return 0;
5676 }
5677
5678 return pqi_revert_to_sis_mode(ctrl_info);
5679}
5680
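/*
 * Main bring-up sequence: force SIS mode, size the I/O resources,
 * transition the controller into PQI mode, create the admin and
 * operational queues, enable MSI-X interrupts and events, then
 * register with the SCSI midlayer and start the initial device scan.
 */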
5681static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5682{
5683 int rc;
5684
5685 rc = pqi_force_sis_mode(ctrl_info);
5686 if (rc)
5687 return rc;
5688
5689 /*
5690 * Wait until the controller is ready to start accepting SIS
5691 * commands.
5692 */
5693 rc = sis_wait_for_ctrl_ready(ctrl_info);
5694 if (rc)
5695 return rc;
5696
5697 /*
5698 * Get the controller properties. This allows us to determine
5699 * whether or not it supports PQI mode.
5700 */
5701 rc = sis_get_ctrl_properties(ctrl_info);
5702 if (rc) {
5703 dev_err(&ctrl_info->pci_dev->dev,
5704 "error obtaining controller properties\n");
5705 return rc;
5706 }
5707
5708 rc = sis_get_pqi_capabilities(ctrl_info);
5709 if (rc) {
5710 dev_err(&ctrl_info->pci_dev->dev,
5711 "error obtaining controller capabilities\n");
5712 return rc;
5713 }
5714
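/*
 * A kdump/kexec crash kernel (reset_devices set) runs with very little
 * memory, so cap outstanding requests at the smaller kdump limit;
 * otherwise use the normal driver maximum.
 */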
5715 if (reset_devices) {
5716 if (ctrl_info->max_outstanding_requests >
5717 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
5718 ctrl_info->max_outstanding_requests =
5719 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
5720 } else {
5721 if (ctrl_info->max_outstanding_requests >
5722 PQI_MAX_OUTSTANDING_REQUESTS)
5723 ctrl_info->max_outstanding_requests =
5724 PQI_MAX_OUTSTANDING_REQUESTS;
5725 }
5726
5727 pqi_calculate_io_resources(ctrl_info);
5728
5729 rc = pqi_alloc_error_buffer(ctrl_info);
5730 if (rc) {
5731 dev_err(&ctrl_info->pci_dev->dev,
5732 "failed to allocate PQI error buffer\n");
5733 return rc;
5734 }
5735
5736 /*
5737 * If the function we are about to call succeeds, the
5738 * controller will transition from legacy SIS mode
5739 * into PQI mode.
5740 */
5741 rc = sis_init_base_struct_addr(ctrl_info);
5742 if (rc) {
5743 dev_err(&ctrl_info->pci_dev->dev,
5744 "error initializing PQI mode\n");
5745 return rc;
5746 }
5747
5748 /* Wait for the controller to complete the SIS -> PQI transition. */
5749 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5750 if (rc) {
5751 dev_err(&ctrl_info->pci_dev->dev,
5752 "transition to PQI mode failed\n");
5753 return rc;
5754 }
5755
5756 /* From here on, we are running in PQI mode. */
5757 ctrl_info->pqi_mode_enabled = true;
5758 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
5759
5760 rc = pqi_process_config_table(ctrl_info);
5761 if (rc)
5762 return rc;
5763
5764 rc = pqi_alloc_admin_queues(ctrl_info);
5765 if (rc) {
5766 dev_err(&ctrl_info->pci_dev->dev,
5767 "failed to allocate admin queues\n");
5768 return rc;
5769 }
5770
5771 rc = pqi_create_admin_queues(ctrl_info);
5772 if (rc) {
5773 dev_err(&ctrl_info->pci_dev->dev,
5774 "error creating admin queues\n");
5775 return rc;
5776 }
5777
5778 rc = pqi_report_device_capability(ctrl_info);
5779 if (rc) {
5780 dev_err(&ctrl_info->pci_dev->dev,
5781 "obtaining device capability failed\n");
5782 return rc;
5783 }
5784
5785 rc = pqi_validate_device_capability(ctrl_info);
5786 if (rc)
5787 return rc;
5788
5789 pqi_calculate_queue_resources(ctrl_info);
5790
5791 rc = pqi_enable_msix_interrupts(ctrl_info);
5792 if (rc)
5793 return rc;
5794
5795 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
5796 ctrl_info->max_msix_vectors =
5797 ctrl_info->num_msix_vectors_enabled;
5798 pqi_calculate_queue_resources(ctrl_info);
5799 }
5800
5801 rc = pqi_alloc_io_resources(ctrl_info);
5802 if (rc)
5803 return rc;
5804
5805 rc = pqi_alloc_operational_queues(ctrl_info);
5806 if (rc) {
5807 dev_err(&ctrl_info->pci_dev->dev,
5808 "failed to allocate operational queues\n");
5809 return rc;
5810 }
5811
5812 pqi_init_operational_queues(ctrl_info);
5813
5814 rc = pqi_request_irqs(ctrl_info);
5815 if (rc)
5816 return rc;
5817
5818 rc = pqi_create_queues(ctrl_info);
5819 if (rc)
5820 return rc;
5821
5822 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
5823
5824 ctrl_info->controller_online = true;
5825 pqi_start_heartbeat_timer(ctrl_info);
5826
5827 rc = pqi_enable_events(ctrl_info);
5828 if (rc) {
5829 dev_err(&ctrl_info->pci_dev->dev,
5830 "error enabling events\n");
5831 return rc;
5832 }
5833
5834 /* Register with the SCSI subsystem. */
5835 rc = pqi_register_scsi(ctrl_info);
5836 if (rc)
5837 return rc;
5838
5839 rc = pqi_get_ctrl_firmware_version(ctrl_info);
5840 if (rc) {
5841 dev_err(&ctrl_info->pci_dev->dev,
5842 "error obtaining firmware version\n");
5843 return rc;
5844 }
5845
5846 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5847 if (rc) {
5848 dev_err(&ctrl_info->pci_dev->dev,
5849 "error updating host wellness\n");
5850 return rc;
5851 }
5852
5853 pqi_schedule_update_time_worker(ctrl_info);
5854
5855 pqi_scan_scsi_devices(ctrl_info);
5856
5857 return 0;
5858}
5859
5860#if defined(CONFIG_PM)
5861
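/*
 * Zero every producer/consumer index copy, including the copies the
 * controller writes back into host memory, so the admin, operational,
 * and event queues can be re-created from a clean state on resume.
 */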
5862static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
5863{
5864 unsigned int i;
5865 struct pqi_admin_queues *admin_queues;
5866 struct pqi_event_queue *event_queue;
5867
5868 admin_queues = &ctrl_info->admin_queues;
5869 admin_queues->iq_pi_copy = 0;
5870 admin_queues->oq_ci_copy = 0;
5871 *admin_queues->oq_pi = 0;
5872
5873 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5874 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
5875 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
5876 ctrl_info->queue_groups[i].oq_ci_copy = 0;
5877
5878 *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0;
5879 *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0;
5880 *ctrl_info->queue_groups[i].oq_pi = 0;
5881 }
5882
5883 event_queue = &ctrl_info->event_queue;
5884 *event_queue->oq_pi = 0;
5885 event_queue->oq_ci_copy = 0;
5886}
5887
5888static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
5889{
5890 int rc;
5891
5892 rc = pqi_force_sis_mode(ctrl_info);
5893 if (rc)
5894 return rc;
5895
5896 /*
5897 * Wait until the controller is ready to start accepting SIS
5898 * commands.
5899 */
5900 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
5901 if (rc)
5902 return rc;
5903
5904 /*
5905 * If the function we are about to call succeeds, the
5906 * controller will transition from legacy SIS mode
5907 * into PQI mode.
5908 */
5909 rc = sis_init_base_struct_addr(ctrl_info);
5910 if (rc) {
5911 dev_err(&ctrl_info->pci_dev->dev,
5912 "error initializing PQI mode\n");
5913 return rc;
5914 }
5915
5916 /* Wait for the controller to complete the SIS -> PQI transition. */
5917 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5918 if (rc) {
5919 dev_err(&ctrl_info->pci_dev->dev,
5920 "transition to PQI mode failed\n");
5921 return rc;
5922 }
5923
5924 /* From here on, we are running in PQI mode. */
5925 ctrl_info->pqi_mode_enabled = true;
5926 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
5927
5928 pqi_reinit_queues(ctrl_info);
5929
5930 rc = pqi_create_admin_queues(ctrl_info);
5931 if (rc) {
5932 dev_err(&ctrl_info->pci_dev->dev,
5933 "error creating admin queues\n");
5934 return rc;
5935 }
5936
5937 rc = pqi_create_queues(ctrl_info);
5938 if (rc)
5939 return rc;
5940
5941 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
5942
5943 ctrl_info->controller_online = true;
5944 pqi_start_heartbeat_timer(ctrl_info);
5945 pqi_ctrl_unblock_requests(ctrl_info);
5946
5947 rc = pqi_enable_events(ctrl_info);
5948 if (rc) {
5949 dev_err(&ctrl_info->pci_dev->dev,
5950 "error enabling events\n");
5951 return rc;
5952 }
5953
5954 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5955 if (rc) {
5956 dev_err(&ctrl_info->pci_dev->dev,
5957 "error updating host wellness\n");
5958 return rc;
5959 }
5960
5961 pqi_schedule_update_time_worker(ctrl_info);
5962
5963 pqi_scan_scsi_devices(ctrl_info);
5964
5965 return 0;
5966}
5967
5968#endif /* CONFIG_PM */
5969
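/*
 * Helper used during PCI init below: rewrites the completion timeout
 * field in the PCIe Device Control 2 register so the controller gets
 * a longer completion timeout before it is brought up.
 */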
5970static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
5971 u16 timeout)
5972{
5973 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
5974 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
5975}
5976
5977static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
5978{
5979 int rc;
5980 u64 mask;
5981
5982 rc = pci_enable_device(ctrl_info->pci_dev);
5983 if (rc) {
5984 dev_err(&ctrl_info->pci_dev->dev,
5985 "failed to enable PCI device\n");
5986 return rc;
5987 }
5988
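/* Prefer 64-bit DMA addressing when dma_addr_t is wide enough; otherwise fall back to a 32-bit mask. */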
5989 if (sizeof(dma_addr_t) > 4)
5990 mask = DMA_BIT_MASK(64);
5991 else
5992 mask = DMA_BIT_MASK(32);
5993
5994 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
5995 if (rc) {
5996 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
5997 goto disable_device;
5998 }
5999
6000 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
6001 if (rc) {
6002 dev_err(&ctrl_info->pci_dev->dev,
6003 "failed to obtain PCI resources\n");
6004 goto disable_device;
6005 }
6006
6007 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
6008 ctrl_info->pci_dev, 0),
6009 sizeof(struct pqi_ctrl_registers));
6010 if (!ctrl_info->iomem_base) {
6011 dev_err(&ctrl_info->pci_dev->dev,
6012 "failed to map memory for controller registers\n");
6013 rc = -ENOMEM;
6014 goto release_regions;
6015 }
6016
6017#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
6018
6019 /* Increase the PCIe completion timeout. */
6020 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
6021 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
6022 if (rc) {
6023 dev_err(&ctrl_info->pci_dev->dev,
6024 "failed to set PCIe completion timeout\n");
6025 goto release_regions;
6026 }
6027
6028 /* Enable bus mastering. */
6029 pci_set_master(ctrl_info->pci_dev);
6030
6031 ctrl_info->registers = ctrl_info->iomem_base;
6032 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
6033
6034 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
6035
6036 return 0;
6037
6038release_regions:
6039 pci_release_regions(ctrl_info->pci_dev);
6040disable_device:
6041 pci_disable_device(ctrl_info->pci_dev);
6042
6043 return rc;
6044}
6045
6046static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
6047{
6048 iounmap(ctrl_info->iomem_base);
6049 pci_release_regions(ctrl_info->pci_dev);
6050 if (pci_is_enabled(ctrl_info->pci_dev))
6051 pci_disable_device(ctrl_info->pci_dev);
6052 pci_set_drvdata(ctrl_info->pci_dev, NULL);
6053}
6054
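/*
 * Allocate the per-controller context on the requested NUMA node and
 * initialize its locks, work items, heartbeat timer, and the semaphore
 * that reserves slots for synchronous requests.
 */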
6055static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
6056{
6057 struct pqi_ctrl_info *ctrl_info;
6058
6059 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
6060 GFP_KERNEL, numa_node);
6061 if (!ctrl_info)
6062 return NULL;
6063
6064 mutex_init(&ctrl_info->scan_mutex);
6065 mutex_init(&ctrl_info->lun_reset_mutex);
6066
6067 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
6068 spin_lock_init(&ctrl_info->scsi_device_list_lock);
6069
6070 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
6071 atomic_set(&ctrl_info->num_interrupts, 0);
6072
6073 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
6074 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
6075
6076 init_timer(&ctrl_info->heartbeat_timer);
6077
6078 sema_init(&ctrl_info->sync_request_sem,
6079 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
6080 init_waitqueue_head(&ctrl_info->block_requests_wait);
6081
6082 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
6083 ctrl_info->irq_mode = IRQ_MODE_NONE;
6084 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
6085
6086 return ctrl_info;
6087}
6088
6089static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
6090{
6091 kfree(ctrl_info);
6092}
6093
6094static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
6095{
6096 pqi_free_irqs(ctrl_info);
6097 pqi_disable_msix_interrupts(ctrl_info);
6098}
6099
6100static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
6101{
6102 pqi_stop_heartbeat_timer(ctrl_info);
6103 pqi_free_interrupts(ctrl_info);
6104 if (ctrl_info->queue_memory_base)
6105 dma_free_coherent(&ctrl_info->pci_dev->dev,
6106 ctrl_info->queue_memory_length,
6107 ctrl_info->queue_memory_base,
6108 ctrl_info->queue_memory_base_dma_handle);
6109 if (ctrl_info->admin_queue_memory_base)
6110 dma_free_coherent(&ctrl_info->pci_dev->dev,
6111 ctrl_info->admin_queue_memory_length,
6112 ctrl_info->admin_queue_memory_base,
6113 ctrl_info->admin_queue_memory_base_dma_handle);
6114 pqi_free_all_io_requests(ctrl_info);
6115 if (ctrl_info->error_buffer)
6116 dma_free_coherent(&ctrl_info->pci_dev->dev,
6117 ctrl_info->error_buffer_length,
6118 ctrl_info->error_buffer,
6119 ctrl_info->error_buffer_dma_handle);
6120 if (ctrl_info->iomem_base)
6121 pqi_cleanup_pci_init(ctrl_info);
6122 pqi_free_ctrl_info(ctrl_info);
6123}
6124
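/*
 * Common teardown used by both pqi_pci_remove() and the probe error
 * path: stop background workers, detach from the SCSI midlayer, drop
 * back to SIS mode if PQI mode was enabled, then free all resources.
 */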
6125static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
6126{
6127 pqi_cancel_rescan_worker(ctrl_info);
6128 pqi_cancel_update_time_worker(ctrl_info);
6129 pqi_remove_all_scsi_devices(ctrl_info);
6130 pqi_unregister_scsi(ctrl_info);
6131 if (ctrl_info->pqi_mode_enabled)
6132 pqi_revert_to_sis_mode(ctrl_info);
6133 pqi_free_ctrl_resources(ctrl_info);
6134}
6135
6136static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
6137 const struct pci_device_id *id)
6138{
6139 char *ctrl_description;
6140
6141 if (id->driver_data) {
6142 ctrl_description = (char *)id->driver_data;
6143 } else {
6144 switch (id->subvendor) {
6145 case PCI_VENDOR_ID_HP:
6146 ctrl_description = hpe_branded_controller;
6147 break;
6148 case PCI_VENDOR_ID_ADAPTEC2:
6149 default:
6150 ctrl_description = microsemi_branded_controller;
6151 break;
6152 }
6153 }
6154
6155 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
6156}
6157
6158static int pqi_pci_probe(struct pci_dev *pci_dev,
6159 const struct pci_device_id *id)
6160{
6161 int rc;
6162 int node;
6163 struct pqi_ctrl_info *ctrl_info;
6164
6165 pqi_print_ctrl_info(pci_dev, id);
6166
6167 if (pqi_disable_device_id_wildcards &&
6168 id->subvendor == PCI_ANY_ID &&
6169 id->subdevice == PCI_ANY_ID) {
6170 dev_warn(&pci_dev->dev,
6171 "controller not probed because device ID wildcards are disabled\n");
6172 return -ENODEV;
6173 }
6174
6175 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
6176 dev_warn(&pci_dev->dev,
6177 "controller device ID matched using wildcards\n");
6178
6179 node = dev_to_node(&pci_dev->dev);
6180 if (node == NUMA_NO_NODE)
6181 set_dev_node(&pci_dev->dev, 0);
6182
6183 ctrl_info = pqi_alloc_ctrl_info(node);
6184 if (!ctrl_info) {
6185 dev_err(&pci_dev->dev,
6186 "failed to allocate controller info block\n");
6187 return -ENOMEM;
6188 }
6189
6190 ctrl_info->pci_dev = pci_dev;
6191
6192 rc = pqi_pci_init(ctrl_info);
6193 if (rc)
6194 goto error;
6195
6196 rc = pqi_ctrl_init(ctrl_info);
6197 if (rc)
6198 goto error;
6199
6200 return 0;
6201
6202error:
6203 pqi_remove_ctrl(ctrl_info);
6204
6205 return rc;
6206}
6207
6208static void pqi_pci_remove(struct pci_dev *pci_dev)
6209{
6210 struct pqi_ctrl_info *ctrl_info;
6211
6212 ctrl_info = pci_get_drvdata(pci_dev);
6213 if (!ctrl_info)
6214 return;
6215
6216 pqi_remove_ctrl(ctrl_info);
6217}
6218
6219static void pqi_shutdown(struct pci_dev *pci_dev)
6220{
6221 int rc;
6222 struct pqi_ctrl_info *ctrl_info;
6223
6224 ctrl_info = pci_get_drvdata(pci_dev);
6225 if (!ctrl_info)
6226 goto error;
6227
6228 /*
6229 * Write all data in the controller's battery-backed cache to
6230 * storage.
6231 */
6232 rc = pqi_flush_cache(ctrl_info);
6233 if (rc == 0)
6234 return;
6235
6236error:
6237 dev_warn(&pci_dev->dev,
6238 "unable to flush controller cache\n");
6239}
6240
6241#if defined(CONFIG_PM)
6242
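/*
 * Suspend: disable events and background workers, flush the controller
 * cache, block new requests and drain outstanding I/O, and stop the
 * heartbeat timer; PCI state is only saved and the device powered down
 * when this is not a simple freeze.
 */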
6243static int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
6244{
6245 struct pqi_ctrl_info *ctrl_info;
6246
6247 ctrl_info = pci_get_drvdata(pci_dev);
6248
6249 pqi_disable_events(ctrl_info);
6250 pqi_cancel_update_time_worker(ctrl_info);
6251 pqi_cancel_rescan_worker(ctrl_info);
6252 pqi_wait_until_scan_finished(ctrl_info);
6253 pqi_wait_until_lun_reset_finished(ctrl_info);
6254 pqi_flush_cache(ctrl_info);
6255 pqi_ctrl_block_requests(ctrl_info);
6256 pqi_ctrl_wait_until_quiesced(ctrl_info);
6257 pqi_wait_until_inbound_queues_empty(ctrl_info);
6258 pqi_ctrl_wait_for_pending_io(ctrl_info);
6259 pqi_stop_heartbeat_timer(ctrl_info);
6260
6261 if (state.event == PM_EVENT_FREEZE)
6262 return 0;
6263
6264 pci_save_state(pci_dev);
6265 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
6266
6267 ctrl_info->controller_online = false;
6268 ctrl_info->pqi_mode_enabled = false;
6269
6270 return 0;
6271}
6272
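/*
 * Resume: depending on the device's current power state this either
 * re-attaches a single legacy INTx interrupt and unblocks requests, or
 * restores PCI state and reruns the full PQI bring-up via
 * pqi_ctrl_init_resume().
 */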
6273static int pqi_resume(struct pci_dev *pci_dev)
6274{
6275 int rc;
6276 struct pqi_ctrl_info *ctrl_info;
6277
6278 ctrl_info = pci_get_drvdata(pci_dev);
6279
6280 if (pci_dev->current_state != PCI_D0) {
6281 ctrl_info->max_hw_queue_index = 0;
6282 pqi_free_interrupts(ctrl_info);
6283 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
6284 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
6285 IRQF_SHARED, DRIVER_NAME_SHORT,
6286 &ctrl_info->queue_groups[0]);
6287 if (rc) {
6288 dev_err(&ctrl_info->pci_dev->dev,
6289 "irq %u init failed with error %d\n",
6290 pci_dev->irq, rc);
6291 return rc;
6292 }
6293 pqi_start_heartbeat_timer(ctrl_info);
6294 pqi_ctrl_unblock_requests(ctrl_info);
6295 return 0;
6296 }
6297
6298 pci_set_power_state(pci_dev, PCI_D0);
6299 pci_restore_state(pci_dev);
6300
6301 return pqi_ctrl_init_resume(ctrl_info);
6302}
6303
6304#endif /* CONFIG_PM */
6305
6306/* Define the PCI IDs for the controllers that we support. */
6307static const struct pci_device_id pqi_pci_id_table[] = {
6308 {
6309 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6310 0x152d, 0x8a22)
6311 },
6312 {
6313 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6314 0x152d, 0x8a23)
6315 },
6316 {
6317 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6318 0x152d, 0x8a24)
6319 },
6320 {
6321 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6322 0x152d, 0x8a36)
6323 },
6324 {
6325 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6326 0x152d, 0x8a37)
6327 },
6328 {
6329 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6330 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
6331 },
6332 {
6333 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6334 PCI_VENDOR_ID_ADAPTEC2, 0x0605)
6335 },
6336 {
6337 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6338 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
6339 },
6340 {
6341 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6342 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
6343 },
6344 {
6345 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6346 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
6347 },
6348 {
6349 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6350 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
6351 },
6352 {
6353 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6354 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
6355 },
6356 {
6357 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6358 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
6359 },
6360 {
6361 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6362 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
6363 },
6364 {
6365 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6366 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
6367 },
6368 {
6369 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6370 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
6371 },
6372 {
6373 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6374 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
6375 },
6376 {
6377 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6378 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
6379 },
6380 {
6381 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6382 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
6383 },
6384 {
6385 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6386 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
6387 },
6388 {
6389 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6390 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
6391 },
6392 {
6393 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6394 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
6395 },
6396 {
6397 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6398 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
6399 },
6400 {
6401 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6402 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
6403 },
6404 {
6405 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6406 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
6407 },
6408 {
6409 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6410 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
6411 },
6412 {
6413 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6414 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
6415 },
6416 {
6417 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6418 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
6419 },
6420 {
6421 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6422 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
6423 },
6424 {
6425 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6426 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
6427 },
6428 {
6429 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6430 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
6431 },
6432 {
6433 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6434 PCI_VENDOR_ID_HP, 0x0600)
6435 },
6436 {
6437 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6438 PCI_VENDOR_ID_HP, 0x0601)
6439 },
6440 {
6441 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6442 PCI_VENDOR_ID_HP, 0x0602)
6443 },
6444 {
6445 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6446 PCI_VENDOR_ID_HP, 0x0603)
6447 },
6448 {
6449 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6450 PCI_VENDOR_ID_HP, 0x0604)
6451 },
6452 {
6453 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6454 PCI_VENDOR_ID_HP, 0x0606)
6455 },
6456 {
6457 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6458 PCI_VENDOR_ID_HP, 0x0650)
6459 },
6460 {
6461 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6462 PCI_VENDOR_ID_HP, 0x0651)
6463 },
6464 {
6465 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6466 PCI_VENDOR_ID_HP, 0x0652)
6467 },
6468 {
6469 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6470 PCI_VENDOR_ID_HP, 0x0653)
6471 },
6472 {
6473 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6474 PCI_VENDOR_ID_HP, 0x0654)
6475 },
6476 {
6477 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6478 PCI_VENDOR_ID_HP, 0x0655)
6479 },
6480 {
6481 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6482 PCI_VENDOR_ID_HP, 0x0656)
6483 },
6484 {
6485 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6486 PCI_VENDOR_ID_HP, 0x0657)
6487 },
6488 {
6489 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6490 PCI_VENDOR_ID_HP, 0x0700)
6491 },
6492 {
6493 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6494 PCI_VENDOR_ID_HP, 0x0701)
6495 },
6496 {
6497 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6498 PCI_VENDOR_ID_HP, 0x1001)
6499 },
6500 {
6501 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6502 PCI_VENDOR_ID_HP, 0x1100)
6503 },
6504 {
6505 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6506 PCI_VENDOR_ID_HP, 0x1101)
6507 },
6508 {
6509 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6510 PCI_VENDOR_ID_HP, 0x1102)
6511 },
6512 {
6513 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6514 PCI_VENDOR_ID_HP, 0x1150)
6515 },
6516 {
6517 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6518 PCI_ANY_ID, PCI_ANY_ID)
6519 },
6520 { 0 }
6521};
6522
6523MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
6524
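/*
 * The trailing PCI_ANY_ID entry above matches any otherwise unlisted
 * 0x028f controller; pqi_pci_probe() warns on such wildcard matches and
 * rejects them entirely when device ID wildcards are disabled.
 */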
6525static struct pci_driver pqi_pci_driver = {
6526 .name = DRIVER_NAME_SHORT,
6527 .id_table = pqi_pci_id_table,
6528 .probe = pqi_pci_probe,
6529 .remove = pqi_pci_remove,
6530 .shutdown = pqi_shutdown,
6531#if defined(CONFIG_PM)
6532 .suspend = pqi_suspend,
6533 .resume = pqi_resume,
6534#endif
6535};
6536
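/*
 * Module entry point: attach the SAS transport template first, then
 * register the PCI driver, releasing the transport again if PCI
 * registration fails.
 */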
6537static int __init pqi_init(void)
6538{
6539 int rc;
6540
6541 pr_info(DRIVER_NAME "\n");
6542
6543 pqi_sas_transport_template =
6544 sas_attach_transport(&pqi_sas_transport_functions);
6545 if (!pqi_sas_transport_template)
6546 return -ENODEV;
6547
6548 rc = pci_register_driver(&pqi_pci_driver);
6549 if (rc)
6550 sas_release_transport(pqi_sas_transport_template);
6551
6552 return rc;
6553}
6554
6555static void __exit pqi_cleanup(void)
6556{
6557 pci_unregister_driver(&pqi_pci_driver);
6558 sas_release_transport(pqi_sas_transport_template);
6559}
6560
6561module_init(pqi_init);
6562module_exit(pqi_cleanup);
6563
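/*
 * verify_structures() is never called; its BUILD_BUG_ON()s exist purely
 * to break the build if any of the PQI/SIS on-the-wire structures drift
 * from the byte offsets and sizes assumed here.
 */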
6564static void __attribute__((unused)) verify_structures(void)
6565{
6566 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6567 sis_host_to_ctrl_doorbell) != 0x20);
6568 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6569 sis_interrupt_mask) != 0x34);
6570 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6571 sis_ctrl_to_host_doorbell) != 0x9c);
6572 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6573 sis_ctrl_to_host_doorbell_clear) != 0xa0);
6574 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6575 sis_driver_scratch) != 0xb0);
6576 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6577 sis_firmware_status) != 0xbc);
6578 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6579 sis_mailbox) != 0x1000);
6580 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6581 pqi_registers) != 0x4000);
6582
6583 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6584 iu_type) != 0x0);
6585 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6586 iu_length) != 0x2);
6587 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6588 response_queue_id) != 0x4);
6589 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6590 work_area) != 0x6);
6591 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
6592
6593 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6594 status) != 0x0);
6595 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6596 service_response) != 0x1);
6597 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6598 data_present) != 0x2);
6599 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6600 reserved) != 0x3);
6601 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6602 residual_count) != 0x4);
6603 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6604 data_length) != 0x8);
6605 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6606 reserved1) != 0xa);
6607 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6608 data) != 0xc);
6609 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
6610
6611 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6612 data_in_result) != 0x0);
6613 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6614 data_out_result) != 0x1);
6615 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6616 reserved) != 0x2);
6617 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6618 status) != 0x5);
6619 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6620 status_qualifier) != 0x6);
6621 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6622 sense_data_length) != 0x8);
6623 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6624 response_data_length) != 0xa);
6625 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6626 data_in_transferred) != 0xc);
6627 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6628 data_out_transferred) != 0x10);
6629 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6630 data) != 0x14);
6631 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
6632
6633 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6634 signature) != 0x0);
6635 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6636 function_and_status_code) != 0x8);
6637 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6638 max_admin_iq_elements) != 0x10);
6639 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6640 max_admin_oq_elements) != 0x11);
6641 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6642 admin_iq_element_length) != 0x12);
6643 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6644 admin_oq_element_length) != 0x13);
6645 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6646 max_reset_timeout) != 0x14);
6647 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6648 legacy_intx_status) != 0x18);
6649 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6650 legacy_intx_mask_set) != 0x1c);
6651 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6652 legacy_intx_mask_clear) != 0x20);
6653 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6654 device_status) != 0x40);
6655 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6656 admin_iq_pi_offset) != 0x48);
6657 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6658 admin_oq_ci_offset) != 0x50);
6659 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6660 admin_iq_element_array_addr) != 0x58);
6661 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6662 admin_oq_element_array_addr) != 0x60);
6663 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6664 admin_iq_ci_addr) != 0x68);
6665 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6666 admin_oq_pi_addr) != 0x70);
6667 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6668 admin_iq_num_elements) != 0x78);
6669 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6670 admin_oq_num_elements) != 0x79);
6671 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6672 admin_queue_int_msg_num) != 0x7a);
6673 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6674 device_error) != 0x80);
6675 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6676 error_details) != 0x88);
6677 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6678 device_reset) != 0x90);
6679 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6680 power_action) != 0x94);
6681 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
6682
6683 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6684 header.iu_type) != 0);
6685 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6686 header.iu_length) != 2);
6687 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6688 header.work_area) != 6);
6689 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6690 request_id) != 8);
6691 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6692 function_code) != 10);
6693 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6694 data.report_device_capability.buffer_length) != 44);
6695 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6696 data.report_device_capability.sg_descriptor) != 48);
6697 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6698 data.create_operational_iq.queue_id) != 12);
6699 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6700 data.create_operational_iq.element_array_addr) != 16);
6701 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6702 data.create_operational_iq.ci_addr) != 24);
6703 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6704 data.create_operational_iq.num_elements) != 32);
6705 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6706 data.create_operational_iq.element_length) != 34);
6707 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6708 data.create_operational_iq.queue_protocol) != 36);
6709 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6710 data.create_operational_oq.queue_id) != 12);
6711 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6712 data.create_operational_oq.element_array_addr) != 16);
6713 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6714 data.create_operational_oq.pi_addr) != 24);
6715 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6716 data.create_operational_oq.num_elements) != 32);
6717 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6718 data.create_operational_oq.element_length) != 34);
6719 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6720 data.create_operational_oq.queue_protocol) != 36);
6721 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6722 data.create_operational_oq.int_msg_num) != 40);
6723 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6724 data.create_operational_oq.coalescing_count) != 42);
6725 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6726 data.create_operational_oq.min_coalescing_time) != 44);
6727 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6728 data.create_operational_oq.max_coalescing_time) != 48);
6729 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6730 data.delete_operational_queue.queue_id) != 12);
6731 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
6732 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6733 data.create_operational_iq) != 64 - 11);
6734 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6735 data.create_operational_oq) != 64 - 11);
6736 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6737 data.delete_operational_queue) != 64 - 11);
6738
6739 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6740 header.iu_type) != 0);
6741 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6742 header.iu_length) != 2);
6743 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6744 header.work_area) != 6);
6745 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6746 request_id) != 8);
6747 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6748 function_code) != 10);
6749 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6750 status) != 11);
6751 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6752 data.create_operational_iq.status_descriptor) != 12);
6753 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6754 data.create_operational_iq.iq_pi_offset) != 16);
6755 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6756 data.create_operational_oq.status_descriptor) != 12);
6757 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6758 data.create_operational_oq.oq_ci_offset) != 16);
6759 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
6760
6761 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6762 header.iu_type) != 0);
6763 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6764 header.iu_length) != 2);
6765 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6766 header.response_queue_id) != 4);
6767 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6768 header.work_area) != 6);
6769 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6770 request_id) != 8);
6771 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6772 nexus_id) != 10);
6773 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6774 buffer_length) != 12);
6775 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6776 lun_number) != 16);
6777 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6778 protocol_specific) != 24);
6779 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6780 error_index) != 27);
6781 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6782 cdb) != 32);
6783 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6784 sg_descriptors) != 64);
6785 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
6786 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6787
6788 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6789 header.iu_type) != 0);
6790 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6791 header.iu_length) != 2);
6792 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6793 header.response_queue_id) != 4);
6794 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6795 header.work_area) != 6);
6796 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6797 request_id) != 8);
6798 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6799 nexus_id) != 12);
6800 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6801 buffer_length) != 16);
6802 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6803 data_encryption_key_index) != 22);
6804 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6805 encrypt_tweak_lower) != 24);
6806 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6807 encrypt_tweak_upper) != 28);
6808 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6809 cdb) != 32);
6810 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6811 error_index) != 48);
6812 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6813 num_sg_descriptors) != 50);
6814 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6815 cdb_length) != 51);
6816 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6817 lun_number) != 52);
6818 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6819 sg_descriptors) != 64);
6820 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
6821 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6822
6823 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6824 header.iu_type) != 0);
6825 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6826 header.iu_length) != 2);
6827 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6828 request_id) != 8);
6829 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6830 error_index) != 10);
6831
6832 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6833 header.iu_type) != 0);
6834 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6835 header.iu_length) != 2);
6836 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6837 header.response_queue_id) != 4);
6838 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6839 request_id) != 8);
6840 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6841 data.report_event_configuration.buffer_length) != 12);
6842 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6843 data.report_event_configuration.sg_descriptors) != 16);
6844 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6845 data.set_event_configuration.global_event_oq_id) != 10);
6846 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6847 data.set_event_configuration.buffer_length) != 12);
6848 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6849 data.set_event_configuration.sg_descriptors) != 16);
6850
6851 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6852 max_inbound_iu_length) != 6);
6853 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6854 max_outbound_iu_length) != 14);
6855 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
6856
6857 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6858 data_length) != 0);
6859 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6860 iq_arbitration_priority_support_bitmask) != 8);
6861 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6862 maximum_aw_a) != 9);
6863 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6864 maximum_aw_b) != 10);
6865 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6866 maximum_aw_c) != 11);
6867 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6868 max_inbound_queues) != 16);
6869 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6870 max_elements_per_iq) != 18);
6871 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6872 max_iq_element_length) != 24);
6873 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6874 min_iq_element_length) != 26);
6875 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6876 max_outbound_queues) != 30);
6877 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6878 max_elements_per_oq) != 32);
6879 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6880 intr_coalescing_time_granularity) != 34);
6881 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6882 max_oq_element_length) != 36);
6883 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6884 min_oq_element_length) != 38);
6885 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6886 iu_layer_descriptors) != 64);
6887 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
6888
6889 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6890 event_type) != 0);
6891 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6892 oq_id) != 2);
6893 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
6894
6895 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6896 num_event_descriptors) != 2);
6897 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6898 descriptors) != 4);
6899
6900 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
6901 ARRAY_SIZE(pqi_supported_event_types));
6902
6903 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6904 header.iu_type) != 0);
6905 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6906 header.iu_length) != 2);
6907 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6908 event_type) != 8);
6909 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6910 event_id) != 10);
6911 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6912 additional_event_id) != 12);
6913 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6914 data) != 16);
6915 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
6916
6917 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6918 header.iu_type) != 0);
6919 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6920 header.iu_length) != 2);
6921 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6922 event_type) != 8);
6923 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6924 event_id) != 10);
6925 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6926 additional_event_id) != 12);
6927 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
6928
6929 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6930 header.iu_type) != 0);
6931 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6932 header.iu_length) != 2);
6933 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6934 request_id) != 8);
6935 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6936 nexus_id) != 10);
6937 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6938 lun_number) != 16);
6939 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6940 protocol_specific) != 24);
6941 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6942 outbound_queue_id_to_manage) != 26);
6943 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6944 request_id_to_manage) != 28);
6945 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
6946 task_management_function) != 30);
6947 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
6948
6949 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6950 header.iu_type) != 0);
6951 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6952 header.iu_length) != 2);
6953 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6954 request_id) != 8);
6955 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6956 nexus_id) != 10);
6957 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6958 additional_response_info) != 12);
6959 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
6960 response_code) != 15);
6961 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
6962
6963 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6964 configured_logical_drive_count) != 0);
6965 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6966 configuration_signature) != 1);
6967 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6968 firmware_version) != 5);
6969 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6970 extended_logical_unit_count) != 154);
6971 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6972 firmware_build_number) != 190);
6973 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
6974 controller_mode) != 292);
6975
6976 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
6977 phys_bay_in_box) != 115);
6978 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
6979 device_type) != 120);
6980 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
6981 redundant_path_present_map) != 1736);
6982 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
6983 active_path_number) != 1738);
6984 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
6985 alternate_paths_phys_connector) != 1739);
6986 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
6987 alternate_paths_phys_box_on_port) != 1755);
6988 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
6989 current_queue_depth_limit) != 1796);
6990 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
6991
6992 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
6993 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
6994 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
6995 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6996 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
6997 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
6998 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
6999 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
7000 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7001 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
7002 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
7003 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7004
7005 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
7006 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
7007 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
7008}