/*
 * Driver for sTec s1120 PCIe SSDs. sTec was acquired by HGST in 2013;
 * HGST itself had been acquired by Western Digital in 2012.
 *
 * Copyright 2012 sTec, Inc.
 * Copyright (c) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is part of the Linux kernel, and is made available under
 * the terms of the GNU General Public License version 2.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/wait.h>
#include <linux/uio.h>
#include <linux/stringify.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"

static int skd_dbg_level;
static int skd_isr_comp_limit = 4;

enum {
        STEC_LINK_2_5GTS = 0,
        STEC_LINK_5GTS = 1,
        STEC_LINK_8GTS = 2,
        STEC_LINK_UNKNOWN = 0xFF
};
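
/* PCIe link rates: 2.5, 5 and 8 GT/s correspond to Gen1, Gen2 and Gen3. */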

enum {
        SKD_FLUSH_INITIALIZER,
        SKD_FLUSH_ZERO_SIZE_FIRST,
        SKD_FLUSH_DATA_SECOND,
};

#define SKD_ASSERT(expr) \
        do { \
                if (unlikely(!(expr))) { \
                        pr_err("Assertion failed! %s,%s,%s,line=%d\n",  \
                               # expr, __FILE__, __func__, __LINE__); \
                } \
        } while (0)

#define DRV_NAME "skd"
#define DRV_VERSION "2.2.1"
#define DRV_BUILD_ID "0260"
#define PFX DRV_NAME ": "
#define DRV_BIN_VERSION 0x100
#define DRV_VER_COMPL   "2.2.1." DRV_BUILD_ID

MODULE_LICENSE("GPL");

MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);

#define PCI_VENDOR_ID_STEC      0x1B39
#define PCI_DEVICE_ID_S1120     0x0001

#define SKD_FUA_NV              (1 << 1)
#define SKD_MINORS_PER_DEVICE   16

#define SKD_MAX_QUEUE_DEPTH     200u

#define SKD_PAUSE_TIMEOUT       (5 * 1000)

#define SKD_N_FITMSG_BYTES      (512u)
#define SKD_MAX_REQ_PER_MSG     14

#define SKD_N_SPECIAL_CONTEXT   32u
#define SKD_N_SPECIAL_FITMSG_BYTES      (128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit.  That allows 4096*4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u
#define SKD_N_SG_PER_SPECIAL    256u

#define SKD_N_COMPLETION_ENTRY  256u
#define SKD_N_READ_CAP_BYTES    (8u)

#define SKD_N_INTERNAL_BYTES    (512u)
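
/*
 * Sizing sketch for the completion queue: one DMA-coherent region big
 * enough for SKD_N_COMPLETION_ENTRY completion entries plus the same
 * number of error-info entries (exposed below through skcomp_table,
 * skerr_table and cq_dma_address); the exact layout is assumed from
 * those fields, not spelled out in this file.
 */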
#define SKD_SKCOMP_SIZE                                                 \
        ((sizeof(struct fit_completion_entry_v1) +                      \
          sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)

/* 6 bits of uniquifier, 0xFC00 */
#define SKD_ID_INCR             (0x400)
#define SKD_ID_TABLE_MASK       (3u << 8u)
#define  SKD_ID_RW_REQUEST      (0u << 8u)
#define  SKD_ID_INTERNAL        (1u << 8u)
#define  SKD_ID_SPECIAL_REQUEST (2u << 8u)
#define  SKD_ID_FIT_MSG         (3u << 8u)
#define SKD_ID_SLOT_MASK        0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu

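/*
 * Timeout wheel sketch: the per-second timer tick increments
 * timeout_stamp, and every new request is stamped with
 * (timeout_stamp & SKD_TIMEOUT_SLOT_MASK).  A given slot value thus
 * recurs every SKD_N_TIMEOUT_SLOT ticks, so a non-zero count left in a
 * slot when its value comes around again marks requests as overdue.
 */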
#define SKD_N_TIMEOUT_SLOT      4u
#define SKD_TIMEOUT_SLOT_MASK   3u

#define SKD_N_MAX_SECTORS 2048u

#define SKD_MAX_RETRIES 2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36

enum skd_drvr_state {
        SKD_DRVR_STATE_LOAD,
        SKD_DRVR_STATE_IDLE,
        SKD_DRVR_STATE_BUSY,
        SKD_DRVR_STATE_STARTING,
        SKD_DRVR_STATE_ONLINE,
        SKD_DRVR_STATE_PAUSING,
        SKD_DRVR_STATE_PAUSED,
        SKD_DRVR_STATE_DRAINING_TIMEOUT,
        SKD_DRVR_STATE_RESTARTING,
        SKD_DRVR_STATE_RESUMING,
        SKD_DRVR_STATE_STOPPING,
        SKD_DRVR_STATE_FAULT,
        SKD_DRVR_STATE_DISAPPEARED,
        SKD_DRVR_STATE_PROTOCOL_MISMATCH,
        SKD_DRVR_STATE_BUSY_ERASE,
        SKD_DRVR_STATE_BUSY_SANITIZE,
        SKD_DRVR_STATE_BUSY_IMMINENT,
        SKD_DRVR_STATE_WAIT_BOOT,
        SKD_DRVR_STATE_SYNCING,
};

#define SKD_WAIT_BOOT_TIMO      SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO       SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO     SKD_TIMER_MINUTES(4u)
#define SKD_DRAINING_TIMO       SKD_TIMER_SECONDS(6u)
#define SKD_BUSY_TIMO           SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO   SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS  90u

enum skd_req_state {
        SKD_REQ_STATE_IDLE,
        SKD_REQ_STATE_SETUP,
        SKD_REQ_STATE_BUSY,
        SKD_REQ_STATE_COMPLETED,
        SKD_REQ_STATE_TIMEOUT,
        SKD_REQ_STATE_ABORTED,
};

enum skd_fit_msg_state {
        SKD_MSG_STATE_IDLE,
        SKD_MSG_STATE_BUSY,
};

enum skd_check_status_action {
        SKD_CHECK_STATUS_REPORT_GOOD,
        SKD_CHECK_STATUS_REPORT_SMART_ALERT,
        SKD_CHECK_STATUS_REQUEUE_REQUEST,
        SKD_CHECK_STATUS_REPORT_ERROR,
        SKD_CHECK_STATUS_BUSY_IMMINENT,
};

struct skd_msg_buf {
        struct fit_msg_hdr      fmh;
        struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG];
};

struct skd_fitmsg_context {
        enum skd_fit_msg_state state;

        struct skd_fitmsg_context *next;

        u32 id;
        u16 outstanding;

        u32 length;

        struct skd_msg_buf *msg_buf;
        dma_addr_t mb_dma_address;
};

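/*
 * Request contexts are recycled through skreq_free_list; note how every
 * reuse below bumps the id by SKD_ID_INCR, which (presumably) lets a
 * completion that arrives late, carrying a stale id, be told apart from
 * the slot's current occupant.
 */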
struct skd_request_context {
        enum skd_req_state state;

        struct skd_request_context *next;

        u16 id;
        u32 fitmsg_id;

        struct request *req;
        u8 flush_cmd;

        u32 timeout_stamp;
        enum dma_data_direction data_dir;
        struct scatterlist *sg;
        u32 n_sg;
        u32 sg_byte_count;

        struct fit_sg_descriptor *sksg_list;
        dma_addr_t sksg_dma_address;

        struct fit_completion_entry_v1 completion;

        struct fit_comp_error_info err_info;
};

struct skd_special_context {
        struct skd_request_context req;

        u8 orphaned;

        void *data_buf;
        dma_addr_t db_dma_address;

        struct skd_msg_buf *msg_buf;
        dma_addr_t mb_dma_address;
};

struct skd_sg_io {
        fmode_t mode;
        void __user *argp;

        struct sg_io_hdr sg;

        u8 cdb[16];

        u32 dxfer_len;
        u32 iovcnt;
        struct sg_iovec *iov;
        struct sg_iovec no_iov_iov;

        struct skd_special_context *skspcl;
};

typedef enum skd_irq_type {
        SKD_IRQ_LEGACY,
        SKD_IRQ_MSI,
        SKD_IRQ_MSIX
} skd_irq_type_t;

#define SKD_MAX_BARS                    2

struct skd_device {
        void __iomem *mem_map[SKD_MAX_BARS];
        resource_size_t mem_phys[SKD_MAX_BARS];
        u32 mem_size[SKD_MAX_BARS];

        struct skd_msix_entry *msix_entries;

        struct pci_dev *pdev;
        int pcie_error_reporting_is_enabled;

        spinlock_t lock;
        struct gendisk *disk;
        struct request_queue *queue;
        struct device *class_dev;
        int gendisk_on;
        int sync_done;

        u32 devno;
        u32 major;
        char isr_name[30];

        enum skd_drvr_state state;
        u32 drive_state;

        u32 in_flight;
        u32 cur_max_queue_depth;
        u32 queue_low_water_mark;
        u32 dev_max_queue_depth;

        u32 num_fitmsg_context;
        u32 num_req_context;

        u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
        u32 timeout_stamp;
        struct skd_fitmsg_context *skmsg_free_list;
        struct skd_fitmsg_context *skmsg_table;

        struct skd_request_context *skreq_free_list;
        struct skd_request_context *skreq_table;

        struct skd_special_context *skspcl_free_list;
        struct skd_special_context *skspcl_table;

        struct skd_special_context internal_skspcl;
        u32 read_cap_blocksize;
        u32 read_cap_last_lba;
        int read_cap_is_valid;
        int inquiry_is_valid;
        u8 inq_serial_num[13];  /* 12 chars plus null term */

        u8 skcomp_cycle;
        u32 skcomp_ix;
        struct fit_completion_entry_v1 *skcomp_table;
        struct fit_comp_error_info *skerr_table;
        dma_addr_t cq_dma_address;

        wait_queue_head_t waitq;

        struct timer_list timer;
        u32 timer_countdown;
        u32 timer_substate;

        int n_special;
        int sgs_per_request;
        u32 last_mtd;

        u32 proto_ver;

        int dbg_level;
        u32 connect_time_stamp;
        int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
        u32 drive_jiffies;

        u32 timo_slot;

        struct work_struct completion_worker;
};

#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF)      skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)

static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
        u32 val = readl(skdev->mem_map[1] + offset);

        if (unlikely(skdev->dbg_level >= 2))
                dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
        return val;
}

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
                                   u32 offset)
{
        writel(val, skdev->mem_map[1] + offset);
        if (unlikely(skdev->dbg_level >= 2))
                dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
                                   u32 offset)
{
        writeq(val, skdev->mem_map[1] + offset);
        if (unlikely(skdev->dbg_level >= 2))
                dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
                        val);
}
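
/*
 * Note that all three register accessors above address mem_map[1], the
 * second mapped BAR, and trace the register traffic via dev_dbg() when
 * dbg_level >= 2.
 */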

#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
static int skd_isr_type = SKD_IRQ_DEFAULT;

module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
                 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
                 "Maximum SCSI requests packed in a single message."
                 " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
                 "Maximum SCSI requests issued to s1120."
                 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
                 "Maximum SG elements per block request."
                 " (1-4096, default==256)");

static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
                 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");

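/*
 * Illustrative module load (values are examples, not recommendations),
 * assuming the driver is built as skd.ko:
 *
 *   modprobe skd skd_isr_type=2 skd_max_queue_depth=128
 */
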
/* Major device number dynamically assigned. */
static u32 skd_major;

static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
                            struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
                                    struct skd_special_context *skspcl);
static void skd_request_fn(struct request_queue *rq);
static void skd_end_request(struct skd_device *skdev,
                struct skd_request_context *skreq, blk_status_t status);
static bool skd_preop_sg_list(struct skd_device *skdev,
                             struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
                               struct skd_request_context *skreq);

static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_release_special(struct skd_device *skdev,
                                struct skd_special_context *skspcl);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev);
static void skd_soft_reset(struct skd_device *skdev);

const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skmsg(struct skd_device *skdev,
                          struct skd_fitmsg_context *skmsg, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
                          struct skd_request_context *skreq, const char *event);

/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
static void skd_fail_all_pending(struct skd_device *skdev)
{
        struct request_queue *q = skdev->queue;
        struct request *req;

        for (;;) {
                req = blk_peek_request(q);
                if (req == NULL)
                        break;
                blk_start_request(req);
                __blk_end_request_all(req, BLK_STS_IOERR);
        }
}

static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
                int data_dir, unsigned lba,
                unsigned count)
{
        if (data_dir == READ)
                scsi_req->cdb[0] = READ_10;
        else
                scsi_req->cdb[0] = WRITE_10;

        scsi_req->cdb[1] = 0;
        scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
        scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
        scsi_req->cdb[4] = (lba & 0xff00) >> 8;
        scsi_req->cdb[5] = (lba & 0xff);
        scsi_req->cdb[6] = 0;
        scsi_req->cdb[7] = (count & 0xff00) >> 8;
        scsi_req->cdb[8] = count & 0xff;
        scsi_req->cdb[9] = 0;
}
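
/*
 * Worked example for the CDB above: lba = 0x00123456, count = 8 yields
 * 28|2a 00 00 12 34 56 00 00 08 00 (READ_10 is 0x28, WRITE_10 is 0x2a);
 * both the LBA and the transfer length are big-endian, per SCSI.
 */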

static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
                            struct skd_request_context *skreq)
{
        skreq->flush_cmd = 1;

        scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
        scsi_req->cdb[1] = 0;
        scsi_req->cdb[2] = 0;
        scsi_req->cdb[3] = 0;
        scsi_req->cdb[4] = 0;
        scsi_req->cdb[5] = 0;
        scsi_req->cdb[6] = 0;
        scsi_req->cdb[7] = 0;
        scsi_req->cdb[8] = 0;
        scsi_req->cdb[9] = 0;
}

/*
 * Return true if and only if all pending requests should be failed.
 */
static bool skd_fail_all(struct request_queue *q)
{
        struct skd_device *skdev = q->queuedata;

        SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

        skd_log_skdev(skdev, "req_not_online");
        switch (skdev->state) {
        case SKD_DRVR_STATE_PAUSING:
        case SKD_DRVR_STATE_PAUSED:
        case SKD_DRVR_STATE_STARTING:
        case SKD_DRVR_STATE_RESTARTING:
        case SKD_DRVR_STATE_WAIT_BOOT:
        /* In the starting state we haven't started the queue,
         * so we can't get here... but requests may be
         * hanging out waiting for us because we already
         * reported /dev/skd0. They'll wait forever if the
         * connect doesn't complete.
         * What to do? Perhaps delay creating /dev/skd0?
         */
        case SKD_DRVR_STATE_BUSY:
        case SKD_DRVR_STATE_BUSY_IMMINENT:
        case SKD_DRVR_STATE_BUSY_ERASE:
        case SKD_DRVR_STATE_DRAINING_TIMEOUT:
                return false;

        case SKD_DRVR_STATE_BUSY_SANITIZE:
        case SKD_DRVR_STATE_STOPPING:
        case SKD_DRVR_STATE_SYNCING:
        case SKD_DRVR_STATE_FAULT:
        case SKD_DRVR_STATE_DISAPPEARED:
        default:
                return true;
        }
}

static void skd_request_fn(struct request_queue *q)
{
        struct skd_device *skdev = q->queuedata;
        struct skd_fitmsg_context *skmsg = NULL;
        struct fit_msg_hdr *fmh = NULL;
        struct skd_request_context *skreq;
        struct request *req = NULL;
        struct skd_scsi_request *scsi_req;
        unsigned long io_flags;
        u32 lba;
        u32 count;
        int data_dir;
        __be64 be_dmaa;
        u64 cmdctxt;
        u32 timo_slot;
        int flush, fua;

        if (skdev->state != SKD_DRVR_STATE_ONLINE) {
                if (skd_fail_all(q))
                        skd_fail_all_pending(skdev);
                return;
        }

        if (blk_queue_stopped(skdev->queue)) {
                if (skdev->skmsg_free_list == NULL ||
                    skdev->skreq_free_list == NULL ||
                    skdev->in_flight >= skdev->queue_low_water_mark)
                        /* There is still some kind of shortage */
                        return;

                queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
        }

        /*
         * Stop conditions:
         *  - There are no more native requests
         *  - There are already the maximum number of requests in progress
         *  - There are no more skd_request_context entries
         *  - There are no more FIT msg buffers
         */
        for (;;) {

                flush = fua = 0;

                req = blk_peek_request(q);

                /* Are there any native requests to start? */
                if (req == NULL)
                        break;

                lba = (u32)blk_rq_pos(req);
                count = blk_rq_sectors(req);
                data_dir = rq_data_dir(req);
                io_flags = req->cmd_flags;

                if (req_op(req) == REQ_OP_FLUSH)
                        flush++;

                if (io_flags & REQ_FUA)
                        fua++;

                dev_dbg(&skdev->pdev->dev,
                        "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
                        req, lba, lba, count, count, data_dir);

                /* At this point we know there is a request */

                /* Are too many requests already in progress? */
                if (skdev->in_flight >= skdev->cur_max_queue_depth) {
                        dev_dbg(&skdev->pdev->dev, "qdepth %d, limit %d\n",
                                skdev->in_flight, skdev->cur_max_queue_depth);
                        break;
                }

                /* Is a skd_request_context available? */
                skreq = skdev->skreq_free_list;
                if (skreq == NULL) {
                        dev_dbg(&skdev->pdev->dev, "Out of skreq, q=%p\n", q);
                        break;
                }
                SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
                SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

                /* Now we check to see if we can get a fit msg */
                if (skmsg == NULL) {
                        if (skdev->skmsg_free_list == NULL) {
                                dev_dbg(&skdev->pdev->dev, "Out of msg\n");
                                break;
                        }
                }

                skreq->flush_cmd = 0;
                skreq->n_sg = 0;
                skreq->sg_byte_count = 0;

                /*
                 * OK to now dequeue request from q.
                 *
                 * At this point we are committed to either start or reject
                 * the native request. Note that skd_request_context is
                 * available but is still at the head of the free list.
                 */
                blk_start_request(req);
                skreq->req = req;
                skreq->fitmsg_id = 0;

                skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE :
                        DMA_TO_DEVICE;

                if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
                        dev_dbg(&skdev->pdev->dev, "error Out\n");
                        skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
                        continue;
                }

                /* Either a FIT msg is in progress or we have to start one. */
                if (skmsg == NULL) {
                        /* Are there any FIT msg buffers available? */
                        skmsg = skdev->skmsg_free_list;
                        if (skmsg == NULL) {
                                dev_dbg(&skdev->pdev->dev,
                                        "Out of msg skdev=%p\n",
                                        skdev);
                                break;
                        }
                        SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
                        SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);

                        skdev->skmsg_free_list = skmsg->next;

                        skmsg->state = SKD_MSG_STATE_BUSY;
                        skmsg->id += SKD_ID_INCR;

                        /* Initialize the FIT msg header */
                        fmh = &skmsg->msg_buf->fmh;
                        memset(fmh, 0, sizeof(*fmh));
                        fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
                        skmsg->length = sizeof(*fmh);
                }

                skreq->fitmsg_id = skmsg->id;

                scsi_req =
                        &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
                memset(scsi_req, 0, sizeof(*scsi_req));

                be_dmaa = cpu_to_be64(skreq->sksg_dma_address);
                cmdctxt = skreq->id + SKD_ID_INCR;

                scsi_req->hdr.tag = cmdctxt;
                scsi_req->hdr.sg_list_dma_address = be_dmaa;

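                /*
                 * flush was bumped at most once above, so comparing it
                 * with SKD_FLUSH_ZERO_SIZE_FIRST (== 1) is effectively
                 * just "was this a flush request".
                 */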
                if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
                        skd_prep_zerosize_flush_cdb(scsi_req, skreq);
                        SKD_ASSERT(skreq->flush_cmd == 1);
                } else {
                        skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
                }

                if (fua)
                        scsi_req->cdb[1] |= SKD_FUA_NV;

                scsi_req->hdr.sg_list_len_bytes =
                        cpu_to_be32(skreq->sg_byte_count);

                /* Complete resource allocations. */
                skdev->skreq_free_list = skreq->next;
                skreq->state = SKD_REQ_STATE_BUSY;
                skreq->id += SKD_ID_INCR;

                skmsg->length += sizeof(struct skd_scsi_request);
                fmh->num_protocol_cmds_coalesced++;

                /*
                 * Update the active request counts.
                 * Capture the timeout timestamp.
                 */
                skreq->timeout_stamp = skdev->timeout_stamp;
                timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
                skdev->timeout_slot[timo_slot]++;
                skdev->in_flight++;
                dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
                        skdev->in_flight);

                /*
                 * If the FIT msg buffer is full send it.
                 */
                if (fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
                        skd_send_fitmsg(skdev, skmsg);
                        skmsg = NULL;
                        fmh = NULL;
                }
        }

        /* If the FIT msg buffer is not empty send what we got. */
        if (skmsg) {
                WARN_ON_ONCE(!fmh->num_protocol_cmds_coalesced);
                skd_send_fitmsg(skdev, skmsg);
                skmsg = NULL;
                fmh = NULL;
        }

        /*
         * If req is non-NULL it means there is something to do but
         * we are out of a resource.
         */
        if (req)
                blk_stop_queue(skdev->queue);
}

static void skd_end_request(struct skd_device *skdev,
                struct skd_request_context *skreq, blk_status_t error)
{
        if (unlikely(error)) {
                struct request *req = skreq->req;
                char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
                u32 lba = (u32)blk_rq_pos(req);
                u32 count = blk_rq_sectors(req);

                dev_err(&skdev->pdev->dev,
                        "Error cmd=%s sect=%u count=%u id=0x%x\n", cmd, lba,
                        count, skreq->id);
        } else {
                dev_dbg(&skdev->pdev->dev, "id=0x%x error=%d\n", skreq->id,
                        error);
        }

        __blk_end_request_all(skreq->req, error);
}

static bool skd_preop_sg_list(struct skd_device *skdev,
                             struct skd_request_context *skreq)
{
        struct request *req = skreq->req;
        struct scatterlist *sgl = &skreq->sg[0], *sg;
        int n_sg;
        int i;

        skreq->sg_byte_count = 0;

        WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
                     skreq->data_dir != DMA_FROM_DEVICE);

        n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
        if (n_sg <= 0)
                return false;

        /*
         * Map scatterlist to PCI bus addresses.
         * Note PCI might change the number of entries.
         */
        n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
        if (n_sg <= 0)
                return false;

        SKD_ASSERT(n_sg <= skdev->sgs_per_request);

        skreq->n_sg = n_sg;

        for_each_sg(sgl, sg, n_sg, i) {
                struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
                u32 cnt = sg_dma_len(sg);
                uint64_t dma_addr = sg_dma_address(sg);

                sgd->control = FIT_SGD_CONTROL_NOT_LAST;
                sgd->byte_count = cnt;
                skreq->sg_byte_count += cnt;
                sgd->host_side_addr = dma_addr;
                sgd->dev_side_addr = 0;
        }

        skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
        skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

        if (unlikely(skdev->dbg_level > 1)) {
                dev_dbg(&skdev->pdev->dev,
                        "skreq=%x sksg_list=%p sksg_dma=%llx\n",
                        skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
                for (i = 0; i < n_sg; i++) {
                        struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

                        dev_dbg(&skdev->pdev->dev,
                                "  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
                                i, sgd->byte_count, sgd->control,
                                sgd->host_side_addr, sgd->next_desc_ptr);
                }
        }

        return true;
}

static void skd_postop_sg_list(struct skd_device *skdev,
                               struct skd_request_context *skreq)
{
        /*
         * restore the next ptr for next IO request so we
         * don't have to set it every time.
         */
        skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
                skreq->sksg_dma_address +
                ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
        pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
}

/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */

static void skd_timer_tick_not_online(struct skd_device *skdev);

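/*
 * The tick below runs once a second (re-armed at the bottom with
 * mod_timer(jiffies + HZ)): it re-reads the drive state, advances the
 * timeout wheel, and switches to DRAINING_TIMEOUT when the slot coming
 * back around still has outstanding requests.
 */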
static void skd_timer_tick(ulong arg)
{
        struct skd_device *skdev = (struct skd_device *)arg;
        u32 timo_slot;
        unsigned long reqflags;
        u32 state;

        if (skdev->state == SKD_DRVR_STATE_FAULT)
                /* The driver has declared fault, and we want it to
                 * stay that way until driver is reloaded.
                 */
                return;

        spin_lock_irqsave(&skdev->lock, reqflags);

        state = SKD_READL(skdev, FIT_STATUS);
        state &= FIT_SR_DRIVE_STATE_MASK;
        if (state != skdev->drive_state)
                skd_isr_fwstate(skdev);

        if (skdev->state != SKD_DRVR_STATE_ONLINE) {
                skd_timer_tick_not_online(skdev);
                goto timer_func_out;
        }
        skdev->timeout_stamp++;
        timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;

        /*
         * All requests that happened during the previous use of
         * this slot should be done by now. The previous use of this
         * slot value was SKD_N_TIMEOUT_SLOT ticks (about four
         * seconds) ago.
         */
        if (skdev->timeout_slot[timo_slot] == 0)
                goto timer_func_out;

        /* Something is overdue */
        dev_dbg(&skdev->pdev->dev, "found %d timeouts, draining busy=%d\n",
                skdev->timeout_slot[timo_slot], skdev->in_flight);
        dev_err(&skdev->pdev->dev, "Overdue IOs (%d), busy %d\n",
                skdev->timeout_slot[timo_slot], skdev->in_flight);

        skdev->timer_countdown = SKD_DRAINING_TIMO;
        skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
        skdev->timo_slot = timo_slot;
        blk_stop_queue(skdev->queue);

timer_func_out:
        mod_timer(&skdev->timer, (jiffies + HZ));

        spin_unlock_irqrestore(&skdev->lock, reqflags);
}

static void skd_timer_tick_not_online(struct skd_device *skdev)
{
        switch (skdev->state) {
        case SKD_DRVR_STATE_IDLE:
        case SKD_DRVR_STATE_LOAD:
                break;
        case SKD_DRVR_STATE_BUSY_SANITIZE:
                dev_dbg(&skdev->pdev->dev,
                        "drive busy sanitize[%x], driver[%x]\n",
                        skdev->drive_state, skdev->state);
                /* If we've been in sanitize for 3 seconds, we figure we're not
                 * going to get any more completions, so recover requests now
                 */
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                skd_recover_requests(skdev);
                break;

        case SKD_DRVR_STATE_BUSY:
        case SKD_DRVR_STATE_BUSY_IMMINENT:
        case SKD_DRVR_STATE_BUSY_ERASE:
                dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
                        skdev->state, skdev->timer_countdown);
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                dev_dbg(&skdev->pdev->dev,
                        "busy[%x], timeout=%d, restarting device.\n",
                        skdev->state, skdev->timer_countdown);
                skd_restart_device(skdev);
                break;

        case SKD_DRVR_STATE_WAIT_BOOT:
        case SKD_DRVR_STATE_STARTING:
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                /* For now, we fault the drive.  Could attempt resets to
                 * recover at some point. */
                skdev->state = SKD_DRVR_STATE_FAULT;

                dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
                        skdev->drive_state);

                /* start the queue so we can respond with error to requests */
                /* wakeup anyone waiting for startup complete */
                blk_start_queue(skdev->queue);
                skdev->gendisk_on = -1;
                wake_up_interruptible(&skdev->waitq);
                break;

        case SKD_DRVR_STATE_ONLINE:
                /* shouldn't get here. */
                break;

        case SKD_DRVR_STATE_PAUSING:
        case SKD_DRVR_STATE_PAUSED:
                break;

        case SKD_DRVR_STATE_DRAINING_TIMEOUT:
                dev_dbg(&skdev->pdev->dev,
                        "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
                        skdev->timo_slot, skdev->timer_countdown,
                        skdev->in_flight,
                        skdev->timeout_slot[skdev->timo_slot]);
                /* if the slot has cleared we can let the I/O continue */
                if (skdev->timeout_slot[skdev->timo_slot] == 0) {
                        dev_dbg(&skdev->pdev->dev,
                                "Slot drained, starting queue.\n");
                        skdev->state = SKD_DRVR_STATE_ONLINE;
                        blk_start_queue(skdev->queue);
                        return;
                }
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                skd_restart_device(skdev);
                break;

        case SKD_DRVR_STATE_RESTARTING:
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                /* For now, we fault the drive. Could attempt resets to
                 * recover at some point. */
                skdev->state = SKD_DRVR_STATE_FAULT;
                dev_err(&skdev->pdev->dev,
                        "DriveFault Reconnect Timeout (%x)\n",
                        skdev->drive_state);

                /*
                 * Recovering does two things:
                 * 1. completes IO with error
                 * 2. reclaims dma resources
                 * When is it safe to recover requests?
                 * - if the drive state is faulted
                 * - if the state is still soft reset after our timeout
                 * - if the drive registers are dead (state = FF)
                 * If it is "unsafe", we still need to recover, so we will
                 * disable pci bus mastering and disable our interrupts.
                 */

                if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
                    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
                    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) {
                        /* It never came out of soft reset. Try to
                         * recover the requests and then let them
                         * fail. This is to mitigate hung processes. */
                        skd_recover_requests(skdev);
                } else {
                        dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
                                skdev->drive_state);
                        pci_disable_device(skdev->pdev);
                        skd_disable_interrupts(skdev);
                        skd_recover_requests(skdev);
                }

                /* start the queue so we can respond with error to requests */
                /* wakeup anyone waiting for startup complete */
                blk_start_queue(skdev->queue);
                skdev->gendisk_on = -1;
                wake_up_interruptible(&skdev->waitq);
                break;

        case SKD_DRVR_STATE_RESUMING:
        case SKD_DRVR_STATE_STOPPING:
        case SKD_DRVR_STATE_SYNCING:
        case SKD_DRVR_STATE_FAULT:
        case SKD_DRVR_STATE_DISAPPEARED:
        default:
                break;
        }
}

static int skd_start_timer(struct skd_device *skdev)
{
        int rc;

        setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);

        rc = mod_timer(&skdev->timer, (jiffies + HZ));
        if (rc)
                dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
        return rc;
}

static void skd_kill_timer(struct skd_device *skdev)
{
        del_timer_sync(&skdev->timer);
}

/*
 *****************************************************************************
 * IOCTL
 *****************************************************************************
 */
static int skd_ioctl_sg_io(struct skd_device *skdev,
                           fmode_t mode, void __user *argp);
static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
                                        struct skd_sg_io *sksgio);
static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
                                   struct skd_sg_io *sksgio);
static int skd_sg_io_prep_buffering(struct skd_device *skdev,
                                    struct skd_sg_io *sksgio);
static int skd_sg_io_copy_buffer(struct skd_device *skdev,
                                 struct skd_sg_io *sksgio, int dxfer_dir);
static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
                                 struct skd_sg_io *sksgio);
static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
static int skd_sg_io_release_skspcl(struct skd_device *skdev,
                                    struct skd_sg_io *sksgio);
static int skd_sg_io_put_status(struct skd_device *skdev,
                                struct skd_sg_io *sksgio);

static void skd_complete_special(struct skd_device *skdev,
                                 struct fit_completion_entry_v1 *skcomp,
                                 struct fit_comp_error_info *skerr,
                                 struct skd_special_context *skspcl);

static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
                          uint cmd_in, ulong arg)
{
        static const int sg_version_num = 30527;
        int rc = 0, timeout;
        struct gendisk *disk = bdev->bd_disk;
        struct skd_device *skdev = disk->private_data;
        int __user *p = (int __user *)arg;

        dev_dbg(&skdev->pdev->dev,
                "%s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
                disk->disk_name, current->comm, mode, cmd_in, arg);

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch (cmd_in) {
        case SG_SET_TIMEOUT:
                rc = get_user(timeout, p);
                if (!rc)
                        disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
                break;
        case SG_GET_TIMEOUT:
                rc = jiffies_to_clock_t(disk->queue->sg_timeout);
                break;
        case SG_GET_VERSION_NUM:
                rc = put_user(sg_version_num, p);
                break;
        case SG_IO:
                rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
                break;

        default:
                rc = -ENOTTY;
                break;
        }

        dev_dbg(&skdev->pdev->dev, "%s: completion rc %d\n", disk->disk_name,
                rc);
        return rc;
}

static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
                           void __user *argp)
{
        int rc;
        struct skd_sg_io sksgio;

        memset(&sksgio, 0, sizeof(sksgio));
        sksgio.mode = mode;
        sksgio.argp = argp;
        sksgio.iov = &sksgio.no_iov_iov;

        switch (skdev->state) {
        case SKD_DRVR_STATE_ONLINE:
        case SKD_DRVR_STATE_BUSY_IMMINENT:
                break;

        default:
                dev_dbg(&skdev->pdev->dev, "drive not online\n");
                rc = -ENXIO;
                goto out;
        }

        rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
        if (rc)
                goto out;

        rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
        if (rc)
                goto out;

        rc = skd_sg_io_prep_buffering(skdev, &sksgio);
        if (rc)
                goto out;

        rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
        if (rc)
                goto out;

        rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
        if (rc)
                goto out;

        rc = skd_sg_io_await(skdev, &sksgio);
        if (rc)
                goto out;

        rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
        if (rc)
                goto out;

        rc = skd_sg_io_put_status(skdev, &sksgio);
        if (rc)
                goto out;

        rc = 0;

out:
        skd_sg_io_release_skspcl(skdev, &sksgio);

        if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
                kfree(sksgio.iov);
        return rc;
}

static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
                                        struct skd_sg_io *sksgio)
{
        struct sg_io_hdr *sgp = &sksgio->sg;
        int i, __maybe_unused acc;

        if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
                dev_dbg(&skdev->pdev->dev, "access sg failed %p\n",
                        sksgio->argp);
                return -EFAULT;
        }

        if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
                dev_dbg(&skdev->pdev->dev, "copy_from_user sg failed %p\n",
                        sksgio->argp);
                return -EFAULT;
        }

        if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
                dev_dbg(&skdev->pdev->dev, "interface_id invalid 0x%x\n",
                        sgp->interface_id);
                return -EINVAL;
        }

        if (sgp->cmd_len > sizeof(sksgio->cdb)) {
                dev_dbg(&skdev->pdev->dev, "cmd_len invalid %d\n",
                        sgp->cmd_len);
                return -EINVAL;
        }

        if (sgp->iovec_count > 256) {
                dev_dbg(&skdev->pdev->dev, "iovec_count invalid %d\n",
                        sgp->iovec_count);
                return -EINVAL;
        }

        if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
                dev_dbg(&skdev->pdev->dev, "dxfer_len invalid %d\n",
                        sgp->dxfer_len);
                return -EINVAL;
        }

        switch (sgp->dxfer_direction) {
        case SG_DXFER_NONE:
                acc = -1;
                break;

        case SG_DXFER_TO_DEV:
                acc = VERIFY_READ;
                break;

        case SG_DXFER_FROM_DEV:
        case SG_DXFER_TO_FROM_DEV:
                acc = VERIFY_WRITE;
                break;

        default:
                dev_dbg(&skdev->pdev->dev, "dxfer_dir invalid %d\n",
                        sgp->dxfer_direction);
                return -EINVAL;
        }

        if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
                dev_dbg(&skdev->pdev->dev, "copy_from_user cmdp failed %p\n",
                        sgp->cmdp);
                return -EFAULT;
        }

        if (sgp->mx_sb_len != 0) {
                if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
                        dev_dbg(&skdev->pdev->dev, "access sbp failed %p\n",
                                sgp->sbp);
                        return -EFAULT;
                }
        }

        if (sgp->iovec_count == 0) {
                sksgio->iov[0].iov_base = sgp->dxferp;
                sksgio->iov[0].iov_len = sgp->dxfer_len;
                sksgio->iovcnt = 1;
                sksgio->dxfer_len = sgp->dxfer_len;
        } else {
                struct sg_iovec *iov;
                uint nbytes = sizeof(*iov) * sgp->iovec_count;
                size_t iov_data_len;

                iov = kmalloc(nbytes, GFP_KERNEL);
                if (iov == NULL) {
                        dev_dbg(&skdev->pdev->dev, "alloc iovec failed %d\n",
                                sgp->iovec_count);
                        return -ENOMEM;
                }
                sksgio->iov = iov;
                sksgio->iovcnt = sgp->iovec_count;

                if (copy_from_user(iov, sgp->dxferp, nbytes)) {
                        dev_dbg(&skdev->pdev->dev,
                                "copy_from_user iovec failed %p\n",
                                sgp->dxferp);
                        return -EFAULT;
                }

                /*
                 * Sum up the vecs, making sure they don't overflow
                 */
                iov_data_len = 0;
                for (i = 0; i < sgp->iovec_count; i++) {
                        if (iov_data_len + iov[i].iov_len < iov_data_len)
                                return -EINVAL;
                        iov_data_len += iov[i].iov_len;
                }

                /* SG_IO howto says that the shorter of the two wins */
                if (sgp->dxfer_len < iov_data_len) {
                        sksgio->iovcnt = iov_shorten((struct iovec *)iov,
                                                     sgp->iovec_count,
                                                     sgp->dxfer_len);
                        sksgio->dxfer_len = sgp->dxfer_len;
                } else {
                        sksgio->dxfer_len = iov_data_len;
                }
        }

        if (sgp->dxfer_direction != SG_DXFER_NONE) {
                struct sg_iovec *iov = sksgio->iov;

                for (i = 0; i < sksgio->iovcnt; i++, iov++) {
                        if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
                                dev_dbg(&skdev->pdev->dev,
                                        "access data failed %p/%zd\n",
                                        iov->iov_base, iov->iov_len);
                                return -EFAULT;
                        }
                }
        }

        return 0;
}

static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
                                   struct skd_sg_io *sksgio)
{
        struct skd_special_context *skspcl = NULL;
        int rc;

        for (;;) {
                ulong flags;

                spin_lock_irqsave(&skdev->lock, flags);
                skspcl = skdev->skspcl_free_list;
                if (skspcl != NULL) {
                        skdev->skspcl_free_list =
                                (struct skd_special_context *)skspcl->req.next;
                        skspcl->req.id += SKD_ID_INCR;
                        skspcl->req.state = SKD_REQ_STATE_SETUP;
                        skspcl->orphaned = 0;
                        skspcl->req.n_sg = 0;
                }
                spin_unlock_irqrestore(&skdev->lock, flags);

                if (skspcl != NULL) {
                        rc = 0;
                        break;
                }

                dev_dbg(&skdev->pdev->dev, "blocking\n");

                rc = wait_event_interruptible_timeout(
                                skdev->waitq,
                                (skdev->skspcl_free_list != NULL),
                                msecs_to_jiffies(sksgio->sg.timeout));

                dev_dbg(&skdev->pdev->dev, "unblocking, rc=%d\n", rc);

                if (rc <= 0) {
                        if (rc == 0)
                                rc = -ETIMEDOUT;
                        else
                                rc = -EINTR;
                        break;
                }
                /*
                 * If we get here rc > 0 meaning the timeout to
                 * wait_event_interruptible_timeout() had time left, hence the
                 * sought event -- non-empty free list -- happened.
                 * Retry the allocation.
                 */
        }
        sksgio->skspcl = skspcl;

        return rc;
}

static int skd_skreq_prep_buffering(struct skd_device *skdev,
                                    struct skd_request_context *skreq,
                                    u32 dxfer_len)
{
        u32 resid = dxfer_len;

        /*
         * The DMA engine must have aligned addresses and byte counts.
         */
        resid += (-resid) & 3;
        skreq->sg_byte_count = resid;

        skreq->n_sg = 0;

        while (resid > 0) {
                u32 nbytes = PAGE_SIZE;
                u32 ix = skreq->n_sg;
                struct scatterlist *sg = &skreq->sg[ix];
                struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
                struct page *page;

                if (nbytes > resid)
                        nbytes = resid;

                page = alloc_page(GFP_KERNEL);
                if (page == NULL)
                        return -ENOMEM;

                sg_set_page(sg, page, nbytes, 0);

                /* TODO: This should be going through a pci_???()
                 * routine to do proper mapping. */
                sksg->control = FIT_SGD_CONTROL_NOT_LAST;
                sksg->byte_count = nbytes;

                sksg->host_side_addr = sg_phys(sg);

                sksg->dev_side_addr = 0;
                sksg->next_desc_ptr = skreq->sksg_dma_address +
                                      (ix + 1) * sizeof(*sksg);

                skreq->n_sg++;
                resid -= nbytes;
        }

        if (skreq->n_sg > 0) {
                u32 ix = skreq->n_sg - 1;
                struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];

                sksg->control = FIT_SGD_CONTROL_LAST;
                sksg->next_desc_ptr = 0;
        }

        if (unlikely(skdev->dbg_level > 1)) {
                u32 i;

                dev_dbg(&skdev->pdev->dev,
                        "skreq=%x sksg_list=%p sksg_dma=%llx\n",
                        skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
                for (i = 0; i < skreq->n_sg; i++) {
                        struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

                        dev_dbg(&skdev->pdev->dev,
                                "  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
                                i, sgd->byte_count, sgd->control,
                                sgd->host_side_addr, sgd->next_desc_ptr);
                }
        }

        return 0;
}
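
/*
 * Re the TODO above: sg_phys() is tolerable there only because the
 * pages come straight from alloc_page(); going through the DMA API
 * (e.g. dma_map_page()) would be the more portable way to produce
 * host_side_addr.
 */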
1465
1466 static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1467                                     struct skd_sg_io *sksgio)
1468 {
1469         struct skd_special_context *skspcl = sksgio->skspcl;
1470         struct skd_request_context *skreq = &skspcl->req;
1471         u32 dxfer_len = sksgio->dxfer_len;
1472         int rc;
1473
1474         rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
1475         /*
1476          * Eventually, errors or not, skd_release_special() is called
1477          * to recover allocations including partial allocations.
1478          */
1479         return rc;
1480 }
1481
1482 static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1483                                  struct skd_sg_io *sksgio, int dxfer_dir)
1484 {
1485         struct skd_special_context *skspcl = sksgio->skspcl;
1486         u32 iov_ix = 0;
1487         struct sg_iovec curiov;
1488         u32 sksg_ix = 0;
1489         u8 *bufp = NULL;
1490         u32 buf_len = 0;
1491         u32 resid = sksgio->dxfer_len;
1492         int rc;
1493
1494         curiov.iov_len = 0;
1495         curiov.iov_base = NULL;
1496
1497         if (dxfer_dir != sksgio->sg.dxfer_direction) {
1498                 if (dxfer_dir != SG_DXFER_TO_DEV ||
1499                     sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
1500                         return 0;
1501         }
1502
1503         while (resid > 0) {
1504                 u32 nbytes = PAGE_SIZE;
1505
1506                 if (curiov.iov_len == 0) {
1507                         curiov = sksgio->iov[iov_ix++];
1508                         continue;
1509                 }
1510
1511                 if (buf_len == 0) {
1512                         struct page *page;
1513                         page = sg_page(&skspcl->req.sg[sksg_ix++]);
1514                         bufp = page_address(page);
1515                         buf_len = PAGE_SIZE;
1516                 }
1517
1518                 nbytes = min_t(u32, nbytes, resid);
1519                 nbytes = min_t(u32, nbytes, curiov.iov_len);
1520                 nbytes = min_t(u32, nbytes, buf_len);
1521
1522                 if (dxfer_dir == SG_DXFER_TO_DEV)
1523                         rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
1524                 else
1525                         rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
1526
1527                 if (rc)
1528                         return -EFAULT;
1529
1530                 resid -= nbytes;
1531                 curiov.iov_len -= nbytes;
1532                 curiov.iov_base += nbytes;
1533                 buf_len -= nbytes;
1534         }
1535
1536         return 0;
1537 }
1538
1539 static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1540                                  struct skd_sg_io *sksgio)
1541 {
1542         struct skd_special_context *skspcl = sksgio->skspcl;
1543         struct fit_msg_hdr *fmh = &skspcl->msg_buf->fmh;
1544         struct skd_scsi_request *scsi_req = &skspcl->msg_buf->scsi[0];
1545
1546         memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
1547
1548         /* Initialize the FIT msg header */
1549         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1550         fmh->num_protocol_cmds_coalesced = 1;
1551
1552         /* Initialize the SCSI request */
1553         if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
1554                 scsi_req->hdr.sg_list_dma_address =
1555                         cpu_to_be64(skspcl->req.sksg_dma_address);
1556         scsi_req->hdr.tag = skspcl->req.id;
1557         scsi_req->hdr.sg_list_len_bytes =
1558                 cpu_to_be32(skspcl->req.sg_byte_count);
1559         memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
1560
1561         skspcl->req.state = SKD_REQ_STATE_BUSY;
1562         skd_send_special_fitmsg(skdev, skspcl);
1563
1564         return 0;
1565 }
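
/*
 * The special FIT message built above carries a single coalesced
 * command: a FIT header followed by one SCSI request whose tag is the
 * skreq id. The completion path matches on that tag to locate this
 * request again.
 */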
1566
1567 static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
1568 {
1569         unsigned long flags;
1570         int rc;
1571
1572         rc = wait_event_interruptible_timeout(skdev->waitq,
1573                                               (sksgio->skspcl->req.state !=
1574                                                SKD_REQ_STATE_BUSY),
1575                                               msecs_to_jiffies(
1576                                                       sksgio->sg.timeout));
1577
1578         spin_lock_irqsave(&skdev->lock, flags);
1579
1580         if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
1581                 dev_dbg(&skdev->pdev->dev, "skspcl %p aborted\n",
1582                         sksgio->skspcl);
1583
1584                 /* Build check cond, sense and let command finish. */
1585                 /* For a timeout, we must fabricate completion and sense
1586                  * data to complete the command */
1587                 sksgio->skspcl->req.completion.status =
1588                         SAM_STAT_CHECK_CONDITION;
1589
1590                 memset(&sksgio->skspcl->req.err_info, 0,
1591                        sizeof(sksgio->skspcl->req.err_info));
1592                 sksgio->skspcl->req.err_info.type = 0x70;
1593                 sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
1594                 sksgio->skspcl->req.err_info.code = 0x44;
1595                 sksgio->skspcl->req.err_info.qual = 0;
1596                 rc = 0;
1597         } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
1598                 /* No longer on the adapter. We finish. */
1599                 rc = 0;
1600         else {
1601                 /* Something's gone wrong. Still busy. Timeout or
1602                  * user interrupted (control-C). Mark as an orphan
1603                  * so it will be disposed when completed. */
1604                 sksgio->skspcl->orphaned = 1;
1605                 sksgio->skspcl = NULL;
1606                 if (rc == 0) {
1607                         dev_dbg(&skdev->pdev->dev, "timed out %p (%u ms)\n",
1608                                 sksgio, sksgio->sg.timeout);
1609                         rc = -ETIMEDOUT;
1610                 } else {
1611                         dev_dbg(&skdev->pdev->dev, "cntlc %p\n", sksgio);
1612                         rc = -EINTR;
1613                 }
1614         }
1615
1616         spin_unlock_irqrestore(&skdev->lock, flags);
1617
1618         return rc;
1619 }
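
/*
 * skd_sg_io_await() resolves to one of three outcomes: the request was
 * aborted (CHECK CONDITION with ABORTED_COMMAND sense is fabricated and
 * the command finishes), the request completed normally, or it is still
 * busy after a timeout or signal, in which case it is orphaned and
 * disposed of later by the completion path.
 */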
1620
1621 static int skd_sg_io_put_status(struct skd_device *skdev,
1622                                 struct skd_sg_io *sksgio)
1623 {
1624         struct sg_io_hdr *sgp = &sksgio->sg;
1625         struct skd_special_context *skspcl = sksgio->skspcl;
1626         int resid = 0;
1627
1628         u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
1629
1630         sgp->status = skspcl->req.completion.status;
1631         resid = sksgio->dxfer_len - nb;
1632
1633         sgp->masked_status = sgp->status & STATUS_MASK;
1634         sgp->msg_status = 0;
1635         sgp->host_status = 0;
1636         sgp->driver_status = 0;
1637         sgp->resid = resid;
1638         if (sgp->masked_status || sgp->host_status || sgp->driver_status)
1639                 sgp->info |= SG_INFO_CHECK;
1640
1641         dev_dbg(&skdev->pdev->dev, "status %x masked %x resid 0x%x\n",
1642                 sgp->status, sgp->masked_status, sgp->resid);
1643
1644         if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
1645                 if (sgp->mx_sb_len > 0) {
1646                         struct fit_comp_error_info *ei = &skspcl->req.err_info;
1647                         u32 nbytes = sizeof(*ei);
1648
1649                         nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
1650
1651                         sgp->sb_len_wr = nbytes;
1652
1653                         if (__copy_to_user(sgp->sbp, ei, nbytes)) {
1654                                 dev_dbg(&skdev->pdev->dev,
1655                                         "copy_to_user sense failed %p\n",
1656                                         sgp->sbp);
1657                                 return -EFAULT;
1658                         }
1659                 }
1660         }
1661
1662         if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
1663                 dev_dbg(&skdev->pdev->dev, "copy_to_user sg failed %p\n",
1664                         sksgio->argp);
1665                 return -EFAULT;
1666         }
1667
1668         return 0;
1669 }
1670
1671 static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1672                                     struct skd_sg_io *sksgio)
1673 {
1674         struct skd_special_context *skspcl = sksgio->skspcl;
1675
1676         if (skspcl != NULL) {
1677                 ulong flags;
1678
1679                 sksgio->skspcl = NULL;
1680
1681                 spin_lock_irqsave(&skdev->lock, flags);
1682                 skd_release_special(skdev, skspcl);
1683                 spin_unlock_irqrestore(&skdev->lock, flags);
1684         }
1685
1686         return 0;
1687 }
1688
1689 /*
1690  *****************************************************************************
1691  * INTERNAL REQUESTS -- generated by driver itself
1692  *****************************************************************************
1693  */
1694
1695 static int skd_format_internal_skspcl(struct skd_device *skdev)
1696 {
1697         struct skd_special_context *skspcl = &skdev->internal_skspcl;
1698         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1699         struct fit_msg_hdr *fmh;
1700         uint64_t dma_address;
1701         struct skd_scsi_request *scsi;
1702
1703         fmh = &skspcl->msg_buf->fmh;
1704         fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1705         fmh->num_protocol_cmds_coalesced = 1;
1706
1707         scsi = &skspcl->msg_buf->scsi[0];
1708         memset(scsi, 0, sizeof(*scsi));
1709         dma_address = skspcl->req.sksg_dma_address;
1710         scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1711         sgd->control = FIT_SGD_CONTROL_LAST;
1712         sgd->byte_count = 0;
1713         sgd->host_side_addr = skspcl->db_dma_address;
1714         sgd->dev_side_addr = 0;
1715         sgd->next_desc_ptr = 0LL;
1716
1717         return 1;
1718 }
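
/*
 * The internal special context is preformatted here: its single SG
 * descriptor permanently points at the context's data buffer
 * (db_dma_address), so each internal command only needs to patch the
 * CDB and byte count. The fixed return value of 1 is the success
 * indication.
 */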
1719
1720 #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
1721
1722 static void skd_send_internal_skspcl(struct skd_device *skdev,
1723                                      struct skd_special_context *skspcl,
1724                                      u8 opcode)
1725 {
1726         struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1727         struct skd_scsi_request *scsi;
1728         unsigned char *buf = skspcl->data_buf;
1729         int i;
1730
1731         if (skspcl->req.state != SKD_REQ_STATE_IDLE)
1732                 /*
1733                  * A refresh is already in progress.
1734                  * Just wait for it to finish.
1735                  */
1736                 return;
1737
1738         SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
1739         skspcl->req.state = SKD_REQ_STATE_BUSY;
1740         skspcl->req.id += SKD_ID_INCR;
1741
1742         scsi = &skspcl->msg_buf->scsi[0];
1743         scsi->hdr.tag = skspcl->req.id;
1744
1745         memset(scsi->cdb, 0, sizeof(scsi->cdb));
1746
1747         switch (opcode) {
1748         case TEST_UNIT_READY:
1749                 scsi->cdb[0] = TEST_UNIT_READY;
1750                 sgd->byte_count = 0;
1751                 scsi->hdr.sg_list_len_bytes = 0;
1752                 break;
1753
1754         case READ_CAPACITY:
1755                 scsi->cdb[0] = READ_CAPACITY;
1756                 sgd->byte_count = SKD_N_READ_CAP_BYTES;
1757                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1758                 break;
1759
1760         case INQUIRY:
1761                 scsi->cdb[0] = INQUIRY;
1762                 scsi->cdb[1] = 0x01;    /* evpd */
1763                 scsi->cdb[2] = 0x80;    /* serial number page */
1764                 scsi->cdb[4] = 0x10;
1765                 sgd->byte_count = 16;
1766                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1767                 break;
1768
1769         case SYNCHRONIZE_CACHE:
1770                 scsi->cdb[0] = SYNCHRONIZE_CACHE;
1771                 sgd->byte_count = 0;
1772                 scsi->hdr.sg_list_len_bytes = 0;
1773                 break;
1774
1775         case WRITE_BUFFER:
1776                 scsi->cdb[0] = WRITE_BUFFER;
1777                 scsi->cdb[1] = 0x02;
1778                 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1779                 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1780                 sgd->byte_count = WR_BUF_SIZE;
1781                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1782                 /* fill incrementing byte pattern */
1783                 for (i = 0; i < sgd->byte_count; i++)
1784                         buf[i] = i & 0xFF;
1785                 break;
1786
1787         case READ_BUFFER:
1788                 scsi->cdb[0] = READ_BUFFER;
1789                 scsi->cdb[1] = 0x02;
1790                 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1791                 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1792                 sgd->byte_count = WR_BUF_SIZE;
1793                 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1794                 memset(skspcl->data_buf, 0, sgd->byte_count);
1795                 break;
1796
1797         default:
1798                 SKD_ASSERT(!"Don't know what to send");
1799                 return;
1800
1801         }
1802         skd_send_special_fitmsg(skdev, skspcl);
1803 }
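
/*
 * Internal commands form a ladder driven from skd_complete_internal():
 * TEST_UNIT_READY -> WRITE_BUFFER -> READ_BUFFER (pattern checked by
 * skd_chk_read_buf()) -> READ_CAPACITY -> INQUIRY, after which the
 * device is brought online. A failure at any rung generally restarts
 * the ladder at TEST_UNIT_READY.
 */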
1804
1805 static void skd_refresh_device_data(struct skd_device *skdev)
1806 {
1807         struct skd_special_context *skspcl = &skdev->internal_skspcl;
1808
1809         skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1810 }
1811
1812 static int skd_chk_read_buf(struct skd_device *skdev,
1813                             struct skd_special_context *skspcl)
1814 {
1815         unsigned char *buf = skspcl->data_buf;
1816         int i;
1817
1818         /* check for incrementing byte pattern */
1819         for (i = 0; i < WR_BUF_SIZE; i++)
1820                 if (buf[i] != (i & 0xFF))
1821                         return 1;
1822
1823         return 0;
1824 }
1825
1826 static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
1827                                  u8 code, u8 qual, u8 fruc)
1828 {
1829         /* If the check condition is of special interest, log a message */
1830         if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
1831             && (code == 0x04) && (qual == 0x06)) {
1832                 dev_err(&skdev->pdev->dev,
1833                         "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
1834                         key, code, qual, fruc);
1835         }
1836 }
1837
1838 static void skd_complete_internal(struct skd_device *skdev,
1839                                   struct fit_completion_entry_v1 *skcomp,
1840                                   struct fit_comp_error_info *skerr,
1841                                   struct skd_special_context *skspcl)
1842 {
1843         u8 *buf = skspcl->data_buf;
1844         u8 status;
1845         int i;
1846         struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];
1847
1848         lockdep_assert_held(&skdev->lock);
1849
1850         SKD_ASSERT(skspcl == &skdev->internal_skspcl);
1851
1852         dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);
1853
1854         skspcl->req.completion = *skcomp;
1855         skspcl->req.state = SKD_REQ_STATE_IDLE;
1856         skspcl->req.id += SKD_ID_INCR;
1857
1858         status = skspcl->req.completion.status;
1859
1860         skd_log_check_status(skdev, status, skerr->key, skerr->code,
1861                              skerr->qual, skerr->fruc);
1862
1863         switch (scsi->cdb[0]) {
1864         case TEST_UNIT_READY:
1865                 if (status == SAM_STAT_GOOD)
1866                         skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1867                 else if ((status == SAM_STAT_CHECK_CONDITION) &&
1868                          (skerr->key == MEDIUM_ERROR))
1869                         skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1870                 else {
1871                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1872                                 dev_dbg(&skdev->pdev->dev,
1873                                         "TUR failed, don't send any more; state 0x%x\n",
1874                                         skdev->state);
1875                                 return;
1876                         }
1877                         dev_dbg(&skdev->pdev->dev,
1878                                 "**** TUR failed, retry skerr\n");
1879                         skd_send_internal_skspcl(skdev, skspcl,
1880                                                  TEST_UNIT_READY);
1881                 }
1882                 break;
1883
1884         case WRITE_BUFFER:
1885                 if (status == SAM_STAT_GOOD)
1886                         skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
1887                 else {
1888                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1889                                 dev_dbg(&skdev->pdev->dev,
1890                                         "write buffer failed, don't send any more; state 0x%x\n",
1891                                         skdev->state);
1892                                 return;
1893                         }
1894                         dev_dbg(&skdev->pdev->dev,
1895                                 "**** write buffer failed, retry skerr\n");
1896                         skd_send_internal_skspcl(skdev, skspcl,
1897                                                  TEST_UNIT_READY);
1898                 }
1899                 break;
1900
1901         case READ_BUFFER:
1902                 if (status == SAM_STAT_GOOD) {
1903                         if (skd_chk_read_buf(skdev, skspcl) == 0)
1904                                 skd_send_internal_skspcl(skdev, skspcl,
1905                                                          READ_CAPACITY);
1906                         else {
1907                                 dev_err(&skdev->pdev->dev,
1908                                         "*** W/R Buffer mismatch %d ***\n",
1909                                         skdev->connect_retries);
1910                                 if (skdev->connect_retries <
1911                                     SKD_MAX_CONNECT_RETRIES) {
1912                                         skdev->connect_retries++;
1913                                         skd_soft_reset(skdev);
1914                                 } else {
1915                                         dev_err(&skdev->pdev->dev,
1916                                                 "W/R Buffer Connect Error\n");
1917                                         return;
1918                                 }
1919                         }
1920
1921                 } else {
1922                         if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1923                                 dev_dbg(&skdev->pdev->dev,
1924                                         "read buffer failed, don't send any more; state 0x%x\n",
1925                                         skdev->state);
1926                                 return;
1927                         }
1928                         dev_dbg(&skdev->pdev->dev,
1929                                 "**** read buffer failed, retry skerr\n");
1930                         skd_send_internal_skspcl(skdev, skspcl,
1931                                                  TEST_UNIT_READY);
1932                 }
1933                 break;
1934
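        /*
         * READ CAPACITY(10) returns eight big-endian bytes: the last
         * addressable LBA in bytes 0-3 and the block length in bytes
         * 4-7, which is exactly how the buffer is decoded below.
         */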
1935         case READ_CAPACITY:
1936                 skdev->read_cap_is_valid = 0;
1937                 if (status == SAM_STAT_GOOD) {
1938                         skdev->read_cap_last_lba =
1939                                 (buf[0] << 24) | (buf[1] << 16) |
1940                                 (buf[2] << 8) | buf[3];
1941                         skdev->read_cap_blocksize =
1942                                 (buf[4] << 24) | (buf[5] << 16) |
1943                                 (buf[6] << 8) | buf[7];
1944
1945                         dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
1946                                 skdev->read_cap_last_lba,
1947                                 skdev->read_cap_blocksize);
1948
1949                         set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
1950
1951                         skdev->read_cap_is_valid = 1;
1952
1953                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
1954                 } else if ((status == SAM_STAT_CHECK_CONDITION) &&
1955                            (skerr->key == MEDIUM_ERROR)) {
1956                         skdev->read_cap_last_lba = ~0;
1957                         set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
1958                         dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
1959                         skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
1960                 } else {
1961                         dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
1962                         skd_send_internal_skspcl(skdev, skspcl,
1963                                                  TEST_UNIT_READY);
1964                 }
1965                 break;
1966
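        /*
         * The unit serial number VPD page (0x80) carries its payload
         * starting at byte 4; the 12-byte serial number is copied out
         * and NUL-terminated below.
         */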
1967         case INQUIRY:
1968                 skdev->inquiry_is_valid = 0;
1969                 if (status == SAM_STAT_GOOD) {
1970                         skdev->inquiry_is_valid = 1;
1971
1972                         for (i = 0; i < 12; i++)
1973                                 skdev->inq_serial_num[i] = buf[i + 4];
1974                         skdev->inq_serial_num[12] = 0;
1975                 }
1976
1977                 if (skd_unquiesce_dev(skdev) < 0)
1978                         dev_dbg(&skdev->pdev->dev, "**** failed to ONLINE device\n");
1979                  /* connection is complete */
1980                 skdev->connect_retries = 0;
1981                 break;
1982
1983         case SYNCHRONIZE_CACHE:
1984                 if (status == SAM_STAT_GOOD)
1985                         skdev->sync_done = 1;
1986                 else
1987                         skdev->sync_done = -1;
1988                 wake_up_interruptible(&skdev->waitq);
1989                 break;
1990
1991         default:
1992                 SKD_ASSERT(!"we didn't send this");
1993         }
1994 }
1995
1996 /*
1997  *****************************************************************************
1998  * FIT MESSAGES
1999  *****************************************************************************
2000  */
2001
2002 static void skd_send_fitmsg(struct skd_device *skdev,
2003                             struct skd_fitmsg_context *skmsg)
2004 {
2005         u64 qcmd;
2006         struct fit_msg_hdr *fmh;
2007
2008         dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
2009                 skmsg->mb_dma_address, skdev->in_flight);
2010         dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);
2011
2012         qcmd = skmsg->mb_dma_address;
2013         qcmd |= FIT_QCMD_QID_NORMAL;
2014
2015         fmh = &skmsg->msg_buf->fmh;
2016         skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
2017
2018         if (unlikely(skdev->dbg_level > 1)) {
2019                 u8 *bp = (u8 *)skmsg->msg_buf;
2020                 int i;
2021                 for (i = 0; i < skmsg->length; i += 8) {
2022                         dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
2023                                 &bp[i]);
2024                         if (i == 0)
2025                                 i = 64 - 8;
2026                 }
2027         }
2028
2029         if (skmsg->length > 256)
2030                 qcmd |= FIT_QCMD_MSGSIZE_512;
2031         else if (skmsg->length > 128)
2032                 qcmd |= FIT_QCMD_MSGSIZE_256;
2033         else if (skmsg->length > 64)
2034                 qcmd |= FIT_QCMD_MSGSIZE_128;
2035         else
2036                 /*
2037                  * This makes no sense because the FIT msg header is
2038                  * 64 bytes. If the msg is only 64 bytes long it has
2039                  * no payload.
2040                  */
2041                 qcmd |= FIT_QCMD_MSGSIZE_64;
2042
2043         /* Make sure skd_msg_buf is written before the doorbell is triggered. */
2044         smp_wmb();
2045
2046         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2047 }
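
/*
 * The doorbell write above packs everything into one 64-bit value: the
 * DMA address of the message buffer, with the low-order bits carrying
 * the queue id and the message-size class, e.g.
 *
 *      qcmd = skmsg->mb_dma_address | FIT_QCMD_QID_NORMAL |
 *             FIT_QCMD_MSGSIZE_256;
 *
 * The smp_wmb() ensures the message body is visible in memory before
 * the device is told to fetch it.
 */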
2048
2049 static void skd_send_special_fitmsg(struct skd_device *skdev,
2050                                     struct skd_special_context *skspcl)
2051 {
2052         u64 qcmd;
2053
2054         if (unlikely(skdev->dbg_level > 1)) {
2055                 u8 *bp = (u8 *)skspcl->msg_buf;
2056                 int i;
2057
2058                 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
2059                         dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
2060                                 &bp[i]);
2061                         if (i == 0)
2062                                 i = 64 - 8;
2063                 }
2064
2065                 dev_dbg(&skdev->pdev->dev,
2066                         "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
2067                         skspcl, skspcl->req.id, skspcl->req.sksg_list,
2068                         skspcl->req.sksg_dma_address);
2069                 for (i = 0; i < skspcl->req.n_sg; i++) {
2070                         struct fit_sg_descriptor *sgd =
2071                                 &skspcl->req.sksg_list[i];
2072
2073                         dev_dbg(&skdev->pdev->dev,
2074                                 "  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
2075                                 i, sgd->byte_count, sgd->control,
2076                                 sgd->host_side_addr, sgd->next_desc_ptr);
2077                 }
2078         }
2079
2080         /*
2081          * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
2082          * and one 64-byte SSDI command.
2083          */
2084         qcmd = skspcl->mb_dma_address;
2085         qcmd |= FIT_QCMD_QID_NORMAL | FIT_QCMD_MSGSIZE_128;
2086
2087         /* Make sure skd_msg_buf is written before the doorbell is triggered. */
2088         smp_wmb();
2089
2090         SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2091 }
2092
2093 /*
2094  *****************************************************************************
2095  * COMPLETION QUEUE
2096  *****************************************************************************
2097  */
2098
2099 static void skd_complete_other(struct skd_device *skdev,
2100                                struct fit_completion_entry_v1 *skcomp,
2101                                struct fit_comp_error_info *skerr);
2102
2103 struct sns_info {
2104         u8 type;
2105         u8 stat;
2106         u8 key;
2107         u8 asc;
2108         u8 ascq;
2109         u8 mask;
2110         enum skd_check_status_action action;
2111 };
2112
2113 static struct sns_info skd_chkstat_table[] = {
2114         /* Good */
2115         { 0x70, 0x02, RECOVERED_ERROR, 0,    0,    0x1c,
2116           SKD_CHECK_STATUS_REPORT_GOOD },
2117
2118         /* Smart alerts */
2119         { 0x70, 0x02, NO_SENSE,        0x0B, 0x00, 0x1E,        /* warnings */
2120           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2121         { 0x70, 0x02, NO_SENSE,        0x5D, 0x00, 0x1E,        /* thresholds */
2122           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2123         { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F,        /* temperature over trigger */
2124           SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2125
2126         /* Retry (with limits) */
2127         { 0x70, 0x02, 0x0B,            0,    0,    0x1C,        /* This one is for DMA ERROR */
2128           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2129         { 0x70, 0x02, 0x06,            0x0B, 0x00, 0x1E,        /* warnings */
2130           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2131         { 0x70, 0x02, 0x06,            0x5D, 0x00, 0x1E,        /* thresholds */
2132           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2133         { 0x70, 0x02, 0x06,            0x80, 0x30, 0x1F,        /* backup power */
2134           SKD_CHECK_STATUS_REQUEUE_REQUEST },
2135
2136         /* Busy (or about to be) */
2137         { 0x70, 0x02, 0x06,            0x3f, 0x01, 0x1F, /* fw changed */
2138           SKD_CHECK_STATUS_BUSY_IMMINENT },
2139 };
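
/*
 * The mask bits select which fields of an entry must match, as tested
 * in skd_check_status() below: 0x10 = type, 0x08 = stat, 0x04 = key,
 * 0x02 = asc and 0x01 = ascq. So 0x1C matches on type/stat/key and
 * ignores asc/ascq, while 0x1F requires all five fields to match.
 */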
2140
2141 /*
2142  * Look up status and sense data to decide how to handle the error
2143  * from the device.
2144  * The mask says which fields must match; e.g., mask=0x18 means check
2145  * type and stat, and ignore key, asc and ascq.
2146  */
2147
2148 static enum skd_check_status_action
2149 skd_check_status(struct skd_device *skdev,
2150                  u8 cmp_status, struct fit_comp_error_info *skerr)
2151 {
2152         int i;
2153
2154         dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
2155                 skerr->key, skerr->code, skerr->qual, skerr->fruc);
2156
2157         dev_dbg(&skdev->pdev->dev,
2158                 "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
2159                 skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
2160                 skerr->fruc);
2161
2162         /* Does the info match an entry in the good category? */
2163         for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
2164                 struct sns_info *sns = &skd_chkstat_table[i];
2165
2166                 if (sns->mask & 0x10)
2167                         if (skerr->type != sns->type)
2168                                 continue;
2169
2170                 if (sns->mask & 0x08)
2171                         if (cmp_status != sns->stat)
2172                                 continue;
2173
2174                 if (sns->mask & 0x04)
2175                         if (skerr->key != sns->key)
2176                                 continue;
2177
2178                 if (sns->mask & 0x02)
2179                         if (skerr->code != sns->asc)
2180                                 continue;
2181
2182                 if (sns->mask & 0x01)
2183                         if (skerr->qual != sns->ascq)
2184                                 continue;
2185
2186                 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
2187                         dev_err(&skdev->pdev->dev,
2188                                 "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
2189                                 skerr->key, skerr->code, skerr->qual);
2190                 }
2191                 return sns->action;
2192         }
2193
2194         /* No other match, so nonzero status means error,
2195          * zero status means good
2196          */
2197         if (cmp_status) {
2198                 dev_dbg(&skdev->pdev->dev, "status check: error\n");
2199                 return SKD_CHECK_STATUS_REPORT_ERROR;
2200         }
2201
2202         dev_dbg(&skdev->pdev->dev, "status check good default\n");
2203         return SKD_CHECK_STATUS_REPORT_GOOD;
2204 }
2205
2206 static void skd_resolve_req_exception(struct skd_device *skdev,
2207                                       struct skd_request_context *skreq)
2208 {
2209         u8 cmp_status = skreq->completion.status;
2210
2211         switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
2212         case SKD_CHECK_STATUS_REPORT_GOOD:
2213         case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
2214                 skd_end_request(skdev, skreq, BLK_STS_OK);
2215                 break;
2216
2217         case SKD_CHECK_STATUS_BUSY_IMMINENT:
2218                 skd_log_skreq(skdev, skreq, "retry(busy)");
2219                 blk_requeue_request(skdev->queue, skreq->req);
2220                 dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
2221                 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2222                 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2223                 skd_quiesce_dev(skdev);
2224                 break;
2225
2226         case SKD_CHECK_STATUS_REQUEUE_REQUEST:
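                /*
                 * The request's ->special pointer is pressed into
                 * service as a retry counter: its value is incremented
                 * and compared against SKD_MAX_RETRIES.
                 */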
2227                 if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
2228                         skd_log_skreq(skdev, skreq, "retry");
2229                         blk_requeue_request(skdev->queue, skreq->req);
2230                         break;
2231                 }
2232                 /* fall through */
2233
2234         case SKD_CHECK_STATUS_REPORT_ERROR:
2235         default:
2236                 skd_end_request(skdev, skreq, BLK_STS_IOERR);
2237                 break;
2238         }
2239 }
2240
2241 /* assume spinlock is already held */
2242 static void skd_release_skreq(struct skd_device *skdev,
2243                               struct skd_request_context *skreq)
2244 {
2245         u32 msg_slot;
2246         struct skd_fitmsg_context *skmsg;
2247
2248         u32 timo_slot;
2249
2250         /*
2251          * Reclaim the FIT msg buffer if this is
2252          * the first of the requests it carried to
2253          * be completed. The FIT msg buffer used to
2254          * send this request cannot be reused until
2255          * we are sure the s1120 card has copied
2256          * it to its memory. The FIT msg might have
2257          * contained several requests. As soon as
2258          * any of them are completed we know that
2259          * the entire FIT msg was transferred.
2260          * Only the first completed request will
2261          * match the FIT msg buffer id. The FIT
2262          * msg buffer id is immediately updated.
2263          * When subsequent requests complete the FIT
2264          * msg buffer id won't match, so we know
2265          * quite cheaply that it is already done.
2266          */
2267         msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2268         SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
2269
2270         skmsg = &skdev->skmsg_table[msg_slot];
2271         if (skmsg->id == skreq->fitmsg_id) {
2272                 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
2273                 SKD_ASSERT(skmsg->outstanding > 0);
2274                 skmsg->outstanding--;
2275                 if (skmsg->outstanding == 0) {
2276                         skmsg->state = SKD_MSG_STATE_IDLE;
2277                         skmsg->id += SKD_ID_INCR;
2278                         skmsg->next = skdev->skmsg_free_list;
2279                         skdev->skmsg_free_list = skmsg;
2280                 }
2281         }
2282
2283         /*
2284          * Decrease the number of active requests.
2285          * Also decrements the count in the timeout slot.
2286          */
2287         SKD_ASSERT(skdev->in_flight > 0);
2288         skdev->in_flight -= 1;
2289
2290         timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2291         SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
2292         skdev->timeout_slot[timo_slot] -= 1;
2293
2294         /*
2295          * Reset backpointer
2296          */
2297         skreq->req = NULL;
2298
2299         /*
2300          * Reclaim the skd_request_context
2301          */
2302         skreq->state = SKD_REQ_STATE_IDLE;
2303         skreq->id += SKD_ID_INCR;
2304         skreq->next = skdev->skreq_free_list;
2305         skdev->skreq_free_list = skreq;
2306 }
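
/*
 * Bumping an id by SKD_ID_INCR on every release changes its uniquifier
 * bits while preserving the table/slot bits, so a stale completion that
 * still carries the old id can no longer match the recycled context.
 */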
2307
2308 #define DRIVER_INQ_EVPD_PAGE_CODE   0xDA
2309
2310 static void skd_do_inq_page_00(struct skd_device *skdev,
2311                                struct fit_completion_entry_v1 *skcomp,
2312                                struct fit_comp_error_info *skerr,
2313                                uint8_t *cdb, uint8_t *buf)
2314 {
2315         uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
2316
2317         /* Caller requested "supported pages".  The driver needs to insert
2318          * its page.
2319          */
2320         dev_dbg(&skdev->pdev->dev,
2321                 "skd_do_driver_inquiry: modify supported pages.\n");
2322
2323         /* If the device rejected the request because the CDB was
2324          * improperly formed, then just leave.
2325          */
2326         if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
2327             skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
2328                 return;
2329
2330         /* Get the amount of space the caller allocated */
2331         max_bytes = (cdb[3] << 8) | cdb[4];
2332
2333         /* Get the number of pages actually returned by the device */
2334         drive_pages = (buf[2] << 8) | buf[3];
2335         drive_bytes = drive_pages + 4;
2336         new_size = drive_pages + 1;
2337
2338         /* Supported pages must be in numerical order, so find where
2339          * the driver page needs to be inserted into the list of
2340          * pages returned by the device.
2341          */
2342         for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
2343                 if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2344                         return; /* Device already uses this page code; abort. */
2345                 else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
2346                         break;
2347         }
2348
2349         if (insert_pt < max_bytes) {
2350                 uint16_t u;
2351
2352                 /* Shift everything up one byte to make room. */
2353                 for (u = new_size + 3; u > insert_pt; u--)
2354                         buf[u] = buf[u - 1];
2355                 buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2356
2357                 /* num_returned_bytes is big-endian (SCSI byte order); add 1 */
2358                 skcomp->num_returned_bytes =
2359                         cpu_to_be32(be32_to_cpu(skcomp->num_returned_bytes) + 1);
2360         }
2361
2362         /* update page length field to reflect the driver's page too */
2363         buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
2364         buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
2365 }
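
/*
 * Worked example: if the device reports supported VPD pages
 * [0x00, 0x80, 0x83], the loop above runs past 0x83, the driver page
 * is appended to give [0x00, 0x80, 0x83, 0xDA], and both the page
 * length field and num_returned_bytes grow by one.
 */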
2366
2367 static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
2368 {
2369         int pcie_reg;
2370         u16 pci_bus_speed;
2371         u8 pci_lanes;
2372
2373         pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2374         if (pcie_reg) {
2375                 u16 linksta;
2376                 pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
2377
2378                 pci_bus_speed = linksta & 0xF;
2379                 pci_lanes = (linksta & 0x3F0) >> 4;
2380         } else {
2381                 *speed = STEC_LINK_UNKNOWN;
2382                 *width = 0xFF;
2383                 return;
2384         }
2385
2386         switch (pci_bus_speed) {
2387         case 1:
2388                 *speed = STEC_LINK_2_5GTS;
2389                 break;
2390         case 2:
2391                 *speed = STEC_LINK_5GTS;
2392                 break;
2393         case 3:
2394                 *speed = STEC_LINK_8GTS;
2395                 break;
2396         default:
2397                 *speed = STEC_LINK_UNKNOWN;
2398                 break;
2399         }
2400
2401         if (pci_lanes <= 0x20)
2402                 *width = pci_lanes;
2403         else
2404                 *width = 0xFF;
2405 }
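
/*
 * The PCIe Link Status register is decoded per the PCIe spec: bits 3:0
 * hold the current link speed (1 = 2.5 GT/s, 2 = 5 GT/s, 3 = 8 GT/s)
 * and bits 9:4 the negotiated link width, hence the 0xF and 0x3F0
 * masks above.
 */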
2406
2407 static void skd_do_inq_page_da(struct skd_device *skdev,
2408                                struct fit_completion_entry_v1 *skcomp,
2409                                struct fit_comp_error_info *skerr,
2410                                uint8_t *cdb, uint8_t *buf)
2411 {
2412         struct pci_dev *pdev = skdev->pdev;
2413         unsigned max_bytes;
2414         struct driver_inquiry_data inq;
2415         u16 val;
2416
2417         dev_dbg(&skdev->pdev->dev, "skd_do_driver_inquiry: return driver page\n");
2418
2419         memset(&inq, 0, sizeof(inq));
2420
2421         inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
2422
2423         skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
2424         inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
2425         inq.pcie_device_number = PCI_SLOT(pdev->devfn);
2426         inq.pcie_function_number = PCI_FUNC(pdev->devfn);
2427
2428         pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
2429         inq.pcie_vendor_id = cpu_to_be16(val);
2430
2431         pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
2432         inq.pcie_device_id = cpu_to_be16(val);
2433
2434         pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
2435         inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
2436
2437         pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
2438         inq.pcie_subsystem_device_id = cpu_to_be16(val);
2439
2440         /* Driver version, fixed length, padded with spaces on the right */
2441         inq.driver_version_length = sizeof(inq.driver_version);
2442         memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
2443         memcpy(inq.driver_version, DRV_VER_COMPL,
2444                min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
2445
2446         inq.page_length = cpu_to_be16((sizeof(inq) - 4));
2447
2448         /* Clear the error set by the device */
2449         skcomp->status = SAM_STAT_GOOD;
2450         memset((void *)skerr, 0, sizeof(*skerr));
2451
2452         /* copy response into output buffer */
2453         max_bytes = (cdb[3] << 8) | cdb[4];
2454         memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
2455
2456         skcomp->num_returned_bytes =
2457                 cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
2458 }
2459
2460 static void skd_do_driver_inq(struct skd_device *skdev,
2461                               struct fit_completion_entry_v1 *skcomp,
2462                               struct fit_comp_error_info *skerr,
2463                               uint8_t *cdb, uint8_t *buf)
2464 {
2465         if (!buf)
2466                 return;
2467         else if (cdb[0] != INQUIRY)
2468                 return;         /* Not an INQUIRY */
2469         else if ((cdb[1] & 1) == 0)
2470                 return;         /* EVPD not set */
2471         else if (cdb[2] == 0)
2472                 /* Need to add driver's page to supported pages list */
2473                 skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
2474         else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
2475                 /* Caller requested driver's page */
2476                 skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
2477 }
2478
2479 static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
2480 {
2481         if (!sg)
2482                 return NULL;
2483         if (!sg_page(sg))
2484                 return NULL;
2485         return sg_virt(sg);
2486 }
2487
2488 static void skd_process_scsi_inq(struct skd_device *skdev,
2489                                  struct fit_completion_entry_v1 *skcomp,
2490                                  struct fit_comp_error_info *skerr,
2491                                  struct skd_special_context *skspcl)
2492 {
2493         uint8_t *buf;
2494         struct skd_scsi_request *scsi_req = &skspcl->msg_buf->scsi[0];
2495
2496         dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
2497                             skspcl->req.data_dir);
2498         buf = skd_sg_1st_page_ptr(skspcl->req.sg);
2499
2500         if (buf)
2501                 skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
2502 }
2503
2504 static int skd_isr_completion_posted(struct skd_device *skdev,
2505                                         int limit, int *enqueued)
2506 {
2507         struct fit_completion_entry_v1 *skcmp;
2508         struct fit_comp_error_info *skerr;
2509         u16 req_id;
2510         u32 req_slot;
2511         struct skd_request_context *skreq;
2512         u16 cmp_cntxt;
2513         u8 cmp_status;
2514         u8 cmp_cycle;
2515         u32 cmp_bytes;
2516         int rc = 0;
2517         int processed = 0;
2518
2519         lockdep_assert_held(&skdev->lock);
2520
2521         for (;;) {
2522                 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
2523
2524                 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
2525                 cmp_cycle = skcmp->cycle;
2526                 cmp_cntxt = skcmp->tag;
2527                 cmp_status = skcmp->status;
2528                 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
2529
2530                 skerr = &skdev->skerr_table[skdev->skcomp_ix];
2531
2532                 dev_dbg(&skdev->pdev->dev,
2533                         "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
2534                         skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
2535                         cmp_cntxt, cmp_status, skdev->in_flight, cmp_bytes,
2536                         skdev->proto_ver);
2537
2538                 if (cmp_cycle != skdev->skcomp_cycle) {
2539                         dev_dbg(&skdev->pdev->dev, "end of completions\n");
2540                         break;
2541                 }
2542                 /*
2543                  * Update the completion queue head index and possibly
2544                  * the completion cycle count. 8-bit wrap-around.
2545                  */
2546                 skdev->skcomp_ix++;
2547                 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
2548                         skdev->skcomp_ix = 0;
2549                         skdev->skcomp_cycle++;
2550                 }
2551
2552                 /*
2553                  * The command context is a unique 32-bit ID. The low order
2554                  * bits help locate the request. The request is usually a
2555                  * r/w request (see skd_start() above) or a special request.
2556                  */
2557                 req_id = cmp_cntxt;
2558                 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
2559
2560                 /* Is this other than a r/w request? */
2561                 if (req_slot >= skdev->num_req_context) {
2562                         /*
2563                          * This is not a completion for a r/w request.
2564                          */
2565                         skd_complete_other(skdev, skcmp, skerr);
2566                         continue;
2567                 }
2568
2569                 skreq = &skdev->skreq_table[req_slot];
2570
2571                 /*
2572                  * Make sure the request ID for the slot matches.
2573                  */
2574                 if (skreq->id != req_id) {
2575                         dev_dbg(&skdev->pdev->dev,
2576                                 "mismatch comp_id=0x%x req_id=0x%x\n", req_id,
2577                                 skreq->id);
2578                         {
2579                                 u16 new_id = cmp_cntxt;
2580                                 dev_err(&skdev->pdev->dev,
2581                                         "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
2582                                         req_id, skreq->id, new_id);
2583
2584                                 continue;
2585                         }
2586                 }
2587
2588                 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
2589
2590                 if (skreq->state == SKD_REQ_STATE_ABORTED) {
2591                         dev_dbg(&skdev->pdev->dev, "reclaim req %p id=%04x\n",
2592                                 skreq, skreq->id);
2593                         /* a previously timed out command can
2594                          * now be cleaned up */
2595                         skd_release_skreq(skdev, skreq);
2596                         continue;
2597                 }
2598
2599                 skreq->completion = *skcmp;
2600                 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
2601                         skreq->err_info = *skerr;
2602                         skd_log_check_status(skdev, cmp_status, skerr->key,
2603                                              skerr->code, skerr->qual,
2604                                              skerr->fruc);
2605                 }
2606                 /* Release DMA resources for the request. */
2607                 if (skreq->n_sg > 0)
2608                         skd_postop_sg_list(skdev, skreq);
2609
2610                 if (!skreq->req) {
2611                         dev_dbg(&skdev->pdev->dev,
2612                                 "NULL backptr skdreq %p, req=0x%x req_id=0x%x\n",
2613                                 skreq, skreq->id, req_id);
2614                 } else {
2615                         /*
2616                          * Capture the outcome and post it back to the
2617                          * native request.
2618                          */
2619                         if (likely(cmp_status == SAM_STAT_GOOD))
2620                                 skd_end_request(skdev, skreq, BLK_STS_OK);
2621                         else
2622                                 skd_resolve_req_exception(skdev, skreq);
2623                 }
2624
2625                 /*
2626                  * Release the skreq, its FIT msg (if one), timeout slot,
2627                  * and queue depth.
2628                  */
2629                 skd_release_skreq(skdev, skreq);
2630
2631                 /* A limit of zero (skd_isr_comp_limit) means no limit */
2632                 if (limit) {
2633                         if (++processed >= limit) {
2634                                 rc = 1;
2635                                 break;
2636                         }
2637                 }
2638         }
2639
2640         if (skdev->state == SKD_DRVR_STATE_PAUSING &&
2641             skdev->in_flight == 0) {
2642                 skdev->state = SKD_DRVR_STATE_PAUSED;
2643                 wake_up_interruptible(&skdev->waitq);
2644         }
2645
2646         return rc;
2647 }
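
/*
 * Completion entries are validated with a cycle bit rather than a
 * producer index: an entry is live only while its cycle field equals
 * skdev->skcomp_cycle. Each time the consumer index wraps, the expected
 * cycle is bumped, so leftover entries from the previous pass fail the
 * comparison and end the scan.
 */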
2648
2649 static void skd_complete_other(struct skd_device *skdev,
2650                                struct fit_completion_entry_v1 *skcomp,
2651                                struct fit_comp_error_info *skerr)
2652 {
2653         u32 req_id = 0;
2654         u32 req_table;
2655         u32 req_slot;
2656         struct skd_special_context *skspcl;
2657
2658         lockdep_assert_held(&skdev->lock);
2659
2660         req_id = skcomp->tag;
2661         req_table = req_id & SKD_ID_TABLE_MASK;
2662         req_slot = req_id & SKD_ID_SLOT_MASK;
2663
2664         dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
2665                 req_id, req_slot);
2666
2667         /*
2668          * Based on the request id, determine how to dispatch this completion.
2669  * This switch/case finds the good cases and forwards the
2670          * completion entry. Errors are reported below the switch.
2671          */
2672         switch (req_table) {
2673         case SKD_ID_RW_REQUEST:
2674                 /*
2675                  * The caller, skd_isr_completion_posted() above,
2676                  * handles r/w requests. The only way we get here
2677                  * is if the req_slot is out of bounds.
2678                  */
2679                 break;
2680
2681         case SKD_ID_SPECIAL_REQUEST:
2682                 /*
2683                  * Make sure the req_slot is in bounds and that the id
2684                  * matches.
2685                  */
2686                 if (req_slot < skdev->n_special) {
2687                         skspcl = &skdev->skspcl_table[req_slot];
2688                         if (skspcl->req.id == req_id &&
2689                             skspcl->req.state == SKD_REQ_STATE_BUSY) {
2690                                 skd_complete_special(skdev,
2691                                                      skcomp, skerr, skspcl);
2692                                 return;
2693                         }
2694                 }
2695                 break;
2696
2697         case SKD_ID_INTERNAL:
2698                 if (req_slot == 0) {
2699                         skspcl = &skdev->internal_skspcl;
2700                         if (skspcl->req.id == req_id &&
2701                             skspcl->req.state == SKD_REQ_STATE_BUSY) {
2702                                 skd_complete_internal(skdev,
2703                                                       skcomp, skerr, skspcl);
2704                                 return;
2705                         }
2706                 }
2707                 break;
2708
2709         case SKD_ID_FIT_MSG:
2710                 /*
2711                  * These ids should never appear in a completion record.
2712                  */
2713                 break;
2714
2715         default:
2716                 /*
2717                  * These ids should never appear anywhere.
2718                  */
2719                 break;
2720         }
2721
2722         /*
2723          * If we get here it is a bad or stale id.
2724          */
2725 }
2726
2727 static void skd_complete_special(struct skd_device *skdev,
2728                                  struct fit_completion_entry_v1 *skcomp,
2729                                  struct fit_comp_error_info *skerr,
2730                                  struct skd_special_context *skspcl)
2731 {
2732         lockdep_assert_held(&skdev->lock);
2733
2734         dev_dbg(&skdev->pdev->dev, " completing special request %p\n", skspcl);
2735         if (skspcl->orphaned) {
2736                 /* Discard orphaned request */
2737                 /* ?: Can this release directly or does it need
2738                  * to use a worker? */
2739                 dev_dbg(&skdev->pdev->dev, "release orphaned %p\n", skspcl);
2740                 skd_release_special(skdev, skspcl);
2741                 return;
2742         }
2743
2744         skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
2745
2746         skspcl->req.state = SKD_REQ_STATE_COMPLETED;
2747         skspcl->req.completion = *skcomp;
2748         skspcl->req.err_info = *skerr;
2749
2750         skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
2751                              skerr->code, skerr->qual, skerr->fruc);
2752
2753         wake_up_interruptible(&skdev->waitq);
2754 }
2755
2756 /* assume spinlock is already held */
2757 static void skd_release_special(struct skd_device *skdev,
2758                                 struct skd_special_context *skspcl)
2759 {
2760         int i, was_depleted;
2761
2762         for (i = 0; i < skspcl->req.n_sg; i++) {
2763                 struct page *page = sg_page(&skspcl->req.sg[i]);
2764                 __free_page(page);
2765         }
2766
2767         was_depleted = (skdev->skspcl_free_list == NULL);
2768
2769         skspcl->req.state = SKD_REQ_STATE_IDLE;
2770         skspcl->req.id += SKD_ID_INCR;
2771         skspcl->req.next =
2772                 (struct skd_request_context *)skdev->skspcl_free_list;
2773         skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
2774
2775         if (was_depleted) {
2776                 dev_dbg(&skdev->pdev->dev, "skspcl was depleted\n");
2777                 /* Free list was depleted. There might be waiters. */
2778                 wake_up_interruptible(&skdev->waitq);
2779         }
2780 }
2781
2782 static void skd_reset_skcomp(struct skd_device *skdev)
2783 {
2784         memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);
2785
2786         skdev->skcomp_ix = 0;
2787         skdev->skcomp_cycle = 1;
2788 }
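
/*
 * Zeroing the table makes every entry's cycle field 0, so starting
 * skcomp_cycle at 1 guarantees that no stale entry can match until the
 * device actually posts a completion.
 */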
2789
2790 /*
2791  *****************************************************************************
2792  * INTERRUPTS
2793  *****************************************************************************
2794  */
2795 static void skd_completion_worker(struct work_struct *work)
2796 {
2797         struct skd_device *skdev =
2798                 container_of(work, struct skd_device, completion_worker);
2799         unsigned long flags;
2800         int flush_enqueued = 0;
2801
2802         spin_lock_irqsave(&skdev->lock, flags);
2803
2804         /*
2805          * pass in limit=0, which means no limit..
2806          * process everything in compq
2807          */
2808         skd_isr_completion_posted(skdev, 0, &flush_enqueued);
2809         skd_request_fn(skdev->queue);
2810
2811         spin_unlock_irqrestore(&skdev->lock, flags);
2812 }
2813
2814 static void skd_isr_msg_from_dev(struct skd_device *skdev);
2815
2816 static irqreturn_t
2817 skd_isr(int irq, void *ptr)
2818 {
2819         struct skd_device *skdev = ptr;
2820         u32 intstat;
2821         u32 ack;
2822         int rc = 0;
2823         int deferred = 0;
2824         int flush_enqueued = 0;
2825
2826         spin_lock(&skdev->lock);
2827
2828         for (;;) {
2829                 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2830
2831                 ack = FIT_INT_DEF_MASK;
2832                 ack &= intstat;
2833
2834                 dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
2835                         ack);
2836
2837                 /* As long as there is an int pending on device, keep
2838                  * running loop.  When none, get out, but if we've never
2839                  * done any processing, call completion handler?
2840                  */
2841                 if (ack == 0) {
2842                         /* No interrupts on device, but run the completion
2843                          * processor anyway?
2844                          */
2845                         if (rc == 0)
2846                                 if (likely (skdev->state
2847                                         == SKD_DRVR_STATE_ONLINE))
2848                                         deferred = 1;
2849                         break;
2850                 }
2851
2852                 rc = IRQ_HANDLED;
2853
2854                 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
2855
2856                 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
2857                            (skdev->state != SKD_DRVR_STATE_STOPPING))) {
2858                         if (intstat & FIT_ISH_COMPLETION_POSTED) {
2859                                 /*
2860                                  * If we have already deferred completion
2861                                  * processing, don't bother running it again
2862                                  */
2863                                 if (deferred == 0)
2864                                         deferred =
2865                                                 skd_isr_completion_posted(skdev,
2866                                                 skd_isr_comp_limit, &flush_enqueued);
2867                         }
2868
2869                         if (intstat & FIT_ISH_FW_STATE_CHANGE) {
2870                                 skd_isr_fwstate(skdev);
2871                                 if (skdev->state == SKD_DRVR_STATE_FAULT ||
2872                                     skdev->state ==
2873                                     SKD_DRVR_STATE_DISAPPEARED) {
2874                                         spin_unlock(&skdev->lock);
2875                                         return rc;
2876                                 }
2877                         }
2878
2879                         if (intstat & FIT_ISH_MSG_FROM_DEV)
2880                                 skd_isr_msg_from_dev(skdev);
2881                 }
2882         }
2883
2884         if (unlikely(flush_enqueued))
2885                 skd_request_fn(skdev->queue);
2886
2887         if (deferred)
2888                 schedule_work(&skdev->completion_worker);
2889         else if (!flush_enqueued)
2890                 skd_request_fn(skdev->queue);
2891
2892         spin_unlock(&skdev->lock);
2893
2894         return rc;
2895 }
2896
2897 static void skd_drive_fault(struct skd_device *skdev)
2898 {
2899         skdev->state = SKD_DRVR_STATE_FAULT;
2900         dev_err(&skdev->pdev->dev, "Drive FAULT\n");
2901 }
2902
2903 static void skd_drive_disappeared(struct skd_device *skdev)
2904 {
2905         skdev->state = SKD_DRVR_STATE_DISAPPEARED;
2906         dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
2907 }
2908
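/*
 * Handle a firmware state change interrupt: sample the drive state
 * from FIT_STATUS and advance the driver state machine to match.
 */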
2909 static void skd_isr_fwstate(struct skd_device *skdev)
2910 {
2911         u32 sense;
2912         u32 state;
2913         u32 mtd;
2914         int prev_driver_state = skdev->state;
2915
2916         sense = SKD_READL(skdev, FIT_STATUS);
2917         state = sense & FIT_SR_DRIVE_STATE_MASK;
2918
2919         dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
2920                 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
2921                 skd_drive_state_to_str(state), state);
2922
2923         skdev->drive_state = state;
2924
2925         switch (skdev->drive_state) {
2926         case FIT_SR_DRIVE_INIT:
2927                 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
2928                         skd_disable_interrupts(skdev);
2929                         break;
2930                 }
2931                 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
2932                         skd_recover_requests(skdev);
2933                 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
2934                         skdev->timer_countdown = SKD_STARTING_TIMO;
2935                         skdev->state = SKD_DRVR_STATE_STARTING;
2936                         skd_soft_reset(skdev);
2937                         break;
2938                 }
2939                 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
2940                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2941                 skdev->last_mtd = mtd;
2942                 break;
2943
2944         case FIT_SR_DRIVE_ONLINE:
2945                 skdev->cur_max_queue_depth = skd_max_queue_depth;
2946                 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
2947                         skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
2948
2949                 skdev->queue_low_water_mark =
2950                         skdev->cur_max_queue_depth * 2 / 3 + 1;
2951                 if (skdev->queue_low_water_mark < 1)
2952                         skdev->queue_low_water_mark = 1;
2953                 dev_info(&skdev->pdev->dev,
2954                          "Queue depth limit=%d dev=%d lowat=%d\n",
2955                          skdev->cur_max_queue_depth,
2956                          skdev->dev_max_queue_depth,
2957                          skdev->queue_low_water_mark);
2958
2959                 skd_refresh_device_data(skdev);
2960                 break;
2961
2962         case FIT_SR_DRIVE_BUSY:
2963                 skdev->state = SKD_DRVR_STATE_BUSY;
2964                 skdev->timer_countdown = SKD_BUSY_TIMO;
2965                 skd_quiesce_dev(skdev);
2966                 break;
2967         case FIT_SR_DRIVE_BUSY_SANITIZE:
2968                 /* Set a 3 second timer; we'll abort any unfinished
2969                  * commands when it expires.
2970                  */
2971                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
2972                 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
2973                 blk_start_queue(skdev->queue);
2974                 break;
2975         case FIT_SR_DRIVE_BUSY_ERASE:
2976                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
2977                 skdev->timer_countdown = SKD_BUSY_TIMO;
2978                 break;
2979         case FIT_SR_DRIVE_OFFLINE:
2980                 skdev->state = SKD_DRVR_STATE_IDLE;
2981                 break;
2982         case FIT_SR_DRIVE_SOFT_RESET:
2983                 switch (skdev->state) {
2984                 case SKD_DRVR_STATE_STARTING:
2985                 case SKD_DRVR_STATE_RESTARTING:
2986                         /* Expected by a caller of skd_soft_reset() */
2987                         break;
2988                 default:
2989                         skdev->state = SKD_DRVR_STATE_RESTARTING;
2990                         break;
2991                 }
2992                 break;
2993         case FIT_SR_DRIVE_FW_BOOTING:
2994                 dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
2995                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
2996                 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
2997                 break;
2998
2999         case FIT_SR_DRIVE_DEGRADED:
3000         case FIT_SR_PCIE_LINK_DOWN:
3001         case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3002                 break;
3003
3004         case FIT_SR_DRIVE_FAULT:
3005                 skd_drive_fault(skdev);
3006                 skd_recover_requests(skdev);
3007                 blk_start_queue(skdev->queue);
3008                 break;
3009
3010         /* PCIe bus returned all Fs? */
3011         case 0xFF:
3012                 dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
3013                          sense);
3014                 skd_drive_disappeared(skdev);
3015                 skd_recover_requests(skdev);
3016                 blk_start_queue(skdev->queue);
3017                 break;
3018         default:
3019                 /*
3020                  * Unknown FW state. Wait for a state we recognize.
3021                  */
3022                 break;
3023         }
3024         dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
3025                 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
3026                 skd_skdev_state_to_str(skdev->state), skdev->state);
3027 }
3028
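/*
 * Fail every in-flight request with an I/O error, salvage busy FIT
 * messages and special contexts, and rebuild all of the free lists.
 * Called when the drive faults, disappears, or is being restarted.
 */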
3029 static void skd_recover_requests(struct skd_device *skdev)
3030 {
3031         int i;
3032
3033         for (i = 0; i < skdev->num_req_context; i++) {
3034                 struct skd_request_context *skreq = &skdev->skreq_table[i];
3035
3036                 if (skreq->state == SKD_REQ_STATE_BUSY) {
3037                         skd_log_skreq(skdev, skreq, "recover");
3038
3039                         SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
3040                         SKD_ASSERT(skreq->req != NULL);
3041
3042                         /* Release DMA resources for the request. */
3043                         if (skreq->n_sg > 0)
3044                                 skd_postop_sg_list(skdev, skreq);
3045
3046                         skd_end_request(skdev, skreq, BLK_STS_IOERR);
3047
3048                         skreq->req = NULL;
3049
3050                         skreq->state = SKD_REQ_STATE_IDLE;
3051                         skreq->id += SKD_ID_INCR;
3052                 }
3053                 if (i > 0)
3054                         skreq[-1].next = skreq;
3055                 skreq->next = NULL;
3056         }
3057         skdev->skreq_free_list = skdev->skreq_table;
3058
3059         for (i = 0; i < skdev->num_fitmsg_context; i++) {
3060                 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
3061
3062                 if (skmsg->state == SKD_MSG_STATE_BUSY) {
3063                         skd_log_skmsg(skdev, skmsg, "salvaged");
3064                         SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
3065                         skmsg->state = SKD_MSG_STATE_IDLE;
3066                         skmsg->id += SKD_ID_INCR;
3067                 }
3068                 if (i > 0)
3069                         skmsg[-1].next = skmsg;
3070                 skmsg->next = NULL;
3071         }
3072         skdev->skmsg_free_list = skdev->skmsg_table;
3073
3074         for (i = 0; i < skdev->n_special; i++) {
3075                 struct skd_special_context *skspcl = &skdev->skspcl_table[i];
3076
3077                 /* If orphaned, reclaim it because it has already been
3078                  * reported to the process as an error (it was waiting
3079                  * for a completion that didn't come, and now never will).
3080                  * If busy, change it to a state that will cause it to
3081                  * error out in the wait routine and let it do the
3082                  * normal reporting and reclaiming.
3083                  */
3084                 if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
3085                         if (skspcl->orphaned) {
3086                                 dev_dbg(&skdev->pdev->dev, "orphaned %p\n",
3087                                         skspcl);
3088                                 skd_release_special(skdev, skspcl);
3089                         } else {
3090                                 dev_dbg(&skdev->pdev->dev, "not orphaned %p\n",
3091                                         skspcl);
3092                                 skspcl->req.state = SKD_REQ_STATE_ABORTED;
3093                         }
3094                 }
3095         }
3096         skdev->skspcl_free_list = skdev->skspcl_table;
3097
3098         for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
3099                 skdev->timeout_slot[i] = 0;
3100
3101         skdev->in_flight = 0;
3102 }
3103
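/*
 * Handle an acked message from the device. During startup this walks
 * the handshake: FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH ->
 * SET_COMPQ_ADDR -> CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO/HI ->
 * ARM_QUEUE, issuing the next FIT_MTD_* command on each ack.
 */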
3104 static void skd_isr_msg_from_dev(struct skd_device *skdev)
3105 {
3106         u32 mfd;
3107         u32 mtd;
3108         u32 data;
3109
3110         mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3111
3112         dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
3113                 skdev->last_mtd);
3114
3115         /* ignore any mtd that is an ack for something we didn't send */
3116         if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
3117                 return;
3118
3119         switch (FIT_MXD_TYPE(mfd)) {
3120         case FIT_MTD_FITFW_INIT:
3121                 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
3122
3123                 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
3124                         dev_err(&skdev->pdev->dev, "protocol mismatch\n");
3125                         dev_err(&skdev->pdev->dev, "  got=%d support=%d\n",
3126                                 skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
3127                         dev_err(&skdev->pdev->dev, "  please upgrade driver\n");
3128                         skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
3129                         skd_soft_reset(skdev);
3130                         break;
3131                 }
3132                 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
3133                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3134                 skdev->last_mtd = mtd;
3135                 break;
3136
3137         case FIT_MTD_GET_CMDQ_DEPTH:
3138                 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
3139                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
3140                                    SKD_N_COMPLETION_ENTRY);
3141                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3142                 skdev->last_mtd = mtd;
3143                 break;
3144
3145         case FIT_MTD_SET_COMPQ_DEPTH:
3146                 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
3147                 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
3148                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3149                 skdev->last_mtd = mtd;
3150                 break;
3151
3152         case FIT_MTD_SET_COMPQ_ADDR:
3153                 skd_reset_skcomp(skdev);
3154                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
3155                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3156                 skdev->last_mtd = mtd;
3157                 break;
3158
3159         case FIT_MTD_CMD_LOG_HOST_ID:
3160                 skdev->connect_time_stamp = get_seconds();
3161                 data = skdev->connect_time_stamp & 0xFFFF;
3162                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
3163                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3164                 skdev->last_mtd = mtd;
3165                 break;
3166
3167         case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
3168                 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
3169                 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
3170                 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
3171                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3172                 skdev->last_mtd = mtd;
3173                 break;
3174
3175         case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
3176                 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
3177                 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
3178                 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3179                 skdev->last_mtd = mtd;
3180
3181                 dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
3182                         skdev->connect_time_stamp, skdev->drive_jiffies);
3183                 break;
3184
3185         case FIT_MTD_ARM_QUEUE:
3186                 skdev->last_mtd = 0;
3187                 /*
3188                  * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
3189                  */
3190                 break;
3191
3192         default:
3193                 break;
3194         }
3195 }
3196
3197 static void skd_disable_interrupts(struct skd_device *skdev)
3198 {
3199         u32 sense;
3200
3201         sense = SKD_READL(skdev, FIT_CONTROL);
3202         sense &= ~FIT_CR_ENABLE_INTERRUPTS;
3203         SKD_WRITEL(skdev, sense, FIT_CONTROL);
3204         dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
3205
3206         /* Note that all 1s are written. A 1 bit means
3207          * disable, a 0 means enable.
3208          */
3209         SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
3210 }
3211
3212 static void skd_enable_interrupts(struct skd_device *skdev)
3213 {
3214         u32 val;
3215
3216         /* unmask interrupts first */
3217         val = FIT_ISH_FW_STATE_CHANGE |
3218               FIT_ISH_COMPLETION_POSTED | FIT_ISH_MSG_FROM_DEV;
3219
3220         /* Note that the complement of the mask is written. A 1 bit
3221          * means disable, a 0 means enable. */
3222         SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
3223         dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
3224
3225         val = SKD_READL(skdev, FIT_CONTROL);
3226         val |= FIT_CR_ENABLE_INTERRUPTS;
3227         dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
3228         SKD_WRITEL(skdev, val, FIT_CONTROL);
3229 }
3230
3231 /*
3232  *****************************************************************************
3233  * START, STOP, RESTART, QUIESCE, UNQUIESCE
3234  *****************************************************************************
3235  */
3236
3237 static void skd_soft_reset(struct skd_device *skdev)
3238 {
3239         u32 val;
3240
3241         val = SKD_READL(skdev, FIT_CONTROL);
3242         val |= (FIT_CR_SOFT_RESET);
3243         dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
3244         SKD_WRITEL(skdev, val, FIT_CONTROL);
3245 }
3246
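/*
 * Bring the device up: ack any stale interrupts, enable interrupts,
 * sample the drive state, and choose the appropriate starting action
 * (soft reset, wait for firmware boot, or fault/disappeared handling).
 */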
3247 static void skd_start_device(struct skd_device *skdev)
3248 {
3249         unsigned long flags;
3250         u32 sense;
3251         u32 state;
3252
3253         spin_lock_irqsave(&skdev->lock, flags);
3254
3255         /* ack all ghost interrupts */
3256         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3257
3258         sense = SKD_READL(skdev, FIT_STATUS);
3259
3260         dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
3261
3262         state = sense & FIT_SR_DRIVE_STATE_MASK;
3263         skdev->drive_state = state;
3264         skdev->last_mtd = 0;
3265
3266         skdev->state = SKD_DRVR_STATE_STARTING;
3267         skdev->timer_countdown = SKD_STARTING_TIMO;
3268
3269         skd_enable_interrupts(skdev);
3270
3271         switch (skdev->drive_state) {
3272         case FIT_SR_DRIVE_OFFLINE:
3273                 dev_err(&skdev->pdev->dev, "Drive offline...\n");
3274                 break;
3275
3276         case FIT_SR_DRIVE_FW_BOOTING:
3277                 dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
3278                 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3279                 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3280                 break;
3281
3282         case FIT_SR_DRIVE_BUSY_SANITIZE:
3283                 dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
3284                 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3285                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3286                 break;
3287
3288         case FIT_SR_DRIVE_BUSY_ERASE:
3289                 dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
3290                 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3291                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3292                 break;
3293
3294         case FIT_SR_DRIVE_INIT:
3295         case FIT_SR_DRIVE_ONLINE:
3296                 skd_soft_reset(skdev);
3297                 break;
3298
3299         case FIT_SR_DRIVE_BUSY:
3300                 dev_err(&skdev->pdev->dev, "Drive Busy...\n");
3301                 skdev->state = SKD_DRVR_STATE_BUSY;
3302                 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3303                 break;
3304
3305         case FIT_SR_DRIVE_SOFT_RESET:
3306                 dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
3307                 break;
3308
3309         case FIT_SR_DRIVE_FAULT:
3310                 /* Fault state is bad: a soft reset won't clear it, and
3311                  * a hard reset may not work on this device. For now,
3312                  * just fault so the system doesn't hang.
3313                  */
3314                 skd_drive_fault(skdev);
3315                 /* Start the queue so we can error out the requests. */
3316                 dev_dbg(&skdev->pdev->dev, "starting queue\n");
3317                 blk_start_queue(skdev->queue);
3318                 skdev->gendisk_on = -1;
3319                 wake_up_interruptible(&skdev->waitq);
3320                 break;
3321
3322         case 0xFF:
3323                 /* Most likely the device isn't there or isn't responding
3324                  * to the BAR1 addresses. */
3325                 skd_drive_disappeared(skdev);
3326                 /* Start the queue so we can error out the requests. */
3327                 dev_dbg(&skdev->pdev->dev,
3328                         "starting queue to error-out reqs\n");
3329                 blk_start_queue(skdev->queue);
3330                 skdev->gendisk_on = -1;
3331                 wake_up_interruptible(&skdev->waitq);
3332                 break;
3333
3334         default:
3335                 dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
3336                         skdev->drive_state);
3337                 break;
3338         }
3339
3340         state = SKD_READL(skdev, FIT_CONTROL);
3341         dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
3342
3343         state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
3344         dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
3345
3346         state = SKD_READL(skdev, FIT_INT_MASK_HOST);
3347         dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
3348
3349         state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3350         dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
3351
3352         state = SKD_READL(skdev, FIT_HW_VERSION);
3353         dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
3354
3355         spin_unlock_irqrestore(&skdev->lock, flags);
3356 }
3357
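/*
 * Stop the device: if it is online, flush its write cache with a
 * SYNCHRONIZE_CACHE issued through the internal special context and
 * wait up to 10 seconds for completion; then disable interrupts,
 * soft-reset, and poll up to 1 second for the drive to reach INIT.
 */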
3358 static void skd_stop_device(struct skd_device *skdev)
3359 {
3360         unsigned long flags;
3361         struct skd_special_context *skspcl = &skdev->internal_skspcl;
3362         u32 dev_state;
3363         int i;
3364
3365         spin_lock_irqsave(&skdev->lock, flags);
3366
3367         if (skdev->state != SKD_DRVR_STATE_ONLINE) {
3368                 dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
3369                 goto stop_out;
3370         }
3371
3372         if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
3373                 dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
3374                 goto stop_out;
3375         }
3376
3377         skdev->state = SKD_DRVR_STATE_SYNCING;
3378         skdev->sync_done = 0;
3379
3380         skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
3381
3382         spin_unlock_irqrestore(&skdev->lock, flags);
3383
3384         wait_event_interruptible_timeout(skdev->waitq,
3385                                          (skdev->sync_done), (10 * HZ));
3386
3387         spin_lock_irqsave(&skdev->lock, flags);
3388
3389         switch (skdev->sync_done) {
3390         case 0:
3391                 dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
3392                 break;
3393         case 1:
3394                 dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
3395                 break;
3396         default:
3397                 dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
3398         }
3399
3400 stop_out:
3401         skdev->state = SKD_DRVR_STATE_STOPPING;
3402         spin_unlock_irqrestore(&skdev->lock, flags);
3403
3404         skd_kill_timer(skdev);
3405
3406         spin_lock_irqsave(&skdev->lock, flags);
3407         skd_disable_interrupts(skdev);
3408
3409         /* ensure all ints on device are cleared */
3410         /* soft reset the device to unload with a clean slate */
3411         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3412         SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
3413
3414         spin_unlock_irqrestore(&skdev->lock, flags);
3415
3416         /* poll every 100ms, 1 second timeout */
3417         for (i = 0; i < 10; i++) {
3418                 dev_state =
3419                         SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
3420                 if (dev_state == FIT_SR_DRIVE_INIT)
3421                         break;
3422                 set_current_state(TASK_INTERRUPTIBLE);
3423                 schedule_timeout(msecs_to_jiffies(100));
3424         }
3425
3426         if (dev_state != FIT_SR_DRIVE_INIT)
3427                 dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
3428                         dev_state);
3429 }
3430
3431 /* assume spinlock is held */
3432 static void skd_restart_device(struct skd_device *skdev)
3433 {
3434         u32 state;
3435
3436         /* ack all ghost interrupts */
3437         SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3438
3439         state = SKD_READL(skdev, FIT_STATUS);
3440
3441         dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
3442
3443         state &= FIT_SR_DRIVE_STATE_MASK;
3444         skdev->drive_state = state;
3445         skdev->last_mtd = 0;
3446
3447         skdev->state = SKD_DRVR_STATE_RESTARTING;
3448         skdev->timer_countdown = SKD_RESTARTING_TIMO;
3449
3450         skd_soft_reset(skdev);
3451 }
3452
3453 /* assume spinlock is held */
3454 static int skd_quiesce_dev(struct skd_device *skdev)
3455 {
3456         int rc = 0;
3457
3458         switch (skdev->state) {
3459         case SKD_DRVR_STATE_BUSY:
3460         case SKD_DRVR_STATE_BUSY_IMMINENT:
3461                 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
3462                 blk_stop_queue(skdev->queue);
3463                 break;
3464         case SKD_DRVR_STATE_ONLINE:
3465         case SKD_DRVR_STATE_STOPPING:
3466         case SKD_DRVR_STATE_SYNCING:
3467         case SKD_DRVR_STATE_PAUSING:
3468         case SKD_DRVR_STATE_PAUSED:
3469         case SKD_DRVR_STATE_STARTING:
3470         case SKD_DRVR_STATE_RESTARTING:
3471         case SKD_DRVR_STATE_RESUMING:
3472         default:
3473                 rc = -EINVAL;
3474                 dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
3475                         skdev->state);
3476         }
3477         return rc;
3478 }
3479
3480 /* assume spinlock is held */
3481 static int skd_unquiesce_dev(struct skd_device *skdev)
3482 {
3483         int prev_driver_state = skdev->state;
3484
3485         skd_log_skdev(skdev, "unquiesce");
3486         if (skdev->state == SKD_DRVR_STATE_ONLINE) {
3487                 dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
3488                 return 0;
3489         }
3490         if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
3491                 /*
3492                  * If there has been a state change to other than
3493                  * ONLINE, we will rely on a controller state change
3494                  * to come back online and restart the queue.
3495                  * The BUSY state means the driver is ready to
3496                  * continue normal processing but is waiting for the
3497                  * controller to become available.
3498                  */
3499                 skdev->state = SKD_DRVR_STATE_BUSY;
3500                 dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
3501                 return 0;
3502         }
3503
3504         /*
3505          * The drive has just come online; the driver is either in
3506          * startup, paused performing a task, or busy waiting for hardware.
3507          */
3508         switch (skdev->state) {
3509         case SKD_DRVR_STATE_PAUSED:
3510         case SKD_DRVR_STATE_BUSY:
3511         case SKD_DRVR_STATE_BUSY_IMMINENT:
3512         case SKD_DRVR_STATE_BUSY_ERASE:
3513         case SKD_DRVR_STATE_STARTING:
3514         case SKD_DRVR_STATE_RESTARTING:
3515         case SKD_DRVR_STATE_FAULT:
3516         case SKD_DRVR_STATE_IDLE:
3517         case SKD_DRVR_STATE_LOAD:
3518                 skdev->state = SKD_DRVR_STATE_ONLINE;
3519                 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
3520                         skd_skdev_state_to_str(prev_driver_state),
3521                         prev_driver_state, skd_skdev_state_to_str(skdev->state),
3522                         skdev->state);
3523                 dev_dbg(&skdev->pdev->dev,
3524                         "**** device ONLINE...starting block queue\n");
3525                 dev_dbg(&skdev->pdev->dev, "starting queue\n");
3526                 dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
3527                 blk_start_queue(skdev->queue);
3528                 skdev->gendisk_on = 1;
3529                 wake_up_interruptible(&skdev->waitq);
3530                 break;
3531
3532         case SKD_DRVR_STATE_DISAPPEARED:
3533         default:
3534                 dev_dbg(&skdev->pdev->dev,
3535                         "**** driver state %d, not implemented\n",
3536                         skdev->state);
3537                 return -EBUSY;
3538         }
3539         return 0;
3540 }
3541
3542 /*
3543  *****************************************************************************
3544  * PCIe MSI/MSI-X INTERRUPT HANDLERS
3545  *****************************************************************************
3546  */
3547
3548 static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
3549 {
3550         struct skd_device *skdev = skd_host_data;
3551         unsigned long flags;
3552
3553         spin_lock_irqsave(&skdev->lock, flags);
3554         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3555                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3556         dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
3557                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3558         SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
3559         spin_unlock_irqrestore(&skdev->lock, flags);
3560         return IRQ_HANDLED;
3561 }
3562
3563 static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
3564 {
3565         struct skd_device *skdev = skd_host_data;
3566         unsigned long flags;
3567
3568         spin_lock_irqsave(&skdev->lock, flags);
3569         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3570                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3571         SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
3572         skd_isr_fwstate(skdev);
3573         spin_unlock_irqrestore(&skdev->lock, flags);
3574         return IRQ_HANDLED;
3575 }
3576
3577 static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
3578 {
3579         struct skd_device *skdev = skd_host_data;
3580         unsigned long flags;
3581         int flush_enqueued = 0;
3582         int deferred;
3583
3584         spin_lock_irqsave(&skdev->lock, flags);
3585         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3586                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3587         SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
3588         deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
3589                                                 &flush_enqueued);
3590         if (flush_enqueued)
3591                 skd_request_fn(skdev->queue);
3592
3593         if (deferred)
3594                 schedule_work(&skdev->completion_worker);
3595         else if (!flush_enqueued)
3596                 skd_request_fn(skdev->queue);
3597
3598         spin_unlock_irqrestore(&skdev->lock, flags);
3599
3600         return IRQ_HANDLED;
3601 }
3602
3603 static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
3604 {
3605         struct skd_device *skdev = skd_host_data;
3606         unsigned long flags;
3607
3608         spin_lock_irqsave(&skdev->lock, flags);
3609         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3610                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3611         SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
3612         skd_isr_msg_from_dev(skdev);
3613         spin_unlock_irqrestore(&skdev->lock, flags);
3614         return IRQ_HANDLED;
3615 }
3616
3617 static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
3618 {
3619         struct skd_device *skdev = skd_host_data;
3620         unsigned long flags;
3621
3622         spin_lock_irqsave(&skdev->lock, flags);
3623         dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3624                 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3625         SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
3626         spin_unlock_irqrestore(&skdev->lock, flags);
3627         return IRQ_HANDLED;
3628 }
3629
3630 /*
3631  *****************************************************************************
3632  * PCIe MSI/MSI-X SETUP
3633  *****************************************************************************
3634  */
3635
3636 struct skd_msix_entry {
3637         char isr_name[30];
3638 };
3639
3640 struct skd_init_msix_entry {
3641         const char *name;
3642         irq_handler_t handler;
3643 };
3644
3645 #define SKD_MAX_MSIX_COUNT              13
3646 #define SKD_MIN_MSIX_COUNT              7
3647 #define SKD_BASE_MSIX_IRQ               4
3648
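/*
 * Fixed MSI-X vector layout: vector 4 carries the firmware state
 * change, vector 5 the completion queue, and vector 6 the
 * message-from-device interrupt; the remaining vectors are
 * acknowledged but otherwise unused by the driver.
 */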
3649 static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
3650         { "(DMA 0)",        skd_reserved_isr },
3651         { "(DMA 1)",        skd_reserved_isr },
3652         { "(DMA 2)",        skd_reserved_isr },
3653         { "(DMA 3)",        skd_reserved_isr },
3654         { "(State Change)", skd_statec_isr   },
3655         { "(COMPL_Q)",      skd_comp_q       },
3656         { "(MSG)",          skd_msg_isr      },
3657         { "(Reserved)",     skd_reserved_isr },
3658         { "(Reserved)",     skd_reserved_isr },
3659         { "(Queue Full 0)", skd_qfull_isr    },
3660         { "(Queue Full 1)", skd_qfull_isr    },
3661         { "(Queue Full 2)", skd_qfull_isr    },
3662         { "(Queue Full 3)", skd_qfull_isr    },
3663 };
3664
3665 static int skd_acquire_msix(struct skd_device *skdev)
3666 {
3667         int i, rc;
3668         struct pci_dev *pdev = skdev->pdev;
3669
3670         rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
3671                         PCI_IRQ_MSIX);
3672         if (rc < 0) {
3673                 dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
3674                 goto out;
3675         }
3676
3677         skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
3678                         sizeof(struct skd_msix_entry), GFP_KERNEL);
3679         if (!skdev->msix_entries) {
3680                 rc = -ENOMEM;
3681                 dev_err(&skdev->pdev->dev, "msix table allocation error\n");
3682                 goto out;
3683         }
3684
3685         /* Register an interrupt handler for each MSI-X vector. */
3686         for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3687                 struct skd_msix_entry *qentry = &skdev->msix_entries[i];
3688
3689                 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
3690                          "%s%d-msix %s", DRV_NAME, skdev->devno,
3691                          msix_entries[i].name);
3692
3693                 rc = devm_request_irq(&skdev->pdev->dev,
3694                                 pci_irq_vector(skdev->pdev, i),
3695                                 msix_entries[i].handler, 0,
3696                                 qentry->isr_name, skdev);
3697                 if (rc) {
3698                         dev_err(&skdev->pdev->dev,
3699                                 "Unable to register(%d) MSI-X handler %d: %s\n",
3700                                 rc, i, qentry->isr_name);
3701                         goto msix_out;
3702                 }
3703         }
3704
3705         dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
3706                 SKD_MAX_MSIX_COUNT);
3707         return 0;
3708
3709 msix_out:
3710         while (--i >= 0)
3711                 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
3712 out:
3713         kfree(skdev->msix_entries);
3714         skdev->msix_entries = NULL;
3715         return rc;
3716 }
3717
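/*
 * Acquire the device interrupt: try MSI-X first when configured,
 * fall back to MSI, and finally to legacy INTx. The shared-IRQ flag
 * is only needed in the legacy case.
 */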
3718 static int skd_acquire_irq(struct skd_device *skdev)
3719 {
3720         struct pci_dev *pdev = skdev->pdev;
3721         unsigned int irq_flag = PCI_IRQ_LEGACY;
3722         int rc;
3723
3724         if (skd_isr_type == SKD_IRQ_MSIX) {
3725                 rc = skd_acquire_msix(skdev);
3726                 if (!rc)
3727                         return 0;
3728
3729                 dev_err(&skdev->pdev->dev,
3730                         "failed to enable MSI-X, retrying with MSI %d\n", rc);
3731         }
3732
3733         snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
3734                         skdev->devno);
3735
3736         if (skd_isr_type != SKD_IRQ_LEGACY)
3737                 irq_flag |= PCI_IRQ_MSI;
3738         rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
3739         if (rc < 0) {
3740                 dev_err(&skdev->pdev->dev,
3741                         "failed to allocate the MSI interrupt %d\n", rc);
3742                 return rc;
3743         }
3744
3745         rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
3746                         pdev->msi_enabled ? 0 : IRQF_SHARED,
3747                         skdev->isr_name, skdev);
3748         if (rc) {
3749                 pci_free_irq_vectors(pdev);
3750                 dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
3751                         rc);
3752                 return rc;
3753         }
3754
3755         return 0;
3756 }
3757
3758 static void skd_release_irq(struct skd_device *skdev)
3759 {
3760         struct pci_dev *pdev = skdev->pdev;
3761
3762         if (skdev->msix_entries) {
3763                 int i;
3764
3765                 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3766                         devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
3767                                         skdev);
3768                 }
3769
3770                 kfree(skdev->msix_entries);
3771                 skdev->msix_entries = NULL;
3772         } else {
3773                 devm_free_irq(&pdev->dev, pdev->irq, skdev);
3774         }
3775
3776         pci_free_irq_vectors(pdev);
3777 }
3778
3779 /*
3780  *****************************************************************************
3781  * CONSTRUCT
3782  *****************************************************************************
3783  */
3784
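/*
 * The completion queue and the parallel error table share a single
 * coherent DMA allocation of SKD_SKCOMP_SIZE bytes; the error table
 * starts immediately after the SKD_N_COMPLETION_ENTRY completion
 * entries.
 */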
3785 static int skd_cons_skcomp(struct skd_device *skdev)
3786 {
3787         int rc = 0;
3788         struct fit_completion_entry_v1 *skcomp;
3789
3790         dev_dbg(&skdev->pdev->dev,
3791                 "comp pci_alloc, total bytes %zd entries %d\n",
3792                 SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
3793
3794         skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
3795                                        &skdev->cq_dma_address);
3796
3797         if (skcomp == NULL) {
3798                 rc = -ENOMEM;
3799                 goto err_out;
3800         }
3801
3802         skdev->skcomp_table = skcomp;
3803         skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
3804                                                            sizeof(*skcomp) *
3805                                                            SKD_N_COMPLETION_ENTRY);
3806
3807 err_out:
3808         return rc;
3809 }
3810
3811 static int skd_cons_skmsg(struct skd_device *skdev)
3812 {
3813         int rc = 0;
3814         u32 i;
3815
3816         dev_dbg(&skdev->pdev->dev,
3817                 "skmsg_table kcalloc, struct %lu, count %u total %lu\n",
3818                 sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
3819                 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
3820
3821         skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
3822                                      sizeof(struct skd_fitmsg_context),
3823                                      GFP_KERNEL);
3824         if (skdev->skmsg_table == NULL) {
3825                 rc = -ENOMEM;
3826                 goto err_out;
3827         }
3828
3829         for (i = 0; i < skdev->num_fitmsg_context; i++) {
3830                 struct skd_fitmsg_context *skmsg;
3831
3832                 skmsg = &skdev->skmsg_table[i];
3833
3834                 skmsg->id = i + SKD_ID_FIT_MSG;
3835
3836                 skmsg->state = SKD_MSG_STATE_IDLE;
3837                 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
3838                                                       SKD_N_FITMSG_BYTES,
3839                                                       &skmsg->mb_dma_address);
3840
3841                 if (skmsg->msg_buf == NULL) {
3842                         rc = -ENOMEM;
3843                         goto err_out;
3844                 }
3845
3846                 WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
3847                      (FIT_QCMD_ALIGN - 1),
3848                      "not aligned: msg_buf %p mb_dma_address %#llx\n",
3849                      skmsg->msg_buf, skmsg->mb_dma_address);
3850                 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
3851
3852                 skmsg->next = &skmsg[1];
3853         }
3854
3855         /* Free list is in order starting with the 0th entry. */
3856         skdev->skmsg_table[i - 1].next = NULL;
3857         skdev->skmsg_free_list = skdev->skmsg_table;
3858
3859 err_out:
3860         return rc;
3861 }
3862
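/*
 * Allocate a coherent array of n_sg FIT SG descriptors and pre-link
 * each descriptor's next_desc_ptr to the bus address of the next
 * one, terminating the chain with 0.
 */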
3863 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
3864                                                   u32 n_sg,
3865                                                   dma_addr_t *ret_dma_addr)
3866 {
3867         struct fit_sg_descriptor *sg_list;
3868         u32 nbytes;
3869
3870         nbytes = sizeof(*sg_list) * n_sg;
3871
3872         sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
3873
3874         if (sg_list != NULL) {
3875                 uint64_t dma_address = *ret_dma_addr;
3876                 u32 i;
3877
3878                 memset(sg_list, 0, nbytes);
3879
3880                 for (i = 0; i < n_sg - 1; i++) {
3881                         uint64_t ndp_off;
3882                         ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
3883
3884                         sg_list[i].next_desc_ptr = dma_address + ndp_off;
3885                 }
3886                 sg_list[i].next_desc_ptr = 0LL;
3887         }
3888
3889         return sg_list;
3890 }
3891
3892 static int skd_cons_skreq(struct skd_device *skdev)
3893 {
3894         int rc = 0;
3895         u32 i;
3896
3897         dev_dbg(&skdev->pdev->dev,
3898                 "skreq_table kcalloc, struct %lu, count %u total %lu\n",
3899                 sizeof(struct skd_request_context), skdev->num_req_context,
3900                 sizeof(struct skd_request_context) * skdev->num_req_context);
3901
3902         skdev->skreq_table = kcalloc(skdev->num_req_context,
3903                                      sizeof(struct skd_request_context),
3904                                      GFP_KERNEL);
3905         if (skdev->skreq_table == NULL) {
3906                 rc = -ENOMEM;
3907                 goto err_out;
3908         }
3909
3910         dev_dbg(&skdev->pdev->dev, "alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
3911                 skdev->sgs_per_request, sizeof(struct scatterlist),
3912                 skdev->sgs_per_request * sizeof(struct scatterlist));
3913
3914         for (i = 0; i < skdev->num_req_context; i++) {
3915                 struct skd_request_context *skreq;
3916
3917                 skreq = &skdev->skreq_table[i];
3918
3919                 skreq->id = i + SKD_ID_RW_REQUEST;
3920                 skreq->state = SKD_REQ_STATE_IDLE;
3921
3922                 skreq->sg = kcalloc(skdev->sgs_per_request,
3923                                     sizeof(struct scatterlist), GFP_KERNEL);
3924                 if (skreq->sg == NULL) {
3925                         rc = -ENOMEM;
3926                         goto err_out;
3927                 }
3928                 sg_init_table(skreq->sg, skdev->sgs_per_request);
3929
3930                 skreq->sksg_list = skd_cons_sg_list(skdev,
3931                                                     skdev->sgs_per_request,
3932                                                     &skreq->sksg_dma_address);
3933
3934                 if (skreq->sksg_list == NULL) {
3935                         rc = -ENOMEM;
3936                         goto err_out;
3937                 }
3938
3939                 skreq->next = &skreq[1];
3940         }
3941
3942         /* Free list is in order starting with the 0th entry. */
3943         skdev->skreq_table[i - 1].next = NULL;
3944         skdev->skreq_free_list = skdev->skreq_table;
3945
3946 err_out:
3947         return rc;
3948 }
3949
3950 static int skd_cons_skspcl(struct skd_device *skdev)
3951 {
3952         int rc = 0;
3953         u32 i, nbytes;
3954
3955         dev_dbg(&skdev->pdev->dev,
3956                 "skspcl_table kcalloc, struct %lu, count %u total %lu\n",
3957                 sizeof(struct skd_special_context), skdev->n_special,
3958                 sizeof(struct skd_special_context) * skdev->n_special);
3959
3960         skdev->skspcl_table = kcalloc(skdev->n_special,
3961                                       sizeof(struct skd_special_context),
3962                                       GFP_KERNEL);
3963         if (skdev->skspcl_table == NULL) {
3964                 rc = -ENOMEM;
3965                 goto err_out;
3966         }
3967
3968         for (i = 0; i < skdev->n_special; i++) {
3969                 struct skd_special_context *skspcl;
3970
3971                 skspcl = &skdev->skspcl_table[i];
3972
3973                 skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
3974                 skspcl->req.state = SKD_REQ_STATE_IDLE;
3975
3976                 skspcl->req.next = &skspcl[1].req;
3977
3978                 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
3979
3980                 skspcl->msg_buf =
3981                         pci_zalloc_consistent(skdev->pdev, nbytes,
3982                                               &skspcl->mb_dma_address);
3983                 if (skspcl->msg_buf == NULL) {
3984                         rc = -ENOMEM;
3985                         goto err_out;
3986                 }
3987
3988                 skspcl->req.sg = kcalloc(SKD_N_SG_PER_SPECIAL,
3989                                          sizeof(struct scatterlist),
3990                                          GFP_KERNEL);
3991                 if (skspcl->req.sg == NULL) {
3992                         rc = -ENOMEM;
3993                         goto err_out;
3994                 }
3995
3996                 skspcl->req.sksg_list =
3997                         skd_cons_sg_list(skdev,
3998                                          SKD_N_SG_PER_SPECIAL,
3999                                          &skspcl->req.sksg_dma_address);
4000                 if (skspcl->req.sksg_list == NULL) {
4001                         rc = -ENOMEM;
4002                         goto err_out;
4003                 }
4004         }
4005
4006         /* Free list is in order starting with the 0th entry. */
4007         skdev->skspcl_table[i - 1].req.next = NULL;
4008         skdev->skspcl_free_list = skdev->skspcl_table;
4009
4010 err_out:
4011         return rc;
4014 }
4015
4016 static int skd_cons_sksb(struct skd_device *skdev)
4017 {
4018         int rc = 0;
4019         struct skd_special_context *skspcl;
4020         u32 nbytes;
4021
4022         skspcl = &skdev->internal_skspcl;
4023
4024         skspcl->req.id = 0 + SKD_ID_INTERNAL;
4025         skspcl->req.state = SKD_REQ_STATE_IDLE;
4026
4027         nbytes = SKD_N_INTERNAL_BYTES;
4028
4029         skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4030                                                  &skspcl->db_dma_address);
4031         if (skspcl->data_buf == NULL) {
4032                 rc = -ENOMEM;
4033                 goto err_out;
4034         }
4035
4036         nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4037         skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4038                                                 &skspcl->mb_dma_address);
4039         if (skspcl->msg_buf == NULL) {
4040                 rc = -ENOMEM;
4041                 goto err_out;
4042         }
4043
4044         skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
4045                                                  &skspcl->req.sksg_dma_address);
4046         if (skspcl->req.sksg_list == NULL) {
4047                 rc = -ENOMEM;
4048                 goto err_out;
4049         }
4050
4051         if (!skd_format_internal_skspcl(skdev)) {
4052                 rc = -EINVAL;
4053                 goto err_out;
4054         }
4055
4056 err_out:
4057         return rc;
4058 }
4059
4060 static int skd_cons_disk(struct skd_device *skdev)
4061 {
4062         int rc = 0;
4063         struct gendisk *disk;
4064         struct request_queue *q;
4065         unsigned long flags;
4066
4067         disk = alloc_disk(SKD_MINORS_PER_DEVICE);
4068         if (!disk) {
4069                 rc = -ENOMEM;
4070                 goto err_out;
4071         }
4072
4073         skdev->disk = disk;
4074         sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
4075
4076         disk->major = skdev->major;
4077         disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
4078         disk->fops = &skd_blockdev_ops;
4079         disk->private_data = skdev;
4080
4081         q = blk_init_queue(skd_request_fn, &skdev->lock);
4082         if (!q) {
4083                 rc = -ENOMEM;
4084                 goto err_out;
4085         }
4086         blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
4087
4088         skdev->queue = q;
4089         disk->queue = q;
4090         q->queuedata = skdev;
4091
4092         blk_queue_write_cache(q, true, true);
4093         blk_queue_max_segments(q, skdev->sgs_per_request);
4094         blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
4095
4096         /* set optimal I/O size to 8KB */
4097         blk_queue_io_opt(q, 8192);
4098
4099         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
4100         queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
4101
4102         spin_lock_irqsave(&skdev->lock, flags);
4103         dev_dbg(&skdev->pdev->dev, "stopping queue\n");
4104         blk_stop_queue(skdev->queue);
4105         spin_unlock_irqrestore(&skdev->lock, flags);
4106
4107 err_out:
4108         return rc;
4109 }
4110
4111 #define SKD_N_DEV_TABLE         16u
4112 static u32 skd_next_devno;
4113
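/*
 * Build a skd_device in dependency order: completion queue, FIT
 * message contexts, request contexts, special contexts, the internal
 * special context, and finally the gendisk and request queue. On any
 * failure skd_destruct() tears down whatever was built; each
 * skd_free_*() helper tolerates pieces that were never allocated.
 */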
4114 static struct skd_device *skd_construct(struct pci_dev *pdev)
4115 {
4116         struct skd_device *skdev;
4117         int blk_major = skd_major;
4118         int rc;
4119
4120         skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
4121
4122         if (!skdev) {
4123                 dev_err(&pdev->dev, "memory alloc failure\n");
4124                 return NULL;
4125         }
4126
4127         skdev->state = SKD_DRVR_STATE_LOAD;
4128         skdev->pdev = pdev;
4129         skdev->devno = skd_next_devno++;
4130         skdev->major = blk_major;
4131         skdev->dev_max_queue_depth = 0;
4132
4133         skdev->num_req_context = skd_max_queue_depth;
4134         skdev->num_fitmsg_context = skd_max_queue_depth;
4135         skdev->n_special = skd_max_pass_thru;
4136         skdev->cur_max_queue_depth = 1;
4137         skdev->queue_low_water_mark = 1;
4138         skdev->proto_ver = 99;
4139         skdev->sgs_per_request = skd_sgs_per_request;
4140         skdev->dbg_level = skd_dbg_level;
4141
4142         spin_lock_init(&skdev->lock);
4143
4144         INIT_WORK(&skdev->completion_worker, skd_completion_worker);
4145
4146         dev_dbg(&skdev->pdev->dev, "skcomp\n");
4147         rc = skd_cons_skcomp(skdev);
4148         if (rc < 0)
4149                 goto err_out;
4150
4151         dev_dbg(&skdev->pdev->dev, "skmsg\n");
4152         rc = skd_cons_skmsg(skdev);
4153         if (rc < 0)
4154                 goto err_out;
4155
4156         dev_dbg(&skdev->pdev->dev, "skreq\n");
4157         rc = skd_cons_skreq(skdev);
4158         if (rc < 0)
4159                 goto err_out;
4160
4161         dev_dbg(&skdev->pdev->dev, "skspcl\n");
4162         rc = skd_cons_skspcl(skdev);
4163         if (rc < 0)
4164                 goto err_out;
4165
4166         dev_dbg(&skdev->pdev->dev, "sksb\n");
4167         rc = skd_cons_sksb(skdev);
4168         if (rc < 0)
4169                 goto err_out;
4170
4171         dev_dbg(&skdev->pdev->dev, "disk\n");
4172         rc = skd_cons_disk(skdev);
4173         if (rc < 0)
4174                 goto err_out;
4175
4176         dev_dbg(&skdev->pdev->dev, "VICTORY\n");
4177         return skdev;
4178
4179 err_out:
4180         dev_dbg(&skdev->pdev->dev, "construct failed\n");
4181         skd_destruct(skdev);
4182         return NULL;
4183 }
4184
4185 /*
4186  *****************************************************************************
4187  * DESTRUCT (FREE)
4188  *****************************************************************************
4189  */
4190
4191 static void skd_free_skcomp(struct skd_device *skdev)
4192 {
4193         if (skdev->skcomp_table)
4194                 pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
4195                                     skdev->skcomp_table, skdev->cq_dma_address);
4196
4197         skdev->skcomp_table = NULL;
4198         skdev->cq_dma_address = 0;
4199 }
4200
4201 static void skd_free_skmsg(struct skd_device *skdev)
4202 {
4203         u32 i;
4204
4205         if (skdev->skmsg_table == NULL)
4206                 return;
4207
4208         for (i = 0; i < skdev->num_fitmsg_context; i++) {
4209                 struct skd_fitmsg_context *skmsg;
4210
4211                 skmsg = &skdev->skmsg_table[i];
4212
4213                 if (skmsg->msg_buf != NULL) {
4214                         pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
4215                                             skmsg->msg_buf,
4216                                             skmsg->mb_dma_address);
4217                 }
4218                 skmsg->msg_buf = NULL;
4219                 skmsg->mb_dma_address = 0;
4220         }
4221
4222         kfree(skdev->skmsg_table);
4223         skdev->skmsg_table = NULL;
4224 }
4225
4226 static void skd_free_sg_list(struct skd_device *skdev,
4227                              struct fit_sg_descriptor *sg_list,
4228                              u32 n_sg, dma_addr_t dma_addr)
4229 {
4230         if (sg_list != NULL) {
4231                 u32 nbytes;
4232
4233                 nbytes = sizeof(*sg_list) * n_sg;
4234
4235                 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
4236         }
4237 }

static void skd_free_skreq(struct skd_device *skdev)
{
	u32 i;

	if (skdev->skreq_table == NULL)
		return;

	for (i = 0; i < skdev->num_req_context; i++) {
		struct skd_request_context *skreq;

		skreq = &skdev->skreq_table[i];

		skd_free_sg_list(skdev, skreq->sksg_list,
				 skdev->sgs_per_request,
				 skreq->sksg_dma_address);

		skreq->sksg_list = NULL;
		skreq->sksg_dma_address = 0;

		kfree(skreq->sg);
	}

	kfree(skdev->skreq_table);
	skdev->skreq_table = NULL;
}

static void skd_free_skspcl(struct skd_device *skdev)
{
	u32 i;
	u32 nbytes;

	if (skdev->skspcl_table == NULL)
		return;

	for (i = 0; i < skdev->n_special; i++) {
		struct skd_special_context *skspcl;

		skspcl = &skdev->skspcl_table[i];

		if (skspcl->msg_buf != NULL) {
			nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
			pci_free_consistent(skdev->pdev, nbytes,
					    skspcl->msg_buf,
					    skspcl->mb_dma_address);
		}

		skspcl->msg_buf = NULL;
		skspcl->mb_dma_address = 0;

		skd_free_sg_list(skdev, skspcl->req.sksg_list,
				 SKD_N_SG_PER_SPECIAL,
				 skspcl->req.sksg_dma_address);

		skspcl->req.sksg_list = NULL;
		skspcl->req.sksg_dma_address = 0;

		kfree(skspcl->req.sg);
	}

	kfree(skdev->skspcl_table);
	skdev->skspcl_table = NULL;
}

static void skd_free_sksb(struct skd_device *skdev)
{
	struct skd_special_context *skspcl;
	u32 nbytes;

	skspcl = &skdev->internal_skspcl;

	if (skspcl->data_buf != NULL) {
		nbytes = SKD_N_INTERNAL_BYTES;

		pci_free_consistent(skdev->pdev, nbytes,
				    skspcl->data_buf, skspcl->db_dma_address);
	}

	skspcl->data_buf = NULL;
	skspcl->db_dma_address = 0;

	if (skspcl->msg_buf != NULL) {
		nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
		pci_free_consistent(skdev->pdev, nbytes,
				    skspcl->msg_buf, skspcl->mb_dma_address);
	}

	skspcl->msg_buf = NULL;
	skspcl->mb_dma_address = 0;

	skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
			 skspcl->req.sksg_dma_address);

	skspcl->req.sksg_list = NULL;
	skspcl->req.sksg_dma_address = 0;
}

static void skd_free_disk(struct skd_device *skdev)
{
	struct gendisk *disk = skdev->disk;

	if (disk && (disk->flags & GENHD_FL_UP))
		del_gendisk(disk);

	if (skdev->queue) {
		blk_cleanup_queue(skdev->queue);
		skdev->queue = NULL;
		if (disk)
			disk->queue = NULL;
	}

	put_disk(disk);
	skdev->disk = NULL;
}

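/*
 * skd_destruct() tears the device down in what appears to be the
 * reverse order of construction, so each step only touches objects
 * that are still valid; the skd_free_*() helpers above tolerate
 * fields that were never allocated, which lets this run safely on a
 * partially constructed device.
 */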
static void skd_destruct(struct skd_device *skdev)
{
	if (skdev == NULL)
		return;

	dev_dbg(&skdev->pdev->dev, "disk\n");
	skd_free_disk(skdev);

	dev_dbg(&skdev->pdev->dev, "sksb\n");
	skd_free_sksb(skdev);

	dev_dbg(&skdev->pdev->dev, "skspcl\n");
	skd_free_skspcl(skdev);

	dev_dbg(&skdev->pdev->dev, "skreq\n");
	skd_free_skreq(skdev);

	dev_dbg(&skdev->pdev->dev, "skmsg\n");
	skd_free_skmsg(skdev);

	dev_dbg(&skdev->pdev->dev, "skcomp\n");
	skd_free_skcomp(skdev);

	dev_dbg(&skdev->pdev->dev, "skdev\n");
	kfree(skdev);
}

/*
 *****************************************************************************
 * BLOCK DEVICE (BDEV) GLUE
 *****************************************************************************
 */

static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct skd_device *skdev;
	u64 capacity;

	skdev = bdev->bd_disk->private_data;

	dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
		bdev->bd_disk->disk_name, current->comm);

	if (skdev->read_cap_is_valid) {
		capacity = get_capacity(skdev->disk);
		geo->heads = 64;
		geo->sectors = 255;
		geo->cylinders = capacity / (255 * 64);

		return 0;
	}
	return -EIO;
}
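
/*
 * Worked example for the synthetic geometry reported by
 * skd_bdev_getgeo() above: with 64 heads and 255 sectors/track, one
 * cylinder spans 64 * 255 = 16320 sectors (~8 MiB at 512 bytes per
 * sector).  A 100 GB device (195312500 sectors) would therefore report
 * 195312500 / 16320 = 11967 cylinders.
 */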

static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
	dev_dbg(&skdev->pdev->dev, "add_disk\n");
	device_add_disk(parent, skdev->disk);
	return 0;
}

static const struct block_device_operations skd_blockdev_ops = {
	.owner		= THIS_MODULE,
	.ioctl		= skd_bdev_ioctl,
	.getgeo		= skd_bdev_getgeo,
};

/*
 *****************************************************************************
 * PCIe DRIVER GLUE
 *****************************************************************************
 */

static const struct pci_device_id skd_pci_tbl[] = {
	{ PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ 0 }                     /* terminate list */
};

MODULE_DEVICE_TABLE(pci, skd_pci_tbl);

static char *skd_pci_info(struct skd_device *skdev, char *str)
{
	int pcie_reg;

	strcpy(str, "PCIe (");
	pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);

	if (pcie_reg) {
		char lwstr[6];
		uint16_t pcie_lstat, lspeed, lwidth;

		pcie_reg += 0x12;
		pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
		lspeed = pcie_lstat & (0xF);
		lwidth = (pcie_lstat & 0x3F0) >> 4;

		if (lspeed == 1)
			strcat(str, "2.5GT/s ");
		else if (lspeed == 2)
			strcat(str, "5.0GT/s ");
		else if (lspeed == 3)
			strcat(str, "8.0GT/s ");
		else
			strcat(str, "<unknown> ");
		snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
		strcat(str, lwstr);
	}
	return str;
}
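
/*
 * A minimal sketch of the same link-status query using the generic
 * PCIe capability accessors, which avoids the hard-coded 0x12 offset
 * (0x12 is PCI_EXP_LNKSTA relative to the express capability).  This
 * is an illustrative sketch, assuming only the standard definitions
 * from <uapi/linux/pci_regs.h>.
 */
#if 0	/* illustrative sketch, not built */
static void skd_pci_link_sketch(struct pci_dev *pdev)
{
	u16 lnkstat;

	if (!pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnkstat)) {
		/* encoded speed: 1 = 2.5 GT/s, 2 = 5.0 GT/s, 3 = 8.0 GT/s */
		u16 speed = lnkstat & PCI_EXP_LNKSTA_CLS;
		u16 width = (lnkstat & PCI_EXP_LNKSTA_NLW) >>
			    PCI_EXP_LNKSTA_NLW_SHIFT;

		dev_dbg(&pdev->dev, "link speed code %u, width x%u\n",
			speed, width);
	}
}
#endif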

static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i;
	int rc = 0;
	char pci_str[32];
	struct skd_device *skdev;

	dev_info(&pdev->dev, "STEC s1120 Driver(%s) version %s-b%s\n",
		 DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
	dev_info(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
		 pdev->device);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
			dev_err(&pdev->dev, "consistent DMA mask error\n");
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "DMA mask error %d\n", rc);
			goto err_out_regions;
		}
	}

	if (!skd_major) {
		rc = register_blkdev(0, DRV_NAME);
		if (rc < 0)
			goto err_out_regions;
		BUG_ON(!rc);
		skd_major = rc;
	}

	skdev = skd_construct(pdev);
	if (skdev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	skd_pci_info(skdev, pci_str);
	dev_info(&pdev->dev, "%s 64bit\n", pci_str);

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"bad enable of PCIe error reporting rc=%d\n", rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else {
		skdev->pcie_error_reporting_is_enabled = 1;
	}

	pci_set_drvdata(pdev, skdev);

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			dev_err(&pdev->dev,
				"Unable to map adapter memory!\n");
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		dev_dbg(&pdev->dev, "mem_map=%p, phys=%016llx, size=%d\n",
			skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
			skdev->mem_size[i]);
	}

	rc = skd_acquire_irq(skdev);
	if (rc) {
		dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	rc = wait_event_interruptible_timeout(skdev->waitq,
					      (skdev->gendisk_on),
					      (SKD_START_WAIT_SECONDS * HZ));
	if (skdev->gendisk_on > 0) {
		/* device came on-line after reset */
		skd_bdev_attach(&pdev->dev, skdev);
		rc = 0;
	} else {
		/*
		 * The wait timed out; something is wrong with the device,
		 * so don't add the disk structure.
		 */
		dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
			rc);
		/* rc == 0 means a plain timeout; report it as -ENXIO */
		if (!rc)
			rc = -ENXIO;
		goto err_out_timer;
	}

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return rc;
}
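
/*
 * The 64-then-32-bit DMA mask dance in skd_pci_probe() (and again in
 * skd_pci_resume() below) can be written more compactly with the
 * generic DMA API, which sets the streaming and coherent masks in one
 * call.  A minimal sketch, assuming dma_set_mask_and_coherent() from
 * <linux/dma-mapping.h>:
 */
#if 0	/* illustrative sketch, not built */
static int skd_set_dma_masks_sketch(struct pci_dev *pdev)
{
	int rc;

	/* Prefer 64-bit DMA; fall back to 32-bit if the platform objects. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return rc;
}
#endif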

static void skd_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return;
	}
	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return -EIO;
	}

	skd_stop_device(skdev);

	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int skd_pci_resume(struct pci_dev *pdev)
{
	int i;
	int rc = 0;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return -EIO;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
			dev_err(&pdev->dev, "consistent DMA mask error\n");
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "DMA mask error %d\n", rc);
			goto err_out_regions;
		}
	}

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"bad enable of PCIe error reporting rc=%d\n", rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else {
		skdev->pcie_error_reporting_is_enabled = 1;
	}

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			dev_err(&pdev->dev, "Unable to map adapter memory!\n");
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		dev_dbg(&pdev->dev, "mem_map=%p, phys=%016llx, size=%d\n",
			skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
			skdev->mem_size[i]);
	}
	rc = skd_acquire_irq(skdev);
	if (rc) {
		dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}

static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	dev_err(&pdev->dev, "%s called\n", __func__);

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return;
	}

	dev_err(&pdev->dev, "calling stop\n");
	skd_stop_device(skdev);
}

static struct pci_driver skd_driver = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.suspend	= skd_pci_suspend,
	.resume		= skd_pci_resume,
	.shutdown	= skd_pci_shutdown,
};
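
/*
 * The .suspend/.resume members above use the legacy PCI power
 * management callbacks.  The modern equivalent would be a struct
 * dev_pm_ops wired through .driver.pm; a minimal sketch, assuming
 * hypothetical skd_suspend()/skd_resume() wrappers of type
 * int (*)(struct device *):
 */
#if 0	/* illustrative sketch, not built */
static SIMPLE_DEV_PM_OPS(skd_pm_ops, skd_suspend, skd_resume);

static struct pci_driver skd_driver_pm_sketch = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.shutdown	= skd_pci_shutdown,
	.driver.pm	= &skd_pm_ops,
};
#endif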

/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */

const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
		return "LINK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}

const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return "DRAINING_TIMEOUT";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}

static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
	switch (state) {
	case SKD_MSG_STATE_IDLE:
		return "IDLE";
	case SKD_MSG_STATE_BUSY:
		return "BUSY";
	default:
		return "???";
	}
}

static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	case SKD_REQ_STATE_ABORTED:
		return "ABORTED";
	default:
		return "???";
	}
}

static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
	dev_dbg(&skdev->pdev->dev, "  drive_state=%s(%d) driver_state=%s(%d)\n",
		skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		skd_skdev_state_to_str(skdev->state), skdev->state);
	dev_dbg(&skdev->pdev->dev, "  busy=%d limit=%d dev=%d lowat=%d\n",
		skdev->in_flight, skdev->cur_max_queue_depth,
		skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	dev_dbg(&skdev->pdev->dev, "  timestamp=0x%x cycle=%d cycle_ix=%d\n",
		skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
}

static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event)
{
	dev_dbg(&skdev->pdev->dev, "skmsg=%p event='%s'\n", skmsg, event);
	dev_dbg(&skdev->pdev->dev, "  state=%s(%d) id=0x%04x length=%d\n",
		skd_skmsg_state_to_str(skmsg->state), skmsg->state, skmsg->id,
		skmsg->length);
}

static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
	dev_dbg(&skdev->pdev->dev, "  state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
		skreq->fitmsg_id);
	dev_dbg(&skdev->pdev->dev, "  timo=0x%x sg_dir=%d n_sg=%d\n",
		skreq->timeout_stamp, skreq->data_dir, skreq->n_sg);

	if (skreq->req != NULL) {
		struct request *req = skreq->req;
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		dev_dbg(&skdev->pdev->dev,
			"req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req,
			lba, lba, count, count, (int)rq_data_dir(req));
	} else {
		dev_dbg(&skdev->pdev->dev, "req=NULL\n");
	}
}

/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */

static int __init skd_init(void)
{
	BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
	BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
	BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
	BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
	BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
	BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
	BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
	BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);

	pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_err(PFX "skd_isr_type %d invalid, reset to %d\n",
		       skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	if (skd_max_queue_depth < 1 ||
	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_err(PFX "skd_max_queue_depth %d invalid, reset to %d\n",
		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 ||
	    skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
		pr_err(PFX "skd_max_req_per_msg %d invalid, reset to %d\n",
		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
		pr_err(PFX "skd_sgs_per_request %d invalid, reset to %d\n",
		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_err(PFX "skd_dbg_level %d invalid, reset to %d\n",
		       skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_err(PFX "skd_isr_comp_limit %d invalid, reset to %d\n",
		       skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
		pr_err(PFX "skd_max_pass_thru %d invalid, reset to %d\n",
		       skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
		skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
	}

	return pci_register_driver(&skd_driver);
}
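
/*
 * The checks above clamp the module parameters to sane ranges at load
 * time.  Assuming the module_param() declarations (not shown in this
 * part of the file) expose these variables under the same names, a
 * load line overriding a few of them would look like:
 *
 *   modprobe skd skd_max_queue_depth=64 skd_sgs_per_request=128
 */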

static void __exit skd_exit(void)
{
	pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);

	pci_unregister_driver(&skd_driver);

	if (skd_major)
		unregister_blkdev(skd_major, DRV_NAME);
}

module_init(skd_init);
module_exit(skd_exit);