treewide: Use fallthrough pseudo-keyword
linux-block.git: drivers/scsi/qla2xxx/qla_iocb.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
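
/*
 * Worked example (illustrative): for dsds = 17, the Command Type 2 IOCB
 * holds the first 3 DSDs and each Continuation Type 0 IOCB holds 7 more,
 * so iocbs = 1 + (17 - 3) / 7 = 3; in general this is
 * 1 + DIV_ROUND_UP(dsds - 3, 7) for dsds > 3.
 */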

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
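
/*
 * Worked example (illustrative): for dsds = 12, the Command Type 3 IOCB
 * holds the first 2 DSDs and each Continuation Type 1 IOCB holds 5 more,
 * so iocbs = 1 + (12 - 2) / 5 = 3; in general this is
 * 1 + DIV_ROUND_UP(dsds - 2, 5) for dsds > 2.
 */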

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
                           CONTINUE_A64_TYPE, &cont_pkt->entry_type);

        return (cont_pkt);
}
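
/*
 * Note: neither prep_cont helper checks for ring space; callers are
 * expected to have reserved enough request-queue entries (req->cnt) for
 * the command IOCB and all of its continuation IOCBs before building
 * the packet chain.
 */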

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIF bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32-bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        struct dsd32    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
        cur_dsd = cmd_pkt->dsd32;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd32(&cur_dsd, sg);
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64-bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
        cur_dsd = cmd_pkt->dsd64;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
}

/*
 * Find the first handle that is not in use, starting from
 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
 * associated with @req.
 */
uint32_t qla2xxx_get_next_handle(struct req_que *req)
{
        uint32_t index, handle = req->current_outstanding_cmd;

        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        return handle;
        }

        return 0;
}
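
/*
 * Handle 0 is reserved as the "invalid handle" value, which is why the
 * scan above starts at 1 and why a return value of 0 means that every
 * outstanding-command slot is currently in use.
 */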

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        vha = sp->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        handle = qla2xxx_get_next_handle(req);
        if (handle == 0)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
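        /*
         * req->cnt caches the number of free request-ring entries; when it
         * looks too small, re-derive it from the distance between our
         * ring_index and the hardware out-pointer. The two entries of
         * headroom keep the ring from being driven completely full, since
         * a full ring would be indistinguishable from an empty one.
         */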
        if (req->cnt < (req_cnt + 2)) {
                cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
        cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
                        wrt_reg_dword(req->req_q_in, req->ring_index);
                } else if (IS_QLA83XX(ha)) {
                        wrt_reg_dword(req->req_q_in, req->ring_index);
                        rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
                        rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
                        rd_reg_dword_relaxed(&reg->isp24.req_q_in);
                } else {
                        wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;
        struct req_que *req = qpair->req;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = make_handle(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
        ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue a marker IOCB.
 * The caller CAN have the hardware lock held, as specified by the ha_locked
 * parameter. The lock might be released and then reacquired.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        struct dsd64 *cur_dsd = NULL, *next_dsd;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return 0;
        }

        vha = sp->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = sp->u.scmd.ct6_ctx;

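        /*
         * Command Type 6 does not embed DSDs in continuation IOCBs;
         * instead it points at driver-allocated DSD lists. Each pass below
         * takes a pre-allocated list from ha->gbl_dsd_list, fills up to
         * QLA_DSDS_PER_IOCB entries, and chains the lists together through
         * their final entry.
         */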
        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cmd_pkt->fcp_dsd.address);
                        cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
                } else {
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd++;
                }
                cur_dsd = next_dsd;
                while (avail_dsds) {
                        append_dsd64(&cur_dsd, cur_seg);
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine the number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}
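
/*
 * Worked example (illustrative, assuming QLA_DSDS_PER_IOCB is 37 as defined
 * in qla_def.h): dsds = 100 needs 100 / 37 = 2 full lists plus one partial
 * list for the remaining 26 descriptors, i.e. 3 lists in total.
 */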

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        uint16_t tot_dsds, struct req_que *req)
{
        uint16_t        avail_dsds;
        struct dsd64    *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = cpu_to_le32(0);
                return;
        }

        vha = sp->vha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = &cmd_pkt->dsd;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
                        cur_dsd = cont_pkt->dsd;
                        avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
                }

                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
}

struct fw_dif_context {
        __le32  ref_tag;
        __le16  app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For Type 2 protection: the 16 bit GUARD and 32 bit REF tags have
         * to match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}

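/*
 * qla24xx_get_one_block_sg() - Carve the next protection-interval-sized
 * block out of the data scatterlist.
 *
 * Each call advances @sgx by at most @blk_sz bytes, splitting scatterlist
 * elements that straddle a protection-interval boundary; *@partial is set
 * when the current element ended before a full interval was consumed.
 * Returns 0 once all of sgx->tot_bytes have been walked, else 1.
 */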
int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        struct dsd64 *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
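                        /*
                         * Each DSD entry is 12 bytes: a 64-bit address
                         * plus a 32-bit length. The "+ 1" reserves the
                         * final entry of the list for chaining to the
                         * next list or for the null terminator.
                         */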
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                              &sp->u.scmd.crc_ctx->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd = next_dsd;
                }
                put_unaligned_le64(sle_dma, &cur_dsd->address);
                cur_dsd->length = cpu_to_le32(sle_dma_len);
                cur_dsd++;
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        struct dsd64 *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
        } else if (tc) {
                sgl = tc->sg;
        } else {
                BUG();
                return 1;
        }

        for_each_sg(sgl, sg, tot_dsds, i) {
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                              &sp->u.scmd.crc_ctx->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                *tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        put_unaligned_le64(dsd_ptr->dsd_list_dma,
                                           &cur_dsd->address);
                        cur_dsd->length = cpu_to_le32(dsd_list_len);
                        cur_dsd = next_dsd;
                }
                append_dsd64(&cur_dsd, sg);
                avail_dsds--;
        }
        /* Null termination */
        cur_dsd->address = 0;
        cur_dsd->length = 0;
        cur_dsd++;
        return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
        struct scatterlist *sg, *sgl;
        struct crc_context *difctx = NULL;
        struct scsi_qla_host *vha;
        uint dsd_list_len;
        uint avail_dsds = 0;
        uint used_dsds = tot_dsds;
        bool dif_local_dma_alloc = false;
        bool direction_to_device = false;
        int i;

        if (sp) {
                struct scsi_cmnd *cmd = GET_CMD_SP(sp);

                sgl = scsi_prot_sglist(cmd);
                vha = sp->vha;
                difctx = sp->u.scmd.crc_ctx;
                direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
                    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
                    __func__, cmd, difctx, sp);
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
                difctx = tc->ctx;
                direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
            "%s: enter (write=%u)\n", __func__, direction_to_device);

        /* if initiator doing write or target doing read */
        if (direction_to_device) {
                for_each_sg(sgl, sg, tot_dsds, i) {
                        u64 sle_phys = sg_phys(sg);

                        /* If SGE addr + len flips bits in upper 32-bits */
                        if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
                                ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
                                    "%s: page boundary crossing (phys=%llx len=%x)\n",
                                    __func__, sle_phys, sg->length);

                                if (difctx) {
                                        ha->dif_bundle_crossed_pages++;
                                        dif_local_dma_alloc = true;
                                } else {
                                        ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
                                            vha, 0xe022,
                                            "%s: difctx pointer is NULL\n",
                                            __func__);
                                }
                                break;
                        }
                }
                ha->dif_bundle_writes++;
        } else {
                ha->dif_bundle_reads++;
        }

        if (ql2xdifbundlinginternalbuffers)
                dif_local_dma_alloc = direction_to_device;

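        /*
         * "DIF bundling": instead of handing the firmware the original
         * protection scatterlist, copy the protection data into
         * driver-local DMA buffers. This path is taken when a protection
         * SG element would cross a 4GB boundary (the upper-32-bit check
         * above) or when ql2xdifbundlinginternalbuffers forces it for
         * writes.
         */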
1149         if (dif_local_dma_alloc) {
1150                 u32 track_difbundl_buf = 0;
1151                 u32 ldma_sg_len = 0;
1152                 u8 ldma_needed = 1;
1153
1154                 difctx->no_dif_bundl = 0;
1155                 difctx->dif_bundl_len = 0;
1156
1157                 /* Track DSD buffers */
1158                 INIT_LIST_HEAD(&difctx->ldif_dsd_list);
1159                 /* Track local DMA buffers */
1160                 INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);
1161
1162                 for_each_sg(sgl, sg, tot_dsds, i) {
1163                         u32 sglen = sg_dma_len(sg);
1164
1165                         ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
1166                             "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
1167                             __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
1168                             difctx->dif_bundl_len, ldma_needed);
1169
1170                         while (sglen) {
1171                                 u32 xfrlen = 0;
1172
1173                                 if (ldma_needed) {
1174                                         /*
1175                                          * Allocate list item to store
1176                                          * the DMA buffers
1177                                          */
1178                                         dsd_ptr = kzalloc(sizeof(*dsd_ptr),
1179                                             GFP_ATOMIC);
1180                                         if (!dsd_ptr) {
1181                                                 ql_dbg(ql_dbg_tgt, vha, 0xe024,
1182                                                     "%s: failed alloc dsd_ptr\n",
1183                                                     __func__);
1184                                                 return 1;
1185                                         }
1186                                         ha->dif_bundle_kallocs++;
1187
1188                                         /* allocate dma buffer */
1189                                         dsd_ptr->dsd_addr = dma_pool_alloc
1190                                                 (ha->dif_bundl_pool, GFP_ATOMIC,
1191                                                  &dsd_ptr->dsd_list_dma);
1192                                         if (!dsd_ptr->dsd_addr) {
1193                                                 ql_dbg(ql_dbg_tgt, vha, 0xe024,
1194                                                     "%s: failed alloc ->dsd_ptr\n",
1195                                                     __func__);
1196                                                 /*
1197                                                  * need to cleanup only this
1198                                                  * dsd_ptr rest will be done
1199                                                  * by sp_free_dma()
1200                                                  */
1201                                                 kfree(dsd_ptr);
1202                                                 ha->dif_bundle_kallocs--;
1203                                                 return 1;
1204                                         }
1205                                         ha->dif_bundle_dma_allocs++;
1206                                         ldma_needed = 0;
1207                                         difctx->no_dif_bundl++;
1208                                         list_add_tail(&dsd_ptr->list,
1209                                             &difctx->ldif_dma_hndl_list);
1210                                 }
1211
1212                                 /* xfrlen is min of dma pool size and sglen */
1213                                 xfrlen = (sglen >
1214                                    (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
1215                                     DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
1216                                     sglen;
1217
1218                                 /* replace with local allocated dma buffer */
1219                                 sg_pcopy_to_buffer(sgl, sg_nents(sgl),
1220                                     dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
1221                                     difctx->dif_bundl_len);
1222                                 difctx->dif_bundl_len += xfrlen;
1223                                 sglen -= xfrlen;
1224                                 ldma_sg_len += xfrlen;
1225                                 if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
1226                                     sg_is_last(sg)) {
1227                                         ldma_needed = 1;
1228                                         ldma_sg_len = 0;
1229                                 }
1230                         }
1231                 }
1232
1233                 track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
1234                 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
1235                     "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
1236                     difctx->dif_bundl_len, difctx->no_dif_bundl,
1237                     track_difbundl_buf);
1238
1239                 if (sp)
1240                         sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
1241                 else
1242                         tc->prot_flags = DIF_BUNDL_DMA_VALID;
1243
1244                 list_for_each_entry_safe(dif_dsd, nxt_dsd,
1245                     &difctx->ldif_dma_hndl_list, list) {
1246                         u32 sglen = (difctx->dif_bundl_len >
1247                             DIF_BUNDLING_DMA_POOL_SIZE) ?
1248                             DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;
1249
1250                         BUG_ON(track_difbundl_buf == 0);
1251
1252                         /* Allocate additional continuation packets? */
1253                         if (avail_dsds == 0) {
1254                                 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
1255                                     0xe024,
1256                                     "%s: adding continuation iocb's\n",
1257                                     __func__);
1258                                 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1259                                     QLA_DSDS_PER_IOCB : used_dsds;
1260                                 dsd_list_len = (avail_dsds + 1) * 12;
1261                                 used_dsds -= avail_dsds;
1262
1263                                 /* allocate tracking DS */
1264                                 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1265                                 if (!dsd_ptr) {
1266                                         ql_dbg(ql_dbg_tgt, vha, 0xe026,
1267                                             "%s: failed alloc dsd_ptr\n",
1268                                             __func__);
1269                                         return 1;
1270                                 }
1271                                 ha->dif_bundle_kallocs++;
1272
1273                                 difctx->no_ldif_dsd++;
1274                                 /* allocate new list */
1275                                 dsd_ptr->dsd_addr =
1276                                     dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1277                                         &dsd_ptr->dsd_list_dma);
1278                                 if (!dsd_ptr->dsd_addr) {
1279                                         ql_dbg(ql_dbg_tgt, vha, 0xe026,
1280                                             "%s: failed alloc ->dsd_addr\n",
1281                                             __func__);
1282                                         /*
1283                                          * need to cleanup only this dsd_ptr
1284                                          *  rest will be done by sp_free_dma()
1285                                          */
1286                                         kfree(dsd_ptr);
1287                                         ha->dif_bundle_kallocs--;
1288                                         return 1;
1289                                 }
1290                                 ha->dif_bundle_dma_allocs++;
1291
1292                                 if (sp) {
1293                                         list_add_tail(&dsd_ptr->list,
1294                                             &difctx->ldif_dsd_list);
1295                                         sp->flags |= SRB_CRC_CTX_DSD_VALID;
1296                                 } else {
1297                                         list_add_tail(&dsd_ptr->list,
1298                                             &difctx->ldif_dsd_list);
1299                                         tc->ctx_dsd_alloced = 1;
1300                                 }
1301
1302                                 /* add new list to cmd iocb or last list */
1303                                 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1304                                                    &cur_dsd->address);
1305                                 cur_dsd->length = cpu_to_le32(dsd_list_len);
1306                                 cur_dsd = dsd_ptr->dsd_addr;
1307                         }
1308                         put_unaligned_le64(dif_dsd->dsd_list_dma,
1309                                            &cur_dsd->address);
1310                         cur_dsd->length = cpu_to_le32(sglen);
1311                         cur_dsd++;
1312                         avail_dsds--;
1313                         difctx->dif_bundl_len -= sglen;
1314                         track_difbundl_buf--;
1315                 }
1316
1317                 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
1318                     "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
1319                         difctx->no_ldif_dsd, difctx->no_dif_bundl);
1320         } else {
1321                 for_each_sg(sgl, sg, tot_dsds, i) {
1322                         /* Allocate additional continuation packets? */
1323                         if (avail_dsds == 0) {
1324                                 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1325                                     QLA_DSDS_PER_IOCB : used_dsds;
1326                                 dsd_list_len = (avail_dsds + 1) * 12;
1327                                 used_dsds -= avail_dsds;
1328
1329                                 /* allocate tracking DS */
1330                                 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1331                                 if (!dsd_ptr) {
1332                                         ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1333                                             vha, 0xe027,
1334                                             "%s: failed alloc dsd_dma...\n",
1335                                             __func__);
1336                                         return 1;
1337                                 }
1338
1339                                 /* allocate new list */
1340                                 dsd_ptr->dsd_addr =
1341                                     dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1342                                         &dsd_ptr->dsd_list_dma);
1343                                 if (!dsd_ptr->dsd_addr) {
1344                                         /* need to cleanup only this dsd_ptr */
1345                                         /* rest will be done by sp_free_dma() */
1346                                         kfree(dsd_ptr);
1347                                         return 1;
1348                                 }
1349
1350                                 if (sp) {
1351                                         list_add_tail(&dsd_ptr->list,
1352                                             &difctx->dsd_list);
1353                                         sp->flags |= SRB_CRC_CTX_DSD_VALID;
1354                                 } else {
1355                                         list_add_tail(&dsd_ptr->list,
1356                                             &difctx->dsd_list);
1357                                         tc->ctx_dsd_alloced = 1;
1358                                 }
1359
1360                                 /* add new list to cmd iocb or last list */
1361                                 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1362                                                    &cur_dsd->address);
1363                                 cur_dsd->length = cpu_to_le32(dsd_list_len);
1364                                 cur_dsd = dsd_ptr->dsd_addr;
1365                         }
1366                         append_dsd64(&cur_dsd, sg);
1367                         avail_dsds--;
1368                 }
1369         }
1370         /* Null termination */
1371         cur_dsd->address = 0;
1372         cur_dsd->length = 0;
1373         cur_dsd++;
1374         return 0;
1375 }
1376
1377 /**
1378  * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1379  *                                                      Type 6 IOCB types.
1380  *
1381  * @sp: SRB command to process
1382  * @cmd_pkt: Command type 3 IOCB
1383  * @tot_dsds: Total number of segments to transfer
1384  * @tot_prot_dsds: Total number of segments with protection information
1385  * @fw_prot_opts: Protection options to be passed to firmware
1386  */
1387 static inline int
1388 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1389     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1390 {
1391         struct dsd64            *cur_dsd;
1392         __be32                  *fcp_dl;
1393         scsi_qla_host_t         *vha;
1394         struct scsi_cmnd        *cmd;
1395         uint32_t                total_bytes = 0;
1396         uint32_t                data_bytes;
1397         uint32_t                dif_bytes;
1398         uint8_t                 bundling = 1;
1399         uint16_t                blk_size;
1400         struct crc_context      *crc_ctx_pkt = NULL;
1401         struct qla_hw_data      *ha;
1402         uint8_t                 additional_fcpcdb_len;
1403         uint16_t                fcp_cmnd_len;
1404         struct fcp_cmnd         *fcp_cmnd;
1405         dma_addr_t              crc_ctx_dma;
1406
1407         cmd = GET_CMD_SP(sp);
1408
1409         /* Update entry type to indicate Command Type CRC_2 IOCB */
1410         put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
1411
1412         vha = sp->vha;
1413         ha = vha->hw;
1414
1415         /* No data transfer */
1416         data_bytes = scsi_bufflen(cmd);
1417         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1418                 cmd_pkt->byte_count = cpu_to_le32(0);
1419                 return QLA_SUCCESS;
1420         }
1421
1422         cmd_pkt->vp_index = sp->vha->vp_idx;
1423
1424         /* Set transfer direction */
1425         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1426                 cmd_pkt->control_flags =
1427                     cpu_to_le16(CF_WRITE_DATA);
1428         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1429                 cmd_pkt->control_flags =
1430                     cpu_to_le16(CF_READ_DATA);
1431         }
1432
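	/*
	 * Bundling (separate data and protection DSD lists interleaved by
	 * the firmware) is only needed when both host memory and the wire
	 * carry protection data, i.e. the PASS operations; for the
	 * insert/strip operations below it stays off.
	 */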
1433         if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1434             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1435             (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1436             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1437                 bundling = 0;
1438
1439         /* Allocate CRC context from global pool */
1440         crc_ctx_pkt = sp->u.scmd.crc_ctx =
1441             dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1442
1443         if (!crc_ctx_pkt)
1444                 goto crc_queuing_error;
1445
1446         crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1447
1448         sp->flags |= SRB_CRC_CTX_DMA_VALID;
1449
1450         /* Set handle */
1451         crc_ctx_pkt->handle = cmd_pkt->handle;
1452
1453         INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1454
1455         qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1456             &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1457
1458         put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
1459         cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
1460
1461         /* Determine SCSI command length -- align to 4 byte boundary */
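	/*
	 * FCP_CMND layout: 12-byte fixed header (8-byte LUN plus task
	 * fields), the CDB (16 bytes plus any additional CDB bytes, which
	 * must pad out to a multiple of 4), then the 4-byte FCP_DL.
	 */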
1462         if (cmd->cmd_len > 16) {
1463                 additional_fcpcdb_len = cmd->cmd_len - 16;
1464                 if ((cmd->cmd_len % 4) != 0) {
			/* SCSI CDBs longer than 16 bytes must be a multiple of 4 */
1466                         goto crc_queuing_error;
1467                 }
1468                 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1469         } else {
1470                 additional_fcpcdb_len = 0;
1471                 fcp_cmnd_len = 12 + 16 + 4;
1472         }
1473
1474         fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1475
1476         fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
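	/*
	 * Bit 0 of this byte is the FCP WRDATA flag, bit 1 RDDATA; bits 7-2
	 * hold the additional CDB length in 4-byte words, which is
	 * numerically the same as the byte count stored above.
	 */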
1477         if (cmd->sc_data_direction == DMA_TO_DEVICE)
1478                 fcp_cmnd->additional_cdb_len |= 1;
1479         else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1480                 fcp_cmnd->additional_cdb_len |= 2;
1481
1482         int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1483         memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1484         cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1485         put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
1486                            &cmd_pkt->fcp_cmnd_dseg_address);
1487         fcp_cmnd->task_management = 0;
1488         fcp_cmnd->task_attribute = TSK_SIMPLE;
1489
1490         cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1491
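	/*
	 * Each logical block carries an 8-byte T10 PI tuple (2-byte guard,
	 * 2-byte app tag, 4-byte ref tag), e.g. a 64 KiB transfer on
	 * 512-byte sectors adds (65536 / 512) * 8 = 1024 bytes of DIF.
	 */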
	/* Compute dif len and adjust data len to include protection */
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;
1496
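	/*
	 * total_bytes is what crosses the wire (FCP_DL); data_bytes is what
	 * is DMA'd to or from host memory.  The two differ whenever the HBA
	 * inserts or strips protection data on one side only.
	 */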
1497         switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1498         case SCSI_PROT_READ_INSERT:
1499         case SCSI_PROT_WRITE_STRIP:
1500                 total_bytes = data_bytes;
1501                 data_bytes += dif_bytes;
1502                 break;
1503
1504         case SCSI_PROT_READ_STRIP:
1505         case SCSI_PROT_WRITE_INSERT:
1506         case SCSI_PROT_READ_PASS:
1507         case SCSI_PROT_WRITE_PASS:
1508                 total_bytes = data_bytes + dif_bytes;
1509                 break;
1510         default:
1511                 BUG();
1512         }
1513
	if (!qla2x00_hba_err_chk_enabled(sp)) {
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	} else if (IS_PI_UNINIT_CAPABLE(ha)) {
		/* HBA error checking enabled */
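		/*
		 * BIT_10/BIT_11 ask the firmware to tolerate uninitialized
		 * (all-0xFF) protection tags for DIF Type 1/2 vs. Type 3;
		 * the exact semantics are firmware defined.
		 */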
1518                 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1519                     || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1520                         SCSI_PROT_DIF_TYPE2))
1521                         fw_prot_opts |= BIT_10;
1522                 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1523                     SCSI_PROT_DIF_TYPE3)
1524                         fw_prot_opts |= BIT_11;
1525         }
1526
1527         if (!bundling) {
1528                 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
1529         } else {
		/*
		 * Configure bundling so the firmware can fetch the
		 * interleaved protection data with separate PCI accesses.
		 */
1534                 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1535                 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1536                 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1537                                                         tot_prot_dsds);
1538                 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
1539         }
1540
1541         /* Finish the common fields of CRC pkt */
1542         crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1543         crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1544         crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1545         crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1546         /* Fibre channel byte count */
1547         cmd_pkt->byte_count = cpu_to_le32(total_bytes);
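	/* FCP_DL immediately follows the CDB and is big-endian on the wire. */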
1548         fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1549             additional_fcpcdb_len);
1550         *fcp_dl = htonl(total_bytes);
1551
1552         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1553                 cmd_pkt->byte_count = cpu_to_le32(0);
1554                 return QLA_SUCCESS;
1555         }
	/* Walk the data segments */
1558         cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1559
1560         if (!bundling && tot_prot_dsds) {
1561                 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1562                         cur_dsd, tot_dsds, NULL))
1563                         goto crc_queuing_error;
1564         } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1565                         (tot_dsds - tot_prot_dsds), NULL))
1566                 goto crc_queuing_error;
1567
1568         if (bundling && tot_prot_dsds) {
1569                 /* Walks dif segments */
1570                 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1571                 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
1572                 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1573                                 tot_prot_dsds, NULL))
1574                         goto crc_queuing_error;
1575         }
1576         return QLA_SUCCESS;
1577
1578 crc_queuing_error:
1579         /* Cleanup will be performed by the caller */
1580
1581         return QLA_FUNCTION_FAILED;
1582 }
1583
1584 /**
1585  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1586  * @sp: command to send to the ISP
1587  *
1588  * Returns non-zero if a failure occurred, else zero.
1589  */
1590 int
1591 qla24xx_start_scsi(srb_t *sp)
1592 {
1593         int             nseg;
1594         unsigned long   flags;
1595         uint32_t        *clr_ptr;
1596         uint32_t        handle;
1597         struct cmd_type_7 *cmd_pkt;
1598         uint16_t        cnt;
1599         uint16_t        req_cnt;
1600         uint16_t        tot_dsds;
1601         struct req_que *req = NULL;
1602         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1603         struct scsi_qla_host *vha = sp->vha;
1604         struct qla_hw_data *ha = vha->hw;
1605
1606         /* Setup device pointers. */
1607         req = vha->req;
1608
1609         /* So we know we haven't pci_map'ed anything yet */
1610         tot_dsds = 0;
1611
1612         /* Send marker if required */
1613         if (vha->marker_needed != 0) {
1614                 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1615                     QLA_SUCCESS)
1616                         return QLA_FUNCTION_FAILED;
1617                 vha->marker_needed = 0;
1618         }
1619
1620         /* Acquire ring specific lock */
1621         spin_lock_irqsave(&ha->hardware_lock, flags);
1622
1623         handle = qla2xxx_get_next_handle(req);
1624         if (handle == 0)
1625                 goto queuing_error;
1626
1627         /* Map the sg table so we have an accurate count of sg entries needed */
1628         if (scsi_sg_count(cmd)) {
1629                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1630                     scsi_sg_count(cmd), cmd->sc_data_direction);
1631                 if (unlikely(!nseg))
1632                         goto queuing_error;
1633         } else
1634                 nseg = 0;
1635
1636         tot_dsds = nseg;
1637         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
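	/*
	 * Recompute the free request-queue entries from the consumer index,
	 * allowing for ring wrap; two entries stay reserved so a full ring
	 * is never mistaken for an empty one.
	 */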
1638         if (req->cnt < (req_cnt + 2)) {
1639                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1640                     rd_reg_dword_relaxed(req->req_q_out);
1641                 if (req->ring_index < cnt)
1642                         req->cnt = cnt - req->ring_index;
1643                 else
1644                         req->cnt = req->length -
1645                                 (req->ring_index - cnt);
1646                 if (req->cnt < (req_cnt + 2))
1647                         goto queuing_error;
1648         }
1649
1650         /* Build command packet. */
1651         req->current_outstanding_cmd = handle;
1652         req->outstanding_cmds[handle] = sp;
1653         sp->handle = handle;
1654         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1655         req->cnt -= req_cnt;
1656
1657         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1658         cmd_pkt->handle = make_handle(req->id, handle);
1659
1660         /* Zero out remaining portion of packet. */
1661         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
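	/* The first 8 bytes (entry header and handle) are already set. */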
1662         clr_ptr = (uint32_t *)cmd_pkt + 2;
1663         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1664         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1665
	/* Set NPORT-ID and LUN number */
1667         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1668         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1669         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1670         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1671         cmd_pkt->vp_index = sp->vha->vp_idx;
1672
1673         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1674         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1675
1676         cmd_pkt->task = TSK_SIMPLE;
1677
1678         /* Load SCSI command packet. */
1679         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1680         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1681
1682         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1683
1684         /* Build IOCB segments */
1685         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1686
	/* Set IOCB entry count. */
1688         cmd_pkt->entry_count = (uint8_t)req_cnt;
1689         wmb();
1690         /* Adjust ring index. */
1691         req->ring_index++;
1692         if (req->ring_index == req->length) {
1693                 req->ring_index = 0;
1694                 req->ring_ptr = req->ring;
1695         } else
1696                 req->ring_ptr++;
1697
1698         sp->flags |= SRB_DMA_VALID;
1699
1700         /* Set chip new ring index. */
1701         wrt_reg_dword(req->req_q_in, req->ring_index);
1702
1703         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1704         return QLA_SUCCESS;
1705
1706 queuing_error:
1707         if (tot_dsds)
1708                 scsi_dma_unmap(cmd);
1709
1710         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1711
1712         return QLA_FUNCTION_FAILED;
1713 }
1714
1715 /**
1716  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1717  * @sp: command to send to the ISP
1718  *
1719  * Returns non-zero if a failure occurred, else zero.
1720  */
1721 int
1722 qla24xx_dif_start_scsi(srb_t *sp)
1723 {
1724         int                     nseg;
1725         unsigned long           flags;
1726         uint32_t                *clr_ptr;
1727         uint32_t                handle;
1728         uint16_t                cnt;
1729         uint16_t                req_cnt = 0;
1730         uint16_t                tot_dsds;
1731         uint16_t                tot_prot_dsds;
1732         uint16_t                fw_prot_opts = 0;
1733         struct req_que          *req = NULL;
1734         struct rsp_que          *rsp = NULL;
1735         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1736         struct scsi_qla_host    *vha = sp->vha;
1737         struct qla_hw_data      *ha = vha->hw;
1738         struct cmd_type_crc_2   *cmd_pkt;
1739         uint32_t                status = 0;
1740
1741 #define QDSS_GOT_Q_SPACE        BIT_0
1742
	/* Only process protection I/O or CDBs longer than 16 bytes in this routine */
1744         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1745                 if (cmd->cmd_len <= 16)
1746                         return qla24xx_start_scsi(sp);
1747         }
1748
1749         /* Setup device pointers. */
1750         req = vha->req;
1751         rsp = req->rsp;
1752
1753         /* So we know we haven't pci_map'ed anything yet */
1754         tot_dsds = 0;
1755
1756         /* Send marker if required */
1757         if (vha->marker_needed != 0) {
1758                 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1759                     QLA_SUCCESS)
1760                         return QLA_FUNCTION_FAILED;
1761                 vha->marker_needed = 0;
1762         }
1763
1764         /* Acquire ring specific lock */
1765         spin_lock_irqsave(&ha->hardware_lock, flags);
1766
1767         handle = qla2xxx_get_next_handle(req);
1768         if (handle == 0)
1769                 goto queuing_error;
1770
1771         /* Compute number of required data segments */
1772         /* Map the sg table so we have an accurate count of sg entries needed */
1773         if (scsi_sg_count(cmd)) {
1774                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1775                     scsi_sg_count(cmd), cmd->sc_data_direction);
1776                 if (unlikely(!nseg))
1777                         goto queuing_error;
1778                 else
1779                         sp->flags |= SRB_DMA_VALID;
1780
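		/*
		 * The firmware handles insert/strip one logical block at a
		 * time, so data segments that span a block boundary must be
		 * split; re-count the DSDs by walking the sg list in
		 * block-sized pieces.
		 */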
1781                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1782                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1783                         struct qla2_sgx sgx;
1784                         uint32_t        partial;
1785
1786                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1787                         sgx.tot_bytes = scsi_bufflen(cmd);
1788                         sgx.cur_sg = scsi_sglist(cmd);
1789                         sgx.sp = sp;
1790
1791                         nseg = 0;
1792                         while (qla24xx_get_one_block_sg(
1793                             cmd->device->sector_size, &sgx, &partial))
1794                                 nseg++;
1795                 }
1796         } else
1797                 nseg = 0;
1798
1799         /* number of required data segments */
1800         tot_dsds = nseg;
1801
1802         /* Compute number of required protection segments */
1803         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1804                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1805                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1806                 if (unlikely(!nseg))
1807                         goto queuing_error;
1808                 else
1809                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1810
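		/*
		 * For insert/strip the HBA generates or consumes the
		 * protection stream itself, one 8-byte tuple per block, so
		 * size it from the block count instead of the protection
		 * sg entries.
		 */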
1811                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1812                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1813                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1814                 }
1815         } else {
1816                 nseg = 0;
1817         }
1818
1819         req_cnt = 1;
1820         /* Total Data and protection sg segment(s) */
1821         tot_prot_dsds = nseg;
1822         tot_dsds += nseg;
1823         if (req->cnt < (req_cnt + 2)) {
1824                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1825                     rd_reg_dword_relaxed(req->req_q_out);
1826                 if (req->ring_index < cnt)
1827                         req->cnt = cnt - req->ring_index;
1828                 else
1829                         req->cnt = req->length -
1830                                 (req->ring_index - cnt);
1831                 if (req->cnt < (req_cnt + 2))
1832                         goto queuing_error;
1833         }
1834
1835         status |= QDSS_GOT_Q_SPACE;
1836
1837         /* Build header part of command packet (excluding the OPCODE). */
1838         req->current_outstanding_cmd = handle;
1839         req->outstanding_cmds[handle] = sp;
1840         sp->handle = handle;
1841         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1842         req->cnt -= req_cnt;
1843
1844         /* Fill-in common area */
1845         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1846         cmd_pkt->handle = make_handle(req->id, handle);
1847
1848         clr_ptr = (uint32_t *)cmd_pkt + 2;
1849         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1850
	/* Set NPORT-ID and LUN number */
1852         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1853         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1854         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1855         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1856
1857         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1858         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1859
1860         /* Total Data and protection segment(s) */
1861         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1862
1863         /* Build IOCB segments and adjust for data protection segments */
1864         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1865             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1866                 QLA_SUCCESS)
1867                 goto queuing_error;
1868
1869         cmd_pkt->entry_count = (uint8_t)req_cnt;
1870         /* Specify response queue number where completion should happen */
1871         cmd_pkt->entry_status = (uint8_t) rsp->id;
1872         cmd_pkt->timeout = cpu_to_le16(0);
1873         wmb();
1874
1875         /* Adjust ring index. */
1876         req->ring_index++;
1877         if (req->ring_index == req->length) {
1878                 req->ring_index = 0;
1879                 req->ring_ptr = req->ring;
1880         } else
1881                 req->ring_ptr++;
1882
1883         /* Set chip new ring index. */
1884         wrt_reg_dword(req->req_q_in, req->ring_index);
1885
1886         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1887
1888         return QLA_SUCCESS;
1889
1890 queuing_error:
1891         if (status & QDSS_GOT_Q_SPACE) {
1892                 req->outstanding_cmds[handle] = NULL;
1893                 req->cnt += req_cnt;
1894         }
1895         /* Cleanup will be performed by the caller (queuecommand) */
1896
1897         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1898         return QLA_FUNCTION_FAILED;
1899 }
1900
1901 /**
1902  * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1903  * @sp: command to send to the ISP
1904  *
1905  * Returns non-zero if a failure occurred, else zero.
1906  */
1907 static int
1908 qla2xxx_start_scsi_mq(srb_t *sp)
1909 {
1910         int             nseg;
1911         unsigned long   flags;
1912         uint32_t        *clr_ptr;
1913         uint32_t        handle;
1914         struct cmd_type_7 *cmd_pkt;
1915         uint16_t        cnt;
1916         uint16_t        req_cnt;
1917         uint16_t        tot_dsds;
1918         struct req_que *req = NULL;
1919         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1920         struct scsi_qla_host *vha = sp->fcport->vha;
1921         struct qla_hw_data *ha = vha->hw;
1922         struct qla_qpair *qpair = sp->qpair;
1923
1924         /* Acquire qpair specific lock */
1925         spin_lock_irqsave(&qpair->qp_lock, flags);
1926
1927         /* Setup qpair pointers */
1928         req = qpair->req;
1929
1930         /* So we know we haven't pci_map'ed anything yet */
1931         tot_dsds = 0;
1932
1933         /* Send marker if required */
1934         if (vha->marker_needed != 0) {
1935                 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1936                     QLA_SUCCESS) {
1937                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
1938                         return QLA_FUNCTION_FAILED;
1939                 }
1940                 vha->marker_needed = 0;
1941         }
1942
1943         handle = qla2xxx_get_next_handle(req);
1944         if (handle == 0)
1945                 goto queuing_error;
1946
1947         /* Map the sg table so we have an accurate count of sg entries needed */
1948         if (scsi_sg_count(cmd)) {
1949                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1950                     scsi_sg_count(cmd), cmd->sc_data_direction);
1951                 if (unlikely(!nseg))
1952                         goto queuing_error;
1953         } else
1954                 nseg = 0;
1955
1956         tot_dsds = nseg;
1957         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1958         if (req->cnt < (req_cnt + 2)) {
1959                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1960                     rd_reg_dword_relaxed(req->req_q_out);
1961                 if (req->ring_index < cnt)
1962                         req->cnt = cnt - req->ring_index;
1963                 else
1964                         req->cnt = req->length -
1965                                 (req->ring_index - cnt);
1966                 if (req->cnt < (req_cnt + 2))
1967                         goto queuing_error;
1968         }
1969
1970         /* Build command packet. */
1971         req->current_outstanding_cmd = handle;
1972         req->outstanding_cmds[handle] = sp;
1973         sp->handle = handle;
1974         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1975         req->cnt -= req_cnt;
1976
1977         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1978         cmd_pkt->handle = make_handle(req->id, handle);
1979
1980         /* Zero out remaining portion of packet. */
1981         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1982         clr_ptr = (uint32_t *)cmd_pkt + 2;
1983         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1984         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1985
	/* Set NPORT-ID and LUN number */
1987         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1988         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1989         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1990         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1991         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1992
1993         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1994         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1995
1996         cmd_pkt->task = TSK_SIMPLE;
1997
1998         /* Load SCSI command packet. */
1999         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2000         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2001
2002         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2003
2004         /* Build IOCB segments */
2005         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2006
	/* Set IOCB entry count. */
2008         cmd_pkt->entry_count = (uint8_t)req_cnt;
2009         wmb();
2010         /* Adjust ring index. */
2011         req->ring_index++;
2012         if (req->ring_index == req->length) {
2013                 req->ring_index = 0;
2014                 req->ring_ptr = req->ring;
2015         } else
2016                 req->ring_ptr++;
2017
2018         sp->flags |= SRB_DMA_VALID;
2019
2020         /* Set chip new ring index. */
2021         wrt_reg_dword(req->req_q_in, req->ring_index);
2022
2023         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2024         return QLA_SUCCESS;
2025
2026 queuing_error:
2027         if (tot_dsds)
2028                 scsi_dma_unmap(cmd);
2029
2030         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2031
2032         return QLA_FUNCTION_FAILED;
2033 }
2034
2036 /**
2037  * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2038  * @sp: command to send to the ISP
2039  *
2040  * Returns non-zero if a failure occurred, else zero.
2041  */
2042 int
2043 qla2xxx_dif_start_scsi_mq(srb_t *sp)
2044 {
2045         int                     nseg;
2046         unsigned long           flags;
2047         uint32_t                *clr_ptr;
2048         uint32_t                handle;
2049         uint16_t                cnt;
2050         uint16_t                req_cnt = 0;
2051         uint16_t                tot_dsds;
2052         uint16_t                tot_prot_dsds;
2053         uint16_t                fw_prot_opts = 0;
2054         struct req_que          *req = NULL;
2055         struct rsp_que          *rsp = NULL;
2056         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
2057         struct scsi_qla_host    *vha = sp->fcport->vha;
2058         struct qla_hw_data      *ha = vha->hw;
2059         struct cmd_type_crc_2   *cmd_pkt;
2060         uint32_t                status = 0;
2061         struct qla_qpair        *qpair = sp->qpair;
2062
2063 #define QDSS_GOT_Q_SPACE        BIT_0
2064
2065         /* Check for host side state */
2066         if (!qpair->online) {
2067                 cmd->result = DID_NO_CONNECT << 16;
2068                 return QLA_INTERFACE_ERROR;
2069         }
2070
2071         if (!qpair->difdix_supported &&
2072                 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2073                 cmd->result = DID_NO_CONNECT << 16;
2074                 return QLA_INTERFACE_ERROR;
2075         }
2076
	/* Only process protection I/O or CDBs longer than 16 bytes in this routine */
2078         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2079                 if (cmd->cmd_len <= 16)
2080                         return qla2xxx_start_scsi_mq(sp);
2081         }
2082
2083         spin_lock_irqsave(&qpair->qp_lock, flags);
2084
2085         /* Setup qpair pointers */
2086         rsp = qpair->rsp;
2087         req = qpair->req;
2088
2089         /* So we know we haven't pci_map'ed anything yet */
2090         tot_dsds = 0;
2091
2092         /* Send marker if required */
2093         if (vha->marker_needed != 0) {
2094                 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2095                     QLA_SUCCESS) {
2096                         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2097                         return QLA_FUNCTION_FAILED;
2098                 }
2099                 vha->marker_needed = 0;
2100         }
2101
2102         handle = qla2xxx_get_next_handle(req);
2103         if (handle == 0)
2104                 goto queuing_error;
2105
2106         /* Compute number of required data segments */
2107         /* Map the sg table so we have an accurate count of sg entries needed */
2108         if (scsi_sg_count(cmd)) {
2109                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2110                     scsi_sg_count(cmd), cmd->sc_data_direction);
2111                 if (unlikely(!nseg))
2112                         goto queuing_error;
2113                 else
2114                         sp->flags |= SRB_DMA_VALID;
2115
2116                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2117                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2118                         struct qla2_sgx sgx;
2119                         uint32_t        partial;
2120
2121                         memset(&sgx, 0, sizeof(struct qla2_sgx));
2122                         sgx.tot_bytes = scsi_bufflen(cmd);
2123                         sgx.cur_sg = scsi_sglist(cmd);
2124                         sgx.sp = sp;
2125
2126                         nseg = 0;
2127                         while (qla24xx_get_one_block_sg(
2128                             cmd->device->sector_size, &sgx, &partial))
2129                                 nseg++;
2130                 }
2131         } else
2132                 nseg = 0;
2133
2134         /* number of required data segments */
2135         tot_dsds = nseg;
2136
2137         /* Compute number of required protection segments */
2138         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2139                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2140                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2141                 if (unlikely(!nseg))
2142                         goto queuing_error;
2143                 else
2144                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
2145
2146                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2147                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2148                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2149                 }
2150         } else {
2151                 nseg = 0;
2152         }
2153
2154         req_cnt = 1;
2155         /* Total Data and protection sg segment(s) */
2156         tot_prot_dsds = nseg;
2157         tot_dsds += nseg;
2158         if (req->cnt < (req_cnt + 2)) {
2159                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2160                     rd_reg_dword_relaxed(req->req_q_out);
2161                 if (req->ring_index < cnt)
2162                         req->cnt = cnt - req->ring_index;
2163                 else
2164                         req->cnt = req->length -
2165                                 (req->ring_index - cnt);
2166                 if (req->cnt < (req_cnt + 2))
2167                         goto queuing_error;
2168         }
2169
2170         status |= QDSS_GOT_Q_SPACE;
2171
2172         /* Build header part of command packet (excluding the OPCODE). */
2173         req->current_outstanding_cmd = handle;
2174         req->outstanding_cmds[handle] = sp;
2175         sp->handle = handle;
2176         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2177         req->cnt -= req_cnt;
2178
2179         /* Fill-in common area */
2180         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2181         cmd_pkt->handle = make_handle(req->id, handle);
2182
2183         clr_ptr = (uint32_t *)cmd_pkt + 2;
2184         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2185
	/* Set NPORT-ID and LUN number */
2187         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2188         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2189         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2190         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2191
2192         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2193         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2194
2195         /* Total Data and protection segment(s) */
2196         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2197
2198         /* Build IOCB segments and adjust for data protection segments */
2199         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2200             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2201                 QLA_SUCCESS)
2202                 goto queuing_error;
2203
2204         cmd_pkt->entry_count = (uint8_t)req_cnt;
2205         cmd_pkt->timeout = cpu_to_le16(0);
2206         wmb();
2207
2208         /* Adjust ring index. */
2209         req->ring_index++;
2210         if (req->ring_index == req->length) {
2211                 req->ring_index = 0;
2212                 req->ring_ptr = req->ring;
2213         } else
2214                 req->ring_ptr++;
2215
2216         /* Set chip new ring index. */
2217         wrt_reg_dword(req->req_q_in, req->ring_index);
2218
2219         /* Manage unprocessed RIO/ZIO commands in response queue. */
2220         if (vha->flags.process_response_queue &&
2221             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2222                 qla24xx_process_response_queue(vha, rsp);
2223
2224         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2225
2226         return QLA_SUCCESS;
2227
2228 queuing_error:
2229         if (status & QDSS_GOT_Q_SPACE) {
2230                 req->outstanding_cmds[handle] = NULL;
2231                 req->cnt += req_cnt;
2232         }
2233         /* Cleanup will be performed by the caller (queuecommand) */
2234
2235         spin_unlock_irqrestore(&qpair->qp_lock, flags);
2236         return QLA_FUNCTION_FAILED;
2237 }
2238
2239 /* Generic Control-SRB manipulation functions. */
2240
2241 /* hardware_lock assumed to be held. */
2242
2243 void *
2244 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2245 {
2246         scsi_qla_host_t *vha = qpair->vha;
2247         struct qla_hw_data *ha = vha->hw;
2248         struct req_que *req = qpair->req;
2249         device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2250         uint32_t handle;
2251         request_t *pkt;
2252         uint16_t cnt, req_cnt;
2253
2254         pkt = NULL;
2255         req_cnt = 1;
2256         handle = 0;
2257
2258         if (sp && (sp->type != SRB_SCSI_CMD)) {
2259                 /* Adjust entry-counts as needed. */
2260                 req_cnt = sp->iocbs;
2261         }
2262
2263         /* Check for room on request queue. */
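	/*
	 * The consumer index lives in a chip-specific register (or a shadow
	 * copy in host memory); pick the right source per adapter family.
	 */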
2264         if (req->cnt < req_cnt + 2) {
2265                 if (qpair->use_shadow_reg)
2266                         cnt = *req->out_ptr;
2267                 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2268                     IS_QLA28XX(ha))
2269                         cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
2270                 else if (IS_P3P_TYPE(ha))
2271                         cnt = rd_reg_dword(reg->isp82.req_q_out);
2272                 else if (IS_FWI2_CAPABLE(ha))
2273                         cnt = rd_reg_dword(&reg->isp24.req_q_out);
2274                 else if (IS_QLAFX00(ha))
2275                         cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
2276                 else
2277                         cnt = qla2x00_debounce_register(
2278                             ISP_REQ_Q_OUT(ha, &reg->isp));
2279
2280                 if  (req->ring_index < cnt)
2281                         req->cnt = cnt - req->ring_index;
2282                 else
2283                         req->cnt = req->length -
2284                             (req->ring_index - cnt);
2285         }
2286         if (req->cnt < req_cnt + 2)
2287                 goto queuing_error;
2288
2289         if (sp) {
2290                 handle = qla2xxx_get_next_handle(req);
2291                 if (handle == 0) {
2292                         ql_log(ql_log_warn, vha, 0x700b,
2293                             "No room on outstanding cmd array.\n");
2294                         goto queuing_error;
2295                 }
2296
2297                 /* Prep command array. */
2298                 req->current_outstanding_cmd = handle;
2299                 req->outstanding_cmds[handle] = sp;
2300                 sp->handle = handle;
2301         }
2302
2303         /* Prep packet */
2304         req->cnt -= req_cnt;
2305         pkt = req->ring_ptr;
2306         memset(pkt, 0, REQUEST_ENTRY_SIZE);
2307         if (IS_QLAFX00(ha)) {
2308                 wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
2309                 wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
2310         } else {
2311                 pkt->entry_count = req_cnt;
2312                 pkt->handle = handle;
2313         }
2314
2315         return pkt;
2316
2317 queuing_error:
2318         qpair->tgt_counters.num_alloc_iocb_failed++;
2319         return pkt;
2320 }
2321
2322 void *
2323 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2324 {
2325         scsi_qla_host_t *vha = qpair->vha;
2326
2327         if (qla2x00_reset_active(vha))
2328                 return NULL;
2329
2330         return __qla2x00_alloc_iocbs(qpair, sp);
2331 }
2332
2333 void *
2334 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2335 {
2336         return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2337 }
2338
2339 static void
2340 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2341 {
2342         struct srb_iocb *lio = &sp->u.iocb_cmd;
2343
2344         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2345         logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2346         if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2347                 logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
2348                 if (sp->vha->flags.nvme_first_burst)
2349                         logio->io_parameter[0] =
2350                                 cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
2351         }
2352
2353         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2354         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2355         logio->port_id[1] = sp->fcport->d_id.b.area;
2356         logio->port_id[2] = sp->fcport->d_id.b.domain;
2357         logio->vp_index = sp->vha->vp_idx;
2358 }
2359
2360 static void
2361 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2362 {
2363         struct srb_iocb *lio = &sp->u.iocb_cmd;
2364
2365         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2367
2368         if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2369                 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2370         } else {
2371                 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2372                 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2373                         logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2374                 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2375                         logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2376         }
2377         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2378         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2379         logio->port_id[1] = sp->fcport->d_id.b.area;
2380         logio->port_id[2] = sp->fcport->d_id.b.domain;
2381         logio->vp_index = sp->vha->vp_idx;
2382 }
2383
2384 static void
2385 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2386 {
2387         struct qla_hw_data *ha = sp->vha->hw;
2388         struct srb_iocb *lio = &sp->u.iocb_cmd;
2389         uint16_t opts;
2390
2391         mbx->entry_type = MBX_IOCB_TYPE;
2392         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2393         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
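	/*
	 * Login option bits: BIT_0 = conditional PLOGI, BIT_1 = skip PRLI.
	 * With extended IDs they travel in mb10; otherwise they share mb1
	 * with the loop_id in the upper byte.
	 */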
2394         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2395         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2396         if (HAS_EXTENDED_IDS(ha)) {
2397                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2398                 mbx->mb10 = cpu_to_le16(opts);
2399         } else {
2400                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2401         }
2402         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2403         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2404             sp->fcport->d_id.b.al_pa);
2405         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2406 }
2407
2408 static void
2409 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2410 {
	u16 control_flags = LCF_COMMAND_LOGO;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2413
2414         if (sp->fcport->explicit_logout) {
2415                 control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
2416         } else {
2417                 control_flags |= LCF_IMPL_LOGO;
2418
2419                 if (!sp->fcport->keep_nport_handle)
2420                         control_flags |= LCF_FREE_NPORT;
2421         }
2422
2423         logio->control_flags = cpu_to_le16(control_flags);
2424         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2425         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2426         logio->port_id[1] = sp->fcport->d_id.b.area;
2427         logio->port_id[2] = sp->fcport->d_id.b.domain;
2428         logio->vp_index = sp->vha->vp_idx;
2429 }
2430
2431 static void
2432 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2433 {
2434         struct qla_hw_data *ha = sp->vha->hw;
2435
2436         mbx->entry_type = MBX_IOCB_TYPE;
2437         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2438         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2439         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2440             cpu_to_le16(sp->fcport->loop_id) :
2441             cpu_to_le16(sp->fcport->loop_id << 8);
2442         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2443         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2444             sp->fcport->d_id.b.al_pa);
2445         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
	/* Implicit: mbx->mb10 = 0. */
2447 }
2448
2449 static void
2450 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2451 {
2452         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2453         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2454         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2455         logio->vp_index = sp->vha->vp_idx;
2456 }
2457
2458 static void
2459 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2460 {
2461         struct qla_hw_data *ha = sp->vha->hw;
2462
2463         mbx->entry_type = MBX_IOCB_TYPE;
2464         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2465         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2466         if (HAS_EXTENDED_IDS(ha)) {
2467                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2468                 mbx->mb10 = cpu_to_le16(BIT_0);
2469         } else {
2470                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2471         }
2472         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2473         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2474         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2475         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2476         mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2477 }
2478
2479 static void
2480 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2481 {
2482         uint32_t flags;
2483         uint64_t lun;
2484         struct fc_port *fcport = sp->fcport;
2485         scsi_qla_host_t *vha = fcport->vha;
2486         struct qla_hw_data *ha = vha->hw;
2487         struct srb_iocb *iocb = &sp->u.iocb_cmd;
2488         struct req_que *req = vha->req;
2489
2490         flags = iocb->u.tmf.flags;
2491         lun = iocb->u.tmf.lun;
2492
2493         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2494         tsk->entry_count = 1;
2495         tsk->handle = make_handle(req->id, tsk->handle);
2496         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
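	/*
	 * The firmware timeout field is in seconds; r_a_tov is kept in
	 * 100 ms units, so this works out to twice R_A_TOV.
	 */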
2497         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2498         tsk->control_flags = cpu_to_le32(flags);
2499         tsk->port_id[0] = fcport->d_id.b.al_pa;
2500         tsk->port_id[1] = fcport->d_id.b.area;
2501         tsk->port_id[2] = fcport->d_id.b.domain;
2502         tsk->vp_index = fcport->vha->vp_idx;
2503
2504         if (flags == TCF_LUN_RESET) {
2505                 int_to_scsilun(lun, &tsk->lun);
2506                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2507                         sizeof(tsk->lun));
2508         }
2509 }
2510
2511 void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2512 {
2513         timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2514         sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2515         sp->free = qla2x00_sp_free;
2516         if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2517                 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2518         sp->start_timer = 1;
2519 }
2520
2521 static void qla2x00_els_dcmd_sp_free(srb_t *sp)
2522 {
2523         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2524
2525         kfree(sp->fcport);
2526
2527         if (elsio->u.els_logo.els_logo_pyld)
2528                 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2529                     elsio->u.els_logo.els_logo_pyld,
2530                     elsio->u.els_logo.els_logo_pyld_dma);
2531
2532         del_timer(&elsio->timer);
2533         qla2x00_rel_sp(sp);
2534 }
2535
2536 static void
2537 qla2x00_els_dcmd_iocb_timeout(void *data)
2538 {
2539         srb_t *sp = data;
2540         fc_port_t *fcport = sp->fcport;
2541         struct scsi_qla_host *vha = sp->vha;
2542         struct srb_iocb *lio = &sp->u.iocb_cmd;
2543         unsigned long flags = 0;
2544         int res, h;
2545
2546         ql_dbg(ql_dbg_io, vha, 0x3069,
2547             "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2548             sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2549             fcport->d_id.b.al_pa);
2550
2551         /* Abort the exchange */
2552         res = qla24xx_async_abort_cmd(sp, false);
2553         if (res) {
2554                 ql_dbg(ql_dbg_io, vha, 0x3070,
2555                     "mbx abort_command failed.\n");
2556                 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2557                 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2558                         if (sp->qpair->req->outstanding_cmds[h] == sp) {
2559                                 sp->qpair->req->outstanding_cmds[h] = NULL;
2560                                 break;
2561                         }
2562                 }
2563                 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2564                 complete(&lio->u.els_logo.comp);
2565         } else {
2566                 ql_dbg(ql_dbg_io, vha, 0x3071,
2567                     "mbx abort_command success.\n");
2568         }
2569 }
2570
2571 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
2572 {
2573         fc_port_t *fcport = sp->fcport;
2574         struct srb_iocb *lio = &sp->u.iocb_cmd;
2575         struct scsi_qla_host *vha = sp->vha;
2576
2577         ql_dbg(ql_dbg_io, vha, 0x3072,
2578             "%s hdl=%x, portid=%02x%02x%02x done\n",
2579             sp->name, sp->handle, fcport->d_id.b.domain,
2580             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2581
2582         complete(&lio->u.els_logo.comp);
2583 }
2584
2585 int
2586 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2587     port_id_t remote_did)
2588 {
2589         srb_t *sp;
2590         fc_port_t *fcport = NULL;
2591         struct srb_iocb *elsio = NULL;
2592         struct qla_hw_data *ha = vha->hw;
2593         struct els_logo_payload logo_pyld;
2594         int rval = QLA_SUCCESS;
2595
2596         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2597         if (!fcport) {
		ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
		return -ENOMEM;
2600         }
2601
2602         /* Alloc SRB structure */
2603         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2604         if (!sp) {
2605                 kfree(fcport);
		ql_log(ql_log_info, vha, 0x70e6,
		    "SRB allocation failed\n");
2608                 return -ENOMEM;
2609         }
2610
2611         elsio = &sp->u.iocb_cmd;
2612         fcport->loop_id = 0xFFFF;
2613         fcport->d_id.b.domain = remote_did.b.domain;
2614         fcport->d_id.b.area = remote_did.b.area;
2615         fcport->d_id.b.al_pa = remote_did.b.al_pa;
2616
	ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x\n",
2618             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2619
2620         sp->type = SRB_ELS_DCMD;
2621         sp->name = "ELS_DCMD";
2622         sp->fcport = fcport;
2623         elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2624         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2625         init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2626         sp->done = qla2x00_els_dcmd_sp_done;
2627         sp->free = qla2x00_els_dcmd_sp_free;
2628
2629         elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2630                             DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2631                             GFP_KERNEL);
2632
2633         if (!elsio->u.els_logo.els_logo_pyld) {
2634                 sp->free(sp);
2635                 return QLA_FUNCTION_FAILED;
2636         }
2637
2638         memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2639
2640         elsio->u.els_logo.els_cmd = els_opcode;
2641         logo_pyld.opcode = els_opcode;
2642         logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2643         logo_pyld.s_id[1] = vha->d_id.b.area;
2644         logo_pyld.s_id[2] = vha->d_id.b.domain;
2645         host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2646         memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2647
2648         memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2649             sizeof(struct els_logo_payload));
2650         ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
2651         ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
2652                        elsio->u.els_logo.els_logo_pyld,
2653                        sizeof(*elsio->u.els_logo.els_logo_pyld));
2654
2655         rval = qla2x00_start_sp(sp);
2656         if (rval != QLA_SUCCESS) {
2657                 sp->free(sp);
2658                 return QLA_FUNCTION_FAILED;
2659         }
2660
2661         ql_dbg(ql_dbg_io, vha, 0x3074,
2662             "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2663             sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2664             fcport->d_id.b.area, fcport->d_id.b.al_pa);
2665
2666         wait_for_completion(&elsio->u.els_logo.comp);
2667
2668         sp->free(sp);
2669         return rval;
2670 }
2671
2672 static void
2673 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2674 {
2675         scsi_qla_host_t *vha = sp->vha;
2676         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2677
2678         els_iocb->entry_type = ELS_IOCB_TYPE;
2679         els_iocb->entry_count = 1;
2680         els_iocb->sys_define = 0;
2681         els_iocb->entry_status = 0;
2682         els_iocb->handle = sp->handle;
2683         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2684         els_iocb->tx_dsd_count = cpu_to_le16(1);
2685         els_iocb->vp_index = vha->vp_idx;
2686         els_iocb->sof_type = EST_SOFI3;
2687         els_iocb->rx_dsd_count = 0;
2688         els_iocb->opcode = elsio->u.els_logo.els_cmd;
2689
2690         els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
2691         els_iocb->d_id[1] = sp->fcport->d_id.b.area;
2692         els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
	/* For S_ID the byte order differs from D_ID */
2694         els_iocb->s_id[1] = vha->d_id.b.al_pa;
2695         els_iocb->s_id[2] = vha->d_id.b.area;
2696         els_iocb->s_id[0] = vha->d_id.b.domain;
2697
2698         if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2699                 els_iocb->control_flags = 0;
2700                 els_iocb->tx_byte_count = els_iocb->tx_len =
2701                         cpu_to_le32(sizeof(struct els_plogi_payload));
2702                 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2703                                    &els_iocb->tx_address);
2704                 els_iocb->rx_dsd_count = cpu_to_le16(1);
2705                 els_iocb->rx_byte_count = els_iocb->rx_len =
2706                         cpu_to_le32(sizeof(struct els_plogi_payload));
2707                 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2708                                    &els_iocb->rx_address);
2709
2710                 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2711                     "PLOGI ELS IOCB:\n");
2712                 ql_dump_buffer(ql_log_info, vha, 0x0109,
2713                     (uint8_t *)els_iocb,
2714                     sizeof(*els_iocb));
2715         } else {
2716                 els_iocb->control_flags = cpu_to_le16(1 << 13);
2717                 els_iocb->tx_byte_count =
2718                         cpu_to_le32(sizeof(struct els_logo_payload));
2719                 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2720                                    &els_iocb->tx_address);
2721                 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2722
2723                 els_iocb->rx_byte_count = 0;
2724                 els_iocb->rx_address = 0;
2725                 els_iocb->rx_len = 0;
2726                 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
2727                        "LOGO ELS IOCB:");
2728                 ql_dump_buffer(ql_log_info, vha, 0x010b,
2729                                els_iocb,
2730                                sizeof(*els_iocb));
2731         }
2732
2733         sp->vha->qla_stats.control_requests++;
2734 }
2735
2736 static void
2737 qla2x00_els_dcmd2_iocb_timeout(void *data)
2738 {
2739         srb_t *sp = data;
2740         fc_port_t *fcport = sp->fcport;
2741         struct scsi_qla_host *vha = sp->vha;
2742         unsigned long flags = 0;
2743         int res, h;
2744
2745         ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2746             "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2747             sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2748
2749         /* Abort the exchange */
2750         res = qla24xx_async_abort_cmd(sp, false);
2751         ql_dbg(ql_dbg_io, vha, 0x3070,
2752             "mbx abort_command %s\n",
2753             (res == QLA_SUCCESS) ? "successful" : "failed");
2754         if (res) {
2755                 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2756                 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2757                         if (sp->qpair->req->outstanding_cmds[h] == sp) {
2758                                 sp->qpair->req->outstanding_cmds[h] = NULL;
2759                                 break;
2760                         }
2761                 }
2762                 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2763                 sp->done(sp, QLA_FUNCTION_TIMEOUT);
2764         }
2765 }
2766
2767 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
2768 {
2769         if (els_plogi->els_plogi_pyld)
2770                 dma_free_coherent(&vha->hw->pdev->dev,
2771                                   els_plogi->tx_size,
2772                                   els_plogi->els_plogi_pyld,
2773                                   els_plogi->els_plogi_pyld_dma);
2774
2775         if (els_plogi->els_resp_pyld)
2776                 dma_free_coherent(&vha->hw->pdev->dev,
2777                                   els_plogi->rx_size,
2778                                   els_plogi->els_resp_pyld,
2779                                   els_plogi->els_resp_pyld_dma);
2780 }
2781
2782 static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2783 {
2784         fc_port_t *fcport = sp->fcport;
2785         struct srb_iocb *lio = &sp->u.iocb_cmd;
2786         struct scsi_qla_host *vha = sp->vha;
2787         struct event_arg ea;
2788         struct qla_work_evt *e;
2789         struct fc_port *conflict_fcport;
2790         port_id_t cid;  /* conflict Nport id */
2791         const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
2792         u16 lid;
2793
2794         ql_dbg(ql_dbg_disc, vha, 0x3072,
2795             "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2796             sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2797
2798         fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2799         del_timer(&sp->u.iocb_cmd.timer);
2800
        if (sp->flags & SRB_WAKEUP_ON_COMP) {
                complete(&lio->u.els_plogi.comp);
        } else {
2804                 switch (le32_to_cpu(fw_status[0])) {
2805                 case CS_DATA_UNDERRUN:
2806                 case CS_COMPLETE:
2807                         memset(&ea, 0, sizeof(ea));
2808                         ea.fcport = fcport;
2809                         ea.rc = res;
2810                         qla_handle_els_plogi_done(vha, &ea);
2811                         break;
2812
2813                 case CS_IOCB_ERROR:
2814                         switch (le32_to_cpu(fw_status[1])) {
2815                         case LSC_SCODE_PORTID_USED:
2816                                 lid = le32_to_cpu(fw_status[2]) & 0xffff;
2817                                 qlt_find_sess_invalidate_other(vha,
2818                                     wwn_to_u64(fcport->port_name),
2819                                     fcport->d_id, lid, &conflict_fcport);
2820                                 if (conflict_fcport) {
2821                                         /*
2822                                          * Another fcport shares the same
2823                                          * loop_id & nport id; conflict
2824                                          * fcport needs to finish cleanup
2825                                          * before this fcport can proceed
2826                                          * to login.
2827                                          */
2828                                         conflict_fcport->conflict = fcport;
2829                                         fcport->login_pause = 1;
2830                                         ql_dbg(ql_dbg_disc, vha, 0x20ed,
2831                                             "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
2832                                             __func__, __LINE__,
2833                                             fcport->port_name,
2834                                             fcport->d_id.b24, lid);
2835                                 } else {
2836                                         ql_dbg(ql_dbg_disc, vha, 0x20ed,
2837                                             "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
2838                                             __func__, __LINE__,
2839                                             fcport->port_name,
2840                                             fcport->d_id.b24, lid);
2841                                         qla2x00_clear_loop_id(fcport);
2842                                         set_bit(lid, vha->hw->loop_id_map);
2843                                         fcport->loop_id = lid;
2844                                         fcport->keep_nport_handle = 0;
2845                                         qlt_schedule_sess_for_deletion(fcport);
2846                                 }
2847                                 break;
2848
2849                         case LSC_SCODE_NPORT_USED:
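                                /*
                                 * fw_status[2] carries the conflicting
                                 * N_Port ID: domain, area and al_pa in its
                                 * three low-order bytes.
                                 */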
2850                                 cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
2851                                         & 0xff;
2852                                 cid.b.area   = (le32_to_cpu(fw_status[2]) >>  8)
2853                                         & 0xff;
2854                                 cid.b.al_pa  = le32_to_cpu(fw_status[2]) & 0xff;
2855                                 cid.b.rsvd_1 = 0;
2856
2857                                 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2858                                     "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2859                                     __func__, __LINE__, fcport->port_name,
2860                                     fcport->loop_id, cid.b24);
2861                                 set_bit(fcport->loop_id,
2862                                     vha->hw->loop_id_map);
2863                                 fcport->loop_id = FC_NO_LOOP_ID;
2864                                 qla24xx_post_gnl_work(vha, fcport);
2865                                 break;
2866
2867                         case LSC_SCODE_NOXCB:
2868                                 vha->hw->exch_starvation++;
2869                                 if (vha->hw->exch_starvation > 5) {
2870                                         ql_log(ql_log_warn, vha, 0xd046,
2871                                             "Exchange starvation. Resetting RISC\n");
2872                                         vha->hw->exch_starvation = 0;
2873                                         set_bit(ISP_ABORT_NEEDED,
2874                                             &vha->dpc_flags);
2875                                         qla2xxx_wake_dpc(vha);
2876                                 }
2877                                 fallthrough;
2878                         default:
2879                                 ql_dbg(ql_dbg_disc, vha, 0x20eb,
2880                                     "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
2881                                     __func__, sp->fcport->port_name,
                                    le32_to_cpu(fw_status[0]),
                                    le32_to_cpu(fw_status[1]),
                                    le32_to_cpu(fw_status[2]));
2883
2884                                 fcport->flags &= ~FCF_ASYNC_SENT;
2885                                 qla2x00_set_fcport_disc_state(fcport,
2886                                     DSC_LOGIN_FAILED);
2887                                 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2888                                 break;
2889                         }
2890                         break;
2891
2892                 default:
2893                         ql_dbg(ql_dbg_disc, vha, 0x20eb,
2894                             "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
2895                             __func__, sp->fcport->port_name,
                            le32_to_cpu(fw_status[0]),
                            le32_to_cpu(fw_status[1]),
                            le32_to_cpu(fw_status[2]));
2897
2898                         sp->fcport->flags &= ~FCF_ASYNC_SENT;
2899                         qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
2900                         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2901                         break;
2902                 }
2903
2904                 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2905                 if (!e) {
2906                         struct srb_iocb *elsio = &sp->u.iocb_cmd;
2907
2908                         qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
2909                         sp->free(sp);
2910                         return;
2911                 }
2912                 e->u.iosb.sp = sp;
2913                 qla2x00_post_work(vha, e);
2914         }
2915 }
2916
2917 int
2918 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2919     fc_port_t *fcport, bool wait)
2920 {
2921         srb_t *sp;
2922         struct srb_iocb *elsio = NULL;
2923         struct qla_hw_data *ha = vha->hw;
2924         int rval = QLA_SUCCESS;
2925         void    *ptr, *resp_ptr;
2926
2927         /* Alloc SRB structure */
2928         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2929         if (!sp) {
                ql_log(ql_log_info, vha, 0x70e6,
                    "SRB allocation failed\n");
2932                 fcport->flags &= ~FCF_ASYNC_ACTIVE;
2933                 return -ENOMEM;
2934         }
2935
2936         fcport->flags |= FCF_ASYNC_SENT;
2937         qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
2938         elsio = &sp->u.iocb_cmd;
2939         ql_dbg(ql_dbg_io, vha, 0x3073,
2940             "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2941
2942         sp->type = SRB_ELS_DCMD;
2943         sp->name = "ELS_DCMD";
2944         sp->fcport = fcport;
2945
2946         elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2947         if (wait)
2948                 sp->flags = SRB_WAKEUP_ON_COMP;
2949
2950         qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
2951
2952         sp->done = qla2x00_els_dcmd2_sp_done;
2953         elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2954
2955         ptr = elsio->u.els_plogi.els_plogi_pyld =
2956             dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
2957                 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2958
2959         if (!elsio->u.els_plogi.els_plogi_pyld) {
2960                 rval = QLA_FUNCTION_FAILED;
2961                 goto out;
2962         }
2963
2964         resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2965             dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
2966                 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2967
2968         if (!elsio->u.els_plogi.els_resp_pyld) {
2969                 rval = QLA_FUNCTION_FAILED;
2970                 goto out;
2971         }
2972
2973         ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2974
2975         memset(ptr, 0, sizeof(struct els_plogi_payload));
2976         memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2977         memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2978             &ha->plogi_els_payld.data,
2979             sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2980
2981         elsio->u.els_plogi.els_cmd = els_opcode;
2982         elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2983
2984         ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2985         ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
2986             (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
2987             sizeof(*elsio->u.els_plogi.els_plogi_pyld));
2988
2989         init_completion(&elsio->u.els_plogi.comp);
2990         rval = qla2x00_start_sp(sp);
2991         if (rval != QLA_SUCCESS) {
2992                 rval = QLA_FUNCTION_FAILED;
2993         } else {
2994                 ql_dbg(ql_dbg_disc, vha, 0x3074,
2995                     "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2996                     sp->name, sp->handle, fcport->loop_id,
2997                     fcport->d_id.b24, vha->d_id.b24);
2998         }
2999
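        /*
         * When waiting, block on the completion and then release the DMA
         * buffers below; otherwise the done callback queues a QLA_EVT_UNMAP
         * work item to free them asynchronously.
         */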
3000         if (wait) {
3001                 wait_for_completion(&elsio->u.els_plogi.comp);
3002
3003                 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
3004                         rval = QLA_FUNCTION_FAILED;
3005         } else {
3006                 goto done;
3007         }
3008
3009 out:
3010         fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3011         qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
3012         sp->free(sp);
3013 done:
3014         return rval;
3015 }
3016
3017 static void
3018 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
3019 {
3020         struct bsg_job *bsg_job = sp->u.bsg_job;
3021         struct fc_bsg_request *bsg_request = bsg_job->request;
3022
3023         els_iocb->entry_type = ELS_IOCB_TYPE;
3024         els_iocb->entry_count = 1;
3025         els_iocb->sys_define = 0;
3026         els_iocb->entry_status = 0;
3027         els_iocb->handle = sp->handle;
3028         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3029         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3030         els_iocb->vp_index = sp->vha->vp_idx;
3031         els_iocb->sof_type = EST_SOFI3;
3032         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3033
3034         els_iocb->opcode =
3035             sp->type == SRB_ELS_CMD_RPT ?
3036             bsg_request->rqst_data.r_els.els_code :
3037             bsg_request->rqst_data.h_els.command_code;
3038         els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
3039         els_iocb->d_id[1] = sp->fcport->d_id.b.area;
3040         els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
3041         els_iocb->control_flags = 0;
3042         els_iocb->rx_byte_count =
3043             cpu_to_le32(bsg_job->reply_payload.payload_len);
3044         els_iocb->tx_byte_count =
3045             cpu_to_le32(bsg_job->request_payload.payload_len);
3046
3047         put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3048                            &els_iocb->tx_address);
        els_iocb->tx_len =
            cpu_to_le32(sg_dma_len(bsg_job->request_payload.sg_list));
3051
3052         put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3053                            &els_iocb->rx_address);
        els_iocb->rx_len =
            cpu_to_le32(sg_dma_len(bsg_job->reply_payload.sg_list));
3056
3057         sp->vha->qla_stats.control_requests++;
3058 }
3059
3060 static void
3061 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3062 {
3063         uint16_t        avail_dsds;
3064         struct dsd64    *cur_dsd;
3065         struct scatterlist *sg;
3066         int index;
3067         uint16_t tot_dsds;
3068         scsi_qla_host_t *vha = sp->vha;
3069         struct qla_hw_data *ha = vha->hw;
3070         struct bsg_job *bsg_job = sp->u.bsg_job;
3071         int entry_count = 1;
3072
3073         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3074         ct_iocb->entry_type = CT_IOCB_TYPE;
3075         ct_iocb->entry_status = 0;
3076         ct_iocb->handle1 = sp->handle;
3077         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3078         ct_iocb->status = cpu_to_le16(0);
3079         ct_iocb->control_flags = cpu_to_le16(0);
3080         ct_iocb->timeout = 0;
3081         ct_iocb->cmd_dsd_count =
3082             cpu_to_le16(bsg_job->request_payload.sg_cnt);
3083         ct_iocb->total_dsd_count =
3084             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
3085         ct_iocb->req_bytecount =
3086             cpu_to_le32(bsg_job->request_payload.payload_len);
3087         ct_iocb->rsp_bytecount =
3088             cpu_to_le32(bsg_job->reply_payload.payload_len);
3089
3090         put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3091                            &ct_iocb->req_dsd.address);
3092         ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
3093
3094         put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3095                            &ct_iocb->rsp_dsd.address);
3096         ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
3097
3098         avail_dsds = 1;
3099         cur_dsd = &ct_iocb->rsp_dsd;
3100         index = 0;
3101         tot_dsds = bsg_job->reply_payload.sg_cnt;
3102
3103         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3104                 cont_a64_entry_t *cont_pkt;
3105
3106                 /* Allocate additional continuation packets? */
3107                 if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Cont.
                         * Type 1 IOCB.
                         */
3112                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3113                             vha->hw->req_q_map[0]);
3114                         cur_dsd = cont_pkt->dsd;
3115                         avail_dsds = 5;
3116                         entry_count++;
3117                 }
3118
3119                 append_dsd64(&cur_dsd, sg);
3120                 avail_dsds--;
3121         }
3122         ct_iocb->entry_count = entry_count;
3123
3124         sp->vha->qla_stats.control_requests++;
3125 }
3126
3127 static void
3128 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3129 {
3130         uint16_t        avail_dsds;
3131         struct dsd64    *cur_dsd;
3132         struct scatterlist *sg;
3133         int index;
3134         uint16_t cmd_dsds, rsp_dsds;
3135         scsi_qla_host_t *vha = sp->vha;
3136         struct qla_hw_data *ha = vha->hw;
3137         struct bsg_job *bsg_job = sp->u.bsg_job;
3138         int entry_count = 1;
3139         cont_a64_entry_t *cont_pkt = NULL;
3140
3141         ct_iocb->entry_type = CT_IOCB_TYPE;
3142         ct_iocb->entry_status = 0;
3143         ct_iocb->sys_define = 0;
3144         ct_iocb->handle = sp->handle;
3145
3146         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3147         ct_iocb->vp_index = sp->vha->vp_idx;
3148         ct_iocb->comp_status = cpu_to_le16(0);
3149
3150         cmd_dsds = bsg_job->request_payload.sg_cnt;
3151         rsp_dsds = bsg_job->reply_payload.sg_cnt;
3152
3153         ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3154         ct_iocb->timeout = 0;
3155         ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3156         ct_iocb->cmd_byte_count =
3157             cpu_to_le32(bsg_job->request_payload.payload_len);
3158
3159         avail_dsds = 2;
3160         cur_dsd = ct_iocb->dsd;
3161         index = 0;
3162
3163         for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3164                 /* Allocate additional continuation packets? */
3165                 if (avail_dsds == 0) {
3166                         /*
3167                          * Five DSDs are available in the Cont.
3168                          * Type 1 IOCB.
3169                          */
3170                         cont_pkt = qla2x00_prep_cont_type1_iocb(
3171                             vha, ha->req_q_map[0]);
3172                         cur_dsd = cont_pkt->dsd;
3173                         avail_dsds = 5;
3174                         entry_count++;
3175                 }
3176
3177                 append_dsd64(&cur_dsd, sg);
3178                 avail_dsds--;
3179         }
3180
3181         index = 0;
3182
3183         for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3184                 /* Allocate additional continuation packets? */
3185                 if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Cont.
                         * Type 1 IOCB.
                         */
3190                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3191                             ha->req_q_map[0]);
3192                         cur_dsd = cont_pkt->dsd;
3193                         avail_dsds = 5;
3194                         entry_count++;
3195                 }
3196
3197                 append_dsd64(&cur_dsd, sg);
3198                 avail_dsds--;
3199         }
3200         ct_iocb->entry_count = entry_count;
3201 }
3202
/**
3204  * qla82xx_start_scsi() - Send a SCSI command to the ISP
3205  * @sp: command to send to the ISP
3206  *
3207  * Returns non-zero if a failure occurred, else zero.
3208  */
3209 int
3210 qla82xx_start_scsi(srb_t *sp)
3211 {
3212         int             nseg;
3213         unsigned long   flags;
3214         struct scsi_cmnd *cmd;
3215         uint32_t        *clr_ptr;
3216         uint32_t        handle;
3217         uint16_t        cnt;
3218         uint16_t        req_cnt;
3219         uint16_t        tot_dsds;
3220         struct device_reg_82xx __iomem *reg;
3221         uint32_t dbval;
3222         __be32 *fcp_dl;
3223         uint8_t additional_cdb_len;
3224         struct ct6_dsd *ctx;
3225         struct scsi_qla_host *vha = sp->vha;
3226         struct qla_hw_data *ha = vha->hw;
3227         struct req_que *req = NULL;
3228         struct rsp_que *rsp = NULL;
3229
3230         /* Setup device pointers. */
3231         reg = &ha->iobase->isp82;
3232         cmd = GET_CMD_SP(sp);
3233         req = vha->req;
3234         rsp = ha->rsp_q_map[0];
3235
3236         /* So we know we haven't pci_map'ed anything yet */
3237         tot_dsds = 0;
3238
3239         dbval = 0x04 | (ha->portnum << 5);
3240
3241         /* Send marker if required */
3242         if (vha->marker_needed != 0) {
3243                 if (qla2x00_marker(vha, ha->base_qpair,
3244                         0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3245                         ql_log(ql_log_warn, vha, 0x300c,
3246                             "qla2x00_marker failed for cmd=%p.\n", cmd);
3247                         return QLA_FUNCTION_FAILED;
3248                 }
3249                 vha->marker_needed = 0;
3250         }
3251
3252         /* Acquire ring specific lock */
3253         spin_lock_irqsave(&ha->hardware_lock, flags);
3254
3255         handle = qla2xxx_get_next_handle(req);
3256         if (handle == 0)
3257                 goto queuing_error;
3258
3259         /* Map the sg table so we have an accurate count of sg entries needed */
3260         if (scsi_sg_count(cmd)) {
3261                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3262                     scsi_sg_count(cmd), cmd->sc_data_direction);
3263                 if (unlikely(!nseg))
3264                         goto queuing_error;
        } else {
                nseg = 0;
        }
3267
3268         tot_dsds = nseg;
3269
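        /*
         * Above the ql2xshiftctondsd threshold use a Command Type 6 IOCB,
         * which references externally allocated DSD lists, instead of a
         * Command Type 7 IOCB with DSDs in continuation entries.
         */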
3270         if (tot_dsds > ql2xshiftctondsd) {
3271                 struct cmd_type_6 *cmd_pkt;
3272                 uint16_t more_dsd_lists = 0;
3273                 struct dsd_dma *dsd_ptr;
3274                 uint16_t i;
3275
3276                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3277                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3278                         ql_dbg(ql_dbg_io, vha, 0x300d,
                            "Num of DSD lists %d is more than %d for cmd=%p.\n",
3280                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3281                             cmd);
3282                         goto queuing_error;
3283                 }
3284
3285                 if (more_dsd_lists <= ha->gbl_dsd_avail)
3286                         goto sufficient_dsds;
3287                 else
3288                         more_dsd_lists -= ha->gbl_dsd_avail;
3289
3290                 for (i = 0; i < more_dsd_lists; i++) {
3291                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3292                         if (!dsd_ptr) {
3293                                 ql_log(ql_log_fatal, vha, 0x300e,
3294                                     "Failed to allocate memory for dsd_dma "
3295                                     "for cmd=%p.\n", cmd);
3296                                 goto queuing_error;
3297                         }
3298
3299                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3300                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3301                         if (!dsd_ptr->dsd_addr) {
3302                                 kfree(dsd_ptr);
3303                                 ql_log(ql_log_fatal, vha, 0x300f,
3304                                     "Failed to allocate memory for dsd_addr "
3305                                     "for cmd=%p.\n", cmd);
3306                                 goto queuing_error;
3307                         }
3308                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3309                         ha->gbl_dsd_avail++;
3310                 }
3311
3312 sufficient_dsds:
3313                 req_cnt = 1;
3314
3315                 if (req->cnt < (req_cnt + 2)) {
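                        /*
                         * Recompute the free request-ring slots from the
                         * hardware out-pointer: if it is ahead of our ring
                         * index the gap is free space; otherwise the free
                         * space wraps around the end of the ring.
                         */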
3316                         cnt = (uint16_t)rd_reg_dword_relaxed(
3317                                 &reg->req_q_out[0]);
3318                         if (req->ring_index < cnt)
3319                                 req->cnt = cnt - req->ring_index;
3320                         else
3321                                 req->cnt = req->length -
3322                                         (req->ring_index - cnt);
3323                         if (req->cnt < (req_cnt + 2))
3324                                 goto queuing_error;
3325                 }
3326
3327                 ctx = sp->u.scmd.ct6_ctx =
3328                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3329                 if (!ctx) {
3330                         ql_log(ql_log_fatal, vha, 0x3010,
3331                             "Failed to allocate ctx for cmd=%p.\n", cmd);
3332                         goto queuing_error;
3333                 }
3334
3335                 memset(ctx, 0, sizeof(struct ct6_dsd));
3336                 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3337                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3338                 if (!ctx->fcp_cmnd) {
3339                         ql_log(ql_log_fatal, vha, 0x3011,
3340                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3341                         goto queuing_error;
3342                 }
3343
3344                 /* Initialize the DSD list and dma handle */
3345                 INIT_LIST_HEAD(&ctx->dsd_list);
3346                 ctx->dsd_use_cnt = 0;
3347
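                /*
                 * FCP_CMND IU layout: an 8-byte LUN plus four control
                 * bytes, then the CDB (16 bytes plus any additional CDB
                 * bytes, padded to a 4-byte multiple), then the 4-byte
                 * FCP_DL field; hence the 12 + cmd_len + 4 sizing below.
                 */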
3348                 if (cmd->cmd_len > 16) {
3349                         additional_cdb_len = cmd->cmd_len - 16;
3350                         if ((cmd->cmd_len % 4) != 0) {
3351                                 /* SCSI command bigger than 16 bytes must be
3352                                  * multiple of 4
3353                                  */
3354                                 ql_log(ql_log_warn, vha, 0x3012,
3355                                     "scsi cmd len %d not multiple of 4 "
3356                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
3357                                 goto queuing_error_fcp_cmnd;
3358                         }
3359                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3360                 } else {
3361                         additional_cdb_len = 0;
3362                         ctx->fcp_cmnd_len = 12 + 16 + 4;
3363                 }
3364
3365                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3366                 cmd_pkt->handle = make_handle(req->id, handle);
3367
3368                 /* Zero out remaining portion of packet. */
3369                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
3370                 clr_ptr = (uint32_t *)cmd_pkt + 2;
3371                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3372                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3373
3374                 /* Set NPORT-ID and LUN number*/
3375                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3376                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3377                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3378                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3379                 cmd_pkt->vp_index = sp->vha->vp_idx;
3380
3381                 /* Build IOCB segments */
3382                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3383                         goto queuing_error_fcp_cmnd;
3384
3385                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3386                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3387
3388                 /* build FCP_CMND IU */
3389                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3390                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3391
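                /*
                 * The low two bits of additional_cdb_len carry the FCP_CMND
                 * data-direction flags: bit 0 is WRDATA, bit 1 is RDDATA.
                 */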
3392                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3393                         ctx->fcp_cmnd->additional_cdb_len |= 1;
3394                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3395                         ctx->fcp_cmnd->additional_cdb_len |= 2;
3396
3397                 /* Populate the FCP_PRIO. */
3398                 if (ha->flags.fcp_prio_enabled)
3399                         ctx->fcp_cmnd->task_attribute |=
3400                             sp->fcport->fcp_prio << 3;
3401
3402                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3403
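                /*
                 * FCP_DL (the total data transfer length) follows the CDB
                 * in the FCP_CMND IU and is big-endian on the wire.
                 */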
3404                 fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
3405                     additional_cdb_len);
3406                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3407
3408                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3409                 put_unaligned_le64(ctx->fcp_cmnd_dma,
3410                                    &cmd_pkt->fcp_cmnd_dseg_address);
3411
3412                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3413                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3414                 /* Set total data segment count. */
3415                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3416                 /* Specify response queue number where
3417                  * completion should happen
3418                  */
3419                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3420         } else {
3421                 struct cmd_type_7 *cmd_pkt;
3422
3423                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3424                 if (req->cnt < (req_cnt + 2)) {
3425                         cnt = (uint16_t)rd_reg_dword_relaxed(
3426                             &reg->req_q_out[0]);
3427                         if (req->ring_index < cnt)
3428                                 req->cnt = cnt - req->ring_index;
3429                         else
3430                                 req->cnt = req->length -
3431                                         (req->ring_index - cnt);
3432                 }
3433                 if (req->cnt < (req_cnt + 2))
3434                         goto queuing_error;
3435
3436                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3437                 cmd_pkt->handle = make_handle(req->id, handle);
3438
3439                 /* Zero out remaining portion of packet. */
3440                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3441                 clr_ptr = (uint32_t *)cmd_pkt + 2;
3442                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3443                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3444
3445                 /* Set NPORT-ID and LUN number*/
3446                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3447                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3448                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3449                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3450                 cmd_pkt->vp_index = sp->vha->vp_idx;
3451
3452                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3453                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3454                     sizeof(cmd_pkt->lun));
3455
3456                 /* Populate the FCP_PRIO. */
3457                 if (ha->flags.fcp_prio_enabled)
3458                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3459
3460                 /* Load SCSI command packet. */
3461                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3462                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3463
3464                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3465
3466                 /* Build IOCB segments */
3467                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3468
3469                 /* Set total data segment count. */
3470                 cmd_pkt->entry_count = (uint8_t)req_cnt;
3471                 /* Specify response queue number where
3472                  * completion should happen.
3473                  */
3474                 cmd_pkt->entry_status = (uint8_t) rsp->id;
3475
3476         }
3477         /* Build command packet. */
3478         req->current_outstanding_cmd = handle;
3479         req->outstanding_cmds[handle] = sp;
3480         sp->handle = handle;
3481         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3482         req->cnt -= req_cnt;
3483         wmb();
3484
3485         /* Adjust ring index. */
3486         req->ring_index++;
3487         if (req->ring_index == req->length) {
3488                 req->ring_index = 0;
3489                 req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }
3492
3493         sp->flags |= SRB_DMA_VALID;
3494
        /*
         * Set the chip's new ring index. Without ql2xdbwr, write the
         * doorbell value, read it back and retry until the hardware
         * reflects the update (write, read and verify).
         */
3497         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
        if (ql2xdbwr) {
                qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
        } else {
3501                 wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3502                 wmb();
3503                 while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
3504                         wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3505                         wmb();
3506                 }
3507         }
3508
3509         /* Manage unprocessed RIO/ZIO commands in response queue. */
3510         if (vha->flags.process_response_queue &&
3511             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3512                 qla24xx_process_response_queue(vha, rsp);
3513
3514         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3515         return QLA_SUCCESS;
3516
3517 queuing_error_fcp_cmnd:
3518         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3519 queuing_error:
3520         if (tot_dsds)
3521                 scsi_dma_unmap(cmd);
3522
3523         if (sp->u.scmd.crc_ctx) {
3524                 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3525                 sp->u.scmd.crc_ctx = NULL;
3526         }
3527         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3528
3529         return QLA_FUNCTION_FAILED;
3530 }
3531
3532 static void
3533 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3534 {
3535         struct srb_iocb *aio = &sp->u.iocb_cmd;
3536         scsi_qla_host_t *vha = sp->vha;
3537         struct req_que *req = sp->qpair->req;
3538
3539         memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3540         abt_iocb->entry_type = ABORT_IOCB_TYPE;
3541         abt_iocb->entry_count = 1;
3542         abt_iocb->handle = make_handle(req->id, sp->handle);
3543         if (sp->fcport) {
3544                 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3545                 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3546                 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3547                 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3548         }
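        /*
         * The handle being aborted is qualified with the number of the
         * request queue the original command was issued on.
         */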
3549         abt_iocb->handle_to_abort =
3550                 make_handle(le16_to_cpu(aio->u.abt.req_que_no),
3551                             aio->u.abt.cmd_hndl);
3552         abt_iocb->vp_index = vha->vp_idx;
3553         abt_iocb->req_que_no = aio->u.abt.req_que_no;
3554         /* Send the command to the firmware */
3555         wmb();
3556 }
3557
3558 static void
3559 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3560 {
3561         int i, sz;
3562
3563         mbx->entry_type = MBX_IOCB_TYPE;
3564         mbx->handle = sp->handle;
3565         sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3566
3567         for (i = 0; i < sz; i++)
3568                 mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
3569 }
3570
3571 static void
3572 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3573 {
3574         sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3575         qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3576         ct_pkt->handle = sp->handle;
3577 }
3578
3579 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3580         struct nack_to_isp *nack)
3581 {
3582         struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3583
3584         nack->entry_type = NOTIFY_ACK_TYPE;
3585         nack->entry_count = 1;
3586         nack->ox_id = ntfy->ox_id;
3587
3588         nack->u.isp24.handle = sp->handle;
3589         nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3590         if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3591                 nack->u.isp24.flags = ntfy->u.isp24.flags &
3592                         cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
3593         }
3594         nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3595         nack->u.isp24.status = ntfy->u.isp24.status;
3596         nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3597         nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3598         nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3599         nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3600         nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3601         nack->u.isp24.srr_flags = 0;
3602         nack->u.isp24.srr_reject_code = 0;
3603         nack->u.isp24.srr_reject_code_expl = 0;
3604         nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3605 }
3606
3607 /*
3608  * Build NVME LS request
3609  */
3610 static void
3611 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3612 {
3613         struct srb_iocb *nvme;
3614
3615         nvme = &sp->u.iocb_cmd;
3616         cmd_pkt->entry_type = PT_LS4_REQUEST;
3617         cmd_pkt->entry_count = 1;
3618         cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
3619
3620         cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3621         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3622         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3623
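        /* dsd[0] carries the LS request payload, dsd[1] the response buffer. */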
3624         cmd_pkt->tx_dseg_count = cpu_to_le16(1);
3625         cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
3626         cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
3627         put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3628
3629         cmd_pkt->rx_dseg_count = cpu_to_le16(1);
3630         cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
3631         cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
3632         put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3633 }
3634
3635 static void
3636 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3637 {
3638         int map, pos;
3639
3640         vce->entry_type = VP_CTRL_IOCB_TYPE;
3641         vce->handle = sp->handle;
3642         vce->entry_count = 1;
3643         vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3644         vce->vp_count = cpu_to_le16(1);
3645
        /*
         * The index map in firmware starts with 1, so decrement the index;
         * this is OK as we never use index 0.
         */
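        /*
         * For example, vp_index 9 gives map = (9 - 1) / 8 = 1 and
         * pos = (9 - 1) & 7 = 0, so vp_idx_map[1] |= 0x01.
         */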
3650         map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3651         pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3652         vce->vp_idx_map[map] |= 1 << pos;
3653 }
3654
3655 static void
3656 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3657 {
3658         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3659         logio->control_flags =
3660             cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3661
3662         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3663         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3664         logio->port_id[1] = sp->fcport->d_id.b.area;
3665         logio->port_id[2] = sp->fcport->d_id.b.domain;
3666         logio->vp_index = sp->fcport->vha->vp_idx;
3667 }
3668
3669 int
3670 qla2x00_start_sp(srb_t *sp)
3671 {
3672         int rval = QLA_SUCCESS;
3673         scsi_qla_host_t *vha = sp->vha;
3674         struct qla_hw_data *ha = vha->hw;
3675         struct qla_qpair *qp = sp->qpair;
3676         void *pkt;
3677         unsigned long flags;
3678
3679         spin_lock_irqsave(qp->qp_lock_ptr, flags);
3680         pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3681         if (!pkt) {
3682                 rval = EAGAIN;
3683                 ql_log(ql_log_warn, vha, 0x700c,
3684                     "qla2x00_alloc_iocbs failed.\n");
3685                 goto done;
3686         }
3687
3688         switch (sp->type) {
3689         case SRB_LOGIN_CMD:
3690                 IS_FWI2_CAPABLE(ha) ?
3691                     qla24xx_login_iocb(sp, pkt) :
3692                     qla2x00_login_iocb(sp, pkt);
3693                 break;
3694         case SRB_PRLI_CMD:
3695                 qla24xx_prli_iocb(sp, pkt);
3696                 break;
3697         case SRB_LOGOUT_CMD:
3698                 IS_FWI2_CAPABLE(ha) ?
3699                     qla24xx_logout_iocb(sp, pkt) :
3700                     qla2x00_logout_iocb(sp, pkt);
3701                 break;
3702         case SRB_ELS_CMD_RPT:
3703         case SRB_ELS_CMD_HST:
3704                 qla24xx_els_iocb(sp, pkt);
3705                 break;
3706         case SRB_CT_CMD:
3707                 IS_FWI2_CAPABLE(ha) ?
3708                     qla24xx_ct_iocb(sp, pkt) :
3709                     qla2x00_ct_iocb(sp, pkt);
3710                 break;
3711         case SRB_ADISC_CMD:
3712                 IS_FWI2_CAPABLE(ha) ?
3713                     qla24xx_adisc_iocb(sp, pkt) :
3714                     qla2x00_adisc_iocb(sp, pkt);
3715                 break;
3716         case SRB_TM_CMD:
3717                 IS_QLAFX00(ha) ?
3718                     qlafx00_tm_iocb(sp, pkt) :
3719                     qla24xx_tm_iocb(sp, pkt);
3720                 break;
3721         case SRB_FXIOCB_DCMD:
3722         case SRB_FXIOCB_BCMD:
3723                 qlafx00_fxdisc_iocb(sp, pkt);
3724                 break;
3725         case SRB_NVME_LS:
3726                 qla_nvme_ls(sp, pkt);
3727                 break;
3728         case SRB_ABT_CMD:
3729                 IS_QLAFX00(ha) ?
3730                         qlafx00_abort_iocb(sp, pkt) :
3731                         qla24xx_abort_iocb(sp, pkt);
3732                 break;
3733         case SRB_ELS_DCMD:
3734                 qla24xx_els_logo_iocb(sp, pkt);
3735                 break;
3736         case SRB_CT_PTHRU_CMD:
3737                 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3738                 break;
3739         case SRB_MB_IOCB:
3740                 qla2x00_mb_iocb(sp, pkt);
3741                 break;
3742         case SRB_NACK_PLOGI:
3743         case SRB_NACK_PRLI:
3744         case SRB_NACK_LOGO:
3745                 qla2x00_send_notify_ack_iocb(sp, pkt);
3746                 break;
3747         case SRB_CTRL_VP:
3748                 qla25xx_ctrlvp_iocb(sp, pkt);
3749                 break;
3750         case SRB_PRLO_CMD:
3751                 qla24xx_prlo_iocb(sp, pkt);
3752                 break;
3753         default:
3754                 break;
3755         }
3756
3757         if (sp->start_timer)
3758                 add_timer(&sp->u.iocb_cmd.timer);
3759
3760         wmb();
3761         qla2x00_start_iocbs(vha, qp->req);
3762 done:
3763         spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3764         return rval;
3765 }
3766
3767 static void
3768 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3769                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3770 {
3771         uint16_t avail_dsds;
3772         struct dsd64 *cur_dsd;
3773         uint32_t req_data_len = 0;
3774         uint32_t rsp_data_len = 0;
3775         struct scatterlist *sg;
3776         int index;
3777         int entry_count = 1;
3778         struct bsg_job *bsg_job = sp->u.bsg_job;
3779
        /* Update entry type to indicate a bidirectional command. */
3781         put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
3782
        /*
         * Set the transfer direction; in this case set both flags. Also set
         * the BD_WRAP_BACK flag: the firmware takes care of assigning
         * DID = SID for outgoing packets.
         */
3787         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3788         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3789         cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3790                                                         BD_WRAP_BACK);
3791
3792         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3793         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3794         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3795         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3796
3797         vha->bidi_stats.transfer_bytes += req_data_len;
3798         vha->bidi_stats.io_count++;
3799
3800         vha->qla_stats.output_bytes += req_data_len;
3801         vha->qla_stats.output_requests++;
3802
        /*
         * Only one DSD is available in the bidirectional IOCB; the
         * remaining DSDs are bundled in continuation IOCBs.
         */
3806         avail_dsds = 1;
3807         cur_dsd = &cmd_pkt->fcp_dsd;
3808
3809         index = 0;
3810
3811         for_each_sg(bsg_job->request_payload.sg_list, sg,
3812                                 bsg_job->request_payload.sg_cnt, index) {
3813                 cont_a64_entry_t *cont_pkt;
3814
3815                 /* Allocate additional continuation packets */
3816                 if (avail_dsds == 0) {
                        /*
                         * A Continuation Type 1 IOCB can accommodate
                         * 5 DSDs.
                         */
3820                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3821                         cur_dsd = cont_pkt->dsd;
3822                         avail_dsds = 5;
3823                         entry_count++;
3824                 }
3825                 append_dsd64(&cur_dsd, sg);
3826                 avail_dsds--;
3827         }
        /*
         * For a read request the DSDs always go to a continuation IOCB and
         * follow the write DSDs. If there is room on the current IOCB it is
         * used; otherwise a new continuation IOCB is allocated.
         */
3833         for_each_sg(bsg_job->reply_payload.sg_list, sg,
3834                                 bsg_job->reply_payload.sg_cnt, index) {
3835                 cont_a64_entry_t *cont_pkt;
3836
3837                 /* Allocate additional continuation packets */
3838                 if (avail_dsds == 0) {
                        /*
                         * A Continuation Type 1 IOCB can accommodate
                         * 5 DSDs.
                         */
3842                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3843                         cur_dsd = cont_pkt->dsd;
3844                         avail_dsds = 5;
3845                         entry_count++;
3846                 }
3847                 append_dsd64(&cur_dsd, sg);
3848                 avail_dsds--;
3849         }
        /* This must match the number of IOCBs required for this command. */
3851         cmd_pkt->entry_count = entry_count;
3852 }
3853
3854 int
3855 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3856 {
3858         struct qla_hw_data *ha = vha->hw;
3859         unsigned long flags;
3860         uint32_t handle;
3861         uint16_t req_cnt;
3862         uint16_t cnt;
3863         uint32_t *clr_ptr;
3864         struct cmd_bidir *cmd_pkt = NULL;
3865         struct rsp_que *rsp;
3866         struct req_que *req;
        int rval = EXT_STATUS_OK;
3870
3871         rsp = ha->rsp_q_map[0];
3872         req = vha->req;
3873
3874         /* Send marker if required */
3875         if (vha->marker_needed != 0) {
3876                 if (qla2x00_marker(vha, ha->base_qpair,
3877                         0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3878                         return EXT_STATUS_MAILBOX;
3879                 vha->marker_needed = 0;
3880         }
3881
3882         /* Acquire ring specific lock */
3883         spin_lock_irqsave(&ha->hardware_lock, flags);
3884
3885         handle = qla2xxx_get_next_handle(req);
3886         if (handle == 0) {
3887                 rval = EXT_STATUS_BUSY;
3888                 goto queuing_error;
3889         }
3890
3891         /* Calculate number of IOCB required */
3892         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3893
3894         /* Check for room on request queue. */
3895         if (req->cnt < req_cnt + 2) {
3896                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3897                     rd_reg_dword_relaxed(req->req_q_out);
                if (req->ring_index < cnt)
3899                         req->cnt = cnt - req->ring_index;
3900                 else
3901                         req->cnt = req->length -
3902                                 (req->ring_index - cnt);
3903         }
3904         if (req->cnt < req_cnt + 2) {
3905                 rval = EXT_STATUS_BUSY;
3906                 goto queuing_error;
3907         }
3908
3909         cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3910         cmd_pkt->handle = make_handle(req->id, handle);
3911
3912         /* Zero out remaining portion of packet. */
3913         /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3914         clr_ptr = (uint32_t *)cmd_pkt + 2;
3915         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3916
        /* Set NPORT-ID (of vha). */
3918         cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3919         cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3920         cmd_pkt->port_id[1] = vha->d_id.b.area;
3921         cmd_pkt->port_id[2] = vha->d_id.b.domain;
3922
3923         qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3924         cmd_pkt->entry_status = (uint8_t) rsp->id;
3925         /* Build command packet. */
3926         req->current_outstanding_cmd = handle;
3927         req->outstanding_cmds[handle] = sp;
3928         sp->handle = handle;
3929         req->cnt -= req_cnt;
3930
3931         /* Send the command to the firmware */
3932         wmb();
3933         qla2x00_start_iocbs(vha, req);
3934 queuing_error:
3935         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3936         return rval;
3937 }