drivers/scsi/qla2xxx/qla_iocb.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);

/**
 * qla2x00_get_cmd_direction() - Determine the CF_* data direction flag.
 * @sp: SCSI Request Block
 *
 * Returns the proper CF_* direction based on the command's data direction.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->fcport->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
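
/*
 * Worked example (illustrative, not part of the driver): the Command
 * Type 2 IOCB carries 3 DSDs and each Continuation Type 0 IOCB carries
 * 7 more, so dsds = 17 gives 1 + (17 - 3) / 7 = 3 IOCBs exactly, while
 * dsds = 18 leaves a remainder and needs a fourth entry.
 */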

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
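
/*
 * Worked example (illustrative, not part of the driver): the Command
 * Type 3 IOCB carries 2 DSDs and each Continuation Type 1 IOCB carries
 * 5 more, so dsds = 12 gives 1 + (12 - 2) / 5 = 3 IOCBs exactly, while
 * dsds = 13 leaves a remainder and needs a fourth entry.
 */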

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue to place the IOCB on
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}
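
/*
 * Illustrative note (not part of the driver): both prep helpers treat the
 * request queue as a circular buffer. With req->length == 4 and the index
 * starting at 0, successive calls step it 1, 2, 3, 0, ... rewinding
 * ring_ptr to req->ring at each wrap.
 */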

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We only support T10 DIF right now */
        if (guard != SHOST_DIX_GUARD_CRC) {
                ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
                    "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
                return 0;
        }

        /* We always use DIF bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}
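
/*
 * Usage sketch (illustrative, not part of the driver): a DIF-aware
 * submission path might consult the returned protection-SG count before
 * choosing the CRC-capable IOCB path:
 *
 *      uint16_t fw_prot_opts = 0;
 *      int nprot = qla24xx_configure_prot_mode(sp, &fw_prot_opts);
 *      // nprot == 0 when the guard type is unsupported or the command
 *      // carries no protection scatterlist.
 */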

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             ret, nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;
        char            tag[2];

        /* Setup device pointers. */
        ret = 0;
        vha = sp->fcport->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no headroom then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

        /* Update tagged queuing modifier */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_HEAD_TAG);
                        break;
                case ORDERED_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_ORDERED_TAG);
                        break;
                default:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_SIMPLE_TAG);
                        break;
                }
        }

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue on which the IOCB was placed
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

        if (IS_QLA82XX(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA83XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context; the caller must
 * hold the hardware lock (qla2x00_marker() below is the locking wrapper).
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint16_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        mrk24 = NULL;
        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have the hardware lock held, as specified by the ha_locked
 * parameter. Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 1) {
                iocbs += (dsds - 1) / 5;
                if ((dsds - 1) % 5)
                        iocbs++;
        }
        return iocbs;
}
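
/*
 * Worked example (illustrative, not part of the driver): here the command
 * IOCB itself carries 1 DSD and each continuation carries 5, so dsds = 11
 * gives 1 + (11 - 1) / 5 = 3 IOCBs exactly, while dsds = 12 needs a
 * fourth for the remainder.
 */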

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
                __constant_cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return 0;
        }

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t      sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}
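
/*
 * Worked example (illustrative, not part of the driver), writing N for the
 * QLA_DSDS_PER_IOCB capacity defined in qla_def.h: dsds = N needs exactly
 * one DSD list, while dsds = N + 1 needs two (one full list plus one
 * carrying the single remaining descriptor).
 */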

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;
        struct req_que *req;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        req = vha->req;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};
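
/*
 * Illustrative note (not part of the driver): for a DIF Type 1 command the
 * context is typically programmed with ref_tag = low 32 bits of the LBA,
 * ref_tag_mask = {0xff, 0xff, 0xff, 0xff} (check every ref-tag byte), and
 * app_tag = 0 with app_tag_mask = {0x00, 0x00} (ignore the app tag), as
 * qla24xx_set_t10dif_tags() below does.
 */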

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI
 * command and program them into the firmware DIF context.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        scsi_qla_host_t *vha = shost_priv(cmd->device->host);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For Type 2 protection: 16 bit GUARD tag, and the 32 bit REF
         * tag has to match the LBA in the CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                    pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] = 0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }

        ql_dbg(ql_dbg_io, vha, 0x3009,
            "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
            "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
            pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
            scsi_get_prot_type(cmd), cmd);
}

struct qla2_sgx {
        dma_addr_t              dma_addr;       /* OUT */
        uint32_t                dma_len;        /* OUT */

        uint32_t                tot_bytes;      /* IN */
        struct scatterlist      *cur_sg;        /* IN */

        /* for bookkeeping, bzero on initial invocation */
        uint32_t                bytes_consumed;
        uint32_t                num_bytes;
        uint32_t                tot_partial;

        /* for debugging */
        uint32_t                num_sg;
        srb_t                   *sp;
};
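
/*
 * Usage sketch (illustrative, not part of the driver; cmd and blk_sz are
 * stand-ins): the walker is zeroed, seeded with the command's scatterlist
 * and total byte count, then driven one protection-interval-sized chunk
 * at a time, as qla24xx_walk_and_build_sglist_no_difb() below does:
 *
 *      struct qla2_sgx sgx;
 *      uint32_t partial;
 *
 *      memset(&sgx, 0, sizeof(sgx));
 *      sgx.tot_bytes = scsi_bufflen(cmd);
 *      sgx.cur_sg = scsi_sglist(cmd);
 *      while (qla24xx_get_one_block_sg(blk_sz, &sgx, &partial)) {
 *              // consume sgx.dma_addr / sgx.dma_len here
 *      }
 */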

static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}
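
/*
 * Worked example (illustrative, not part of the driver): with blk_sz = 512
 * and two SG elements of 768 and 256 bytes, the walker emits 512 bytes
 * (full block, *partial = 0), then 256 + 256 bytes across the element
 * boundary (*partial = 1, then 0 as the block completes), and returns 0
 * once all 1024 bytes have been consumed.
 */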

static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;

        uint32_t        prot_int;
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        prot_int = cmd->device->sector_size;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        sgx.tot_bytes = scsi_bufflen(cmd);
        sgx.cur_sg = scsi_sglist(cmd);
        sgx.sp = sp;

        sg_prot = scsi_prot_sglist(cmd);

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to clean up only this dsd_ptr; the
                                 * rest will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
        uint8_t         *cp;

        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to clean up only this dsd_ptr; the
                                 * rest will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);
                ql_dbg(ql_dbg_io, vha, 0x300a,
                    "sg entry %d - addr=0x%x 0x%x, len=%d for cmd=%p.\n",
                    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;

                if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
                        cp = page_address(sg_page(sg)) + sg->offset;
                        ql_dbg(ql_dbg_io, vha, 0x300b,
                            "User data buffer=%p for cmd=%p.\n", cp, cmd);
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
        uint8_t         *cp;

        cmd = GET_CMD_SP(sp);
        scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to clean up only this dsd_ptr; the
                                 * rest will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);
                if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
                        ql_dbg(ql_dbg_io, vha, 0x3027,
                            "%s(): %p, sg_entry %d - addr=0x%x 0x%x, len=%d.\n",
                            __func__, cur_dsd, i,
                            LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
                        cp = page_address(sg_page(sg)) + sg->offset;
                        ql_dbg(ql_dbg_io, vha, 0x3028,
                            "%s(): Protection Data buffer = %p.\n", __func__,
                            cp);
                }
                avail_dsds--;
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *                                    Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type CRC_2 IOCB
 * @tot_dsds: Total number of data segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Firmware protection options (PO_* mode flags)
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        struct scatterlist      *cur_seg;
        int                     sgc;
        uint32_t                total_bytes = 0;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;
        char                    tag[2];

        cmd = GET_CMD_SP(sp);

        sgc = 0;
        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
        }

        if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
            (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->u.scmd.ctx =
            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        /* Zero out CTX area. */
        clr_ptr = (uint8_t *)crc_ctx_pkt;
        memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_management = 0;

        /*
         * Update tagged queuing modifier if using command tag queuing
         */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
                        break;
                case ORDERED_QUEUE_TAG:
                        fcp_cmnd->task_attribute = TSK_ORDERED;
                        break;
                default:
                        fcp_cmnd->task_attribute = 0;
                        break;
                }
        } else {
                fcp_cmnd->task_attribute = 0;
        }

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        /* Compute DIF length and adjust data length to include protection */
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
        dif_bytes = (data_bytes / blk_size) * 8;

        switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
                total_bytes = data_bytes;
                data_bytes += dif_bytes;
                break;

        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                total_bytes = data_bytes + dif_bytes;
                break;
        default:
                BUG();
        }
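
        /*
         * Worked example (illustrative, not part of the driver): with
         * 512 byte sectors, a 4 KiB transfer gives dif_bytes =
         * (4096 / 512) * 8 = 64. For the PASS cases the wire total is
         * 4096 + 64 = 4160 bytes; for WRITE_STRIP only the 4096 data
         * bytes cross the wire while the DIF bytes are still counted in
         * the host-side DMA length.
         */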

        if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure Bundling if we need to fetch interleaving
                 * protection PCI accesses
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                                                        tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);

        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        /* Walk the data segments */
        cmd_pkt->control_flags |=
            __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

        if (!bundling && tot_prot_dsds) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
                    cur_dsd, tot_dsds))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
            (tot_dsds - tot_prot_dsds)))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walk the DIF segments */
                cur_seg = scsi_prot_sglist(cmd);
                cmd_pkt->control_flags |=
                        __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                    tot_prot_dsds))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}
1449
1450 /**
1451  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1452  * @sp: command to send to the ISP
1453  *
1454  * Returns non-zero if a failure occurred, else zero.
1455  */
1456 int
1457 qla24xx_start_scsi(srb_t *sp)
1458 {
1459         int             ret, nseg;
1460         unsigned long   flags;
1461         uint32_t        *clr_ptr;
1462         uint32_t        index;
1463         uint32_t        handle;
1464         struct cmd_type_7 *cmd_pkt;
1465         uint16_t        cnt;
1466         uint16_t        req_cnt;
1467         uint16_t        tot_dsds;
1468         struct req_que *req = NULL;
1469         struct rsp_que *rsp = NULL;
1470         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1471         struct scsi_qla_host *vha = sp->fcport->vha;
1472         struct qla_hw_data *ha = vha->hw;
1473         char            tag[2];
1474
1475         /* Setup device pointers. */
1476         ret = 0;
1477
1478         qla25xx_set_que(sp, &rsp);
1479         req = vha->req;
1480
1481         /* So we know we haven't pci_map'ed anything yet */
1482         tot_dsds = 0;
1483
1484         /* Send marker if required */
1485         if (vha->marker_needed != 0) {
1486                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1487                     QLA_SUCCESS)
1488                         return QLA_FUNCTION_FAILED;
1489                 vha->marker_needed = 0;
1490         }
1491
1492         /* Acquire ring specific lock */
1493         spin_lock_irqsave(&ha->hardware_lock, flags);
1494
1495         /* Check for room in outstanding command list. */
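        /* Handle 0 is reserved; scan circularly from the last-used slot. */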
1496         handle = req->current_outstanding_cmd;
1497         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1498                 handle++;
1499                 if (handle == MAX_OUTSTANDING_COMMANDS)
1500                         handle = 1;
1501                 if (!req->outstanding_cmds[handle])
1502                         break;
1503         }
1504         if (index == MAX_OUTSTANDING_COMMANDS) {
1505                 goto queuing_error;
1506         }
1507
1508         /* Map the sg table so we have an accurate count of sg entries needed */
1509         if (scsi_sg_count(cmd)) {
1510                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1511                     scsi_sg_count(cmd), cmd->sc_data_direction);
1512                 if (unlikely(!nseg))
1513                         goto queuing_error;
1514         } else
1515                 nseg = 0;
1516
1517         tot_dsds = nseg;
1518         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
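        /*
         * Need req_cnt ring entries plus two spare slots; re-read the
         * out-pointer to pick up entries the firmware has consumed.
         */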
1519         if (req->cnt < (req_cnt + 2)) {
1520                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1521
1522                 if (req->ring_index < cnt)
1523                         req->cnt = cnt - req->ring_index;
1524                 else
1525                         req->cnt = req->length -
1526                                 (req->ring_index - cnt);
1527                 if (req->cnt < (req_cnt + 2))
1528                         goto queuing_error;
1529         }
1530
1531         /* Build command packet. */
1532         req->current_outstanding_cmd = handle;
1533         req->outstanding_cmds[handle] = sp;
1534         sp->handle = handle;
1535         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1536         req->cnt -= req_cnt;
1537
1538         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1539         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1540
1541         /* Zero out remaining portion of packet. */
1542         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1543         clr_ptr = (uint32_t *)cmd_pkt + 2;
1544         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1545         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1546
1547         /* Set NPORT-ID and LUN number*/
1548         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1549         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1550         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1551         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1552         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1553
1554         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1555         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1556
1557         /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1558         if (scsi_populate_tag_msg(cmd, tag)) {
1559                 switch (tag[0]) {
1560                 case HEAD_OF_QUEUE_TAG:
1561                         cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1562                         break;
1563                 case ORDERED_QUEUE_TAG:
1564                         cmd_pkt->task = TSK_ORDERED;
1565                         break;
1566                 }
1567         }
1568
1569         /* Load SCSI command packet. */
1570         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1571         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1572
1573         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1574
1575         /* Build IOCB segments */
1576         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1577
1578         /* Set total data segment count. */
1579         cmd_pkt->entry_count = (uint8_t)req_cnt;
1580         /* Specify response queue number where completion should happen */
1581         cmd_pkt->entry_status = (uint8_t) rsp->id;
1582         wmb();
1583         /* Adjust ring index. */
1584         req->ring_index++;
1585         if (req->ring_index == req->length) {
1586                 req->ring_index = 0;
1587                 req->ring_ptr = req->ring;
1588         } else
1589                 req->ring_ptr++;
1590
1591         sp->flags |= SRB_DMA_VALID;
1592
1593         /* Set chip new ring index. */
1594         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1595         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1596
1597         /* Manage unprocessed RIO/ZIO commands in response queue. */
1598         if (vha->flags.process_response_queue &&
1599                 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1600                 qla24xx_process_response_queue(vha, rsp);
1601
1602         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1603         return QLA_SUCCESS;
1604
1605 queuing_error:
1606         if (tot_dsds)
1607                 scsi_dma_unmap(cmd);
1608
1609         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1610
1611         return QLA_FUNCTION_FAILED;
1612 }
1613
1614
1615 /**
1616  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1617  * @sp: command to send to the ISP
1618  *
1619  * Returns non-zero if a failure occurred, else zero.
1620  */
1621 int
1622 qla24xx_dif_start_scsi(srb_t *sp)
1623 {
1624         int                     nseg;
1625         unsigned long           flags;
1626         uint32_t                *clr_ptr;
1627         uint32_t                index;
1628         uint32_t                handle;
1629         uint16_t                cnt;
1630         uint16_t                req_cnt = 0;
1631         uint16_t                tot_dsds;
1632         uint16_t                tot_prot_dsds;
1633         uint16_t                fw_prot_opts = 0;
1634         struct req_que          *req = NULL;
1635         struct rsp_que          *rsp = NULL;
1636         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1637         struct scsi_qla_host    *vha = sp->fcport->vha;
1638         struct qla_hw_data      *ha = vha->hw;
1639         struct cmd_type_crc_2   *cmd_pkt;
1640         uint32_t                status = 0;
1641
1642 #define QDSS_GOT_Q_SPACE        BIT_0
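/* Set once ring space has been claimed so queuing_error can release it. */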
1643
1644         /* Only handle protection operations or CDBs longer than 16 bytes here. */
1645         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1646                 if (cmd->cmd_len <= 16)
1647                         return qla24xx_start_scsi(sp);
1648         }
1649
1650         /* Setup device pointers. */
1651
1652         qla25xx_set_que(sp, &rsp);
1653         req = vha->req;
1654
1655         /* So we know we haven't pci_map'ed anything yet */
1656         tot_dsds = 0;
1657
1658         /* Send marker if required */
1659         if (vha->marker_needed != 0) {
1660                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1661                     QLA_SUCCESS)
1662                         return QLA_FUNCTION_FAILED;
1663                 vha->marker_needed = 0;
1664         }
1665
1666         /* Acquire ring specific lock */
1667         spin_lock_irqsave(&ha->hardware_lock, flags);
1668
1669         /* Check for room in outstanding command list. */
1670         handle = req->current_outstanding_cmd;
1671         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1672                 handle++;
1673                 if (handle == MAX_OUTSTANDING_COMMANDS)
1674                         handle = 1;
1675                 if (!req->outstanding_cmds[handle])
1676                         break;
1677         }
1678
1679         if (index == MAX_OUTSTANDING_COMMANDS)
1680                 goto queuing_error;
1681
1682         /* Compute number of required data segments */
1683         /* Map the sg table so we have an accurate count of sg entries needed */
1684         if (scsi_sg_count(cmd)) {
1685                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1686                     scsi_sg_count(cmd), cmd->sc_data_direction);
1687                 if (unlikely(!nseg))
1688                         goto queuing_error;
1689                 else
1690                         sp->flags |= SRB_DMA_VALID;
1691
1692                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1693                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1694                         struct qla2_sgx sgx;
1695                         uint32_t        partial;
1696
1697                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1698                         sgx.tot_bytes = scsi_bufflen(cmd);
1699                         sgx.cur_sg = scsi_sglist(cmd);
1700                         sgx.sp = sp;
1701
1702                         nseg = 0;
1703                         while (qla24xx_get_one_block_sg(
1704                             cmd->device->sector_size, &sgx, &partial))
1705                                 nseg++;
1706                 }
1707         } else
1708                 nseg = 0;
1709
1710         /* number of required data segments */
1711         tot_dsds = nseg;
1712
1713         /* Compute number of required protection segments */
1714         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1715                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1716                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1717                 if (unlikely(!nseg))
1718                         goto queuing_error;
1719                 else
1720                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1721
1722                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1723                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1724                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1725                 }
1726         } else {
1727                 nseg = 0;
1728         }
1729
1730         req_cnt = 1;
1731         /* Total Data and protection sg segment(s) */
1732         tot_prot_dsds = nseg;
1733         tot_dsds += nseg;
1734         if (req->cnt < (req_cnt + 2)) {
1735                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1736
1737                 if (req->ring_index < cnt)
1738                         req->cnt = cnt - req->ring_index;
1739                 else
1740                         req->cnt = req->length -
1741                                 (req->ring_index - cnt);
1742                 if (req->cnt < (req_cnt + 2))
1743                         goto queuing_error;
1744         }
1745
1746         status |= QDSS_GOT_Q_SPACE;
1747
1748         /* Build header part of command packet (excluding the OPCODE). */
1749         req->current_outstanding_cmd = handle;
1750         req->outstanding_cmds[handle] = sp;
1751         sp->handle = handle;
1752         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1753         req->cnt -= req_cnt;
1754
1755         /* Fill-in common area */
1756         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1757         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1758
1759         clr_ptr = (uint32_t *)cmd_pkt + 2;
1760         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1761
1762         /* Set NPORT-ID and LUN number*/
1763         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1764         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1765         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1766         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1767
1768         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1769         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1770
1771         /* Total Data and protection segment(s) */
1772         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1773
1774         /* Build IOCB segments and adjust for data protection segments */
1775         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1776             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1777                 QLA_SUCCESS)
1778                 goto queuing_error;
1779
1780         cmd_pkt->entry_count = (uint8_t)req_cnt;
1781         /* Specify response queue number where completion should happen */
1782         cmd_pkt->entry_status = (uint8_t) rsp->id;
1783         cmd_pkt->timeout = __constant_cpu_to_le16(0);
1784         wmb();
1785
1786         /* Adjust ring index. */
1787         req->ring_index++;
1788         if (req->ring_index == req->length) {
1789                 req->ring_index = 0;
1790                 req->ring_ptr = req->ring;
1791         } else
1792                 req->ring_ptr++;
1793
1794         /* Set chip new ring index. */
1795         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1796         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1797
1798         /* Manage unprocessed RIO/ZIO commands in response queue. */
1799         if (vha->flags.process_response_queue &&
1800             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1801                 qla24xx_process_response_queue(vha, rsp);
1802
1803         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1804
1805         return QLA_SUCCESS;
1806
1807 queuing_error:
1808         if (status & QDSS_GOT_Q_SPACE) {
1809                 req->outstanding_cmds[handle] = NULL;
1810                 req->cnt += req_cnt;
1811         }
1812         /* Cleanup will be performed by the caller (queuecommand) */
1813
1814         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1815         return QLA_FUNCTION_FAILED;
1816 }
1817
1818
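/**
 * qla25xx_set_que() - Select the response queue for command completion.
 * @sp: command being queued
 * @rsp: set to the response queue the ISP should use for completion
 *
 * With CPU affinity enabled, completions are steered to the response
 * queue mapped to the submitting CPU; otherwise the default response
 * queue (index 0) is used.
 */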
1819 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1820 {
1821         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1822         struct qla_hw_data *ha = sp->fcport->vha->hw;
1823         int affinity = cmd->request->cpu;
1824
1825         if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1826                 affinity < ha->max_rsp_queues - 1)
1827                 *rsp = ha->rsp_q_map[affinity + 1];
1828         else
1829                 *rsp = ha->rsp_q_map[0];
1830 }
1831
1832 /* Generic Control-SRB manipulation functions. */
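/**
 * qla2x00_alloc_iocbs() - Allocate an IOCB packet on the default request queue.
 * @vha: HA context
 * @sp: SRB the packet is for, or NULL if no outstanding-command slot is needed
 *
 * Reserves an outstanding-command handle (when @sp is given) and ring space,
 * then returns a zeroed request entry with entry_count and handle pre-filled.
 * Returns NULL if the command array or the request ring is full.
 *
 * A minimal usage sketch (error handling elided), mirroring
 * qla2x00_start_sp() below:
 *
 *	pkt = qla2x00_alloc_iocbs(vha, sp);
 *	if (!pkt)
 *		goto done;
 */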
1833 void *
1834 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1835 {
1836         struct qla_hw_data *ha = vha->hw;
1837         struct req_que *req = ha->req_q_map[0];
1838         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1839         uint32_t index, handle;
1840         request_t *pkt;
1841         uint16_t cnt, req_cnt;
1842
1843         pkt = NULL;
1844         req_cnt = 1;
1845         handle = 0;
1846
1847         if (!sp)
1848                 goto skip_cmd_array;
1849
1850         /* Check for room in outstanding command list. */
1851         handle = req->current_outstanding_cmd;
1852         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1853                 handle++;
1854                 if (handle == MAX_OUTSTANDING_COMMANDS)
1855                         handle = 1;
1856                 if (!req->outstanding_cmds[handle])
1857                         break;
1858         }
1859         if (index == MAX_OUTSTANDING_COMMANDS) {
1860                 ql_log(ql_log_warn, vha, 0x700b,
1861                     "No room on outstanding cmd array.\n");
1862                 goto queuing_error;
1863         }
1864
1865         /* Prep command array. */
1866         req->current_outstanding_cmd = handle;
1867         req->outstanding_cmds[handle] = sp;
1868         sp->handle = handle;
1869
1870         /* Adjust entry-counts as needed. */
1871         if (sp->type != SRB_SCSI_CMD)
1872                 req_cnt = sp->iocbs;
1873
1874 skip_cmd_array:
1875         /* Check for room on request queue. */
1876         if (req->cnt < req_cnt) {
1877                 if (ha->mqenable || IS_QLA83XX(ha))
1878                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1879                 else if (IS_QLA82XX(ha))
1880                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1881                 else if (IS_FWI2_CAPABLE(ha))
1882                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1883                 else
1884                         cnt = qla2x00_debounce_register(
1885                             ISP_REQ_Q_OUT(ha, &reg->isp));
1886
1887                 if  (req->ring_index < cnt)
1888                         req->cnt = cnt - req->ring_index;
1889                 else
1890                         req->cnt = req->length -
1891                             (req->ring_index - cnt);
1892         }
1893         if (req->cnt < req_cnt)
1894                 goto queuing_error;
1895
1896         /* Prep packet */
1897         req->cnt -= req_cnt;
1898         pkt = req->ring_ptr;
1899         memset(pkt, 0, REQUEST_ENTRY_SIZE);
1900         pkt->entry_count = req_cnt;
1901         pkt->handle = handle;
1902
1903 queuing_error:
1904         return pkt;
1905 }
1906
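/**
 * qla24xx_login_iocb() - Build a PLOGI login IOCB for FWI2-capable ISPs.
 * @sp: login SRB
 * @logio: login/logout IOCB to fill in
 */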
1907 static void
1908 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1909 {
1910         struct srb_iocb *lio = &sp->u.iocb_cmd;
1911
1912         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1913         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1914         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1915                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1916         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1917                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1918         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1919         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1920         logio->port_id[1] = sp->fcport->d_id.b.area;
1921         logio->port_id[2] = sp->fcport->d_id.b.domain;
1922         logio->vp_index = sp->fcport->vha->vp_idx;
1923 }
1924
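/**
 * qla2x00_login_iocb() - Build a fabric-login mailbox IOCB for pre-FWI2 ISPs.
 * @sp: login SRB
 * @mbx: mailbox IOCB to fill in
 */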
1925 static void
1926 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1927 {
1928         struct qla_hw_data *ha = sp->fcport->vha->hw;
1929         struct srb_iocb *lio = &sp->u.iocb_cmd;
1930         uint16_t opts;
1931
1932         mbx->entry_type = MBX_IOCB_TYPE;
1933         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1934         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1935         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1936         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1937         if (HAS_EXTENDED_IDS(ha)) {
1938                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1939                 mbx->mb10 = cpu_to_le16(opts);
1940         } else {
1941                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1942         }
1943         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1944         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1945             sp->fcport->d_id.b.al_pa);
1946         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1947 }
1948
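/**
 * qla24xx_logout_iocb() - Build an implicit-LOGO IOCB for FWI2-capable ISPs.
 * @sp: logout SRB
 * @logio: login/logout IOCB to fill in
 */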
1949 static void
1950 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1951 {
1952         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1953         logio->control_flags =
1954             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1955         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1956         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1957         logio->port_id[1] = sp->fcport->d_id.b.area;
1958         logio->port_id[2] = sp->fcport->d_id.b.domain;
1959         logio->vp_index = sp->fcport->vha->vp_idx;
1960 }
1961
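/**
 * qla2x00_logout_iocb() - Build a fabric-logout mailbox IOCB for pre-FWI2
 * ISPs.
 * @sp: logout SRB
 * @mbx: mailbox IOCB to fill in
 */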
1962 static void
1963 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1964 {
1965         struct qla_hw_data *ha = sp->fcport->vha->hw;
1966
1967         mbx->entry_type = MBX_IOCB_TYPE;
1968         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1969         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1970         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1971             cpu_to_le16(sp->fcport->loop_id):
1972             cpu_to_le16(sp->fcport->loop_id << 8);
1973         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1974         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1975             sp->fcport->d_id.b.al_pa);
1976         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1977         /* Implicit: mbx->mbx10 = 0. */
1978 }
1979
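/**
 * qla24xx_adisc_iocb() - Build an ADISC IOCB for FWI2-capable ISPs.
 * @sp: ADISC SRB
 * @logio: login/logout IOCB to fill in
 */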
1980 static void
1981 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1982 {
1983         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1984         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1985         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1986         logio->vp_index = sp->fcport->vha->vp_idx;
1987 }
1988
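/**
 * qla2x00_adisc_iocb() - Build a get-port-database mailbox IOCB, used to
 * implement ADISC on pre-FWI2 ISPs.
 * @sp: ADISC SRB
 * @mbx: mailbox IOCB to fill in
 */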
1989 static void
1990 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1991 {
1992         struct qla_hw_data *ha = sp->fcport->vha->hw;
1993
1994         mbx->entry_type = MBX_IOCB_TYPE;
1995         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1996         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1997         if (HAS_EXTENDED_IDS(ha)) {
1998                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1999                 mbx->mb10 = cpu_to_le16(BIT_0);
2000         } else {
2001                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2002         }
2003         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2004         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2005         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2006         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2007         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
2008 }
2009
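/**
 * qla24xx_tm_iocb() - Build a task-management IOCB.
 * @sp: task-management SRB
 * @tsk: task-management IOCB to fill in
 *
 * For a LUN reset the LUN is byte-swapped into FCP (big-endian) order.
 */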
2010 static void
2011 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2012 {
2013         uint32_t flags;
2014         unsigned int lun;
2015         struct fc_port *fcport = sp->fcport;
2016         scsi_qla_host_t *vha = fcport->vha;
2017         struct qla_hw_data *ha = vha->hw;
2018         struct srb_iocb *iocb = &sp->u.iocb_cmd;
2019         struct req_que *req = vha->req;
2020
2021         flags = iocb->u.tmf.flags;
2022         lun = iocb->u.tmf.lun;
2023
2024         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2025         tsk->entry_count = 1;
2026         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2027         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2028         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2029         tsk->control_flags = cpu_to_le32(flags);
2030         tsk->port_id[0] = fcport->d_id.b.al_pa;
2031         tsk->port_id[1] = fcport->d_id.b.area;
2032         tsk->port_id[2] = fcport->d_id.b.domain;
2033         tsk->vp_index = fcport->vha->vp_idx;
2034
2035         if (flags == TCF_LUN_RESET) {
2036                 int_to_scsilun(lun, &tsk->lun);
2037                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2038                         sizeof(tsk->lun));
2039         }
2040 }
2041
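/**
 * qla24xx_els_iocb() - Build an ELS pass-through IOCB from a BSG job.
 * @sp: ELS SRB carrying the bsg_job
 * @els_iocb: ELS IOCB to fill in
 *
 * Payload addresses and lengths come from the DMA-mapped BSG request
 * and reply scatter lists.
 */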
2042 static void
2043 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2044 {
2045         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2046
2047         els_iocb->entry_type = ELS_IOCB_TYPE;
2048         els_iocb->entry_count = 1;
2049         els_iocb->sys_define = 0;
2050         els_iocb->entry_status = 0;
2051         els_iocb->handle = sp->handle;
2052         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2053         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2054         els_iocb->vp_index = sp->fcport->vha->vp_idx;
2055         els_iocb->sof_type = EST_SOFI3;
2056         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2057
2058         els_iocb->opcode =
2059             sp->type == SRB_ELS_CMD_RPT ?
2060             bsg_job->request->rqst_data.r_els.els_code :
2061             bsg_job->request->rqst_data.h_els.command_code;
2062         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2063         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2064         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2065         els_iocb->control_flags = 0;
2066         els_iocb->rx_byte_count =
2067             cpu_to_le32(bsg_job->reply_payload.payload_len);
2068         els_iocb->tx_byte_count =
2069             cpu_to_le32(bsg_job->request_payload.payload_len);
2070
2071         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2072             (bsg_job->request_payload.sg_list)));
2073         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2074             (bsg_job->request_payload.sg_list)));
2075         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2076             (bsg_job->request_payload.sg_list));
2077
2078         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2079             (bsg_job->reply_payload.sg_list)));
2080         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2081             (bsg_job->reply_payload.sg_list)));
2082         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2083             (bsg_job->reply_payload.sg_list));
2084 }
2085
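/**
 * qla2x00_ct_iocb() - Build a CT pass-through (MS) IOCB from a BSG job for
 * pre-FWI2 ISPs.
 * @sp: CT SRB carrying the bsg_job
 * @ct_iocb: MS IOCB to fill in
 *
 * Reply DSDs that do not fit in the base entry spill into Continuation
 * Type 1 IOCBs.
 */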
2086 static void
2087 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2088 {
2089         uint16_t        avail_dsds;
2090         uint32_t        *cur_dsd;
2091         struct scatterlist *sg;
2092         int index;
2093         uint16_t tot_dsds;
2094         scsi_qla_host_t *vha = sp->fcport->vha;
2095         struct qla_hw_data *ha = vha->hw;
2096         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2097         int loop_iteration = 0;
2098         int cont_iocb_prsnt = 0;
2099         int entry_count = 1;
2100
2101         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2102         ct_iocb->entry_type = CT_IOCB_TYPE;
2103         ct_iocb->entry_status = 0;
2104         ct_iocb->handle1 = sp->handle;
2105         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2106         ct_iocb->status = __constant_cpu_to_le16(0);
2107         ct_iocb->control_flags = __constant_cpu_to_le16(0);
2108         ct_iocb->timeout = 0;
2109         ct_iocb->cmd_dsd_count =
2110             cpu_to_le16(bsg_job->request_payload.sg_cnt);
2111         ct_iocb->total_dsd_count =
2112             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2113         ct_iocb->req_bytecount =
2114             cpu_to_le32(bsg_job->request_payload.payload_len);
2115         ct_iocb->rsp_bytecount =
2116             cpu_to_le32(bsg_job->reply_payload.payload_len);
2117
2118         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2119             (bsg_job->request_payload.sg_list)));
2120         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2121             (bsg_job->request_payload.sg_list)));
2122         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2123
2124         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2125             (bsg_job->reply_payload.sg_list)));
2126         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2127             (bsg_job->reply_payload.sg_list)));
2128         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2129
2130         avail_dsds = 1;
2131         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2132         index = 0;
2133         tot_dsds = bsg_job->reply_payload.sg_cnt;
2134
2135         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2136                 dma_addr_t       sle_dma;
2137                 cont_a64_entry_t *cont_pkt;
2138
2139                 /* Allocate additional continuation packets? */
2140                 if (avail_dsds == 0) {
2141                         /*
2142                          * Five DSDs are available in the Cont.
2143                          * Type 1 IOCB.
2144                          */
2145                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2146                             vha->hw->req_q_map[0]);
2147                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2148                         avail_dsds = 5;
2149                         cont_iocb_prsnt = 1;
2150                         entry_count++;
2151                 }
2152
2153                 sle_dma = sg_dma_address(sg);
2154                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2155                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2156                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2157                 loop_iteration++;
2158                 avail_dsds--;
2159         }
2160         ct_iocb->entry_count = entry_count;
2161 }
2162
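/**
 * qla24xx_ct_iocb() - Build a CT pass-through IOCB from a BSG job for
 * FWI2-capable ISPs.
 * @sp: CT SRB carrying the bsg_job
 * @ct_iocb: CT IOCB to fill in
 *
 * As in qla2x00_ct_iocb(), extra reply DSDs spill into Continuation
 * Type 1 IOCBs.
 */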
2163 static void
2164 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2165 {
2166         uint16_t        avail_dsds;
2167         uint32_t        *cur_dsd;
2168         struct scatterlist *sg;
2169         int index;
2170         uint16_t tot_dsds;
2171         scsi_qla_host_t *vha = sp->fcport->vha;
2172         struct qla_hw_data *ha = vha->hw;
2173         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2174         int loop_iteration = 0;
2175         int cont_iocb_prsnt = 0;
2176         int entry_count = 1;
2177
2178         ct_iocb->entry_type = CT_IOCB_TYPE;
2179         ct_iocb->entry_status = 0;
2180         ct_iocb->sys_define = 0;
2181         ct_iocb->handle = sp->handle;
2182
2183         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2184         ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2185         ct_iocb->comp_status = __constant_cpu_to_le16(0);
2186
2187         ct_iocb->cmd_dsd_count =
2188             cpu_to_le16(bsg_job->request_payload.sg_cnt);
2189         ct_iocb->timeout = 0;
2190         ct_iocb->rsp_dsd_count =
2191             cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2192         ct_iocb->rsp_byte_count =
2193             cpu_to_le32(bsg_job->reply_payload.payload_len);
2194         ct_iocb->cmd_byte_count =
2195             cpu_to_le32(bsg_job->request_payload.payload_len);
2196         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2197             (bsg_job->request_payload.sg_list)));
2198         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2199            (bsg_job->request_payload.sg_list)));
2200         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2201             (bsg_job->request_payload.sg_list));
2202
2203         avail_dsds = 1;
2204         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2205         index = 0;
2206         tot_dsds = bsg_job->reply_payload.sg_cnt;
2207
2208         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2209                 dma_addr_t       sle_dma;
2210                 cont_a64_entry_t *cont_pkt;
2211
2212                 /* Allocate additional continuation packets? */
2213                 if (avail_dsds == 0) {
2214                         /*
2215                          * Five DSDs are available in the Cont.
2216                          * Type 1 IOCB.
2217                          */
2218                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2219                             ha->req_q_map[0]);
2220                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2221                         avail_dsds = 5;
2222                         cont_iocb_prsnt = 1;
2223                         entry_count++;
2224                 }
2225
2226                 sle_dma = sg_dma_address(sg);
2227                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2228                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2229                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2230                 loop_iteration++;
2231                 avail_dsds--;
2232         }
2233         ct_iocb->entry_count = entry_count;
2234 }
2235
2236 /**
2237  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2238  * @sp: command to send to the ISP
2239  *
2240  * Returns non-zero if a failure occurred, else zero.
2241  */
2242 int
2243 qla82xx_start_scsi(srb_t *sp)
2244 {
2245         int             ret, nseg;
2246         unsigned long   flags;
2247         struct scsi_cmnd *cmd;
2248         uint32_t        *clr_ptr;
2249         uint32_t        index;
2250         uint32_t        handle;
2251         uint16_t        cnt;
2252         uint16_t        req_cnt;
2253         uint16_t        tot_dsds;
2254         struct device_reg_82xx __iomem *reg;
2255         uint32_t dbval;
2256         uint32_t *fcp_dl;
2257         uint8_t additional_cdb_len;
2258         struct ct6_dsd *ctx;
2259         struct scsi_qla_host *vha = sp->fcport->vha;
2260         struct qla_hw_data *ha = vha->hw;
2261         struct req_que *req = NULL;
2262         struct rsp_que *rsp = NULL;
2263         char tag[2];
2264
2265         /* Setup device pointers. */
2266         ret = 0;
2267         reg = &ha->iobase->isp82;
2268         cmd = GET_CMD_SP(sp);
2269         req = vha->req;
2270         rsp = ha->rsp_q_map[0];
2271
2272         /* So we know we haven't pci_map'ed anything yet */
2273         tot_dsds = 0;
2274
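        /*
         * Doorbell value: 0x04 plus the port number at bit 5; the request
         * queue id (bits 8+) and the new ring index (bits 16+) are OR'd in
         * below before the doorbell is rung.
         */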
2275         dbval = 0x04 | (ha->portnum << 5);
2276
2277         /* Send marker if required */
2278         if (vha->marker_needed != 0) {
2279                 if (qla2x00_marker(vha, req,
2280                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2281                         ql_log(ql_log_warn, vha, 0x300c,
2282                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2283                         return QLA_FUNCTION_FAILED;
2284                 }
2285                 vha->marker_needed = 0;
2286         }
2287
2288         /* Acquire ring specific lock */
2289         spin_lock_irqsave(&ha->hardware_lock, flags);
2290
2291         /* Check for room in outstanding command list. */
2292         handle = req->current_outstanding_cmd;
2293         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2294                 handle++;
2295                 if (handle == MAX_OUTSTANDING_COMMANDS)
2296                         handle = 1;
2297                 if (!req->outstanding_cmds[handle])
2298                         break;
2299         }
2300         if (index == MAX_OUTSTANDING_COMMANDS)
2301                 goto queuing_error;
2302
2303         /* Map the sg table so we have an accurate count of sg entries needed */
2304         if (scsi_sg_count(cmd)) {
2305                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2306                     scsi_sg_count(cmd), cmd->sc_data_direction);
2307                 if (unlikely(!nseg))
2308                         goto queuing_error;
2309         } else
2310                 nseg = 0;
2311
2312         tot_dsds = nseg;
2313
2314         if (tot_dsds > ql2xshiftctondsd) {
2315                 struct cmd_type_6 *cmd_pkt;
2316                 uint16_t more_dsd_lists = 0;
2317                 struct dsd_dma *dsd_ptr;
2318                 uint16_t i;
2319
2320                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2321                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2322                         ql_dbg(ql_dbg_io, vha, 0x300d,
2323                             "Num of DSD list %d is more than %d for cmd=%p.\n",
2324                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2325                             cmd);
2326                         goto queuing_error;
2327                 }
2328
2329                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2330                         goto sufficient_dsds;
2331                 else
2332                         more_dsd_lists -= ha->gbl_dsd_avail;
2333
2334                 for (i = 0; i < more_dsd_lists; i++) {
2335                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2336                         if (!dsd_ptr) {
2337                                 ql_log(ql_log_fatal, vha, 0x300e,
2338                                     "Failed to allocate memory for dsd_dma "
2339                                     "for cmd=%p.\n", cmd);
2340                                 goto queuing_error;
2341                         }
2342
2343                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2344                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2345                         if (!dsd_ptr->dsd_addr) {
2346                                 kfree(dsd_ptr);
2347                                 ql_log(ql_log_fatal, vha, 0x300f,
2348                                     "Failed to allocate memory for dsd_addr "
2349                                     "for cmd=%p.\n", cmd);
2350                                 goto queuing_error;
2351                         }
2352                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2353                         ha->gbl_dsd_avail++;
2354                 }
2355
2356 sufficient_dsds:
2357                 req_cnt = 1;
2358
2359                 if (req->cnt < (req_cnt + 2)) {
2360                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2361                                 &reg->req_q_out[0]);
2362                         if (req->ring_index < cnt)
2363                                 req->cnt = cnt - req->ring_index;
2364                         else
2365                                 req->cnt = req->length -
2366                                         (req->ring_index - cnt);
2367                         if (req->cnt < (req_cnt + 2))
2368                                 goto queuing_error;
2369                 }
2370
2371                 ctx = sp->u.scmd.ctx =
2372                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2373                 if (!ctx) {
2374                         ql_log(ql_log_fatal, vha, 0x3010,
2375                             "Failed to allocate ctx for cmd=%p.\n", cmd);
2376                         goto queuing_error;
2377                 }
2378
2379                 memset(ctx, 0, sizeof(struct ct6_dsd));
2380                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2381                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2382                 if (!ctx->fcp_cmnd) {
2383                         ql_log(ql_log_fatal, vha, 0x3011,
2384                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2385                         goto queuing_error;
2386                 }
2387
2388                 /* Initialize the DSD list and dma handle */
2389                 INIT_LIST_HEAD(&ctx->dsd_list);
2390                 ctx->dsd_use_cnt = 0;
2391
2392                 if (cmd->cmd_len > 16) {
2393                         additional_cdb_len = cmd->cmd_len - 16;
2394                         if ((cmd->cmd_len % 4) != 0) {
2395                                 /* SCSI command bigger than 16 bytes must be
2396                                  * multiple of 4
2397                                  */
2398                                 ql_log(ql_log_warn, vha, 0x3012,
2399                                     "scsi cmd len %d not multiple of 4 "
2400                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
2401                                 goto queuing_error_fcp_cmnd;
2402                         }
2403                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2404                 } else {
2405                         additional_cdb_len = 0;
2406                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2407                 }
2408
2409                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2410                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2411
2412                 /* Zero out remaining portion of packet. */
2413                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2414                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2415                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2416                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2417
2418                 /* Set NPORT-ID and LUN number*/
2419                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2420                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2421                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2422                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2423                 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2424
2425                 /* Build IOCB segments */
2426                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2427                         goto queuing_error_fcp_cmnd;
2428
2429                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2430                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2431
2432                 /* build FCP_CMND IU */
2433                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2434                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2435                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2436
2437                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2438                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2439                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2440                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2441
2442                 /*
2443                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2444                  */
2445                 if (scsi_populate_tag_msg(cmd, tag)) {
2446                         switch (tag[0]) {
2447                         case HEAD_OF_QUEUE_TAG:
2448                                 ctx->fcp_cmnd->task_attribute =
2449                                     TSK_HEAD_OF_QUEUE;
2450                                 break;
2451                         case ORDERED_QUEUE_TAG:
2452                                 ctx->fcp_cmnd->task_attribute =
2453                                     TSK_ORDERED;
2454                                 break;
2455                         }
2456                 }
2457
2458                 /* Populate the FCP_PRIO. */
2459                 if (ha->flags.fcp_prio_enabled)
2460                         ctx->fcp_cmnd->task_attribute |=
2461                             sp->fcport->fcp_prio << 3;
2462
2463                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2464
2465                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2466                     additional_cdb_len);
2467                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2468
2469                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2470                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2471                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2472                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2473                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2474
2475                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2476                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2477                 /* Set total data segment count. */
2478                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2479                 /* Specify response queue number where
2480                  * completion should happen
2481                  */
2482                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2483         } else {
2484                 struct cmd_type_7 *cmd_pkt;
2485                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2486                 if (req->cnt < (req_cnt + 2)) {
2487                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2488                             &reg->req_q_out[0]);
2489                         if (req->ring_index < cnt)
2490                                 req->cnt = cnt - req->ring_index;
2491                         else
2492                                 req->cnt = req->length -
2493                                         (req->ring_index - cnt);
2494                 }
2495                 if (req->cnt < (req_cnt + 2))
2496                         goto queuing_error;
2497
2498                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2499                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2500
2501                 /* Zero out remaining portion of packet. */
2502                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2503                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2504                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2505                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2506
2507                 /* Set NPORT-ID and LUN number*/
2508                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2509                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2510                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2511                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2512                 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2513
2514                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2515                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2516                     sizeof(cmd_pkt->lun));
2517
2518                 /*
2519                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2520                  */
2521                 if (scsi_populate_tag_msg(cmd, tag)) {
2522                         switch (tag[0]) {
2523                         case HEAD_OF_QUEUE_TAG:
2524                                 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2525                                 break;
2526                         case ORDERED_QUEUE_TAG:
2527                                 cmd_pkt->task = TSK_ORDERED;
2528                                 break;
2529                         }
2530                 }
2531
2532                 /* Populate the FCP_PRIO. */
2533                 if (ha->flags.fcp_prio_enabled)
2534                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2535
2536                 /* Load SCSI command packet. */
2537                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2538                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2539
2540                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2541
2542                 /* Build IOCB segments */
2543                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2544
2545                 /* Set total data segment count. */
2546                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2547                 /* Specify response queue number where
2548                  * completion should happen.
2549                  */
2550                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2551
2552         }
2553         /* Build command packet. */
2554         req->current_outstanding_cmd = handle;
2555         req->outstanding_cmds[handle] = sp;
2556         sp->handle = handle;
2557         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2558         req->cnt -= req_cnt;
2559         wmb();
2560
2561         /* Adjust ring index. */
2562         req->ring_index++;
2563         if (req->ring_index == req->length) {
2564                 req->ring_index = 0;
2565                 req->ring_ptr = req->ring;
2566         } else
2567                 req->ring_ptr++;
2568
2569         sp->flags |= SRB_DMA_VALID;
2570
2571         /* Set chip new ring index. */
2572         /* write, read and verify logic */
2573         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2574         if (ql2xdbwr)
2575                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2576         else {
2577                 WRT_REG_DWORD(
2578                         (unsigned long __iomem *)ha->nxdb_wr_ptr,
2579                         dbval);
2580                 wmb();
2581                 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2582                         WRT_REG_DWORD(
2583                                 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2584                                 dbval);
2585                         wmb();
2586                 }
2587         }
2588
2589         /* Manage unprocessed RIO/ZIO commands in response queue. */
2590         if (vha->flags.process_response_queue &&
2591             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2592                 qla24xx_process_response_queue(vha, rsp);
2593
2594         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2595         return QLA_SUCCESS;
2596
2597 queuing_error_fcp_cmnd:
2598         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2599 queuing_error:
2600         if (tot_dsds)
2601                 scsi_dma_unmap(cmd);
2602
2603         if (sp->u.scmd.ctx) {
2604                 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2605                 sp->u.scmd.ctx = NULL;
2606         }
2607         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2608
2609         return QLA_FUNCTION_FAILED;
2610 }
2611
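/**
 * qla2x00_start_sp() - Dispatch a control SRB (login/logout/ELS/CT/ADISC/TM)
 * to the ISP.
 * @sp: SRB to send
 *
 * Allocates an IOCB under the hardware lock, fills it in according to
 * sp->type and the ISP generation, and starts the request queue.
 *
 * Returns QLA_SUCCESS on success, else QLA_FUNCTION_FAILED.
 */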
2612 int
2613 qla2x00_start_sp(srb_t *sp)
2614 {
2615         int rval;
2616         struct qla_hw_data *ha = sp->fcport->vha->hw;
2617         void *pkt;
2618         unsigned long flags;
2619
2620         rval = QLA_FUNCTION_FAILED;
2621         spin_lock_irqsave(&ha->hardware_lock, flags);
2622         pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2623         if (!pkt) {
2624                 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2625                     "qla2x00_alloc_iocbs failed.\n");
2626                 goto done;
2627         }
2628
2629         rval = QLA_SUCCESS;
2630         switch (sp->type) {
2631         case SRB_LOGIN_CMD:
2632                 IS_FWI2_CAPABLE(ha) ?
2633                     qla24xx_login_iocb(sp, pkt) :
2634                     qla2x00_login_iocb(sp, pkt);
2635                 break;
2636         case SRB_LOGOUT_CMD:
2637                 IS_FWI2_CAPABLE(ha) ?
2638                     qla24xx_logout_iocb(sp, pkt) :
2639                     qla2x00_logout_iocb(sp, pkt);
2640                 break;
2641         case SRB_ELS_CMD_RPT:
2642         case SRB_ELS_CMD_HST:
2643                 qla24xx_els_iocb(sp, pkt);
2644                 break;
2645         case SRB_CT_CMD:
2646                 IS_FWI2_CAPABLE(ha) ?
2647                     qla24xx_ct_iocb(sp, pkt) :
2648                     qla2x00_ct_iocb(sp, pkt);
2649                 break;
2650         case SRB_ADISC_CMD:
2651                 IS_FWI2_CAPABLE(ha) ?
2652                     qla24xx_adisc_iocb(sp, pkt) :
2653                     qla2x00_adisc_iocb(sp, pkt);
2654                 break;
2655         case SRB_TM_CMD:
2656                 qla24xx_tm_iocb(sp, pkt);
2657                 break;
2658         default:
2659                 break;
2660         }
2661
2662         wmb();
2663         qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2664 done:
2665         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2666         return rval;
2667 }