/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
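
/*
 * Worked example for qla2x00_calc_iocbs_32(): the command IOCB holds 3
 * DSDs and each Continuation Type 0 IOCB holds 7 more, so dsds = 17
 * needs 1 + (14 / 7) = 3 IOCB entries.
 */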

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
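
/*
 * Worked example for qla2x00_calc_iocbs_64(): 2 DSDs fit in the command
 * IOCB and 5 in each Continuation Type 1 IOCB, so dsds = 12 needs
 * 1 + (10 / 5) = 3 IOCB entries.
 */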

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
	    CONTINUE_A64_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
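
/*
 * For illustration: a WRITE_PASS request on a host advertising
 * SHOST_DIX_GUARD_IP yields *fw_prot_opts = PO_MODE_DIF_TCP_CKSUM, and
 * the return value is the number of protection scatter/gather entries.
 */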

/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd32 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
	cur_dsd = cmd_pkt->dsd32;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd32(&cur_dsd, sg);
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
	cur_dsd = cmd_pkt->dsd64;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
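		/*
		 * Example: with req->length = 2048, ring_index = 2000 and
		 * an out pointer of cnt = 100, the ring has wrapped, so
		 * req->cnt = 2048 - (2000 - 100) = 148 free entries.
		 */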
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct req_que *req = qpair->req;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return (ret);
}
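
/*
 * For illustration: callers in this file request a global synchronization
 * marker with qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
 * loop ID and LUN are ignored for the MK_SYNC_ALL modifier.
 */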

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
	struct dsd64 *cur_dsd = NULL, *next_dsd;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cmd_pkt->fcp_dsd.address);
			cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
		} else {
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd++;
		}
		cur_dsd = next_dsd;
		while (avail_dsds) {
			append_dsd64(&cur_dsd, cur_seg);
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
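
/*
 * Worked example for qla24xx_calc_dsd_lists(): each list holds
 * QLA_DSDS_PER_IOCB descriptors, so dsds = 2 * QLA_DSDS_PER_IOCB + 1
 * yields 2 full lists plus 1 for the remainder, i.e. 3 lists.
 */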

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->dsd;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
		    pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] = 0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
    uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
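
/*
 * Worked example for qla24xx_get_one_block_sg(): with blk_sz = 512 and
 * two 300-byte SG entries, successive calls emit slices of 300 bytes
 * (partial = 1), then 212 bytes completing the block (partial = 0),
 * then the remaining 88 bytes (partial = 1 again).
 */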

int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
    struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	struct dsd64 *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t prot_int;	/* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg = tc->sg;
		sg_prot = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd = next_dsd;
		}
		put_unaligned_le64(sle_dma, &cur_dsd->address);
		cur_dsd->length = cpu_to_le32(sle_dma_len);
		cur_dsd++;
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
    struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	struct dsd64 *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
			    QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd = next_dsd;
		}
		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
    struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
	struct scatterlist *sg, *sgl;
	struct crc_context *difctx = NULL;
	struct scsi_qla_host *vha;
	uint dsd_list_len;
	uint avail_dsds = 0;
	uint used_dsds = tot_dsds;
	bool dif_local_dma_alloc = false;
	bool direction_to_device = false;
	int i;

	if (sp) {
		struct scsi_cmnd *cmd = GET_CMD_SP(sp);

		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
		difctx = sp->u.scmd.ctx;
		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
		    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
		    __func__, cmd, difctx, sp);
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
		difctx = tc->ctx;
		direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
	    "%s: enter (write=%u)\n", __func__, direction_to_device);

	/* if initiator doing write or target doing read */
	if (direction_to_device) {
		for_each_sg(sgl, sg, tot_dsds, i) {
			u64 sle_phys = sg_phys(sg);

			/* If SGE addr + len flips bits in upper 32-bits */
			if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
				    "%s: page boundary crossing (phys=%llx len=%x)\n",
				    __func__, sle_phys, sg->length);

				if (difctx) {
					ha->dif_bundle_crossed_pages++;
					dif_local_dma_alloc = true;
				} else {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe022,
					    "%s: difctx pointer is NULL\n",
					    __func__);
				}
				break;
			}
		}
		ha->dif_bundle_writes++;
	} else {
		ha->dif_bundle_reads++;
	}

	if (ql2xdifbundlinginternalbuffers)
		dif_local_dma_alloc = direction_to_device;

	if (dif_local_dma_alloc) {
		u32 track_difbundl_buf = 0;
		u32 ldma_sg_len = 0;
		u8 ldma_needed = 1;

		difctx->no_dif_bundl = 0;
		difctx->dif_bundl_len = 0;

		/* Track DSD buffers */
		INIT_LIST_HEAD(&difctx->ldif_dsd_list);
		/* Track local DMA buffers */
		INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

		for_each_sg(sgl, sg, tot_dsds, i) {
			u32 sglen = sg_dma_len(sg);

			ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
			    "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
			    __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
			    difctx->dif_bundl_len, ldma_needed);

			while (sglen) {
				u32 xfrlen = 0;

				if (ldma_needed) {
					/*
					 * Allocate list item to store
					 * the DMA buffers
					 */
					dsd_ptr = kzalloc(sizeof(*dsd_ptr),
					    GFP_ATOMIC);
					if (!dsd_ptr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc dsd_ptr\n",
						    __func__);
						return 1;
					}
					ha->dif_bundle_kallocs++;

					/* allocate dma buffer */
					dsd_ptr->dsd_addr = dma_pool_alloc
						(ha->dif_bundl_pool, GFP_ATOMIC,
						 &dsd_ptr->dsd_list_dma);
					if (!dsd_ptr->dsd_addr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc ->dsd_ptr\n",
						    __func__);
						/*
						 * need to cleanup only this
						 * dsd_ptr rest will be done
						 * by sp_free_dma()
						 */
						kfree(dsd_ptr);
						ha->dif_bundle_kallocs--;
						return 1;
					}
					ha->dif_bundle_dma_allocs++;
					ldma_needed = 0;
					difctx->no_dif_bundl++;
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dma_hndl_list);
				}

				/* xfrlen is min of dma pool size and sglen */
				xfrlen = (sglen >
				    (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
				    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
				    sglen;

				/* replace with local allocated dma buffer */
				sg_pcopy_to_buffer(sgl, sg_nents(sgl),
				    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
				    difctx->dif_bundl_len);
				difctx->dif_bundl_len += xfrlen;
				sglen -= xfrlen;
				ldma_sg_len += xfrlen;
				if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
				    sg_is_last(sg)) {
					ldma_needed = 1;
					ldma_sg_len = 0;
				}
			}
		}

		track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
		    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
		    difctx->dif_bundl_len, difctx->no_dif_bundl,
		    track_difbundl_buf);

		if (sp)
			sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
		else
			tc->prot_flags = DIF_BUNDL_DMA_VALID;

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dma_hndl_list, list) {
			u32 sglen = (difctx->dif_bundl_len >
			    DIF_BUNDLING_DMA_POOL_SIZE) ?
			    DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

			BUG_ON(track_difbundl_buf == 0);

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
				    0xe024,
				    "%s: adding continuation iocb's\n",
				    __func__);
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc dsd_ptr\n",
					    __func__);
					return 1;
				}
				ha->dif_bundle_kallocs++;

				difctx->no_ldif_dsd++;
				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc ->dsd_addr\n",
					    __func__);
					/*
					 * need to cleanup only this dsd_ptr
					 * rest will be done by sp_free_dma()
					 */
					kfree(dsd_ptr);
					ha->dif_bundle_kallocs--;
					return 1;
				}
				ha->dif_bundle_dma_allocs++;

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				put_unaligned_le64(dsd_ptr->dsd_list_dma,
				    &cur_dsd->address);
				cur_dsd->length = cpu_to_le32(dsd_list_len);
				cur_dsd = dsd_ptr->dsd_addr;
			}
			put_unaligned_le64(dif_dsd->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(sglen);
			cur_dsd++;
			avail_dsds--;
			difctx->dif_bundl_len -= sglen;
			track_difbundl_buf--;
		}

		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
		    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
		    difctx->no_ldif_dsd, difctx->no_dif_bundl);
	} else {
		for_each_sg(sgl, sg, tot_dsds, i) {
			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe027,
					    "%s: failed alloc dsd_dma...\n",
					    __func__);
					return 1;
				}

				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					/* need to cleanup only this dsd_ptr */
					/* rest will be done by sp_free_dma() */
					kfree(dsd_ptr);
					return 1;
				}

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				put_unaligned_le64(dsd_ptr->dsd_list_dma,
				    &cur_dsd->address);
				cur_dsd->length = cpu_to_le32(dsd_list_len);
				cur_dsd = dsd_ptr->dsd_addr;
			}
			append_dsd64(&cur_dsd, sg);
			avail_dsds--;
		}
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments with protection information
 * @fw_prot_opts: Protection options to be passed to firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	struct dsd64 *cur_dsd;
	uint32_t *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
	    &cmd_pkt->fcp_cmnd_dseg_address);
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}
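
	/*
	 * Example: 64 KiB of data with 512-byte sectors carries
	 * (65536 / 512) * 8 = 1024 DIF bytes, so a WRITE_PASS request
	 * transfers total_bytes = 65536 + 1024 on the wire.
	 */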

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
1886 if (req->ring_index == req->length) {
1887 req->ring_index = 0;
1888 req->ring_ptr = req->ring;
1889 } else
1890 req->ring_ptr++;
1891
1892 /* Set chip new ring index. */
1893 WRT_REG_DWORD(req->req_q_in, req->ring_index);
bad75002
AE
1894
1895 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1896
1897 return QLA_SUCCESS;
1898
1899queuing_error:
1900 if (status & QDSS_GOT_Q_SPACE) {
1901 req->outstanding_cmds[handle] = NULL;
1902 req->cnt += req_cnt;
1903 }
1904 /* Cleanup will be performed by the caller (queuecommand) */
1905
1906 spin_unlock_irqrestore(&ha->hardware_lock, flags);
bad75002
AE
1907 return QLA_FUNCTION_FAILED;
1908}
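/*
 * Standalone sketch of the outstanding-command handle search above:
 * starting just after the last handle issued, scan the array circularly
 * (skipping slot 0, which is never used) for a free slot, and report
 * failure when the array is full.  Simplified types, illustration only.
 */
#include <stdio.h>

#define NUM_CMDS 8

static unsigned int find_free_handle(void *cmds[NUM_CMDS], unsigned int current)
{
	unsigned int handle = current;

	for (unsigned int index = 1; index < NUM_CMDS; index++) {
		handle++;
		if (handle == NUM_CMDS)
			handle = 1;	/* handle 0 is reserved */
		if (!cmds[handle])
			return handle;
	}
	return 0;	/* no room: caller takes the queuing_error path */
}

int main(void)
{
	void *cmds[NUM_CMDS] = { 0 };

	cmds[1] = cmds[2] = (void *)1;	/* slots 1 and 2 busy */
	printf("next free handle: %u\n", find_free_handle(cmds, 1)); /* 3 */
	return 0;
}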
1909
d7459527
MH
1910/**
1911 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1912 * @sp: command to send to the ISP
1913 *
1914 * Returns non-zero if a failure occurred, else zero.
1915 */
1916static int
1917qla2xxx_start_scsi_mq(srb_t *sp)
68ca949c 1918{
d7459527
MH
1919 int nseg;
1920 unsigned long flags;
1921 uint32_t *clr_ptr;
1922 uint32_t index;
1923 uint32_t handle;
1924 struct cmd_type_7 *cmd_pkt;
1925 uint16_t cnt;
1926 uint16_t req_cnt;
1927 uint16_t tot_dsds;
1928 struct req_que *req = NULL;
9ba56b95 1929 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
d7459527
MH
1930 struct scsi_qla_host *vha = sp->fcport->vha;
1931 struct qla_hw_data *ha = vha->hw;
1932 struct qla_qpair *qpair = sp->qpair;
1933
578079fa
JT
1934 /* Acquire qpair specific lock */
1935 spin_lock_irqsave(&qpair->qp_lock, flags);
1936
d7459527 1937 /* Setup qpair pointers */
d7459527
MH
1938 req = qpair->req;
1939
1940 /* So we know we haven't pci_map'ed anything yet */
1941 tot_dsds = 0;
1942
1943 /* Send marker if required */
1944 if (vha->marker_needed != 0) {
9eb9c6dc 1945 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1946 QLA_SUCCESS) {
1947 spin_unlock_irqrestore(&qpair->qp_lock, flags);
d7459527 1948 return QLA_FUNCTION_FAILED;
578079fa 1949 }
d7459527
MH
1950 vha->marker_needed = 0;
1951 }
1952
d7459527
MH
1953 /* Check for room in outstanding command list. */
1954 handle = req->current_outstanding_cmd;
1955 for (index = 1; index < req->num_outstanding_cmds; index++) {
1956 handle++;
1957 if (handle == req->num_outstanding_cmds)
1958 handle = 1;
1959 if (!req->outstanding_cmds[handle])
1960 break;
1961 }
1962 if (index == req->num_outstanding_cmds)
1963 goto queuing_error;
1964
1965 /* Map the sg table so we have an accurate count of sg entries needed */
1966 if (scsi_sg_count(cmd)) {
1967 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1968 scsi_sg_count(cmd), cmd->sc_data_direction);
1969 if (unlikely(!nseg))
1970 goto queuing_error;
1971 } else
1972 nseg = 0;
1973
1974 tot_dsds = nseg;
1975 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1976 if (req->cnt < (req_cnt + 2)) {
1977 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1978 RD_REG_DWORD_RELAXED(req->req_q_out);
1979 if (req->ring_index < cnt)
1980 req->cnt = cnt - req->ring_index;
1981 else
1982 req->cnt = req->length -
1983 (req->ring_index - cnt);
1984 if (req->cnt < (req_cnt + 2))
1985 goto queuing_error;
1986 }
1987
1988 /* Build command packet. */
1989 req->current_outstanding_cmd = handle;
1990 req->outstanding_cmds[handle] = sp;
1991 sp->handle = handle;
1992 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1993 req->cnt -= req_cnt;
1994
1995 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1996 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1997
1998 /* Zero out remaining portion of packet. */
1999 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2000 clr_ptr = (uint32_t *)cmd_pkt + 2;
2001 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2002 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2003
2004 /* Set NPORT-ID and LUN number*/
2005 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2006 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2007 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2008 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2009 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2010
2011 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2012 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2013
2014 cmd_pkt->task = TSK_SIMPLE;
2015
2016 /* Load SCSI command packet. */
2017 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2018 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2019
2020 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2021
2022 /* Build IOCB segments */
2023 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2024
2025 /* Set total data segment count. */
2026 cmd_pkt->entry_count = (uint8_t)req_cnt;
2027 wmb();
2028 /* Adjust ring index. */
2029 req->ring_index++;
2030 if (req->ring_index == req->length) {
2031 req->ring_index = 0;
2032 req->ring_ptr = req->ring;
2033 } else
2034 req->ring_ptr++;
2035
2036 sp->flags |= SRB_DMA_VALID;
2037
2038 /* Set chip new ring index. */
2039 WRT_REG_DWORD(req->req_q_in, req->ring_index);
2040
d7459527
MH
2041 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2042 return QLA_SUCCESS;
2043
2044queuing_error:
2045 if (tot_dsds)
2046 scsi_dma_unmap(cmd);
2047
2048 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2049
2050 return QLA_FUNCTION_FAILED;
2051}
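/*
 * Sketch of the ring free-space test used above: the cached free count
 * is refreshed from the hardware consumer index only when it looks too
 * small, and a command needs req_cnt entries plus a two-entry safety
 * margin.  Simplified, illustrative names; not the driver's req_que.
 */
#include <stdio.h>

static int ring_has_room(unsigned int *free_cnt, unsigned int ring_index,
			 unsigned int length, unsigned int consumer,
			 unsigned int req_cnt)
{
	if (*free_cnt < req_cnt + 2) {
		if (ring_index < consumer)
			*free_cnt = consumer - ring_index;
		else
			*free_cnt = length - (ring_index - consumer);
	}
	return *free_cnt >= req_cnt + 2;
}

int main(void)
{
	unsigned int free_cnt = 1;	/* stale value forces a refresh */

	/* 128-entry ring, producer at 120, consumer at 8: 16 entries free */
	printf("room: %d\n", ring_has_room(&free_cnt, 120, 128, 8, 4));
	printf("free entries: %u\n", free_cnt);
	return 0;
}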
2052
2053
2054/**
2055 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2056 * @sp: command to send to the ISP
2057 *
2058 * Returns non-zero if a failure occurred, else zero.
2059 */
2060int
2061qla2xxx_dif_start_scsi_mq(srb_t *sp)
2062{
2063 int nseg;
2064 unsigned long flags;
2065 uint32_t *clr_ptr;
2066 uint32_t index;
2067 uint32_t handle;
2068 uint16_t cnt;
2069 uint16_t req_cnt = 0;
2070 uint16_t tot_dsds;
2071 uint16_t tot_prot_dsds;
2072 uint16_t fw_prot_opts = 0;
2073 struct req_que *req = NULL;
2074 struct rsp_que *rsp = NULL;
2075 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2076 struct scsi_qla_host *vha = sp->fcport->vha;
2077 struct qla_hw_data *ha = vha->hw;
2078 struct cmd_type_crc_2 *cmd_pkt;
2079 uint32_t status = 0;
2080 struct qla_qpair *qpair = sp->qpair;
2081
2082#define QDSS_GOT_Q_SPACE BIT_0
2083
2084 /* Check for host side state */
2085 if (!qpair->online) {
2086 cmd->result = DID_NO_CONNECT << 16;
2087 return QLA_INTERFACE_ERROR;
2088 }
2089
2090 if (!qpair->difdix_supported &&
2091 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2092 cmd->result = DID_NO_CONNECT << 16;
2093 return QLA_INTERFACE_ERROR;
2094 }
2095
 2096    /* Only process protection or >16 byte CDBs in this routine */
2097 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2098 if (cmd->cmd_len <= 16)
2099 return qla2xxx_start_scsi_mq(sp);
2100 }
2101
578079fa
JT
2102 spin_lock_irqsave(&qpair->qp_lock, flags);
2103
d7459527
MH
2104 /* Setup qpair pointers */
2105 rsp = qpair->rsp;
2106 req = qpair->req;
2107
2108 /* So we know we haven't pci_map'ed anything yet */
2109 tot_dsds = 0;
2110
2111 /* Send marker if required */
2112 if (vha->marker_needed != 0) {
9eb9c6dc 2113 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2114 QLA_SUCCESS) {
2115 spin_unlock_irqrestore(&qpair->qp_lock, flags);
d7459527 2116 return QLA_FUNCTION_FAILED;
578079fa 2117 }
d7459527
MH
2118 vha->marker_needed = 0;
2119 }
2120
d7459527
MH
2121 /* Check for room in outstanding command list. */
2122 handle = req->current_outstanding_cmd;
2123 for (index = 1; index < req->num_outstanding_cmds; index++) {
2124 handle++;
2125 if (handle == req->num_outstanding_cmds)
2126 handle = 1;
2127 if (!req->outstanding_cmds[handle])
2128 break;
2129 }
2130
2131 if (index == req->num_outstanding_cmds)
2132 goto queuing_error;
2133
2134 /* Compute number of required data segments */
2135 /* Map the sg table so we have an accurate count of sg entries needed */
2136 if (scsi_sg_count(cmd)) {
2137 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2138 scsi_sg_count(cmd), cmd->sc_data_direction);
2139 if (unlikely(!nseg))
2140 goto queuing_error;
2141 else
2142 sp->flags |= SRB_DMA_VALID;
2143
2144 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2145 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2146 struct qla2_sgx sgx;
2147 uint32_t partial;
2148
2149 memset(&sgx, 0, sizeof(struct qla2_sgx));
2150 sgx.tot_bytes = scsi_bufflen(cmd);
2151 sgx.cur_sg = scsi_sglist(cmd);
2152 sgx.sp = sp;
2153
2154 nseg = 0;
2155 while (qla24xx_get_one_block_sg(
2156 cmd->device->sector_size, &sgx, &partial))
2157 nseg++;
2158 }
2159 } else
2160 nseg = 0;
2161
2162 /* number of required data segments */
2163 tot_dsds = nseg;
2164
2165 /* Compute number of required protection segments */
2166 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2167 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2168 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2169 if (unlikely(!nseg))
2170 goto queuing_error;
2171 else
2172 sp->flags |= SRB_CRC_PROT_DMA_VALID;
2173
2174 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2175 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2176 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2177 }
2178 } else {
2179 nseg = 0;
2180 }
2181
2182 req_cnt = 1;
2183 /* Total Data and protection sg segment(s) */
2184 tot_prot_dsds = nseg;
2185 tot_dsds += nseg;
2186 if (req->cnt < (req_cnt + 2)) {
2187 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2188 RD_REG_DWORD_RELAXED(req->req_q_out);
2189 if (req->ring_index < cnt)
2190 req->cnt = cnt - req->ring_index;
2191 else
2192 req->cnt = req->length -
2193 (req->ring_index - cnt);
2194 if (req->cnt < (req_cnt + 2))
2195 goto queuing_error;
2196 }
2197
2198 status |= QDSS_GOT_Q_SPACE;
2199
2200 /* Build header part of command packet (excluding the OPCODE). */
2201 req->current_outstanding_cmd = handle;
2202 req->outstanding_cmds[handle] = sp;
2203 sp->handle = handle;
2204 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2205 req->cnt -= req_cnt;
2206
2207 /* Fill-in common area */
2208 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2209 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2210
2211 clr_ptr = (uint32_t *)cmd_pkt + 2;
2212 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2213
2214 /* Set NPORT-ID and LUN number*/
2215 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2216 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2217 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2218 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
68ca949c 2219
d7459527
MH
2220 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2221 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2222
2223 /* Total Data and protection segment(s) */
2224 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2225
2226 /* Build IOCB segments and adjust for data protection segments */
2227 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2228 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2229 QLA_SUCCESS)
2230 goto queuing_error;
2231
2232 cmd_pkt->entry_count = (uint8_t)req_cnt;
2233 cmd_pkt->timeout = cpu_to_le16(0);
2234 wmb();
2235
2236 /* Adjust ring index. */
2237 req->ring_index++;
2238 if (req->ring_index == req->length) {
2239 req->ring_index = 0;
2240 req->ring_ptr = req->ring;
2241 } else
2242 req->ring_ptr++;
2243
2244 /* Set chip new ring index. */
2245 WRT_REG_DWORD(req->req_q_in, req->ring_index);
2246
2247 /* Manage unprocessed RIO/ZIO commands in response queue. */
2248 if (vha->flags.process_response_queue &&
2249 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2250 qla24xx_process_response_queue(vha, rsp);
2251
2252 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2253
2254 return QLA_SUCCESS;
2255
2256queuing_error:
2257 if (status & QDSS_GOT_Q_SPACE) {
2258 req->outstanding_cmds[handle] = NULL;
2259 req->cnt += req_cnt;
2260 }
2261 /* Cleanup will be performed by the caller (queuecommand) */
2262
2263 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2264 return QLA_FUNCTION_FAILED;
68ca949c 2265}
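/*
 * Sketch of the segment accounting for SCSI_PROT_READ_INSERT /
 * SCSI_PROT_WRITE_STRIP in the DIF paths above: when the HBA inserts or
 * strips the protection tuples itself, the transfer is described one
 * logical block at a time, so the DSD count becomes buffer length /
 * sector size rather than the scatter-gather entry count.  Illustrative
 * arithmetic only, not driver code.
 */
#include <stdio.h>

static unsigned int dif_dsd_count(unsigned int buf_len,
				  unsigned int sector_size,
				  int hba_inserts_or_strips,
				  unsigned int sg_entries)
{
	if (hba_inserts_or_strips)
		return buf_len / sector_size;	/* one DSD per block */
	return sg_entries;			/* normal SG mapping */
}

int main(void)
{
	/* 64 KiB transfer on 512-byte sectors mapped into 3 SG entries */
	printf("pass-through: %u\n", dif_dsd_count(65536, 512, 0, 3)); /* 3 */
	printf("insert/strip: %u\n", dif_dsd_count(65536, 512, 1, 3)); /* 128 */
	return 0;
}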
ac280b67
AV
2266
2267/* Generic Control-SRB manipulation functions. */
b6a029e1
AE
2268
2269/* hardware_lock assumed to be held. */
b6a029e1 2270
d94d10e7 2271void *
82de802a 2272__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
ac280b67 2273{
82de802a 2274 scsi_qla_host_t *vha = qpair->vha;
ac280b67 2275 struct qla_hw_data *ha = vha->hw;
82de802a 2276 struct req_que *req = qpair->req;
118e2ef9 2277 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
ac280b67
AV
2278 uint32_t index, handle;
2279 request_t *pkt;
2280 uint16_t cnt, req_cnt;
2281
2282 pkt = NULL;
2283 req_cnt = 1;
d94d10e7
GM
2284 handle = 0;
2285
5e53be8e
QT
2286 if (sp && (sp->type != SRB_SCSI_CMD)) {
2287 /* Adjust entry-counts as needed. */
9ba56b95 2288 req_cnt = sp->iocbs;
5e53be8e 2289 }
5780790e 2290
ac280b67 2291 /* Check for room on request queue. */
94007037 2292 if (req->cnt < req_cnt + 2) {
1586e07a
QT
2293 if (qpair->use_shadow_reg)
2294 cnt = *req->out_ptr;
ecc89f25
JC
2295 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2296 IS_QLA28XX(ha))
ac280b67 2297 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
7ec0effd 2298 else if (IS_P3P_TYPE(ha))
d94d10e7 2299 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
ac280b67
AV
2300 else if (IS_FWI2_CAPABLE(ha))
2301 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
8ae6d9c7
GM
2302 else if (IS_QLAFX00(ha))
2303 cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
ac280b67
AV
2304 else
2305 cnt = qla2x00_debounce_register(
2306 ISP_REQ_Q_OUT(ha, &reg->isp));
2307
2308 if (req->ring_index < cnt)
2309 req->cnt = cnt - req->ring_index;
2310 else
2311 req->cnt = req->length -
2312 (req->ring_index - cnt);
2313 }
94007037 2314 if (req->cnt < req_cnt + 2)
ac280b67
AV
2315 goto queuing_error;
2316
5e53be8e
QT
2317 if (sp) {
2318 /* Check for room in outstanding command list. */
2319 handle = req->current_outstanding_cmd;
2320 for (index = 1; index < req->num_outstanding_cmds; index++) {
2321 handle++;
2322 if (handle == req->num_outstanding_cmds)
2323 handle = 1;
2324 if (!req->outstanding_cmds[handle])
2325 break;
2326 }
2327 if (index == req->num_outstanding_cmds) {
2328 ql_log(ql_log_warn, vha, 0x700b,
2329 "No room on outstanding cmd array.\n");
2330 goto queuing_error;
2331 }
2332
2333 /* Prep command array. */
2334 req->current_outstanding_cmd = handle;
2335 req->outstanding_cmds[handle] = sp;
2336 sp->handle = handle;
2337 }
2338
ac280b67 2339 /* Prep packet */
ac280b67 2340 req->cnt -= req_cnt;
ac280b67
AV
2341 pkt = req->ring_ptr;
2342 memset(pkt, 0, REQUEST_ENTRY_SIZE);
8ae6d9c7 2343 if (IS_QLAFX00(ha)) {
1f8deefe
SK
2344 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2345 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
8ae6d9c7
GM
2346 } else {
2347 pkt->entry_count = req_cnt;
2348 pkt->handle = handle;
2349 }
ac280b67 2350
5e53be8e
QT
2351 return pkt;
2352
ac280b67 2353queuing_error:
60a9eadb 2354 qpair->tgt_counters.num_alloc_iocb_failed++;
ac280b67
AV
2355 return pkt;
2356}
2357
82de802a
QT
2358void *
2359qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2360{
2361 scsi_qla_host_t *vha = qpair->vha;
2362
2363 if (qla2x00_reset_active(vha))
2364 return NULL;
2365
2366 return __qla2x00_alloc_iocbs(qpair, sp);
2367}
2368
2369void *
2370qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2371{
2372 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2373}
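/*
 * Sketch of the composite handle written into pkt->handle above.  The
 * layout shown (request-queue id in the upper 16 bits, per-queue handle
 * in the lower 16) is an assumption based on how MAKE_HANDLE() is used
 * in this file; the completion path can then recover both halves from
 * the single 32-bit firmware handle.  Names are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t make_handle(uint16_t que_id, uint16_t handle)
{
	return ((uint32_t)que_id << 16) | handle;
}

int main(void)
{
	uint32_t h = make_handle(2, 0x0031);

	printf("packed: 0x%08x\n", (unsigned)h);	    /* 0x00020031 */
	printf("queue:  %u\n", (unsigned)(h >> 16));	    /* 2 */
	printf("handle: 0x%x\n", (unsigned)(h & 0xffff));   /* 0x31 */
	return 0;
}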
2374
a5d42f4c
DG
2375static void
2376qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2377{
2378 struct srb_iocb *lio = &sp->u.iocb_cmd;
2379
2380 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2381 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
03aaa89f 2382 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
a5d42f4c 2383 logio->control_flags |= LCF_NVME_PRLI;
03aaa89f
DT
2384 if (sp->vha->flags.nvme_first_burst)
2385 logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST;
2386 }
a5d42f4c
DG
2387
2388 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2389 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2390 logio->port_id[1] = sp->fcport->d_id.b.area;
2391 logio->port_id[2] = sp->fcport->d_id.b.domain;
2392 logio->vp_index = sp->vha->vp_idx;
2393}
2394
ac280b67
AV
2395static void
2396qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2397{
9ba56b95 2398 struct srb_iocb *lio = &sp->u.iocb_cmd;
ac280b67
AV
2399
2400 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
48acad09
QT
2401 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2402 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2403 } else {
2404 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2405 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2406 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2407 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2408 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2409 }
ac280b67
AV
2410 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2411 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2412 logio->port_id[1] = sp->fcport->d_id.b.area;
2413 logio->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 2414 logio->vp_index = sp->vha->vp_idx;
ac280b67
AV
2415}
2416
2417static void
2418qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2419{
25ff6af1 2420 struct qla_hw_data *ha = sp->vha->hw;
9ba56b95 2421 struct srb_iocb *lio = &sp->u.iocb_cmd;
ac280b67
AV
2422 uint16_t opts;
2423
b963752f 2424 mbx->entry_type = MBX_IOCB_TYPE;
ac280b67
AV
2425 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2426 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
4916392b
MI
2427 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2428 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
ac280b67
AV
2429 if (HAS_EXTENDED_IDS(ha)) {
2430 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2431 mbx->mb10 = cpu_to_le16(opts);
2432 } else {
2433 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2434 }
2435 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2436 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2437 sp->fcport->d_id.b.al_pa);
25ff6af1 2438 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
ac280b67
AV
2439}
2440
2441static void
2442qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2443{
2444 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2445 logio->control_flags =
2446 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
0e324e94 2447 if (!sp->fcport->keep_nport_handle)
a6ca8878 2448 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
ac280b67
AV
2449 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2450 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2451 logio->port_id[1] = sp->fcport->d_id.b.area;
2452 logio->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 2453 logio->vp_index = sp->vha->vp_idx;
ac280b67
AV
2454}
2455
2456static void
2457qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2458{
25ff6af1 2459 struct qla_hw_data *ha = sp->vha->hw;
ac280b67 2460
b963752f 2461 mbx->entry_type = MBX_IOCB_TYPE;
ac280b67
AV
2462 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2463 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2464 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
58e2753c 2465 cpu_to_le16(sp->fcport->loop_id) :
2466 cpu_to_le16(sp->fcport->loop_id << 8);
2467 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2468 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2469 sp->fcport->d_id.b.al_pa);
25ff6af1 2470 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
ac280b67
AV
 2471    /* Implicit: mbx->mb10 = 0. */
2472}
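/*
 * Sketch of the mailbox loop-id encoding above: adapters with extended
 * IDs carry the full 16-bit loop id in mb1 (option bits travel in
 * mb10), while older adapters pack an 8-bit loop id into the high byte
 * of mb1 with the option bits in the low byte.  Illustrative helper,
 * not driver code.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t encode_mb1(int has_extended_ids, uint16_t loop_id,
			   uint16_t opts)
{
	if (has_extended_ids)
		return loop_id;		/* opts go separately in mb10 */
	return (uint16_t)((loop_id << 8) | opts);
}

int main(void)
{
	printf("extended: 0x%04x\n", encode_mb1(1, 0x0081, 0x3)); /* 0x0081 */
	printf("legacy:   0x%04x\n", encode_mb1(0, 0x0081, 0x3)); /* 0x8103 */
	return 0;
}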
2473
5ff1d584
AV
2474static void
2475qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2476{
2477 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2478 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2479 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
25ff6af1 2480 logio->vp_index = sp->vha->vp_idx;
5ff1d584
AV
2481}
2482
2483static void
2484qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2485{
25ff6af1 2486 struct qla_hw_data *ha = sp->vha->hw;
5ff1d584
AV
2487
2488 mbx->entry_type = MBX_IOCB_TYPE;
2489 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2490 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2491 if (HAS_EXTENDED_IDS(ha)) {
2492 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2493 mbx->mb10 = cpu_to_le16(BIT_0);
2494 } else {
2495 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2496 }
2497 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2498 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2499 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2500 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
25ff6af1 2501 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
5ff1d584
AV
2502}
2503
3822263e
MI
2504static void
2505qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2506{
2507 uint32_t flags;
9cb78c16 2508 uint64_t lun;
3822263e
MI
2509 struct fc_port *fcport = sp->fcport;
2510 scsi_qla_host_t *vha = fcport->vha;
2511 struct qla_hw_data *ha = vha->hw;
9ba56b95 2512 struct srb_iocb *iocb = &sp->u.iocb_cmd;
3822263e
MI
2513 struct req_que *req = vha->req;
2514
2515 flags = iocb->u.tmf.flags;
2516 lun = iocb->u.tmf.lun;
2517
2518 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2519 tsk->entry_count = 1;
2520 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2521 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2522 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2523 tsk->control_flags = cpu_to_le32(flags);
2524 tsk->port_id[0] = fcport->d_id.b.al_pa;
2525 tsk->port_id[1] = fcport->d_id.b.area;
2526 tsk->port_id[2] = fcport->d_id.b.domain;
c6d39e23 2527 tsk->vp_index = fcport->vha->vp_idx;
3822263e
MI
2528
2529 if (flags == TCF_LUN_RESET) {
2530 int_to_scsilun(lun, &tsk->lun);
2531 host_to_fcp_swap((uint8_t *)&tsk->lun,
2532 sizeof(tsk->lun));
2533 }
2534}
2535
12975426
BVA
2536void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2537{
2538 timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2539 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2540 sp->free = qla2x00_sp_free;
12975426
BVA
2541 if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2542 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
3a4b6cc7 2543 sp->start_timer = 1;
12975426
BVA
2544}
2545
6eb54715 2546static void
25ff6af1 2547qla2x00_els_dcmd_sp_free(void *data)
6eb54715 2548{
25ff6af1 2549 srb_t *sp = data;
6eb54715
HM
2550 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2551
2552 kfree(sp->fcport);
2553
2554 if (elsio->u.els_logo.els_logo_pyld)
25ff6af1 2555 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2556 elsio->u.els_logo.els_logo_pyld,
2557 elsio->u.els_logo.els_logo_pyld_dma);
2558
2559 del_timer(&elsio->timer);
25ff6af1 2560 qla2x00_rel_sp(sp);
6eb54715
HM
2561}
2562
2563static void
2564qla2x00_els_dcmd_iocb_timeout(void *data)
2565{
25ff6af1 2566 srb_t *sp = data;
6eb54715 2567 fc_port_t *fcport = sp->fcport;
25ff6af1 2568 struct scsi_qla_host *vha = sp->vha;
25ff6af1 2569 struct srb_iocb *lio = &sp->u.iocb_cmd;
6eb54715
HM
2570
2571 ql_dbg(ql_dbg_io, vha, 0x3069,
2572 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2573 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2574 fcport->d_id.b.al_pa);
2575
6eb54715
HM
2576 complete(&lio->u.els_logo.comp);
2577}
2578
2579static void
25ff6af1 2580qla2x00_els_dcmd_sp_done(void *ptr, int res)
6eb54715 2581{
25ff6af1 2582 srb_t *sp = ptr;
6eb54715
HM
2583 fc_port_t *fcport = sp->fcport;
2584 struct srb_iocb *lio = &sp->u.iocb_cmd;
25ff6af1 2585 struct scsi_qla_host *vha = sp->vha;
6eb54715
HM
2586
2587 ql_dbg(ql_dbg_io, vha, 0x3072,
2588 "%s hdl=%x, portid=%02x%02x%02x done\n",
2589 sp->name, sp->handle, fcport->d_id.b.domain,
2590 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2591
2592 complete(&lio->u.els_logo.comp);
2593}
2594
2595int
2596qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2597 port_id_t remote_did)
2598{
2599 srb_t *sp;
2600 fc_port_t *fcport = NULL;
2601 struct srb_iocb *elsio = NULL;
2602 struct qla_hw_data *ha = vha->hw;
2603 struct els_logo_payload logo_pyld;
2604 int rval = QLA_SUCCESS;
2605
2606 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2607 if (!fcport) {
2608 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2609 return -ENOMEM;
2610 }
2611
2612 /* Alloc SRB structure */
2613 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2614 if (!sp) {
2615 kfree(fcport);
2616 ql_log(ql_log_info, vha, 0x70e6,
2617 "SRB allocation failed\n");
2618 return -ENOMEM;
2619 }
2620
2621 elsio = &sp->u.iocb_cmd;
2622 fcport->loop_id = 0xFFFF;
2623 fcport->d_id.b.domain = remote_did.b.domain;
2624 fcport->d_id.b.area = remote_did.b.area;
2625 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2626
2627 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2628 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2629
2630 sp->type = SRB_ELS_DCMD;
2631 sp->name = "ELS_DCMD";
2632 sp->fcport = fcport;
6eb54715 2633 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
e74e7d95 2634 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
8777e431 2635 init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
6eb54715
HM
2636 sp->done = qla2x00_els_dcmd_sp_done;
2637 sp->free = qla2x00_els_dcmd_sp_free;
2638
2639 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2640 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2641 GFP_KERNEL);
2642
2643 if (!elsio->u.els_logo.els_logo_pyld) {
25ff6af1 2644 sp->free(sp);
6eb54715
HM
2645 return QLA_FUNCTION_FAILED;
2646 }
2647
2648 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2649
2650 elsio->u.els_logo.els_cmd = els_opcode;
2651 logo_pyld.opcode = els_opcode;
2652 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2653 logo_pyld.s_id[1] = vha->d_id.b.area;
2654 logo_pyld.s_id[2] = vha->d_id.b.domain;
2655 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2656 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2657
2658 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2659 sizeof(struct els_logo_payload));
2660
2661 rval = qla2x00_start_sp(sp);
2662 if (rval != QLA_SUCCESS) {
25ff6af1 2663 sp->free(sp);
6eb54715
HM
2664 return QLA_FUNCTION_FAILED;
2665 }
2666
2667 ql_dbg(ql_dbg_io, vha, 0x3074,
2668 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2669 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2670 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2671
2672 wait_for_completion(&elsio->u.els_logo.comp);
2673
25ff6af1 2674 sp->free(sp);
6eb54715
HM
2675 return rval;
2676}
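/*
 * Sketch of the FC port-id handling above: a Fibre Channel address is
 * 24 bits, composed domain:area:al_pa from most to least significant
 * byte.  The driver stores the three bytes separately and byte-swaps
 * them into wire order when building the ELS payload; the helper below
 * just shows the 24-bit composition used in the log messages.
 * Illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

struct fc_port_id {
	uint8_t domain, area, al_pa;
};

static uint32_t port_id_b24(struct fc_port_id id)
{
	return ((uint32_t)id.domain << 16) | ((uint32_t)id.area << 8) |
	       id.al_pa;
}

int main(void)
{
	struct fc_port_id did = { .domain = 0x01, .area = 0x02, .al_pa = 0xef };

	printf("portid=%06x\n", (unsigned)port_id_b24(did));	/* 0102ef */
	return 0;
}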
2677
2678static void
2679qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2680{
25ff6af1 2681 scsi_qla_host_t *vha = sp->vha;
6eb54715
HM
2682 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2683
2684 els_iocb->entry_type = ELS_IOCB_TYPE;
2685 els_iocb->entry_count = 1;
2686 els_iocb->sys_define = 0;
2687 els_iocb->entry_status = 0;
2688 els_iocb->handle = sp->handle;
2689 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2690 els_iocb->tx_dsd_count = 1;
2691 els_iocb->vp_index = vha->vp_idx;
2692 els_iocb->sof_type = EST_SOFI3;
2693 els_iocb->rx_dsd_count = 0;
2694 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2695
2696 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2697 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2698 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
edd05de1
DG
2699 els_iocb->s_id[0] = vha->d_id.b.al_pa;
2700 els_iocb->s_id[1] = vha->d_id.b.area;
2701 els_iocb->s_id[2] = vha->d_id.b.domain;
6eb54715
HM
2702 els_iocb->control_flags = 0;
2703
edd05de1 2704 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
8777e431
QT
2705 els_iocb->tx_byte_count = els_iocb->tx_len =
2706 sizeof(struct els_plogi_payload);
d4556a49
BVA
2707 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2708 &els_iocb->tx_address);
edd05de1 2709 els_iocb->rx_dsd_count = 1;
8777e431
QT
2710 els_iocb->rx_byte_count = els_iocb->rx_len =
2711 sizeof(struct els_plogi_payload);
d4556a49
BVA
2712 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2713 &els_iocb->rx_address);
8777e431 2714
edd05de1
DG
2715 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2716 "PLOGI ELS IOCB:\n");
2717 ql_dump_buffer(ql_log_info, vha, 0x0109,
2718 (uint8_t *)els_iocb, 0x70);
2719 } else {
2720 els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
d4556a49
BVA
2721 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2722 &els_iocb->tx_address);
edd05de1 2723 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
6eb54715 2724
edd05de1 2725 els_iocb->rx_byte_count = 0;
d4556a49 2726 els_iocb->rx_address = 0;
edd05de1
DG
2727 els_iocb->rx_len = 0;
2728 }
6eb54715 2729
25ff6af1 2730 sp->vha->qla_stats.control_requests++;
6eb54715
HM
2731}
2732
edd05de1
DG
2733static void
2734qla2x00_els_dcmd2_iocb_timeout(void *data)
2735{
2736 srb_t *sp = data;
2737 fc_port_t *fcport = sp->fcport;
2738 struct scsi_qla_host *vha = sp->vha;
2739 struct qla_hw_data *ha = vha->hw;
edd05de1
DG
2740 unsigned long flags = 0;
2741 int res;
2742
2743 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2744 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2745 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2746
2747 /* Abort the exchange */
2748 spin_lock_irqsave(&ha->hardware_lock, flags);
2749 res = ha->isp_ops->abort_command(sp);
2750 ql_dbg(ql_dbg_io, vha, 0x3070,
2751 "mbx abort_command %s\n",
2752 (res == QLA_SUCCESS) ? "successful" : "failed");
2753 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2754
8777e431 2755 sp->done(sp, QLA_FUNCTION_TIMEOUT);
edd05de1
DG
2756}
2757
2758static void
2759qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2760{
2761 srb_t *sp = ptr;
2762 fc_port_t *fcport = sp->fcport;
2763 struct srb_iocb *lio = &sp->u.iocb_cmd;
2764 struct scsi_qla_host *vha = sp->vha;
8777e431
QT
2765 struct event_arg ea;
2766 struct qla_work_evt *e;
2767
2768 ql_dbg(ql_dbg_disc, vha, 0x3072,
2769 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2770 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
edd05de1 2771
8777e431
QT
2772 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2773 del_timer(&sp->u.iocb_cmd.timer);
edd05de1 2774
8777e431
QT
2775 if (sp->flags & SRB_WAKEUP_ON_COMP)
2776 complete(&lio->u.els_plogi.comp);
2777 else {
2778 if (res) {
2779 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2780 } else {
2781 memset(&ea, 0, sizeof(ea));
2782 ea.fcport = fcport;
2783 ea.rc = res;
2784 ea.event = FCME_ELS_PLOGI_DONE;
2785 qla2x00_fcport_event_handler(vha, &ea);
2786 }
edd05de1 2787
8777e431
QT
2788 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2789 if (!e) {
2790 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2791
2792 if (elsio->u.els_plogi.els_plogi_pyld)
2793 dma_free_coherent(&sp->vha->hw->pdev->dev,
2794 elsio->u.els_plogi.tx_size,
2795 elsio->u.els_plogi.els_plogi_pyld,
2796 elsio->u.els_plogi.els_plogi_pyld_dma);
2797
2798 if (elsio->u.els_plogi.els_resp_pyld)
2799 dma_free_coherent(&sp->vha->hw->pdev->dev,
2800 elsio->u.els_plogi.rx_size,
2801 elsio->u.els_plogi.els_resp_pyld,
2802 elsio->u.els_plogi.els_resp_pyld_dma);
2803 sp->free(sp);
e9f7be0c 2804 return;
8777e431
QT
2805 }
2806 e->u.iosb.sp = sp;
2807 qla2x00_post_work(vha, e);
2808 }
edd05de1
DG
2809}
2810
2811int
2812qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
8777e431 2813 fc_port_t *fcport, bool wait)
edd05de1
DG
2814{
2815 srb_t *sp;
2816 struct srb_iocb *elsio = NULL;
2817 struct qla_hw_data *ha = vha->hw;
2818 int rval = QLA_SUCCESS;
2819 void *ptr, *resp_ptr;
edd05de1
DG
2820
2821 /* Alloc SRB structure */
2822 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2823 if (!sp) {
2824 ql_log(ql_log_info, vha, 0x70e6,
2825 "SRB allocation failed\n");
2826 return -ENOMEM;
2827 }
2828
2829 elsio = &sp->u.iocb_cmd;
edd05de1
DG
2830 ql_dbg(ql_dbg_io, vha, 0x3073,
2831 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2832
15b6c3c9 2833 fcport->flags |= FCF_ASYNC_SENT;
edd05de1
DG
2834 sp->type = SRB_ELS_DCMD;
2835 sp->name = "ELS_DCMD";
2836 sp->fcport = fcport;
e74e7d95 2837
edd05de1 2838 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
e74e7d95 2839 init_completion(&elsio->u.els_plogi.comp);
8777e431
QT
2840 if (wait)
2841 sp->flags = SRB_WAKEUP_ON_COMP;
2842
2843 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
e74e7d95 2844
edd05de1 2845 sp->done = qla2x00_els_dcmd2_sp_done;
8777e431 2846 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
edd05de1
DG
2847
2848 ptr = elsio->u.els_plogi.els_plogi_pyld =
2849 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2850 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
edd05de1
DG
2851
2852 if (!elsio->u.els_plogi.els_plogi_pyld) {
2853 rval = QLA_FUNCTION_FAILED;
2854 goto out;
2855 }
2856
2857 resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2858 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2859 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2860
2861 if (!elsio->u.els_plogi.els_resp_pyld) {
2862 rval = QLA_FUNCTION_FAILED;
2863 goto out;
2864 }
2865
2866 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2867
2868 memset(ptr, 0, sizeof(struct els_plogi_payload));
2869 memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
8777e431
QT
2870 memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2871 &ha->plogi_els_payld.data,
2872 sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2873
edd05de1
DG
2874 elsio->u.els_plogi.els_cmd = els_opcode;
2875 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
edd05de1 2876
8777e431
QT
2877 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2878 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
edd05de1
DG
2879 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
2880
edd05de1
DG
2881 rval = qla2x00_start_sp(sp);
2882 if (rval != QLA_SUCCESS) {
2883 rval = QLA_FUNCTION_FAILED;
8777e431
QT
2884 } else {
2885 ql_dbg(ql_dbg_disc, vha, 0x3074,
2886 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2887 sp->name, sp->handle, fcport->loop_id,
2888 fcport->d_id.b24, vha->d_id.b24);
edd05de1
DG
2889 }
2890
8777e431
QT
2891 if (wait) {
2892 wait_for_completion(&elsio->u.els_plogi.comp);
edd05de1 2893
8777e431
QT
2894 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2895 rval = QLA_FUNCTION_FAILED;
2896 } else {
2897 goto done;
2898 }
edd05de1
DG
2899
2900out:
8777e431
QT
2901 fcport->flags &= ~(FCF_ASYNC_SENT);
2902 if (elsio->u.els_plogi.els_plogi_pyld)
2903 dma_free_coherent(&sp->vha->hw->pdev->dev,
2904 elsio->u.els_plogi.tx_size,
2905 elsio->u.els_plogi.els_plogi_pyld,
2906 elsio->u.els_plogi.els_plogi_pyld_dma);
2907
2908 if (elsio->u.els_plogi.els_resp_pyld)
2909 dma_free_coherent(&sp->vha->hw->pdev->dev,
2910 elsio->u.els_plogi.rx_size,
2911 elsio->u.els_plogi.els_resp_pyld,
2912 elsio->u.els_plogi.els_resp_pyld_dma);
2913
edd05de1 2914 sp->free(sp);
8777e431 2915done:
edd05de1
DG
2916 return rval;
2917}
2918
9a069e19
GM
2919static void
2920qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2921{
75cc8cfc 2922 struct bsg_job *bsg_job = sp->u.bsg_job;
01e0e15c 2923 struct fc_bsg_request *bsg_request = bsg_job->request;
9a069e19
GM
2924
2925 els_iocb->entry_type = ELS_IOCB_TYPE;
2926 els_iocb->entry_count = 1;
2927 els_iocb->sys_define = 0;
2928 els_iocb->entry_status = 0;
2929 els_iocb->handle = sp->handle;
2930 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
ad950360 2931 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
25ff6af1 2932 els_iocb->vp_index = sp->vha->vp_idx;
9a069e19 2933 els_iocb->sof_type = EST_SOFI3;
ad950360 2934 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
9a069e19 2935
4916392b 2936 els_iocb->opcode =
9ba56b95 2937 sp->type == SRB_ELS_CMD_RPT ?
01e0e15c
JT
2938 bsg_request->rqst_data.r_els.els_code :
2939 bsg_request->rqst_data.h_els.command_code;
9a069e19
GM
2940 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2941 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2942 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2943 els_iocb->control_flags = 0;
2944 els_iocb->rx_byte_count =
2945 cpu_to_le32(bsg_job->reply_payload.payload_len);
2946 els_iocb->tx_byte_count =
2947 cpu_to_le32(bsg_job->request_payload.payload_len);
2948
d4556a49
BVA
2949 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
2950 &els_iocb->tx_address);
9a069e19
GM
2951 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2952 (bsg_job->request_payload.sg_list));
2953
d4556a49
BVA
2954 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
2955 &els_iocb->rx_address);
9a069e19
GM
2956 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2957 (bsg_job->reply_payload.sg_list));
fabbb8df 2958
25ff6af1 2959 sp->vha->qla_stats.control_requests++;
9a069e19
GM
2960}
2961
9bc4f4fb
HZ
2962static void
2963qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2964{
2965 uint16_t avail_dsds;
15b7a68c 2966 struct dsd64 *cur_dsd;
9bc4f4fb
HZ
2967 struct scatterlist *sg;
2968 int index;
2969 uint16_t tot_dsds;
25ff6af1 2970 scsi_qla_host_t *vha = sp->vha;
9bc4f4fb 2971 struct qla_hw_data *ha = vha->hw;
75cc8cfc 2972 struct bsg_job *bsg_job = sp->u.bsg_job;
9bc4f4fb
HZ
2973 int entry_count = 1;
2974
2975 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2976 ct_iocb->entry_type = CT_IOCB_TYPE;
2977 ct_iocb->entry_status = 0;
2978 ct_iocb->handle1 = sp->handle;
2979 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
ad950360
BVA
2980 ct_iocb->status = cpu_to_le16(0);
2981 ct_iocb->control_flags = cpu_to_le16(0);
9bc4f4fb
HZ
2982 ct_iocb->timeout = 0;
2983 ct_iocb->cmd_dsd_count =
ad950360 2984 cpu_to_le16(bsg_job->request_payload.sg_cnt);
9bc4f4fb 2985 ct_iocb->total_dsd_count =
ad950360 2986 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
9bc4f4fb
HZ
2987 ct_iocb->req_bytecount =
2988 cpu_to_le32(bsg_job->request_payload.payload_len);
2989 ct_iocb->rsp_bytecount =
2990 cpu_to_le32(bsg_job->reply_payload.payload_len);
2991
d4556a49
BVA
2992 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
2993 &ct_iocb->req_dsd.address);
15b7a68c 2994 ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
9bc4f4fb 2995
d4556a49
BVA
2996 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
2997 &ct_iocb->rsp_dsd.address);
15b7a68c 2998 ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
9bc4f4fb
HZ
2999
3000 avail_dsds = 1;
15b7a68c 3001 cur_dsd = &ct_iocb->rsp_dsd;
9bc4f4fb
HZ
3002 index = 0;
3003 tot_dsds = bsg_job->reply_payload.sg_cnt;
3004
3005 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
9bc4f4fb
HZ
3006 cont_a64_entry_t *cont_pkt;
3007
3008 /* Allocate additional continuation packets? */
3009 if (avail_dsds == 0) {
3010 /*
3011 * Five DSDs are available in the Cont.
3012 * Type 1 IOCB.
3013 */
0d2aa38e
GM
3014 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3015 vha->hw->req_q_map[0]);
15b7a68c 3016 cur_dsd = cont_pkt->dsd;
9bc4f4fb 3017 avail_dsds = 5;
9bc4f4fb
HZ
3018 entry_count++;
3019 }
3020
15b7a68c 3021 append_dsd64(&cur_dsd, sg);
9bc4f4fb
HZ
3022 avail_dsds--;
3023 }
3024 ct_iocb->entry_count = entry_count;
fabbb8df 3025
25ff6af1 3026 sp->vha->qla_stats.control_requests++;
9bc4f4fb
HZ
3027}
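/*
 * Sketch of the continuation accounting in the DSD loops above: the
 * base IOCB holds a small number of inline DSDs (one here, two in the
 * 24xx variant below) and each Continuation Type 1 IOCB adds five more,
 * so entry_count grows by one every time avail_dsds reaches zero.
 * Illustrative arithmetic only.
 */
#include <stdio.h>

static unsigned int entries_needed(unsigned int dsds, unsigned int inline_dsds)
{
	unsigned int entries = 1;

	if (dsds > inline_dsds) {
		entries += (dsds - inline_dsds) / 5;
		if ((dsds - inline_dsds) % 5)
			entries++;	/* partial last continuation */
	}
	return entries;
}

int main(void)
{
	printf("12 DSDs, 1 inline: %u entries\n", entries_needed(12, 1)); /* 4 */
	printf("12 DSDs, 2 inline: %u entries\n", entries_needed(12, 2)); /* 3 */
	return 0;
}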
3028
9a069e19
GM
3029static void
3030qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3031{
3032 uint16_t avail_dsds;
15b7a68c 3033 struct dsd64 *cur_dsd;
9a069e19
GM
3034 struct scatterlist *sg;
3035 int index;
ce0779c7 3036 uint16_t cmd_dsds, rsp_dsds;
25ff6af1 3037 scsi_qla_host_t *vha = sp->vha;
0d2aa38e 3038 struct qla_hw_data *ha = vha->hw;
75cc8cfc 3039 struct bsg_job *bsg_job = sp->u.bsg_job;
9a069e19 3040 int entry_count = 1;
ce0779c7 3041 cont_a64_entry_t *cont_pkt = NULL;
9a069e19
GM
3042
3043 ct_iocb->entry_type = CT_IOCB_TYPE;
3044 ct_iocb->entry_status = 0;
3045 ct_iocb->sys_define = 0;
3046 ct_iocb->handle = sp->handle;
3047
3048 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
25ff6af1 3049 ct_iocb->vp_index = sp->vha->vp_idx;
ad950360 3050 ct_iocb->comp_status = cpu_to_le16(0);
9a069e19 3051
ce0779c7
GM
3052 cmd_dsds = bsg_job->request_payload.sg_cnt;
3053 rsp_dsds = bsg_job->reply_payload.sg_cnt;
3054
3055 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
9a069e19 3056 ct_iocb->timeout = 0;
ce0779c7 3057 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
9a069e19
GM
3058 ct_iocb->cmd_byte_count =
3059 cpu_to_le32(bsg_job->request_payload.payload_len);
9a069e19 3060
ce0779c7 3061 avail_dsds = 2;
15b7a68c 3062 cur_dsd = ct_iocb->dsd;
9a069e19 3063 index = 0;
9a069e19 3064
ce0779c7 3065 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
ce0779c7
GM
3066 /* Allocate additional continuation packets? */
3067 if (avail_dsds == 0) {
3068 /*
3069 * Five DSDs are available in the Cont.
3070 * Type 1 IOCB.
3071 */
3072 cont_pkt = qla2x00_prep_cont_type1_iocb(
3073 vha, ha->req_q_map[0]);
15b7a68c 3074 cur_dsd = cont_pkt->dsd;
ce0779c7
GM
3075 avail_dsds = 5;
3076 entry_count++;
3077 }
3078
15b7a68c 3079 append_dsd64(&cur_dsd, sg);
ce0779c7
GM
3080 avail_dsds--;
3081 }
3082
3083 index = 0;
3084
3085 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
9a069e19
GM
3086 /* Allocate additional continuation packets? */
3087 if (avail_dsds == 0) {
3088 /*
3089 * Five DSDs are available in the Cont.
3090 * Type 1 IOCB.
3091 */
0d2aa38e
GM
3092 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3093 ha->req_q_map[0]);
15b7a68c 3094 cur_dsd = cont_pkt->dsd;
9a069e19 3095 avail_dsds = 5;
9a069e19
GM
3096 entry_count++;
3097 }
3098
15b7a68c 3099 append_dsd64(&cur_dsd, sg);
9a069e19
GM
3100 avail_dsds--;
3101 }
3102 ct_iocb->entry_count = entry_count;
3103}
3104
5162cf0c
GM
3105/*
3106 * qla82xx_start_scsi() - Send a SCSI command to the ISP
3107 * @sp: command to send to the ISP
3108 *
3109 * Returns non-zero if a failure occurred, else zero.
3110 */
3111int
3112qla82xx_start_scsi(srb_t *sp)
3113{
52c82823 3114 int nseg;
5162cf0c
GM
3115 unsigned long flags;
3116 struct scsi_cmnd *cmd;
3117 uint32_t *clr_ptr;
3118 uint32_t index;
3119 uint32_t handle;
3120 uint16_t cnt;
3121 uint16_t req_cnt;
3122 uint16_t tot_dsds;
3123 struct device_reg_82xx __iomem *reg;
3124 uint32_t dbval;
3125 uint32_t *fcp_dl;
3126 uint8_t additional_cdb_len;
3127 struct ct6_dsd *ctx;
25ff6af1 3128 struct scsi_qla_host *vha = sp->vha;
5162cf0c
GM
3129 struct qla_hw_data *ha = vha->hw;
3130 struct req_que *req = NULL;
3131 struct rsp_que *rsp = NULL;
5162cf0c
GM
3132
3133 /* Setup device pointers. */
5162cf0c 3134 reg = &ha->iobase->isp82;
9ba56b95 3135 cmd = GET_CMD_SP(sp);
5162cf0c
GM
3136 req = vha->req;
3137 rsp = ha->rsp_q_map[0];
3138
3139 /* So we know we haven't pci_map'ed anything yet */
3140 tot_dsds = 0;
3141
3142 dbval = 0x04 | (ha->portnum << 5);
3143
3144 /* Send marker if required */
3145 if (vha->marker_needed != 0) {
9eb9c6dc
QT
3146 if (qla2x00_marker(vha, ha->base_qpair,
3147 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
5162cf0c
GM
3148 ql_log(ql_log_warn, vha, 0x300c,
3149 "qla2x00_marker failed for cmd=%p.\n", cmd);
3150 return QLA_FUNCTION_FAILED;
3151 }
3152 vha->marker_needed = 0;
3153 }
3154
3155 /* Acquire ring specific lock */
3156 spin_lock_irqsave(&ha->hardware_lock, flags);
3157
3158 /* Check for room in outstanding command list. */
3159 handle = req->current_outstanding_cmd;
8d93f550 3160 for (index = 1; index < req->num_outstanding_cmds; index++) {
5162cf0c 3161 handle++;
8d93f550 3162 if (handle == req->num_outstanding_cmds)
5162cf0c
GM
3163 handle = 1;
3164 if (!req->outstanding_cmds[handle])
3165 break;
3166 }
8d93f550 3167 if (index == req->num_outstanding_cmds)
5162cf0c
GM
3168 goto queuing_error;
3169
3170 /* Map the sg table so we have an accurate count of sg entries needed */
3171 if (scsi_sg_count(cmd)) {
3172 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3173 scsi_sg_count(cmd), cmd->sc_data_direction);
3174 if (unlikely(!nseg))
3175 goto queuing_error;
3176 } else
3177 nseg = 0;
3178
3179 tot_dsds = nseg;
3180
3181 if (tot_dsds > ql2xshiftctondsd) {
3182 struct cmd_type_6 *cmd_pkt;
3183 uint16_t more_dsd_lists = 0;
3184 struct dsd_dma *dsd_ptr;
3185 uint16_t i;
3186
3187 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3188 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3189 ql_dbg(ql_dbg_io, vha, 0x300d,
3190 "Num of DSD list %d is than %d for cmd=%p.\n",
3191 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3192 cmd);
3193 goto queuing_error;
3194 }
3195
3196 if (more_dsd_lists <= ha->gbl_dsd_avail)
3197 goto sufficient_dsds;
3198 else
3199 more_dsd_lists -= ha->gbl_dsd_avail;
3200
3201 for (i = 0; i < more_dsd_lists; i++) {
3202 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3203 if (!dsd_ptr) {
3204 ql_log(ql_log_fatal, vha, 0x300e,
3205 "Failed to allocate memory for dsd_dma "
3206 "for cmd=%p.\n", cmd);
3207 goto queuing_error;
3208 }
3209
3210 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3211 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3212 if (!dsd_ptr->dsd_addr) {
3213 kfree(dsd_ptr);
3214 ql_log(ql_log_fatal, vha, 0x300f,
3215 "Failed to allocate memory for dsd_addr "
3216 "for cmd=%p.\n", cmd);
3217 goto queuing_error;
3218 }
3219 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3220 ha->gbl_dsd_avail++;
3221 }
3222
3223sufficient_dsds:
3224 req_cnt = 1;
3225
3226 if (req->cnt < (req_cnt + 2)) {
3227 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3228 &reg->req_q_out[0]);
3229 if (req->ring_index < cnt)
3230 req->cnt = cnt - req->ring_index;
3231 else
3232 req->cnt = req->length -
3233 (req->ring_index - cnt);
a6eb3c9f
CL
3234 if (req->cnt < (req_cnt + 2))
3235 goto queuing_error;
5162cf0c
GM
3236 }
3237
9ba56b95
GM
3238 ctx = sp->u.scmd.ctx =
3239 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3240 if (!ctx) {
5162cf0c
GM
3241 ql_log(ql_log_fatal, vha, 0x3010,
3242 "Failed to allocate ctx for cmd=%p.\n", cmd);
3243 goto queuing_error;
3244 }
9ba56b95 3245
5162cf0c 3246 memset(ctx, 0, sizeof(struct ct6_dsd));
501017f6 3247 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
5162cf0c
GM
3248 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3249 if (!ctx->fcp_cmnd) {
3250 ql_log(ql_log_fatal, vha, 0x3011,
3251 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
841f97bf 3252 goto queuing_error;
5162cf0c
GM
3253 }
3254
3255 /* Initialize the DSD list and dma handle */
3256 INIT_LIST_HEAD(&ctx->dsd_list);
3257 ctx->dsd_use_cnt = 0;
3258
3259 if (cmd->cmd_len > 16) {
3260 additional_cdb_len = cmd->cmd_len - 16;
3261 if ((cmd->cmd_len % 4) != 0) {
 3262    /* A SCSI command longer than 16 bytes must be a
 3263     * multiple of 4 bytes in length
3264 */
3265 ql_log(ql_log_warn, vha, 0x3012,
3266 "scsi cmd len %d not multiple of 4 "
3267 "for cmd=%p.\n", cmd->cmd_len, cmd);
3268 goto queuing_error_fcp_cmnd;
3269 }
3270 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3271 } else {
3272 additional_cdb_len = 0;
3273 ctx->fcp_cmnd_len = 12 + 16 + 4;
3274 }
3275
3276 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3277 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3278
3279 /* Zero out remaining portion of packet. */
3280 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3281 clr_ptr = (uint32_t *)cmd_pkt + 2;
3282 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3283 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3284
3285 /* Set NPORT-ID and LUN number*/
3286 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3287 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3288 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3289 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 3290 cmd_pkt->vp_index = sp->vha->vp_idx;
5162cf0c
GM
3291
3292 /* Build IOCB segments */
3293 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3294 goto queuing_error_fcp_cmnd;
3295
9ba56b95 3296 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
5162cf0c
GM
3297 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3298
3299 /* build FCP_CMND IU */
9ba56b95 3300 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
5162cf0c
GM
3301 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3302
3303 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3304 ctx->fcp_cmnd->additional_cdb_len |= 1;
3305 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3306 ctx->fcp_cmnd->additional_cdb_len |= 2;
3307
a00f6296
SK
3308 /* Populate the FCP_PRIO. */
3309 if (ha->flags.fcp_prio_enabled)
3310 ctx->fcp_cmnd->task_attribute |=
3311 sp->fcport->fcp_prio << 3;
3312
5162cf0c
GM
3313 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3314
3315 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3316 additional_cdb_len);
3317 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3318
3319 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
d4556a49
BVA
3320 put_unaligned_le64(ctx->fcp_cmnd_dma,
3321 &cmd_pkt->fcp_cmnd_dseg_address);
5162cf0c
GM
3322
3323 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3324 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3325 /* Set total data segment count. */
3326 cmd_pkt->entry_count = (uint8_t)req_cnt;
3327 /* Specify response queue number where
3328 * completion should happen
3329 */
3330 cmd_pkt->entry_status = (uint8_t) rsp->id;
3331 } else {
3332 struct cmd_type_7 *cmd_pkt;
bd432bb5 3333
5162cf0c
GM
3334 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3335 if (req->cnt < (req_cnt + 2)) {
3336 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3337 &reg->req_q_out[0]);
3338 if (req->ring_index < cnt)
3339 req->cnt = cnt - req->ring_index;
3340 else
3341 req->cnt = req->length -
3342 (req->ring_index - cnt);
3343 }
3344 if (req->cnt < (req_cnt + 2))
3345 goto queuing_error;
3346
3347 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3348 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3349
3350 /* Zero out remaining portion of packet. */
3351 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3352 clr_ptr = (uint32_t *)cmd_pkt + 2;
3353 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3354 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3355
3356 /* Set NPORT-ID and LUN number*/
3357 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3358 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3359 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3360 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 3361 cmd_pkt->vp_index = sp->vha->vp_idx;
5162cf0c 3362
9ba56b95 3363 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
5162cf0c 3364 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
9ba56b95 3365 sizeof(cmd_pkt->lun));
5162cf0c 3366
a00f6296
SK
3367 /* Populate the FCP_PRIO. */
3368 if (ha->flags.fcp_prio_enabled)
3369 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3370
5162cf0c
GM
3371 /* Load SCSI command packet. */
3372 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3373 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3374
3375 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3376
3377 /* Build IOCB segments */
d7459527 3378 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
5162cf0c
GM
3379
3380 /* Set total data segment count. */
3381 cmd_pkt->entry_count = (uint8_t)req_cnt;
3382 /* Specify response queue number where
3383 * completion should happen.
3384 */
3385 cmd_pkt->entry_status = (uint8_t) rsp->id;
3386
3387 }
3388 /* Build command packet. */
3389 req->current_outstanding_cmd = handle;
3390 req->outstanding_cmds[handle] = sp;
3391 sp->handle = handle;
9ba56b95 3392 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
5162cf0c
GM
3393 req->cnt -= req_cnt;
3394 wmb();
3395
3396 /* Adjust ring index. */
3397 req->ring_index++;
3398 if (req->ring_index == req->length) {
3399 req->ring_index = 0;
3400 req->ring_ptr = req->ring;
3401 } else
3402 req->ring_ptr++;
3403
3404 sp->flags |= SRB_DMA_VALID;
3405
3406 /* Set chip new ring index. */
3407 /* write, read and verify logic */
3408 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3409 if (ql2xdbwr)
8dfa4b5a 3410 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
5162cf0c 3411 else {
8dfa4b5a 3412 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
5162cf0c 3413 wmb();
8dfa4b5a
BVA
3414 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3415 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
5162cf0c
GM
3416 wmb();
3417 }
3418 }
3419
3420 /* Manage unprocessed RIO/ZIO commands in response queue. */
3421 if (vha->flags.process_response_queue &&
3422 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3423 qla24xx_process_response_queue(vha, rsp);
3424
3425 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3426 return QLA_SUCCESS;
3427
3428queuing_error_fcp_cmnd:
3429 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3430queuing_error:
3431 if (tot_dsds)
3432 scsi_dma_unmap(cmd);
3433
9ba56b95
GM
3434 if (sp->u.scmd.ctx) {
3435 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3436 sp->u.scmd.ctx = NULL;
5162cf0c
GM
3437 }
3438 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3439
3440 return QLA_FUNCTION_FAILED;
3441}
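/*
 * Sketch of the ISP82xx doorbell value built above: command type 0x04
 * in the low bits, PCI function number at bit 5, request-queue id at
 * bit 8, and the new ring index at bit 16; the driver then writes it
 * repeatedly until a read-back confirms the device latched it.
 * Illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t make_dbval(uint8_t portnum, uint8_t req_id,
			   uint16_t ring_index)
{
	uint32_t dbval = 0x04 | ((uint32_t)portnum << 5);

	return dbval | ((uint32_t)req_id << 8) | ((uint32_t)ring_index << 16);
}

int main(void)
{
	/* PCI function 1, request queue 0, producer index 0x2a */
	printf("dbval=0x%08x\n", (unsigned)make_dbval(1, 0, 0x2a));
	return 0;	/* prints 0x002a0024 */
}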
3442
6d78e557 3443static void
4440e46d
AB
3444qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3445{
3446 struct srb_iocb *aio = &sp->u.iocb_cmd;
25ff6af1 3447 scsi_qla_host_t *vha = sp->vha;
49cecca7 3448 struct req_que *req = sp->qpair->req;
4440e46d
AB
3449
3450 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3451 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3452 abt_iocb->entry_count = 1;
f3767225 3453 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
49cecca7
QT
3454 if (sp->fcport) {
3455 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3456 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3457 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3458 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3459 }
4440e46d 3460 abt_iocb->handle_to_abort =
3461 cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3462 aio->u.abt.cmd_hndl));
4440e46d 3463 abt_iocb->vp_index = vha->vp_idx;
b027a5ac 3464 abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
4440e46d
AB
3465 /* Send the command to the firmware */
3466 wmb();
3467}
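
/*
 * Editorial sketch: the abort IOCB above carries two composite handles.
 * Assuming the driver's MAKE_HANDLE() packs a request-queue id into the
 * upper 16 bits and a per-queue command handle into the lower 16, an
 * equivalent stand-alone helper would look like this (hypothetical name):
 */
#include <linux/types.h>

static inline u32 example_make_handle(u16 req_que_id, u16 cmd_handle)
{
	/* queue id in bits 31:16, per-queue handle in bits 15:0 */
	return ((u32)req_que_id << 16) | cmd_handle;
}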
3468
726b8548
QT
3469static void
3470qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3471{
3472 int i, sz;
3473
3474 mbx->entry_type = MBX_IOCB_TYPE;
3475 mbx->handle = sp->handle;
3476 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3477
3478 for (i = 0; i < sz; i++)
3479 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3480}
3481
3482static void
3483qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3484{
3485 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3486 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3487 ct_pkt->handle = sp->handle;
3488}
3489
3490static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3491 struct nack_to_isp *nack)
3492{
3493 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3494
3495 nack->entry_type = NOTIFY_ACK_TYPE;
3496 nack->entry_count = 1;
3497 nack->ox_id = ntfy->ox_id;
3498
3499 nack->u.isp24.handle = sp->handle;
3500 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3501 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3502 nack->u.isp24.flags = ntfy->u.isp24.flags &
3503 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3504 }
3505 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3506 nack->u.isp24.status = ntfy->u.isp24.status;
3507 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3508 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3509 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3510 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3511 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3512 nack->u.isp24.srr_flags = 0;
3513 nack->u.isp24.srr_reject_code = 0;
3514 nack->u.isp24.srr_reject_code_expl = 0;
3515 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3516}
3517
e84067d7
DG
3518/*
3519 * Build NVME LS request
3520 */
3521static int
3522qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3523{
3524 struct srb_iocb *nvme;
3525 int rval = QLA_SUCCESS;
3526
3527 nvme = &sp->u.iocb_cmd;
3528 cmd_pkt->entry_type = PT_LS4_REQUEST;
3529 cmd_pkt->entry_count = 1;
3530 cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3531
3532 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3533 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3534 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3535
3536 cmd_pkt->tx_dseg_count = 1;
3537 cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
15b7a68c
BVA
3538 cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
3539 put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
e84067d7
DG
3540
3541 cmd_pkt->rx_dseg_count = 1;
3542 cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
15b7a68c
BVA
3543 cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
3544 put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
e84067d7
DG
3545
3546 return rval;
3547}

static void
qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
{
	int map, pos;

	vce->entry_type = VP_CTRL_IOCB_TYPE;
	vce->handle = sp->handle;
	vce->entry_count = 1;
	vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
	vce->vp_count = cpu_to_le16(1);

	/*
	 * The index map in firmware starts with 1; decrement the index
	 * here.  This is OK as we never use index 0.
	 */
	map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
	pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
	vce->vp_idx_map[map] |= 1 << pos;
}

static void
qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);

	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}

int
qla2x00_start_sp(srb_t *sp)
{
	int rval = QLA_SUCCESS;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qp = sp->qpair;
	void *pkt;
	unsigned long flags;

	spin_lock_irqsave(qp->qp_lock_ptr, flags);
	pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
	if (!pkt) {
		rval = EAGAIN;
		ql_log(ql_log_warn, vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_PRLI_CMD:
		qla24xx_prli_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_tm_iocb(sp, pkt) :
		    qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_FXIOCB_DCMD:
	case SRB_FXIOCB_BCMD:
		qlafx00_fxdisc_iocb(sp, pkt);
		break;
	case SRB_NVME_LS:
		qla_nvme_ls(sp, pkt);
		break;
	case SRB_ABT_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_abort_iocb(sp, pkt) :
		    qla24xx_abort_iocb(sp, pkt);
		break;
	case SRB_ELS_DCMD:
		qla24xx_els_logo_iocb(sp, pkt);
		break;
	case SRB_CT_PTHRU_CMD:
		qla2x00_ctpthru_cmd_iocb(sp, pkt);
		break;
	case SRB_MB_IOCB:
		qla2x00_mb_iocb(sp, pkt);
		break;
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
		qla2x00_send_notify_ack_iocb(sp, pkt);
		break;
	case SRB_CTRL_VP:
		qla25xx_ctrlvp_iocb(sp, pkt);
		break;
	case SRB_PRLO_CMD:
		qla24xx_prlo_iocb(sp, pkt);
		break;
	default:
		break;
	}

	if (sp->start_timer)
		add_timer(&sp->u.iocb_cmd.timer);

	wmb();
	qla2x00_start_iocbs(vha, qp->req);
done:
	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
	return rval;
}
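
/*
 * Editorial sketch of the submission sequence qla2x00_start_sp() follows.
 * Everything here is hypothetical scaffolding for illustration; only the
 * ordering (lock, allocate, encode, barrier, doorbell, unlock) mirrors
 * the function above.
 */
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <asm/barrier.h>

struct example_qpair {
	spinlock_t lock;
	/* ... ring state ... */
};

struct example_srb;

void *example_alloc_iocb(struct example_qpair *qp, struct example_srb *sp);
void example_encode_iocb(struct example_srb *sp, void *pkt);
void example_ring_doorbell(struct example_qpair *qp);

static int example_start_sp(struct example_qpair *qp, struct example_srb *sp)
{
	unsigned long flags;
	void *pkt;
	int rval = 0;

	spin_lock_irqsave(&qp->lock, flags);	/* serialize ring access */
	pkt = example_alloc_iocb(qp, sp);	/* reserve ring slot(s) */
	if (!pkt) {
		rval = -EAGAIN;			/* ring full; caller retries */
		goto done;
	}
	example_encode_iocb(sp, pkt);		/* type-specific IOCB build */
	wmb();					/* IOCB visible before doorbell */
	example_ring_doorbell(qp);		/* hand the entry to firmware */
done:
	spin_unlock_irqrestore(&qp->lock, flags);
	return rval;
}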

static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
	struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct bsg_job *bsg_job = sp->u.bsg_job;

	/* Update entry type to indicate bidirectional command */
	put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);

	/* Set the transfer direction: set both flags here.
	 * Also set the BD_WRAP_BACK flag; firmware will take care of
	 * assigning DID=SID for outgoing pkts.
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
	    BD_WRAP_BACK);

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;

	vha->qla_stats.output_bytes += req_data_len;
	vha->qla_stats.output_requests++;

	/* Only one DSD is available for a bidirectional IOCB; the
	 * remaining DSDs are bundled in continuation IOCBs.
	 */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->fcp_dsd;

	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
	    bsg_job->request_payload.sg_cnt, index) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* A Continuation Type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = 5;
			entry_count++;
		}
		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
	/* Read-request DSDs always go to a continuation IOCB and follow
	 * the write DSDs.  If there is room on the current IOCB they are
	 * added to it; otherwise a new continuation IOCB is allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
	    bsg_job->reply_payload.sg_cnt, index) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* A Continuation Type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = 5;
			entry_count++;
		}
		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
	/* This value should be the same as the number of IOCBs required
	 * for this command.
	 */
	cmd_pkt->entry_count = entry_count;
}
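
/*
 * Editorial sketch of the entry_count arithmetic implied above: the
 * bidirectional command IOCB itself holds one DSD and each Continuation
 * Type 1 IOCB holds five, so the total entry count follows the same
 * shape as the calc_iocbs helpers earlier in this file (hypothetical
 * name):
 */
#include <linux/types.h>

static inline uint16_t example_calc_bidir_iocbs(uint16_t dsds)
{
	uint16_t iocbs = 1;			/* the cmd_bidir entry itself */

	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;	/* full continuation entries */
		if ((dsds - 1) % 5)
			iocbs++;		/* final partial continuation */
	}
	return iocbs;
}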

int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint32_t index;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rval = QLA_SUCCESS;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair,
		    0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	/* Calculate number of IOCBs required */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID (of vha) */
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;

	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
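
/*
 * Editorial sketch of the free-slot computation used above: `out` is the
 * consumer index read back from the chip (or its shadow copy), `in` is
 * the producer index, and free space is the circular distance between
 * them.  The driver additionally demands req_cnt + 2 free entries, which
 * appears to keep the producer from fully catching the consumer.
 * Hypothetical helper name:
 */
#include <linux/types.h>

static inline u16 example_ring_space(u16 length, u16 in, u16 out)
{
	/* producer behind consumer: free run extends up to the consumer */
	if (in < out)
		return out - in;
	/* otherwise the free space wraps around the end of the ring */
	return length - (in - out);
}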