/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

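/*
 * Worked example: the command IOCB itself holds 3 DSDs and each
 * Continuation Type 0 IOCB holds 7 more, so dsds = 12 needs
 * 1 + (9 / 7) + 1 = 3 IOCB entries.
 */
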
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

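/*
 * Worked example: here the command IOCB holds 2 DSDs and each Continuation
 * Type 1 IOCB holds 5 more, so dsds = 12 needs 1 + (10 / 5) = 3 IOCB
 * entries.
 */
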
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
			   CONTINUE_A64_TYPE, &cont_pkt->entry_type);

	return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}

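/*
 * Example: for a SCSI_PROT_WRITE_PASS command on a host that advertises
 * SHOST_DIX_GUARD_IP, *fw_prot_opts ends up as PO_MODE_DIF_TCP_CKSUM and
 * the return value is the number of protection scatter/gather entries.
 */
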
/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd32 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
	cur_dsd = cmd_pkt->dsd32;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd32(&cur_dsd, sg);
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
	cur_dsd = cmd_pkt->dsd64;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

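	/*
	 * Worked example for the free-slot math above: with req->length == 128,
	 * req->ring_index == 120 and a hardware out-pointer (cnt) of 10, the
	 * ring has req->cnt = 128 - (120 - 10) = 18 free entries.
	 */
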
	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @qpair: queue pair pointer
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct req_que *req = qpair->req;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
    uint16_t loop_id, uint64_t lun, uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

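/*
 * Usage note: after an event that requires resynchronization the driver
 * sets vha->marker_needed; the next qla2x00_start_scsi() (or its 24xx
 * counterpart) then sends an MK_SYNC_ALL marker via qla2x00_marker()
 * before queuing new commands, as seen at the top of those routines.
 */
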
static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
    uint16_t tot_dsds)
{
	struct dsd64 *cur_dsd = NULL, *next_dsd;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cmd_pkt->fcp_dsd.address);
			cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
		} else {
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
			    &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd++;
		}
		cur_dsd = next_dsd;
		while (avail_dsds) {
			append_dsd64(&cur_dsd, cur_seg);
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}

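/*
 * Note on the layout built above: the Type 6 IOCB does not carry data DSDs
 * inline.  cmd_pkt->fcp_dsd points at the first external DSD list, and the
 * reserved final slot of each full list (the "+ 1" in dsd_list_len) is
 * filled with the address/length of the next list, forming a chain the
 * firmware walks.
 */
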
/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}

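/*
 * Worked example (taking QLA_DSDS_PER_IOCB as 37 purely for illustration):
 * dsds = 80 needs 80 / 37 = 2 full lists plus one more for the remaining
 * 6 descriptors, so 3 DSD lists in total.
 */
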
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->dsd;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
			0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}

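/*
 * Example: for a DIF Type 1 command at LBA 0x12345678, ref_tag is set to
 * the low 32 bits of the LBA (0x12345678); with HBA error checking enabled
 * all four ref_tag_mask bytes become 0xff, so the firmware validates the
 * full 32-bit reference tag on every block.
 */
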
int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
    uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}

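/*
 * Illustrative walk-through: with blk_sz == 512 and a single 1536-byte SG
 * element, three successive calls each return a 512-byte sub-segment with
 * *partial == 0.  An element that ends mid-block instead sets *partial == 1
 * and carries the shortfall in tot_partial so the next element completes
 * the block.
 */
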
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	struct dsd64 *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t prot_int; /* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg = tc->sg;
		sg_prot = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
					   &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd = next_dsd;
		}
		put_unaligned_le64(sle_dma, &cur_dsd->address);
		cur_dsd->length = cpu_to_le32(sle_dma_len);
		cur_dsd++;
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

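/*
 * Note on the interleaving above: each time a full protection interval
 * (one sector's worth of data) has been emitted, the loop jumps back to
 * alloc_and_fill to append one 8-byte DIF tuple from the protection SG
 * list, so data and protection DSDs end up interleaved in the same list.
 */
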
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	struct dsd64 *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			put_unaligned_le64(dsd_ptr->dsd_list_dma,
					   &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(dsd_list_len);
			cur_dsd = next_dsd;
		}
		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
	struct scatterlist *sg, *sgl;
	struct crc_context *difctx = NULL;
	struct scsi_qla_host *vha;
	uint dsd_list_len;
	uint avail_dsds = 0;
	uint used_dsds = tot_dsds;
	bool dif_local_dma_alloc = false;
	bool direction_to_device = false;
	int i;

	if (sp) {
		struct scsi_cmnd *cmd = GET_CMD_SP(sp);

		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
		difctx = sp->u.scmd.ctx;
		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
		    "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
		    __func__, cmd, difctx, sp);
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
		difctx = tc->ctx;
		direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
	    "%s: enter (write=%u)\n", __func__, direction_to_device);

	/* if initiator doing write or target doing read */
	if (direction_to_device) {
		for_each_sg(sgl, sg, tot_dsds, i) {
			u64 sle_phys = sg_phys(sg);

			/* If SGE addr + len flips bits in upper 32-bits */
			if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
				    "%s: page boundary crossing (phys=%llx len=%x)\n",
				    __func__, sle_phys, sg->length);

				if (difctx) {
					ha->dif_bundle_crossed_pages++;
					dif_local_dma_alloc = true;
				} else {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe022,
					    "%s: difctx pointer is NULL\n",
					    __func__);
				}
				break;
			}
		}
		ha->dif_bundle_writes++;
	} else {
		ha->dif_bundle_reads++;
	}

	if (ql2xdifbundlinginternalbuffers)
		dif_local_dma_alloc = direction_to_device;

	if (dif_local_dma_alloc) {
		u32 track_difbundl_buf = 0;
		u32 ldma_sg_len = 0;
		u8 ldma_needed = 1;

		difctx->no_dif_bundl = 0;
		difctx->dif_bundl_len = 0;

		/* Track DSD buffers */
		INIT_LIST_HEAD(&difctx->ldif_dsd_list);
		/* Track local DMA buffers */
		INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);

		for_each_sg(sgl, sg, tot_dsds, i) {
			u32 sglen = sg_dma_len(sg);

			ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
			    "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
			    __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
			    difctx->dif_bundl_len, ldma_needed);

			while (sglen) {
				u32 xfrlen = 0;

				if (ldma_needed) {
					/*
					 * Allocate list item to store
					 * the DMA buffers
					 */
					dsd_ptr = kzalloc(sizeof(*dsd_ptr),
					    GFP_ATOMIC);
					if (!dsd_ptr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc dsd_ptr\n",
						    __func__);
						return 1;
					}
					ha->dif_bundle_kallocs++;

					/* allocate dma buffer */
					dsd_ptr->dsd_addr = dma_pool_alloc
						(ha->dif_bundl_pool, GFP_ATOMIC,
						 &dsd_ptr->dsd_list_dma);
					if (!dsd_ptr->dsd_addr) {
						ql_dbg(ql_dbg_tgt, vha, 0xe024,
						    "%s: failed alloc ->dsd_ptr\n",
						    __func__);
						/*
						 * need to cleanup only this
						 * dsd_ptr rest will be done
						 * by sp_free_dma()
						 */
						kfree(dsd_ptr);
						ha->dif_bundle_kallocs--;
						return 1;
					}
					ha->dif_bundle_dma_allocs++;
					ldma_needed = 0;
					difctx->no_dif_bundl++;
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dma_hndl_list);
				}

				/* xfrlen is min of dma pool size and sglen */
				xfrlen = (sglen >
				    (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
				    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
				    sglen;

				/* replace with local allocated dma buffer */
				sg_pcopy_to_buffer(sgl, sg_nents(sgl),
				    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
				    difctx->dif_bundl_len);
				difctx->dif_bundl_len += xfrlen;
				sglen -= xfrlen;
				ldma_sg_len += xfrlen;
				if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
				    sg_is_last(sg)) {
					ldma_needed = 1;
					ldma_sg_len = 0;
				}
			}
		}

		track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
		    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
		    difctx->dif_bundl_len, difctx->no_dif_bundl,
		    track_difbundl_buf);

		if (sp)
			sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
		else
			tc->prot_flags = DIF_BUNDL_DMA_VALID;

		list_for_each_entry_safe(dif_dsd, nxt_dsd,
		    &difctx->ldif_dma_hndl_list, list) {
			u32 sglen = (difctx->dif_bundl_len >
				     DIF_BUNDLING_DMA_POOL_SIZE) ?
			    DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;

			BUG_ON(track_difbundl_buf == 0);

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
				    0xe024,
				    "%s: adding continuation iocb's\n",
				    __func__);
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc dsd_ptr\n",
					    __func__);
					return 1;
				}
				ha->dif_bundle_kallocs++;

				difctx->no_ldif_dsd++;
				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					ql_dbg(ql_dbg_tgt, vha, 0xe026,
					    "%s: failed alloc ->dsd_addr\n",
					    __func__);
					/*
					 * need to cleanup only this dsd_ptr
					 * rest will be done by sp_free_dma()
					 */
					kfree(dsd_ptr);
					ha->dif_bundle_kallocs--;
					return 1;
				}
				ha->dif_bundle_dma_allocs++;

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->ldif_dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				put_unaligned_le64(dsd_ptr->dsd_list_dma,
						   &cur_dsd->address);
				cur_dsd->length = cpu_to_le32(dsd_list_len);
				cur_dsd = dsd_ptr->dsd_addr;
			}
			put_unaligned_le64(dif_dsd->dsd_list_dma,
					   &cur_dsd->address);
			cur_dsd->length = cpu_to_le32(sglen);
			cur_dsd++;
			avail_dsds--;
			difctx->dif_bundl_len -= sglen;
			track_difbundl_buf--;
		}

		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
		    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
		    difctx->no_ldif_dsd, difctx->no_dif_bundl);
	} else {
		for_each_sg(sgl, sg, tot_dsds, i) {
			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
				    QLA_DSDS_PER_IOCB : used_dsds;
				dsd_list_len = (avail_dsds + 1) * 12;
				used_dsds -= avail_dsds;

				/* allocate tracking DS */
				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
				if (!dsd_ptr) {
					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
					    vha, 0xe027,
					    "%s: failed alloc dsd_dma...\n",
					    __func__);
					return 1;
				}

				/* allocate new list */
				dsd_ptr->dsd_addr =
				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
					&dsd_ptr->dsd_list_dma);
				if (!dsd_ptr->dsd_addr) {
					/* need to cleanup only this dsd_ptr */
					/* rest will be done by sp_free_dma() */
					kfree(dsd_ptr);
					return 1;
				}

				if (sp) {
					list_add_tail(&dsd_ptr->list,
					    &difctx->dsd_list);
					sp->flags |= SRB_CRC_CTX_DSD_VALID;
				} else {
					list_add_tail(&dsd_ptr->list,
					    &difctx->dsd_list);
					tc->ctx_dsd_alloced = 1;
				}

				/* add new list to cmd iocb or last list */
				put_unaligned_le64(dsd_ptr->dsd_list_dma,
						   &cur_dsd->address);
				cur_dsd->length = cpu_to_le32(dsd_list_len);
				cur_dsd = dsd_ptr->dsd_addr;
			}
			append_dsd64(&cur_dsd, sg);
			avail_dsds--;
		}
	}
	/* Null termination */
	cur_dsd->address = 0;
	cur_dsd->length = 0;
	cur_dsd++;
	return 0;
}

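/*
 * Note on the local-DMA path above: when a protection SGE spans a 4 GB
 * boundary (the upper 32 bits of its start and end addresses differ), or
 * when ql2xdifbundlinginternalbuffers forces it for writes, the protection
 * data is copied into DIF_BUNDLING_DMA_POOL_SIZE-sized bounce buffers
 * tracked on ldif_dma_hndl_list, and the DSD lists then point at those
 * buffers instead of the original SG elements.
 */
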
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of segments with protection information
 * @fw_prot_opts: Protection options to be passed to firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	struct dsd64 *cur_dsd;
	uint32_t *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
			   &cmd_pkt->fcp_cmnd_dseg_address);
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
			cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
			(tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
			tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

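/*
 * Worked example for the byte accounting above: a 4096-byte write with
 * 512-byte sectors carries dif_bytes = (4096 / 512) * 8 = 64 bytes of
 * protection.  For WRITE_INSERT the wire total is 4096 + 64, while for
 * WRITE_STRIP the wire total stays 4096 and only the local DMA length
 * (data_bytes) grows by 64.
 */
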
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

bad75002
AE
1715/**
1716 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1717 * @sp: command to send to the ISP
1718 *
1719 * Returns non-zero if a failure occurred, else zero.
1720 */
1721int
1722qla24xx_dif_start_scsi(srb_t *sp)
1723{
1724 int nseg;
1725 unsigned long flags;
1726 uint32_t *clr_ptr;
1727 uint32_t index;
1728 uint32_t handle;
1729 uint16_t cnt;
1730 uint16_t req_cnt = 0;
1731 uint16_t tot_dsds;
1732 uint16_t tot_prot_dsds;
1733 uint16_t fw_prot_opts = 0;
1734 struct req_que *req = NULL;
1735 struct rsp_que *rsp = NULL;
9ba56b95 1736 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
25ff6af1 1737 struct scsi_qla_host *vha = sp->vha;
bad75002
AE
1738 struct qla_hw_data *ha = vha->hw;
1739 struct cmd_type_crc_2 *cmd_pkt;
1740 uint32_t status = 0;
1741
1742#define QDSS_GOT_Q_SPACE BIT_0
1743
0c470874
AE
1744 /* Only process protection or >16 cdb in this routine */
1745 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1746 if (cmd->cmd_len <= 16)
1747 return qla24xx_start_scsi(sp);
1748 }
bad75002
AE
1749
1750 /* Setup device pointers. */
bad75002 1751 req = vha->req;
d7459527 1752 rsp = req->rsp;
bad75002
AE
1753
1754 /* So we know we haven't pci_map'ed anything yet */
1755 tot_dsds = 0;
1756
1757 /* Send marker if required */
1758 if (vha->marker_needed != 0) {
9eb9c6dc 1759 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
bad75002
AE
1760 QLA_SUCCESS)
1761 return QLA_FUNCTION_FAILED;
1762 vha->marker_needed = 0;
1763 }
1764
1765 /* Acquire ring specific lock */
1766 spin_lock_irqsave(&ha->hardware_lock, flags);
1767
1768 /* Check for room in outstanding command list. */
1769 handle = req->current_outstanding_cmd;
8d93f550 1770 for (index = 1; index < req->num_outstanding_cmds; index++) {
bad75002 1771 handle++;
8d93f550 1772 if (handle == req->num_outstanding_cmds)
bad75002
AE
1773 handle = 1;
1774 if (!req->outstanding_cmds[handle])
1775 break;
1776 }
1777
8d93f550 1778 if (index == req->num_outstanding_cmds)
bad75002
AE
1779 goto queuing_error;
1780
1781 /* Compute number of required data segments */
1782 /* Map the sg table so we have an accurate count of sg entries needed */
1783 if (scsi_sg_count(cmd)) {
1784 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1785 scsi_sg_count(cmd), cmd->sc_data_direction);
1786 if (unlikely(!nseg))
1787 goto queuing_error;
1788 else
1789 sp->flags |= SRB_DMA_VALID;
1790
1791 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1792 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1793 struct qla2_sgx sgx;
1794 uint32_t partial;
1795
1796 memset(&sgx, 0, sizeof(struct qla2_sgx));
1797 sgx.tot_bytes = scsi_bufflen(cmd);
1798 sgx.cur_sg = scsi_sglist(cmd);
1799 sgx.sp = sp;
1800
1801 nseg = 0;
1802 while (qla24xx_get_one_block_sg(
1803 cmd->device->sector_size, &sgx, &partial))
1804 nseg++;
1805 }
1806 } else
1807 nseg = 0;
1808
1809 /* number of required data segments */
1810 tot_dsds = nseg;
1811
1812 /* Compute number of required protection segments */
1813 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1814 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1815 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1816 if (unlikely(!nseg))
1817 goto queuing_error;
1818 else
1819 sp->flags |= SRB_CRC_PROT_DMA_VALID;
1820
1821 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1822 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1823 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1824 }
1825 } else {
1826 nseg = 0;
1827 }
1828
1829 req_cnt = 1;
1830 /* Total Data and protection sg segment(s) */
1831 tot_prot_dsds = nseg;
1832 tot_dsds += nseg;
1833 if (req->cnt < (req_cnt + 2)) {
1834 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1835 RD_REG_DWORD_RELAXED(req->req_q_out);
1836 if (req->ring_index < cnt)
1837 req->cnt = cnt - req->ring_index;
1838 else
1839 req->cnt = req->length -
1840 (req->ring_index - cnt);
1841 if (req->cnt < (req_cnt + 2))
1842 goto queuing_error;
1843 }
1844
1845 status |= QDSS_GOT_Q_SPACE;
1846
1847 /* Build header part of command packet (excluding the OPCODE). */
1848 req->current_outstanding_cmd = handle;
1849 req->outstanding_cmds[handle] = sp;
8cb2049c 1850 sp->handle = handle;
9ba56b95 1851 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1852 req->cnt -= req_cnt;
1853
1854 /* Fill-in common area */
1855 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1856 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1857
1858 clr_ptr = (uint32_t *)cmd_pkt + 2;
1859 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1860
1861 /* Set NPORT-ID and LUN number*/
1862 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1863 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1864 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1865 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1866
9ba56b95 1867 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1868 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1869
1870 /* Total Data and protection segment(s) */
1871 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1872
1873 /* Build IOCB segments and adjust for data protection segments */
1874 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1875 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1876 QLA_SUCCESS)
1877 goto queuing_error;
1878
1879 cmd_pkt->entry_count = (uint8_t)req_cnt;
1880 /* Specify response queue number where completion should happen */
1881 cmd_pkt->entry_status = (uint8_t) rsp->id;
ad950360 1882 cmd_pkt->timeout = cpu_to_le16(0);
1883 wmb();
1884
1885 /* Adjust ring index. */
1886 req->ring_index++;
1887 if (req->ring_index == req->length) {
1888 req->ring_index = 0;
1889 req->ring_ptr = req->ring;
1890 } else
1891 req->ring_ptr++;
1892
1893 /* Set chip new ring index. */
1894 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1895
1896 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1897
1898 return QLA_SUCCESS;
1899
1900queuing_error:
1901 if (status & QDSS_GOT_Q_SPACE) {
1902 req->outstanding_cmds[handle] = NULL;
1903 req->cnt += req_cnt;
1904 }
1905 /* Cleanup will be performed by the caller (queuecommand) */
1906
1907 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1908 return QLA_FUNCTION_FAILED;
1909}
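/*
 * Illustrative sketch, not driver code: free space on the circular request
 * queue is the distance from the producer index to the consumer index that
 * the chip (or its shadow register) reports back, mirroring the "Check for
 * room" arithmetic used above. Names are hypothetical.
 */
static inline unsigned int demo_ring_free_slots(unsigned int in,
						unsigned int out,
						unsigned int length)
{
	if (in < out)
		return out - in;	/* free region is contiguous */
	return length - (in - out);	/* free region wraps around the end */
}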
1910
1911/**
1912 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1913 * @sp: command to send to the ISP
1914 *
1915 * Returns non-zero if a failure occurred, else zero.
1916 */
1917static int
1918qla2xxx_start_scsi_mq(srb_t *sp)
68ca949c 1919{
1920 int nseg;
1921 unsigned long flags;
1922 uint32_t *clr_ptr;
1923 uint32_t index;
1924 uint32_t handle;
1925 struct cmd_type_7 *cmd_pkt;
1926 uint16_t cnt;
1927 uint16_t req_cnt;
1928 uint16_t tot_dsds;
1929 struct req_que *req = NULL;
9ba56b95 1930 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1931 struct scsi_qla_host *vha = sp->fcport->vha;
1932 struct qla_hw_data *ha = vha->hw;
1933 struct qla_qpair *qpair = sp->qpair;
1934
1935 /* Acquire qpair specific lock */
1936 spin_lock_irqsave(&qpair->qp_lock, flags);
1937
d7459527 1938 /* Setup qpair pointers */
1939 req = qpair->req;
1940
1941 /* So we know we haven't pci_map'ed anything yet */
1942 tot_dsds = 0;
1943
1944 /* Send marker if required */
1945 if (vha->marker_needed != 0) {
9eb9c6dc 1946 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1947 QLA_SUCCESS) {
1948 spin_unlock_irqrestore(&qpair->qp_lock, flags);
d7459527 1949 return QLA_FUNCTION_FAILED;
578079fa 1950 }
1951 vha->marker_needed = 0;
1952 }
1953
1954 /* Check for room in outstanding command list. */
1955 handle = req->current_outstanding_cmd;
1956 for (index = 1; index < req->num_outstanding_cmds; index++) {
1957 handle++;
1958 if (handle == req->num_outstanding_cmds)
1959 handle = 1;
1960 if (!req->outstanding_cmds[handle])
1961 break;
1962 }
1963 if (index == req->num_outstanding_cmds)
1964 goto queuing_error;
1965
1966 /* Map the sg table so we have an accurate count of sg entries needed */
1967 if (scsi_sg_count(cmd)) {
1968 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1969 scsi_sg_count(cmd), cmd->sc_data_direction);
1970 if (unlikely(!nseg))
1971 goto queuing_error;
1972 } else
1973 nseg = 0;
1974
1975 tot_dsds = nseg;
1976 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1977 if (req->cnt < (req_cnt + 2)) {
1978 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1979 RD_REG_DWORD_RELAXED(req->req_q_out);
1980 if (req->ring_index < cnt)
1981 req->cnt = cnt - req->ring_index;
1982 else
1983 req->cnt = req->length -
1984 (req->ring_index - cnt);
1985 if (req->cnt < (req_cnt + 2))
1986 goto queuing_error;
1987 }
1988
1989 /* Build command packet. */
1990 req->current_outstanding_cmd = handle;
1991 req->outstanding_cmds[handle] = sp;
1992 sp->handle = handle;
1993 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1994 req->cnt -= req_cnt;
1995
1996 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1997 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1998
1999 /* Zero out remaining portion of packet. */
2000 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2001 clr_ptr = (uint32_t *)cmd_pkt + 2;
2002 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2003 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2004
2005 /* Set NPORT-ID and LUN number*/
2006 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2007 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2008 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2009 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2010 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2011
2012 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2013 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2014
2015 cmd_pkt->task = TSK_SIMPLE;
2016
2017 /* Load SCSI command packet. */
2018 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2019 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2020
2021 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2022
2023 /* Build IOCB segments */
2024 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2025
2026 /* Set total data segment count. */
2027 cmd_pkt->entry_count = (uint8_t)req_cnt;
2028 wmb();
2029 /* Adjust ring index. */
2030 req->ring_index++;
2031 if (req->ring_index == req->length) {
2032 req->ring_index = 0;
2033 req->ring_ptr = req->ring;
2034 } else
2035 req->ring_ptr++;
2036
2037 sp->flags |= SRB_DMA_VALID;
2038
2039 /* Set chip new ring index. */
2040 WRT_REG_DWORD(req->req_q_in, req->ring_index);
2041
2042 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2043 return QLA_SUCCESS;
2044
2045queuing_error:
2046 if (tot_dsds)
2047 scsi_dma_unmap(cmd);
2048
2049 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2050
2051 return QLA_FUNCTION_FAILED;
2052}
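/*
 * Illustrative sketch, not driver code: finding a free command handle, as in
 * the "Check for room in outstanding command list" loops above, is a linear
 * scan of the outstanding-command array starting just past the most recently
 * used slot; slot 0 is reserved, so the search wraps to 1. Hypothetical names.
 */
static int demo_find_free_handle(void **outstanding, unsigned int num,
				 unsigned int current_handle)
{
	unsigned int index, handle = current_handle;

	for (index = 1; index < num; index++) {
		handle++;
		if (handle == num)
			handle = 1;	/* handle 0 is never used */
		if (!outstanding[handle])
			return handle;	/* free slot found */
	}
	return -1;			/* array is full */
}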
2053
2054
2055/**
2056 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2057 * @sp: command to send to the ISP
2058 *
2059 * Returns non-zero if a failure occurred, else zero.
2060 */
2061int
2062qla2xxx_dif_start_scsi_mq(srb_t *sp)
2063{
2064 int nseg;
2065 unsigned long flags;
2066 uint32_t *clr_ptr;
2067 uint32_t index;
2068 uint32_t handle;
2069 uint16_t cnt;
2070 uint16_t req_cnt = 0;
2071 uint16_t tot_dsds;
2072 uint16_t tot_prot_dsds;
2073 uint16_t fw_prot_opts = 0;
2074 struct req_que *req = NULL;
2075 struct rsp_que *rsp = NULL;
2076 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2077 struct scsi_qla_host *vha = sp->fcport->vha;
2078 struct qla_hw_data *ha = vha->hw;
2079 struct cmd_type_crc_2 *cmd_pkt;
2080 uint32_t status = 0;
2081 struct qla_qpair *qpair = sp->qpair;
2082
2083#define QDSS_GOT_Q_SPACE BIT_0
2084
2085 /* Check for host side state */
2086 if (!qpair->online) {
2087 cmd->result = DID_NO_CONNECT << 16;
2088 return QLA_INTERFACE_ERROR;
2089 }
2090
2091 if (!qpair->difdix_supported &&
2092 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2093 cmd->result = DID_NO_CONNECT << 16;
2094 return QLA_INTERFACE_ERROR;
2095 }
2096
2097	/* Only process protection or >16 byte CDBs in this routine */
2098 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2099 if (cmd->cmd_len <= 16)
2100 return qla2xxx_start_scsi_mq(sp);
2101 }
2102
2103 spin_lock_irqsave(&qpair->qp_lock, flags);
2104
2105 /* Setup qpair pointers */
2106 rsp = qpair->rsp;
2107 req = qpair->req;
2108
2109 /* So we know we haven't pci_map'ed anything yet */
2110 tot_dsds = 0;
2111
2112 /* Send marker if required */
2113 if (vha->marker_needed != 0) {
9eb9c6dc 2114 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2115 QLA_SUCCESS) {
2116 spin_unlock_irqrestore(&qpair->qp_lock, flags);
d7459527 2117 return QLA_FUNCTION_FAILED;
578079fa 2118 }
2119 vha->marker_needed = 0;
2120 }
2121
2122 /* Check for room in outstanding command list. */
2123 handle = req->current_outstanding_cmd;
2124 for (index = 1; index < req->num_outstanding_cmds; index++) {
2125 handle++;
2126 if (handle == req->num_outstanding_cmds)
2127 handle = 1;
2128 if (!req->outstanding_cmds[handle])
2129 break;
2130 }
2131
2132 if (index == req->num_outstanding_cmds)
2133 goto queuing_error;
2134
2135 /* Compute number of required data segments */
2136 /* Map the sg table so we have an accurate count of sg entries needed */
2137 if (scsi_sg_count(cmd)) {
2138 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2139 scsi_sg_count(cmd), cmd->sc_data_direction);
2140 if (unlikely(!nseg))
2141 goto queuing_error;
2142 else
2143 sp->flags |= SRB_DMA_VALID;
2144
2145 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2146 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2147 struct qla2_sgx sgx;
2148 uint32_t partial;
2149
2150 memset(&sgx, 0, sizeof(struct qla2_sgx));
2151 sgx.tot_bytes = scsi_bufflen(cmd);
2152 sgx.cur_sg = scsi_sglist(cmd);
2153 sgx.sp = sp;
2154
2155 nseg = 0;
2156 while (qla24xx_get_one_block_sg(
2157 cmd->device->sector_size, &sgx, &partial))
2158 nseg++;
2159 }
2160 } else
2161 nseg = 0;
2162
2163 /* number of required data segments */
2164 tot_dsds = nseg;
2165
2166 /* Compute number of required protection segments */
2167 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2168 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2169 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2170 if (unlikely(!nseg))
2171 goto queuing_error;
2172 else
2173 sp->flags |= SRB_CRC_PROT_DMA_VALID;
2174
2175 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2176 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2177 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2178 }
2179 } else {
2180 nseg = 0;
2181 }
2182
2183 req_cnt = 1;
2184 /* Total Data and protection sg segment(s) */
2185 tot_prot_dsds = nseg;
2186 tot_dsds += nseg;
2187 if (req->cnt < (req_cnt + 2)) {
2188 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2189 RD_REG_DWORD_RELAXED(req->req_q_out);
2190 if (req->ring_index < cnt)
2191 req->cnt = cnt - req->ring_index;
2192 else
2193 req->cnt = req->length -
2194 (req->ring_index - cnt);
2195 if (req->cnt < (req_cnt + 2))
2196 goto queuing_error;
2197 }
2198
2199 status |= QDSS_GOT_Q_SPACE;
2200
2201 /* Build header part of command packet (excluding the OPCODE). */
2202 req->current_outstanding_cmd = handle;
2203 req->outstanding_cmds[handle] = sp;
2204 sp->handle = handle;
2205 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2206 req->cnt -= req_cnt;
2207
2208 /* Fill-in common area */
2209 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2210 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2211
2212 clr_ptr = (uint32_t *)cmd_pkt + 2;
2213 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2214
2215 /* Set NPORT-ID and LUN number*/
2216 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2217 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2218 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2219 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
68ca949c 2220
2221 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2222 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2223
2224 /* Total Data and protection segment(s) */
2225 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2226
2227 /* Build IOCB segments and adjust for data protection segments */
2228 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2229 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2230 QLA_SUCCESS)
2231 goto queuing_error;
2232
2233 cmd_pkt->entry_count = (uint8_t)req_cnt;
2234 cmd_pkt->timeout = cpu_to_le16(0);
2235 wmb();
2236
2237 /* Adjust ring index. */
2238 req->ring_index++;
2239 if (req->ring_index == req->length) {
2240 req->ring_index = 0;
2241 req->ring_ptr = req->ring;
2242 } else
2243 req->ring_ptr++;
2244
2245 /* Set chip new ring index. */
2246 WRT_REG_DWORD(req->req_q_in, req->ring_index);
2247
2248 /* Manage unprocessed RIO/ZIO commands in response queue. */
2249 if (vha->flags.process_response_queue &&
2250 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2251 qla24xx_process_response_queue(vha, rsp);
2252
2253 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2254
2255 return QLA_SUCCESS;
2256
2257queuing_error:
2258 if (status & QDSS_GOT_Q_SPACE) {
2259 req->outstanding_cmds[handle] = NULL;
2260 req->cnt += req_cnt;
2261 }
2262 /* Cleanup will be performed by the caller (queuecommand) */
2263
2264 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2265 return QLA_FUNCTION_FAILED;
68ca949c 2266}
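/*
 * Illustrative sketch, not driver code: for DIF READ_INSERT/WRITE_STRIP the
 * firmware needs one data segment per logical block, so the segment count is
 * recomputed from the transfer length and the device sector size, mirroring
 * the "nseg = scsi_bufflen(cmd) / cmd->device->sector_size" lines above.
 */
static inline unsigned int demo_blocks_per_transfer(unsigned int bufflen,
						    unsigned int sector_size)
{
	/* Assumes bufflen is a whole number of sectors, as SCSI guarantees. */
	return bufflen / sector_size;
}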
2267
2268/* Generic Control-SRB manipulation functions. */
2269
2270/* hardware_lock assumed to be held. */
b6a029e1 2271
d94d10e7 2272void *
82de802a 2273__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
ac280b67 2274{
82de802a 2275 scsi_qla_host_t *vha = qpair->vha;
ac280b67 2276 struct qla_hw_data *ha = vha->hw;
82de802a 2277 struct req_que *req = qpair->req;
118e2ef9 2278 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2279 uint32_t index, handle;
2280 request_t *pkt;
2281 uint16_t cnt, req_cnt;
2282
2283 pkt = NULL;
2284 req_cnt = 1;
2285 handle = 0;
2286
2287 if (sp && (sp->type != SRB_SCSI_CMD)) {
2288 /* Adjust entry-counts as needed. */
9ba56b95 2289 req_cnt = sp->iocbs;
5e53be8e 2290 }
5780790e 2291
ac280b67 2292 /* Check for room on request queue. */
94007037 2293 if (req->cnt < req_cnt + 2) {
2294 if (qpair->use_shadow_reg)
2295 cnt = *req->out_ptr;
2296 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2297 IS_QLA28XX(ha))
ac280b67 2298 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
7ec0effd 2299 else if (IS_P3P_TYPE(ha))
d94d10e7 2300 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2301 else if (IS_FWI2_CAPABLE(ha))
2302 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2303 else if (IS_QLAFX00(ha))
2304 cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2305 else
2306 cnt = qla2x00_debounce_register(
2307 ISP_REQ_Q_OUT(ha, &reg->isp));
2308
2309 if (req->ring_index < cnt)
2310 req->cnt = cnt - req->ring_index;
2311 else
2312 req->cnt = req->length -
2313 (req->ring_index - cnt);
2314 }
94007037 2315 if (req->cnt < req_cnt + 2)
2316 goto queuing_error;
2317
2318 if (sp) {
2319 /* Check for room in outstanding command list. */
2320 handle = req->current_outstanding_cmd;
2321 for (index = 1; index < req->num_outstanding_cmds; index++) {
2322 handle++;
2323 if (handle == req->num_outstanding_cmds)
2324 handle = 1;
2325 if (!req->outstanding_cmds[handle])
2326 break;
2327 }
2328 if (index == req->num_outstanding_cmds) {
2329 ql_log(ql_log_warn, vha, 0x700b,
2330 "No room on outstanding cmd array.\n");
2331 goto queuing_error;
2332 }
2333
2334 /* Prep command array. */
2335 req->current_outstanding_cmd = handle;
2336 req->outstanding_cmds[handle] = sp;
2337 sp->handle = handle;
2338 }
2339
ac280b67 2340 /* Prep packet */
ac280b67 2341 req->cnt -= req_cnt;
2342 pkt = req->ring_ptr;
2343 memset(pkt, 0, REQUEST_ENTRY_SIZE);
8ae6d9c7 2344 if (IS_QLAFX00(ha)) {
2345 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2346 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2347 } else {
2348 pkt->entry_count = req_cnt;
2349 pkt->handle = handle;
2350 }
ac280b67 2351
2352 return pkt;
2353
ac280b67 2354queuing_error:
60a9eadb 2355 qpair->tgt_counters.num_alloc_iocb_failed++;
2356 return pkt;
2357}
2358
2359void *
2360qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2361{
2362 scsi_qla_host_t *vha = qpair->vha;
2363
2364 if (qla2x00_reset_active(vha))
2365 return NULL;
2366
2367 return __qla2x00_alloc_iocbs(qpair, sp);
2368}
2369
2370void *
2371qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2372{
2373 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2374}
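/*
 * Usage sketch for the allocators above (illustrative only; error handling
 * trimmed): callers hold the appropriate hardware/qpair lock, request a
 * packet, and treat a NULL return as "ring full, no free handle, or reset
 * active".
 */
#if 0	/* illustrative only */
	pkt = qla2x00_alloc_iocbs(vha, sp);
	if (!pkt)
		goto queuing_error;
#endif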
2375
2376static void
2377qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2378{
2379 struct srb_iocb *lio = &sp->u.iocb_cmd;
2380
2381 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2382 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
03aaa89f 2383 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
a5d42f4c 2384 logio->control_flags |= LCF_NVME_PRLI;
2385 if (sp->vha->flags.nvme_first_burst)
2386 logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST;
2387 }
2388
2389 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2390 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2391 logio->port_id[1] = sp->fcport->d_id.b.area;
2392 logio->port_id[2] = sp->fcport->d_id.b.domain;
2393 logio->vp_index = sp->vha->vp_idx;
2394}
2395
2396static void
2397qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2398{
9ba56b95 2399 struct srb_iocb *lio = &sp->u.iocb_cmd;
2400
2401 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2402 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2403 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2404 } else {
2405 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2406 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2407 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2408 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2409 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2410 }
2411 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2412 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2413 logio->port_id[1] = sp->fcport->d_id.b.area;
2414 logio->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 2415 logio->vp_index = sp->vha->vp_idx;
2416}
2417
2418static void
2419qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2420{
25ff6af1 2421 struct qla_hw_data *ha = sp->vha->hw;
9ba56b95 2422 struct srb_iocb *lio = &sp->u.iocb_cmd;
2423 uint16_t opts;
2424
b963752f 2425 mbx->entry_type = MBX_IOCB_TYPE;
ac280b67
AV
2426 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2427 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2428 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2429 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2430 if (HAS_EXTENDED_IDS(ha)) {
2431 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2432 mbx->mb10 = cpu_to_le16(opts);
2433 } else {
2434 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2435 }
2436 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2437 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2438 sp->fcport->d_id.b.al_pa);
25ff6af1 2439 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2440}
2441
2442static void
2443qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2444{
2445 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2446 logio->control_flags =
2447 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
0e324e94 2448 if (!sp->fcport->keep_nport_handle)
a6ca8878 2449 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2450 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2451 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2452 logio->port_id[1] = sp->fcport->d_id.b.area;
2453 logio->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 2454 logio->vp_index = sp->vha->vp_idx;
2455}
2456
2457static void
2458qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2459{
25ff6af1 2460 struct qla_hw_data *ha = sp->vha->hw;
ac280b67 2461
b963752f 2462 mbx->entry_type = MBX_IOCB_TYPE;
2463 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2464 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2465 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
58e2753c 2466 cpu_to_le16(sp->fcport->loop_id) :
2467 cpu_to_le16(sp->fcport->loop_id << 8);
2468 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2469 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2470 sp->fcport->d_id.b.al_pa);
25ff6af1 2471 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2472 /* Implicit: mbx->mbx10 = 0. */
2473}
2474
2475static void
2476qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2477{
2478 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2479 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2480 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
25ff6af1 2481 logio->vp_index = sp->vha->vp_idx;
2482}
2483
2484static void
2485qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2486{
25ff6af1 2487 struct qla_hw_data *ha = sp->vha->hw;
2488
2489 mbx->entry_type = MBX_IOCB_TYPE;
2490 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2491 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2492 if (HAS_EXTENDED_IDS(ha)) {
2493 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2494 mbx->mb10 = cpu_to_le16(BIT_0);
2495 } else {
2496 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2497 }
2498 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2499 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2500 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2501 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
25ff6af1 2502 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2503}
2504
2505static void
2506qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2507{
2508 uint32_t flags;
9cb78c16 2509 uint64_t lun;
2510 struct fc_port *fcport = sp->fcport;
2511 scsi_qla_host_t *vha = fcport->vha;
2512 struct qla_hw_data *ha = vha->hw;
9ba56b95 2513 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2514 struct req_que *req = vha->req;
2515
2516 flags = iocb->u.tmf.flags;
2517 lun = iocb->u.tmf.lun;
2518
2519 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2520 tsk->entry_count = 1;
2521 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2522 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2523 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2524 tsk->control_flags = cpu_to_le32(flags);
2525 tsk->port_id[0] = fcport->d_id.b.al_pa;
2526 tsk->port_id[1] = fcport->d_id.b.area;
2527 tsk->port_id[2] = fcport->d_id.b.domain;
c6d39e23 2528 tsk->vp_index = fcport->vha->vp_idx;
2529
2530 if (flags == TCF_LUN_RESET) {
2531 int_to_scsilun(lun, &tsk->lun);
2532 host_to_fcp_swap((uint8_t *)&tsk->lun,
2533 sizeof(tsk->lun));
2534 }
2535}
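/*
 * Illustrative sketch, not driver code: the task-management timeout above is
 * 2 * R_A_TOV. This assumes r_a_tov is kept in 100 ms units, which is what
 * the "/ 10" conversion to seconds in the builder above suggests.
 */
static inline unsigned int demo_tm_timeout_secs(unsigned int r_a_tov_100ms)
{
	return r_a_tov_100ms / 10 * 2;	/* 2 * R_A_TOV, in seconds */
}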
2536
2537void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
2538{
2539 timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2540 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2541 sp->free = qla2x00_sp_free;
2542 if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2543 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
3a4b6cc7 2544 sp->start_timer = 1;
2545}
2546
6c18a43e 2547static void qla2x00_els_dcmd_sp_free(srb_t *sp)
6eb54715 2548{
2549 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2550
2551 kfree(sp->fcport);
2552
2553 if (elsio->u.els_logo.els_logo_pyld)
25ff6af1 2554 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2555 elsio->u.els_logo.els_logo_pyld,
2556 elsio->u.els_logo.els_logo_pyld_dma);
2557
2558 del_timer(&elsio->timer);
25ff6af1 2559 qla2x00_rel_sp(sp);
2560}
2561
2562static void
2563qla2x00_els_dcmd_iocb_timeout(void *data)
2564{
25ff6af1 2565 srb_t *sp = data;
6eb54715 2566 fc_port_t *fcport = sp->fcport;
25ff6af1 2567 struct scsi_qla_host *vha = sp->vha;
25ff6af1 2568 struct srb_iocb *lio = &sp->u.iocb_cmd;
2569
2570 ql_dbg(ql_dbg_io, vha, 0x3069,
2571 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2572 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2573 fcport->d_id.b.al_pa);
2574
2575 complete(&lio->u.els_logo.comp);
2576}
2577
6c18a43e 2578static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
6eb54715 2579{
2580 fc_port_t *fcport = sp->fcport;
2581 struct srb_iocb *lio = &sp->u.iocb_cmd;
25ff6af1 2582 struct scsi_qla_host *vha = sp->vha;
2583
2584 ql_dbg(ql_dbg_io, vha, 0x3072,
2585 "%s hdl=%x, portid=%02x%02x%02x done\n",
2586 sp->name, sp->handle, fcport->d_id.b.domain,
2587 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2588
2589 complete(&lio->u.els_logo.comp);
2590}
2591
2592int
2593qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2594 port_id_t remote_did)
2595{
2596 srb_t *sp;
2597 fc_port_t *fcport = NULL;
2598 struct srb_iocb *elsio = NULL;
2599 struct qla_hw_data *ha = vha->hw;
2600 struct els_logo_payload logo_pyld;
2601 int rval = QLA_SUCCESS;
2602
2603 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2604 if (!fcport) {
2605 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2606 return -ENOMEM;
2607 }
2608
2609 /* Alloc SRB structure */
2610 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2611 if (!sp) {
2612 kfree(fcport);
2613 ql_log(ql_log_info, vha, 0x70e6,
2614 "SRB allocation failed\n");
2615 return -ENOMEM;
2616 }
2617
2618 elsio = &sp->u.iocb_cmd;
2619 fcport->loop_id = 0xFFFF;
2620 fcport->d_id.b.domain = remote_did.b.domain;
2621 fcport->d_id.b.area = remote_did.b.area;
2622 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2623
2624 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2625 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2626
2627 sp->type = SRB_ELS_DCMD;
2628 sp->name = "ELS_DCMD";
2629 sp->fcport = fcport;
6eb54715 2630 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
e74e7d95 2631 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
8777e431 2632 init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2633 sp->done = qla2x00_els_dcmd_sp_done;
2634 sp->free = qla2x00_els_dcmd_sp_free;
2635
2636 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2637 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2638 GFP_KERNEL);
2639
2640 if (!elsio->u.els_logo.els_logo_pyld) {
25ff6af1 2641 sp->free(sp);
2642 return QLA_FUNCTION_FAILED;
2643 }
2644
2645 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2646
2647 elsio->u.els_logo.els_cmd = els_opcode;
2648 logo_pyld.opcode = els_opcode;
2649 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2650 logo_pyld.s_id[1] = vha->d_id.b.area;
2651 logo_pyld.s_id[2] = vha->d_id.b.domain;
2652 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2653 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2654
2655 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2656 sizeof(struct els_logo_payload));
2657
2658 rval = qla2x00_start_sp(sp);
2659 if (rval != QLA_SUCCESS) {
25ff6af1 2660 sp->free(sp);
2661 return QLA_FUNCTION_FAILED;
2662 }
2663
2664 ql_dbg(ql_dbg_io, vha, 0x3074,
2665 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2666 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2667 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2668
2669 wait_for_completion(&elsio->u.els_logo.comp);
2670
25ff6af1 2671 sp->free(sp);
2672 return rval;
2673}
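/*
 * Usage sketch for qla24xx_els_dcmd_iocb() above (illustrative only; the
 * remote port ID below is made up): send an explicit ELS LOGO and block until
 * the SRB done handler completes or the IOCB times out.
 */
#if 0	/* illustrative only */
	port_id_t did;

	did.b.domain = 0x01;	/* hypothetical destination D_ID */
	did.b.area = 0x02;
	did.b.al_pa = 0x03;
	if (qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did) != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0xffff, "ELS LOGO failed.\n");
#endif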
2674
2675static void
2676qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2677{
25ff6af1 2678 scsi_qla_host_t *vha = sp->vha;
2679 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2680
2681 els_iocb->entry_type = ELS_IOCB_TYPE;
2682 els_iocb->entry_count = 1;
2683 els_iocb->sys_define = 0;
2684 els_iocb->entry_status = 0;
2685 els_iocb->handle = sp->handle;
2686 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2687 els_iocb->tx_dsd_count = 1;
2688 els_iocb->vp_index = vha->vp_idx;
2689 els_iocb->sof_type = EST_SOFI3;
2690 els_iocb->rx_dsd_count = 0;
2691 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2692
2693 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2694 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2695 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2696 els_iocb->s_id[0] = vha->d_id.b.al_pa;
2697 els_iocb->s_id[1] = vha->d_id.b.area;
2698 els_iocb->s_id[2] = vha->d_id.b.domain;
6eb54715 2699
edd05de1 2700 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
0f8243e6 2701 els_iocb->control_flags = 0;
8777e431 2702 els_iocb->tx_byte_count = els_iocb->tx_len =
9933c050 2703 cpu_to_le32(sizeof(struct els_plogi_payload));
2704 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2705 &els_iocb->tx_address);
edd05de1 2706 els_iocb->rx_dsd_count = 1;
8777e431 2707 els_iocb->rx_byte_count = els_iocb->rx_len =
9933c050 2708 cpu_to_le32(sizeof(struct els_plogi_payload));
2709 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2710 &els_iocb->rx_address);
8777e431 2711
2712 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2713 "PLOGI ELS IOCB:\n");
2714 ql_dump_buffer(ql_log_info, vha, 0x0109,
2715 (uint8_t *)els_iocb, 0x70);
2716 } else {
0f8243e6 2717 els_iocb->control_flags = 1 << 13;
2718 els_iocb->tx_byte_count =
2719 cpu_to_le32(sizeof(struct els_logo_payload));
2720 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2721 &els_iocb->tx_address);
edd05de1 2722 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
6eb54715 2723
edd05de1 2724 els_iocb->rx_byte_count = 0;
d4556a49 2725 els_iocb->rx_address = 0;
2726 els_iocb->rx_len = 0;
2727 }
6eb54715 2728
25ff6af1 2729 sp->vha->qla_stats.control_requests++;
2730}
2731
2732static void
2733qla2x00_els_dcmd2_iocb_timeout(void *data)
2734{
2735 srb_t *sp = data;
2736 fc_port_t *fcport = sp->fcport;
2737 struct scsi_qla_host *vha = sp->vha;
2738 struct qla_hw_data *ha = vha->hw;
2739 unsigned long flags = 0;
2740 int res;
2741
2742 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2743 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2744 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2745
2746 /* Abort the exchange */
2747 spin_lock_irqsave(&ha->hardware_lock, flags);
2748 res = ha->isp_ops->abort_command(sp);
2749 ql_dbg(ql_dbg_io, vha, 0x3070,
2750 "mbx abort_command %s\n",
2751 (res == QLA_SUCCESS) ? "successful" : "failed");
2752 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2753
8777e431 2754 sp->done(sp, QLA_FUNCTION_TIMEOUT);
2755}
2756
6c18a43e 2757static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
edd05de1 2758{
2759 fc_port_t *fcport = sp->fcport;
2760 struct srb_iocb *lio = &sp->u.iocb_cmd;
2761 struct scsi_qla_host *vha = sp->vha;
2762 struct event_arg ea;
2763 struct qla_work_evt *e;
2764
2765 ql_dbg(ql_dbg_disc, vha, 0x3072,
2766 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2767 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
edd05de1 2768
2769 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2770 del_timer(&sp->u.iocb_cmd.timer);
edd05de1 2771
2772 if (sp->flags & SRB_WAKEUP_ON_COMP)
2773 complete(&lio->u.els_plogi.comp);
2774 else {
2775 if (res) {
2776 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2777 } else {
2778 memset(&ea, 0, sizeof(ea));
2779 ea.fcport = fcport;
2780 ea.rc = res;
2781 ea.event = FCME_ELS_PLOGI_DONE;
2782 qla2x00_fcport_event_handler(vha, &ea);
2783 }
edd05de1 2784
2785 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2786 if (!e) {
2787 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2788
2789 if (elsio->u.els_plogi.els_plogi_pyld)
2790 dma_free_coherent(&sp->vha->hw->pdev->dev,
2791 elsio->u.els_plogi.tx_size,
2792 elsio->u.els_plogi.els_plogi_pyld,
2793 elsio->u.els_plogi.els_plogi_pyld_dma);
2794
2795 if (elsio->u.els_plogi.els_resp_pyld)
2796 dma_free_coherent(&sp->vha->hw->pdev->dev,
2797 elsio->u.els_plogi.rx_size,
2798 elsio->u.els_plogi.els_resp_pyld,
2799 elsio->u.els_plogi.els_resp_pyld_dma);
2800 sp->free(sp);
e9f7be0c 2801 return;
2802 }
2803 e->u.iosb.sp = sp;
2804 qla2x00_post_work(vha, e);
2805 }
2806}
2807
2808int
2809qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
8777e431 2810 fc_port_t *fcport, bool wait)
2811{
2812 srb_t *sp;
2813 struct srb_iocb *elsio = NULL;
2814 struct qla_hw_data *ha = vha->hw;
2815 int rval = QLA_SUCCESS;
2816 void *ptr, *resp_ptr;
2817
2818 /* Alloc SRB structure */
2819 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2820 if (!sp) {
2821 ql_log(ql_log_info, vha, 0x70e6,
2822 "SRB allocation failed\n");
2823 return -ENOMEM;
2824 }
2825
2826 elsio = &sp->u.iocb_cmd;
2827 ql_dbg(ql_dbg_io, vha, 0x3073,
2828 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2829
15b6c3c9 2830 fcport->flags |= FCF_ASYNC_SENT;
2831 sp->type = SRB_ELS_DCMD;
2832 sp->name = "ELS_DCMD";
2833 sp->fcport = fcport;
e74e7d95 2834
edd05de1 2835 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
e74e7d95 2836 init_completion(&elsio->u.els_plogi.comp);
2837 if (wait)
2838 sp->flags = SRB_WAKEUP_ON_COMP;
2839
2840 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
e74e7d95 2841
edd05de1 2842 sp->done = qla2x00_els_dcmd2_sp_done;
8777e431 2843 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2844
2845 ptr = elsio->u.els_plogi.els_plogi_pyld =
2846 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2847 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2848
2849 if (!elsio->u.els_plogi.els_plogi_pyld) {
2850 rval = QLA_FUNCTION_FAILED;
2851 goto out;
2852 }
2853
2854 resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2855 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2856 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2857
2858 if (!elsio->u.els_plogi.els_resp_pyld) {
2859 rval = QLA_FUNCTION_FAILED;
2860 goto out;
2861 }
2862
2863 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2864
2865 memset(ptr, 0, sizeof(struct els_plogi_payload));
2866 memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2867 memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2868 &ha->plogi_els_payld.data,
2869 sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2870
2871 elsio->u.els_plogi.els_cmd = els_opcode;
2872 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
edd05de1 2873
2874 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2875 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
2876 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
2877
2878 rval = qla2x00_start_sp(sp);
2879 if (rval != QLA_SUCCESS) {
2880 rval = QLA_FUNCTION_FAILED;
2881 } else {
2882 ql_dbg(ql_dbg_disc, vha, 0x3074,
2883 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2884 sp->name, sp->handle, fcport->loop_id,
2885 fcport->d_id.b24, vha->d_id.b24);
2886 }
2887
2888 if (wait) {
2889 wait_for_completion(&elsio->u.els_plogi.comp);
edd05de1 2890
2891 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2892 rval = QLA_FUNCTION_FAILED;
2893 } else {
2894 goto done;
2895 }
2896
2897out:
2898 fcport->flags &= ~(FCF_ASYNC_SENT);
2899 if (elsio->u.els_plogi.els_plogi_pyld)
2900 dma_free_coherent(&sp->vha->hw->pdev->dev,
2901 elsio->u.els_plogi.tx_size,
2902 elsio->u.els_plogi.els_plogi_pyld,
2903 elsio->u.els_plogi.els_plogi_pyld_dma);
2904
2905 if (elsio->u.els_plogi.els_resp_pyld)
2906 dma_free_coherent(&sp->vha->hw->pdev->dev,
2907 elsio->u.els_plogi.rx_size,
2908 elsio->u.els_plogi.els_resp_pyld,
2909 elsio->u.els_plogi.els_resp_pyld_dma);
2910
edd05de1 2911 sp->free(sp);
8777e431 2912done:
2913 return rval;
2914}
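/*
 * Illustrative sketch, not driver code: the PLOGI payload buffers above
 * follow the usual coherent-DMA pairing; whatever size/cpu-address/dma-address
 * triple was allocated must be freed identically, which is why tx_size and
 * rx_size are cached in the SRB for the done/cleanup paths.
 */
#if 0	/* illustrative only */
	buf = dma_alloc_coherent(&ha->pdev->dev, size, &buf_dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... use buf / buf_dma ... */
	dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
#endif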
2915
2916static void
2917qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2918{
75cc8cfc 2919 struct bsg_job *bsg_job = sp->u.bsg_job;
01e0e15c 2920 struct fc_bsg_request *bsg_request = bsg_job->request;
2921
2922 els_iocb->entry_type = ELS_IOCB_TYPE;
2923 els_iocb->entry_count = 1;
2924 els_iocb->sys_define = 0;
2925 els_iocb->entry_status = 0;
2926 els_iocb->handle = sp->handle;
2927 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
ad950360 2928 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
25ff6af1 2929 els_iocb->vp_index = sp->vha->vp_idx;
9a069e19 2930 els_iocb->sof_type = EST_SOFI3;
ad950360 2931 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
9a069e19 2932
4916392b 2933 els_iocb->opcode =
9ba56b95 2934 sp->type == SRB_ELS_CMD_RPT ?
2935 bsg_request->rqst_data.r_els.els_code :
2936 bsg_request->rqst_data.h_els.command_code;
2937 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2938 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2939 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2940 els_iocb->control_flags = 0;
2941 els_iocb->rx_byte_count =
2942 cpu_to_le32(bsg_job->reply_payload.payload_len);
2943 els_iocb->tx_byte_count =
2944 cpu_to_le32(bsg_job->request_payload.payload_len);
2945
2946 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
2947 &els_iocb->tx_address);
2948 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2949 (bsg_job->request_payload.sg_list));
2950
2951 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
2952 &els_iocb->rx_address);
2953 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2954 (bsg_job->reply_payload.sg_list));
fabbb8df 2955
25ff6af1 2956 sp->vha->qla_stats.control_requests++;
2957}
2958
2959static void
2960qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2961{
2962 uint16_t avail_dsds;
15b7a68c 2963 struct dsd64 *cur_dsd;
2964 struct scatterlist *sg;
2965 int index;
2966 uint16_t tot_dsds;
25ff6af1 2967 scsi_qla_host_t *vha = sp->vha;
9bc4f4fb 2968 struct qla_hw_data *ha = vha->hw;
75cc8cfc 2969 struct bsg_job *bsg_job = sp->u.bsg_job;
2970 int entry_count = 1;
2971
2972 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2973 ct_iocb->entry_type = CT_IOCB_TYPE;
2974 ct_iocb->entry_status = 0;
2975 ct_iocb->handle1 = sp->handle;
2976 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2977 ct_iocb->status = cpu_to_le16(0);
2978 ct_iocb->control_flags = cpu_to_le16(0);
2979 ct_iocb->timeout = 0;
2980 ct_iocb->cmd_dsd_count =
ad950360 2981 cpu_to_le16(bsg_job->request_payload.sg_cnt);
9bc4f4fb 2982 ct_iocb->total_dsd_count =
ad950360 2983 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2984 ct_iocb->req_bytecount =
2985 cpu_to_le32(bsg_job->request_payload.payload_len);
2986 ct_iocb->rsp_bytecount =
2987 cpu_to_le32(bsg_job->reply_payload.payload_len);
2988
2989 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
2990 &ct_iocb->req_dsd.address);
15b7a68c 2991 ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
9bc4f4fb 2992
2993 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
2994 &ct_iocb->rsp_dsd.address);
15b7a68c 2995 ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
2996
2997 avail_dsds = 1;
15b7a68c 2998 cur_dsd = &ct_iocb->rsp_dsd;
2999 index = 0;
3000 tot_dsds = bsg_job->reply_payload.sg_cnt;
3001
3002 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3003 cont_a64_entry_t *cont_pkt;
3004
3005 /* Allocate additional continuation packets? */
3006 if (avail_dsds == 0) {
3007 /*
3008 * Five DSDs are available in the Cont.
3009 * Type 1 IOCB.
3010 */
3011 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3012 vha->hw->req_q_map[0]);
15b7a68c 3013 cur_dsd = cont_pkt->dsd;
9bc4f4fb 3014 avail_dsds = 5;
3015 entry_count++;
3016 }
3017
15b7a68c 3018 append_dsd64(&cur_dsd, sg);
3019 avail_dsds--;
3020 }
3021 ct_iocb->entry_count = entry_count;
fabbb8df 3022
25ff6af1 3023 sp->vha->qla_stats.control_requests++;
3024}
3025
3026static void
3027qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3028{
3029 uint16_t avail_dsds;
15b7a68c 3030 struct dsd64 *cur_dsd;
3031 struct scatterlist *sg;
3032 int index;
ce0779c7 3033 uint16_t cmd_dsds, rsp_dsds;
25ff6af1 3034 scsi_qla_host_t *vha = sp->vha;
0d2aa38e 3035 struct qla_hw_data *ha = vha->hw;
75cc8cfc 3036 struct bsg_job *bsg_job = sp->u.bsg_job;
9a069e19 3037 int entry_count = 1;
ce0779c7 3038 cont_a64_entry_t *cont_pkt = NULL;
3039
3040 ct_iocb->entry_type = CT_IOCB_TYPE;
3041 ct_iocb->entry_status = 0;
3042 ct_iocb->sys_define = 0;
3043 ct_iocb->handle = sp->handle;
3044
3045 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
25ff6af1 3046 ct_iocb->vp_index = sp->vha->vp_idx;
ad950360 3047 ct_iocb->comp_status = cpu_to_le16(0);
9a069e19 3048
3049 cmd_dsds = bsg_job->request_payload.sg_cnt;
3050 rsp_dsds = bsg_job->reply_payload.sg_cnt;
3051
3052 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
9a069e19 3053 ct_iocb->timeout = 0;
ce0779c7 3054 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3055 ct_iocb->cmd_byte_count =
3056 cpu_to_le32(bsg_job->request_payload.payload_len);
9a069e19 3057
ce0779c7 3058 avail_dsds = 2;
15b7a68c 3059 cur_dsd = ct_iocb->dsd;
9a069e19 3060 index = 0;
9a069e19 3061
ce0779c7 3062 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3063 /* Allocate additional continuation packets? */
3064 if (avail_dsds == 0) {
3065 /*
3066 * Five DSDs are available in the Cont.
3067 * Type 1 IOCB.
3068 */
3069 cont_pkt = qla2x00_prep_cont_type1_iocb(
3070 vha, ha->req_q_map[0]);
15b7a68c 3071 cur_dsd = cont_pkt->dsd;
3072 avail_dsds = 5;
3073 entry_count++;
3074 }
3075
15b7a68c 3076 append_dsd64(&cur_dsd, sg);
3077 avail_dsds--;
3078 }
3079
3080 index = 0;
3081
3082 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3083 /* Allocate additional continuation packets? */
3084 if (avail_dsds == 0) {
3085 /*
3086 * Five DSDs are available in the Cont.
3087 * Type 1 IOCB.
3088 */
3089 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3090 ha->req_q_map[0]);
15b7a68c 3091 cur_dsd = cont_pkt->dsd;
9a069e19 3092 avail_dsds = 5;
3093 entry_count++;
3094 }
3095
15b7a68c 3096 append_dsd64(&cur_dsd, sg);
3097 avail_dsds--;
3098 }
3099 ct_iocb->entry_count = entry_count;
3100}
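/*
 * Illustrative sketch, not driver code: entry_count for a CT pass-through
 * grows by one Continuation Type 1 IOCB for every five DSDs beyond those the
 * base packet holds (two in the 24xx CT IOCB above, one spare slot in the
 * older MS IOCB).
 */
static inline unsigned int demo_ct_entry_count(unsigned int dsds,
					       unsigned int in_base)
{
	unsigned int entries = 1;

	if (dsds > in_base)
		entries += (dsds - in_base + 4) / 5;	/* 5 DSDs per cont. */
	return entries;
}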
3101
3102/**
3103 * qla82xx_start_scsi() - Send a SCSI command to the ISP
3104 * @sp: command to send to the ISP
3105 *
3106 * Returns non-zero if a failure occurred, else zero.
3107 */
3108int
3109qla82xx_start_scsi(srb_t *sp)
3110{
52c82823 3111 int nseg;
3112 unsigned long flags;
3113 struct scsi_cmnd *cmd;
3114 uint32_t *clr_ptr;
3115 uint32_t index;
3116 uint32_t handle;
3117 uint16_t cnt;
3118 uint16_t req_cnt;
3119 uint16_t tot_dsds;
3120 struct device_reg_82xx __iomem *reg;
3121 uint32_t dbval;
3122 uint32_t *fcp_dl;
3123 uint8_t additional_cdb_len;
3124 struct ct6_dsd *ctx;
25ff6af1 3125 struct scsi_qla_host *vha = sp->vha;
3126 struct qla_hw_data *ha = vha->hw;
3127 struct req_que *req = NULL;
3128 struct rsp_que *rsp = NULL;
3129
3130 /* Setup device pointers. */
5162cf0c 3131 reg = &ha->iobase->isp82;
9ba56b95 3132 cmd = GET_CMD_SP(sp);
3133 req = vha->req;
3134 rsp = ha->rsp_q_map[0];
3135
3136 /* So we know we haven't pci_map'ed anything yet */
3137 tot_dsds = 0;
3138
3139 dbval = 0x04 | (ha->portnum << 5);
3140
3141 /* Send marker if required */
3142 if (vha->marker_needed != 0) {
3143 if (qla2x00_marker(vha, ha->base_qpair,
3144 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3145 ql_log(ql_log_warn, vha, 0x300c,
3146 "qla2x00_marker failed for cmd=%p.\n", cmd);
3147 return QLA_FUNCTION_FAILED;
3148 }
3149 vha->marker_needed = 0;
3150 }
3151
3152 /* Acquire ring specific lock */
3153 spin_lock_irqsave(&ha->hardware_lock, flags);
3154
3155 /* Check for room in outstanding command list. */
3156 handle = req->current_outstanding_cmd;
8d93f550 3157 for (index = 1; index < req->num_outstanding_cmds; index++) {
5162cf0c 3158 handle++;
8d93f550 3159 if (handle == req->num_outstanding_cmds)
3160 handle = 1;
3161 if (!req->outstanding_cmds[handle])
3162 break;
3163 }
8d93f550 3164 if (index == req->num_outstanding_cmds)
3165 goto queuing_error;
3166
3167 /* Map the sg table so we have an accurate count of sg entries needed */
3168 if (scsi_sg_count(cmd)) {
3169 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3170 scsi_sg_count(cmd), cmd->sc_data_direction);
3171 if (unlikely(!nseg))
3172 goto queuing_error;
3173 } else
3174 nseg = 0;
3175
3176 tot_dsds = nseg;
3177
3178 if (tot_dsds > ql2xshiftctondsd) {
3179 struct cmd_type_6 *cmd_pkt;
3180 uint16_t more_dsd_lists = 0;
3181 struct dsd_dma *dsd_ptr;
3182 uint16_t i;
3183
3184 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3185 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3186 ql_dbg(ql_dbg_io, vha, 0x300d,
3187 "Num of DSD list %d is than %d for cmd=%p.\n",
3188 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3189 cmd);
3190 goto queuing_error;
3191 }
3192
3193 if (more_dsd_lists <= ha->gbl_dsd_avail)
3194 goto sufficient_dsds;
3195 else
3196 more_dsd_lists -= ha->gbl_dsd_avail;
3197
3198 for (i = 0; i < more_dsd_lists; i++) {
3199 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3200 if (!dsd_ptr) {
3201 ql_log(ql_log_fatal, vha, 0x300e,
3202 "Failed to allocate memory for dsd_dma "
3203 "for cmd=%p.\n", cmd);
3204 goto queuing_error;
3205 }
3206
3207 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3208 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3209 if (!dsd_ptr->dsd_addr) {
3210 kfree(dsd_ptr);
3211 ql_log(ql_log_fatal, vha, 0x300f,
3212 "Failed to allocate memory for dsd_addr "
3213 "for cmd=%p.\n", cmd);
3214 goto queuing_error;
3215 }
3216 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3217 ha->gbl_dsd_avail++;
3218 }
3219
3220sufficient_dsds:
3221 req_cnt = 1;
3222
3223 if (req->cnt < (req_cnt + 2)) {
3224 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3225 &reg->req_q_out[0]);
3226 if (req->ring_index < cnt)
3227 req->cnt = cnt - req->ring_index;
3228 else
3229 req->cnt = req->length -
3230 (req->ring_index - cnt);
3231 if (req->cnt < (req_cnt + 2))
3232 goto queuing_error;
3233 }
3234
3235 ctx = sp->u.scmd.ctx =
3236 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3237 if (!ctx) {
3238 ql_log(ql_log_fatal, vha, 0x3010,
3239 "Failed to allocate ctx for cmd=%p.\n", cmd);
3240 goto queuing_error;
3241 }
9ba56b95 3242
5162cf0c 3243 memset(ctx, 0, sizeof(struct ct6_dsd));
501017f6 3244 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3245 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3246 if (!ctx->fcp_cmnd) {
3247 ql_log(ql_log_fatal, vha, 0x3011,
3248 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
841f97bf 3249 goto queuing_error;
3250 }
3251
3252 /* Initialize the DSD list and dma handle */
3253 INIT_LIST_HEAD(&ctx->dsd_list);
3254 ctx->dsd_use_cnt = 0;
3255
3256 if (cmd->cmd_len > 16) {
3257 additional_cdb_len = cmd->cmd_len - 16;
3258 if ((cmd->cmd_len % 4) != 0) {
3259 /* SCSI command bigger than 16 bytes must be
3260 * multiple of 4
3261 */
3262 ql_log(ql_log_warn, vha, 0x3012,
3263 "scsi cmd len %d not multiple of 4 "
3264 "for cmd=%p.\n", cmd->cmd_len, cmd);
3265 goto queuing_error_fcp_cmnd;
3266 }
3267 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3268 } else {
3269 additional_cdb_len = 0;
3270 ctx->fcp_cmnd_len = 12 + 16 + 4;
3271 }
3272
3273 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3274 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3275
3276 /* Zero out remaining portion of packet. */
3277 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3278 clr_ptr = (uint32_t *)cmd_pkt + 2;
3279 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3280 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3281
3282 /* Set NPORT-ID and LUN number*/
3283 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3284 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3285 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3286 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 3287 cmd_pkt->vp_index = sp->vha->vp_idx;
3288
3289 /* Build IOCB segments */
3290 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3291 goto queuing_error_fcp_cmnd;
3292
9ba56b95 3293 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3294 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3295
3296 /* build FCP_CMND IU */
9ba56b95 3297 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3298 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3299
3300 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3301 ctx->fcp_cmnd->additional_cdb_len |= 1;
3302 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3303 ctx->fcp_cmnd->additional_cdb_len |= 2;
3304
3305 /* Populate the FCP_PRIO. */
3306 if (ha->flags.fcp_prio_enabled)
3307 ctx->fcp_cmnd->task_attribute |=
3308 sp->fcport->fcp_prio << 3;
3309
3310 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3311
3312 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3313 additional_cdb_len);
3314 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3315
3316 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3317 put_unaligned_le64(ctx->fcp_cmnd_dma,
3318 &cmd_pkt->fcp_cmnd_dseg_address);
3319
3320 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3321 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3322 /* Set total data segment count. */
3323 cmd_pkt->entry_count = (uint8_t)req_cnt;
3324 /* Specify response queue number where
3325 * completion should happen
3326 */
3327 cmd_pkt->entry_status = (uint8_t) rsp->id;
3328 } else {
3329 struct cmd_type_7 *cmd_pkt;
bd432bb5 3330
3331 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3332 if (req->cnt < (req_cnt + 2)) {
3333 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3334 &reg->req_q_out[0]);
3335 if (req->ring_index < cnt)
3336 req->cnt = cnt - req->ring_index;
3337 else
3338 req->cnt = req->length -
3339 (req->ring_index - cnt);
3340 }
3341 if (req->cnt < (req_cnt + 2))
3342 goto queuing_error;
3343
3344 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3345 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3346
3347 /* Zero out remaining portion of packet. */
3348 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3349 clr_ptr = (uint32_t *)cmd_pkt + 2;
3350 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3351 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3352
3353 /* Set NPORT-ID and LUN number*/
3354 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3355 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3356 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3357 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 3358 cmd_pkt->vp_index = sp->vha->vp_idx;
5162cf0c 3359
9ba56b95 3360 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
5162cf0c 3361 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
9ba56b95 3362 sizeof(cmd_pkt->lun));
5162cf0c 3363
3364 /* Populate the FCP_PRIO. */
3365 if (ha->flags.fcp_prio_enabled)
3366 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3367
3368 /* Load SCSI command packet. */
3369 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3370 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3371
3372 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3373
3374 /* Build IOCB segments */
d7459527 3375 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3376
3377 /* Set total data segment count. */
3378 cmd_pkt->entry_count = (uint8_t)req_cnt;
3379 /* Specify response queue number where
3380 * completion should happen.
3381 */
3382 cmd_pkt->entry_status = (uint8_t) rsp->id;
3383
3384 }
3385 /* Build command packet. */
3386 req->current_outstanding_cmd = handle;
3387 req->outstanding_cmds[handle] = sp;
3388 sp->handle = handle;
9ba56b95 3389 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3390 req->cnt -= req_cnt;
3391 wmb();
3392
3393 /* Adjust ring index. */
3394 req->ring_index++;
3395 if (req->ring_index == req->length) {
3396 req->ring_index = 0;
3397 req->ring_ptr = req->ring;
3398 } else
3399 req->ring_ptr++;
3400
3401 sp->flags |= SRB_DMA_VALID;
3402
3403 /* Set chip new ring index. */
3404 /* write, read and verify logic */
3405 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3406 if (ql2xdbwr)
8dfa4b5a 3407 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
5162cf0c 3408 else {
8dfa4b5a 3409 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
5162cf0c 3410 wmb();
3411 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3412 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3413 wmb();
3414 }
3415 }
3416
3417 /* Manage unprocessed RIO/ZIO commands in response queue. */
3418 if (vha->flags.process_response_queue &&
3419 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3420 qla24xx_process_response_queue(vha, rsp);
3421
3422 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3423 return QLA_SUCCESS;
3424
3425queuing_error_fcp_cmnd:
3426 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3427queuing_error:
3428 if (tot_dsds)
3429 scsi_dma_unmap(cmd);
3430
3431 if (sp->u.scmd.ctx) {
3432 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3433 sp->u.scmd.ctx = NULL;
3434 }
3435 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3436
3437 return QLA_FUNCTION_FAILED;
3438}
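/*
 * Illustrative sketch, not driver code: the ISP82xx doorbell above uses a
 * "write, read and verify" pattern, re-posting the value until the chip
 * reflects it (the real path also inserts wmb() between steps). Hypothetical
 * register accessors.
 */
static void demo_doorbell_write_verify(volatile uint32_t *wr,
				       volatile uint32_t *rd, uint32_t dbval)
{
	*wr = dbval;
	while (*rd != dbval)	/* spin until the chip latches the value */
		*wr = dbval;	/* lost write: post it again */
}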
3439
6d78e557 3440static void
3441qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3442{
3443 struct srb_iocb *aio = &sp->u.iocb_cmd;
25ff6af1 3444 scsi_qla_host_t *vha = sp->vha;
49cecca7 3445 struct req_que *req = sp->qpair->req;
3446
3447 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3448 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3449 abt_iocb->entry_count = 1;
f3767225 3450 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3451 if (sp->fcport) {
3452 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3453 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3454 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3455 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3456 }
4440e46d 3457 abt_iocb->handle_to_abort =
3458 cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3459 aio->u.abt.cmd_hndl));
4440e46d 3460 abt_iocb->vp_index = vha->vp_idx;
b027a5ac 3461 abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
4440e46d
AB
3462 /* Send the command to the firmware */
3463 wmb();
3464}
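/*
 * Handle-packing sketch (assuming MAKE_HANDLE() is the usual
 * (queue_id << 16) | per_queue_handle fold): req->id = 1 and
 * sp->handle = 0x2a would give abt_iocb->handle = 0x0001002a.
 * Note that ->handle tags the abort IOCB itself, while
 * ->handle_to_abort names the command being aborted, which may
 * live on a different request queue (aio->u.abt.req_que_no).
 */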
3465
726b8548
QT
3466static void
3467qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3468{
3469 int i, sz;
3470
3471 mbx->entry_type = MBX_IOCB_TYPE;
3472 mbx->handle = sp->handle;
3473 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3474
3475 for (i = 0; i < sz; i++)
3476 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3477}
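/*
 * Mailbox passthrough: the SRB's out_mb[] array is copied verbatim
 * into the MBX IOCB as little-endian words. The min() over the two
 * array sizes guards against the IOCB having fewer mb[] slots than
 * the SRB carries (or vice versa).
 */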
3478
3479static void
3480qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3481{
3482 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3483 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3484 ct_pkt->handle = sp->handle;
3485}
3486
3487static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3488 struct nack_to_isp *nack)
3489{
3490 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3491
3492 nack->entry_type = NOTIFY_ACK_TYPE;
3493 nack->entry_count = 1;
3494 nack->ox_id = ntfy->ox_id;
3495
3496 nack->u.isp24.handle = sp->handle;
3497 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3498 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3499 nack->u.isp24.flags = ntfy->u.isp24.flags &
3500 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3501 }
3502 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3503 nack->u.isp24.status = ntfy->u.isp24.status;
3504 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3505 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3506 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3507 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3508 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3509 nack->u.isp24.srr_flags = 0;
3510 nack->u.isp24.srr_reject_code = 0;
3511 nack->u.isp24.srr_reject_code_expl = 0;
3512 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3513}
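/*
 * The notify-ack echoes the identifying fields of the incoming
 * immediate notify (ox_id, nport_handle, exchange_address, fw_handle,
 * SRR offsets) back to the firmware so it can match the ACK to the
 * original notification; srr_flags and the SRR reject codes are
 * cleared, i.e. no SRR rejection is requested here.
 */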
3514
e84067d7
DG
3515/*
3516 * Build NVME LS request
3517 */
3518static int
3519qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3520{
3521 struct srb_iocb *nvme;
3522 int rval = QLA_SUCCESS;
3523
3524 nvme = &sp->u.iocb_cmd;
3525 cmd_pkt->entry_type = PT_LS4_REQUEST;
3526 cmd_pkt->entry_count = 1;
3527 cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3528
3529 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3530 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3531 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3532
3533 cmd_pkt->tx_dseg_count = 1;
3534 cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
15b7a68c
BVA
3535 cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
3536 put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
e84067d7
DG
3537
3538 cmd_pkt->rx_dseg_count = 1;
3539 cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
15b7a68c
BVA
3540 cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
3541 put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
e84067d7
DG
3542
3543 return rval;
3544}
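/*
 * DSD layout sketch for the PT_LS4 request built above: dsd[0] is the
 * single TX descriptor (the NVMe-FC LS command buffer) and dsd[1] the
 * single RX descriptor (the LS response buffer); both DMA addresses
 * are stored as unaligned little-endian 64-bit values.
 */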
3545
2853192e
QT
3546static void
3547qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3548{
3549 int map, pos;
3550
3551 vce->entry_type = VP_CTRL_IOCB_TYPE;
3552 vce->handle = sp->handle;
3553 vce->entry_count = 1;
3554 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3555 vce->vp_count = cpu_to_le16(1);
3556
 3557 /*
 3558 * The index map in the firmware starts at 1, so decrement the index;
 3559 * this is OK as we never use index 0.
 3560 */
3561 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3562 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3563 vce->vp_idx_map[map] |= 1 << pos;
3564}
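/*
 * Worked example of the bitmap math above: vp_index = 9 gives
 * map = (9 - 1) / 8 = 1 and pos = (9 - 1) & 7 = 0, i.e. bit 0 of
 * vp_idx_map[1]; vp_index = 1 lands on bit 0 of vp_idx_map[0].
 */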
3565
11aea16a
QT
3566static void
3567qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3568{
3569 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3570 logio->control_flags =
3571 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3572
3573 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3574 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3575 logio->port_id[1] = sp->fcport->d_id.b.area;
3576 logio->port_id[2] = sp->fcport->d_id.b.domain;
3577 logio->vp_index = sp->fcport->vha->vp_idx;
3578}
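/*
 * The PRLO (process logout) IOCB addresses the remote port two ways:
 * by the 16-bit loop id (nport_handle) and by the 24-bit FC D_ID
 * stored byte-wise as al_pa/area/domain.
 */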
3579
ac280b67
AV
3580int
3581qla2x00_start_sp(srb_t *sp)
3582{
80676d05 3583 int rval = QLA_SUCCESS;
25ff6af1 3584 scsi_qla_host_t *vha = sp->vha;
726b8548 3585 struct qla_hw_data *ha = vha->hw;
6a629468 3586 struct qla_qpair *qp = sp->qpair;
ac280b67 3587 void *pkt;
ac280b67
AV
3588 unsigned long flags;
3589
6a629468
QT
3590 spin_lock_irqsave(qp->qp_lock_ptr, flags);
3591 pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
7c3df132 3592 if (!pkt) {
80676d05 3593 rval = EAGAIN;
726b8548 3594 ql_log(ql_log_warn, vha, 0x700c,
7c3df132 3595 "qla2x00_alloc_iocbs failed.\n");
ac280b67 3596 goto done;
7c3df132 3597 }
ac280b67 3598
9ba56b95 3599 switch (sp->type) {
ac280b67
AV
3600 case SRB_LOGIN_CMD:
3601 IS_FWI2_CAPABLE(ha) ?
5ff1d584 3602 qla24xx_login_iocb(sp, pkt) :
ac280b67
AV
3603 qla2x00_login_iocb(sp, pkt);
3604 break;
a5d42f4c
DG
3605 case SRB_PRLI_CMD:
3606 qla24xx_prli_iocb(sp, pkt);
3607 break;
ac280b67
AV
3608 case SRB_LOGOUT_CMD:
3609 IS_FWI2_CAPABLE(ha) ?
5ff1d584 3610 qla24xx_logout_iocb(sp, pkt) :
ac280b67
AV
3611 qla2x00_logout_iocb(sp, pkt);
3612 break;
9a069e19
GM
3613 case SRB_ELS_CMD_RPT:
3614 case SRB_ELS_CMD_HST:
3615 qla24xx_els_iocb(sp, pkt);
3616 break;
3617 case SRB_CT_CMD:
9bc4f4fb 3618 IS_FWI2_CAPABLE(ha) ?
5780790e
AV
3619 qla24xx_ct_iocb(sp, pkt) :
3620 qla2x00_ct_iocb(sp, pkt);
9a069e19 3621 break;
5ff1d584
AV
3622 case SRB_ADISC_CMD:
3623 IS_FWI2_CAPABLE(ha) ?
3624 qla24xx_adisc_iocb(sp, pkt) :
3625 qla2x00_adisc_iocb(sp, pkt);
3626 break;
3822263e 3627 case SRB_TM_CMD:
8ae6d9c7
GM
3628 IS_QLAFX00(ha) ?
3629 qlafx00_tm_iocb(sp, pkt) :
3630 qla24xx_tm_iocb(sp, pkt);
3631 break;
3632 case SRB_FXIOCB_DCMD:
3633 case SRB_FXIOCB_BCMD:
3634 qlafx00_fxdisc_iocb(sp, pkt);
3635 break;
e84067d7
DG
3636 case SRB_NVME_LS:
3637 qla_nvme_ls(sp, pkt);
3638 break;
8ae6d9c7 3639 case SRB_ABT_CMD:
4440e46d
AB
3640 IS_QLAFX00(ha) ?
3641 qlafx00_abort_iocb(sp, pkt) :
3642 qla24xx_abort_iocb(sp, pkt);
3822263e 3643 break;
6eb54715
HM
3644 case SRB_ELS_DCMD:
3645 qla24xx_els_logo_iocb(sp, pkt);
3646 break;
726b8548
QT
3647 case SRB_CT_PTHRU_CMD:
3648 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3649 break;
3650 case SRB_MB_IOCB:
3651 qla2x00_mb_iocb(sp, pkt);
3652 break;
3653 case SRB_NACK_PLOGI:
3654 case SRB_NACK_PRLI:
3655 case SRB_NACK_LOGO:
3656 qla2x00_send_notify_ack_iocb(sp, pkt);
3657 break;
2853192e
QT
3658 case SRB_CTRL_VP:
3659 qla25xx_ctrlvp_iocb(sp, pkt);
3660 break;
11aea16a
QT
3661 case SRB_PRLO_CMD:
3662 qla24xx_prlo_iocb(sp, pkt);
3663 break;
ac280b67
AV
3664 default:
3665 break;
3666 }
3667
3a4b6cc7
QT
3668 if (sp->start_timer)
3669 add_timer(&sp->u.iocb_cmd.timer);
3670
ac280b67 3671 wmb();
6a629468 3672 qla2x00_start_iocbs(vha, qp->req);
ac280b67 3673done:
6a629468 3674 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
ac280b67
AV
3675 return rval;
3676}
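/*
 * Typical caller sketch (names as used elsewhere in this driver; the
 * setup details vary per SRB type):
 *
 *	srb_t *sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 *	if (!sp)
 *		return QLA_FUNCTION_FAILED;
 *	sp->type = SRB_LOGIN_CMD;
 *	... fill sp->u.iocb_cmd, timeout, done/free callbacks ...
 *	if (qla2x00_start_sp(sp) != QLA_SUCCESS)
 *		... release sp ...
 *
 * All IOCB formatting happens under qp->qp_lock_ptr; the packet is
 * only handed to the firmware by qla2x00_start_iocbs() after the
 * wmb() above.
 */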
a9b6f722
SK
3677
3678static void
3679qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3680 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3681{
3682 uint16_t avail_dsds;
15b7a68c 3683 struct dsd64 *cur_dsd;
a9b6f722
SK
3684 uint32_t req_data_len = 0;
3685 uint32_t rsp_data_len = 0;
3686 struct scatterlist *sg;
3687 int index;
3688 int entry_count = 1;
75cc8cfc 3689 struct bsg_job *bsg_job = sp->u.bsg_job;
a9b6f722
SK
3690
 3691 /* Update entry type to indicate a bidirectional command */
2c26348c 3692 put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
a9b6f722
SK
3693
 3694 /* Set the transfer direction; in this case set both flags.
 3695 * Also set the BD_WRAP_BACK flag; the firmware will take care
 3696 * of assigning DID=SID for outgoing pkts.
 3697 */
3698 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3699 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
ad950360 3700 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
a9b6f722
SK
3701 BD_WRAP_BACK);
3702
3703 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3704 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3705 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3706 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3707
3708 vha->bidi_stats.transfer_bytes += req_data_len;
3709 vha->bidi_stats.io_count++;
3710
fabbb8df
JC
3711 vha->qla_stats.output_bytes += req_data_len;
3712 vha->qla_stats.output_requests++;
3713
a9b6f722
SK
 3714 /* Only one DSD is available in the bidirectional IOCB; the remaining
 3715 * DSDs are bundled in continuation IOCBs.
 3716 */
3717 avail_dsds = 1;
15b7a68c 3718 cur_dsd = &cmd_pkt->fcp_dsd;
a9b6f722
SK
3719
3720 index = 0;
3721
3722 for_each_sg(bsg_job->request_payload.sg_list, sg,
3723 bsg_job->request_payload.sg_cnt, index) {
a9b6f722
SK
3724 cont_a64_entry_t *cont_pkt;
3725
3726 /* Allocate additional continuation packets */
3727 if (avail_dsds == 0) {
 3728 /* A Continuation Type 1 IOCB can accommodate
 3729 * 5 DSDs.
 3730 */
3731 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
15b7a68c 3732 cur_dsd = cont_pkt->dsd;
a9b6f722
SK
3733 avail_dsds = 5;
3734 entry_count++;
3735 }
15b7a68c 3736 append_dsd64(&cur_dsd, sg);
a9b6f722
SK
3737 avail_dsds--;
3738 }
 3739 /* For the read request the DSDs always go to a continuation IOCB
 3740 * and follow the write DSDs. If there is room on the current IOCB
 3741 * they are added to that IOCB, else a new continuation IOCB is
 3742 * allocated.
 3743 */
3744 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3745 bsg_job->reply_payload.sg_cnt, index) {
a9b6f722
SK
3746 cont_a64_entry_t *cont_pkt;
3747
3748 /* Allocate additional continuation packets */
3749 if (avail_dsds == 0) {
 3750 /* A Continuation Type 1 IOCB can accommodate
 3751 * 5 DSDs.
 3752 */
3753 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
15b7a68c 3754 cur_dsd = cont_pkt->dsd;
a9b6f722
SK
3755 avail_dsds = 5;
3756 entry_count++;
3757 }
15b7a68c 3758 append_dsd64(&cur_dsd, sg);
a9b6f722
SK
3759 avail_dsds--;
3760 }
 3761 /* This value should be the same as the number of IOCBs required for this cmd */
3762 cmd_pkt->entry_count = entry_count;
3763}
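/*
 * Entry-count arithmetic: the bidir IOCB itself holds one DSD
 * (fcp_dsd) and each Continuation Type 1 IOCB holds five, so for
 * tot_dsds = 8 the loops above produce entry_count = 1 + 2 = 3
 * (one DSD inline, then 5 + 2 in two continuation packets).
 */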
3764
3765int
3766qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3767{
3768
3769 struct qla_hw_data *ha = vha->hw;
3770 unsigned long flags;
3771 uint32_t handle;
3772 uint32_t index;
3773 uint16_t req_cnt;
3774 uint16_t cnt;
3775 uint32_t *clr_ptr;
3776 struct cmd_bidir *cmd_pkt = NULL;
3777 struct rsp_que *rsp;
3778 struct req_que *req;
3779 int rval = EXT_STATUS_OK;
a9b6f722
SK
3780
3781 rval = QLA_SUCCESS;
3782
3783 rsp = ha->rsp_q_map[0];
3784 req = vha->req;
3785
3786 /* Send marker if required */
3787 if (vha->marker_needed != 0) {
9eb9c6dc
QT
3788 if (qla2x00_marker(vha, ha->base_qpair,
3789 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
a9b6f722
SK
3790 return EXT_STATUS_MAILBOX;
3791 vha->marker_needed = 0;
3792 }
3793
3794 /* Acquire ring specific lock */
3795 spin_lock_irqsave(&ha->hardware_lock, flags);
3796
3797 /* Check for room in outstanding command list. */
3798 handle = req->current_outstanding_cmd;
8d93f550 3799 for (index = 1; index < req->num_outstanding_cmds; index++) {
a9b6f722 3800 handle++;
8d2b21db
BVA
3801 if (handle == req->num_outstanding_cmds)
3802 handle = 1;
3803 if (!req->outstanding_cmds[handle])
3804 break;
a9b6f722
SK
3805 }
3806
8d93f550 3807 if (index == req->num_outstanding_cmds) {
a9b6f722
SK
3808 rval = EXT_STATUS_BUSY;
3809 goto queuing_error;
3810 }
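	/*
	 * Handle allocation: handles 1..num_outstanding_cmds-1 are
	 * scanned circularly starting just past current_outstanding_cmd;
	 * handle 0 is never used (the wrap goes back to 1). If the scan
	 * comes full circle without a free slot, the ring is saturated
	 * and the request is bounced with EXT_STATUS_BUSY.
	 */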
3811
3812 /* Calculate number of IOCB required */
3813 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3814
3815 /* Check for room on request queue. */
3816 if (req->cnt < req_cnt + 2) {
7c6300e3
JC
3817 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3818 RD_REG_DWORD_RELAXED(req->req_q_out);
a9b6f722
SK
3819 if (req->ring_index < cnt)
3820 req->cnt = cnt - req->ring_index;
3821 else
3822 req->cnt = req->length -
3823 (req->ring_index - cnt);
3824 }
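	/*
	 * Free-slot math for the circular request ring, e.g. with
	 * req->length = 2048: if ring_index = 10 and the out pointer
	 * reads cnt = 2000, the producer trails the consumer and
	 * req->cnt = 2000 - 10 = 1990; if instead cnt = 5, the ring has
	 * wrapped and req->cnt = 2048 - (10 - 5) = 2043.
	 */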
3825 if (req->cnt < req_cnt + 2) {
3826 rval = EXT_STATUS_BUSY;
3827 goto queuing_error;
3828 }
3829
3830 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3831 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3832
3833 /* Zero out remaining portion of packet. */
 3834 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3835 clr_ptr = (uint32_t *)cmd_pkt + 2;
3836 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3837
 3838 /* Set NPORT-ID (of vha) */
3839 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3840 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3841 cmd_pkt->port_id[1] = vha->d_id.b.area;
3842 cmd_pkt->port_id[2] = vha->d_id.b.domain;
3843
3844 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3845 cmd_pkt->entry_status = (uint8_t) rsp->id;
3846 /* Build command packet. */
3847 req->current_outstanding_cmd = handle;
3848 req->outstanding_cmds[handle] = sp;
3849 sp->handle = handle;
3850 req->cnt -= req_cnt;
3851
3852 /* Send the command to the firmware */
3853 wmb();
3854 qla2x00_start_iocbs(vha, req);
3855queuing_error:
3856 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3857 return rval;
3858}