/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB for the SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
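
/*
 * Worked example of the sizing above: for dsds = 12, three descriptors
 * fit in the Command Type 2 IOCB itself and the remaining nine need
 * ceil(9 / 7) = 2 Continuation Type 0 IOCBs, so the function returns 3.
 */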

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
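
/*
 * Worked example: for dsds = 12, two descriptors fit in the Command
 * Type 3 IOCB and the remaining ten fill exactly 10 / 5 = 2
 * Continuation Type 1 IOCBs, so the function returns 3.
 */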

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
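
/*
 * Summary of the mapping above: host-side INSERT operations become
 * PO_MODE_DIF_INSERT, host-side STRIP operations become
 * PO_MODE_DIF_REMOVE, and PASS operations carry the protection data end
 * to end, using IP-checksum guard offload (PO_MODE_DIF_TCP_CKSUM) when
 * the host advertises SHOST_DIX_GUARD_IP.
 */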

/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;

	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
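
/*
 * Worked example: each DSD list holds up to QLA_DSDS_PER_IOCB entries,
 * so the computation above is simply ceil(dsds / QLA_DSDS_PER_IOCB);
 * e.g. dsds = QLA_DSDS_PER_IOCB + 1 yields two lists.
 */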

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
			0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}

int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
890}
891
f83adb61 892int
8cb2049c 893qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
be25152c 894 uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
8cb2049c
AE
895{
896 void *next_dsd;
897 uint8_t avail_dsds = 0;
898 uint32_t dsd_list_len;
899 struct dsd_dma *dsd_ptr;
900 struct scatterlist *sg_prot;
901 uint32_t *cur_dsd = dsd;
902 uint16_t used_dsds = tot_dsds;
f83adb61 903 uint32_t prot_int; /* protection interval */
8cb2049c
AE
904 uint32_t partial;
905 struct qla2_sgx sgx;
906 dma_addr_t sle_dma;
907 uint32_t sle_dma_len, tot_prot_dma_len = 0;
f83adb61 908 struct scsi_cmnd *cmd;
8cb2049c
AE
909
910 memset(&sgx, 0, sizeof(struct qla2_sgx));
f83adb61 911 if (sp) {
f83adb61
QT
912 cmd = GET_CMD_SP(sp);
913 prot_int = cmd->device->sector_size;
914
915 sgx.tot_bytes = scsi_bufflen(cmd);
916 sgx.cur_sg = scsi_sglist(cmd);
917 sgx.sp = sp;
918
919 sg_prot = scsi_prot_sglist(cmd);
920 } else if (tc) {
f83adb61
QT
921 prot_int = tc->blk_sz;
922 sgx.tot_bytes = tc->bufflen;
923 sgx.cur_sg = tc->sg;
924 sg_prot = tc->prot_sg;
925 } else {
926 BUG();
927 return 1;
928 }
8cb2049c
AE
929
930 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
931
932 sle_dma = sgx.dma_addr;
933 sle_dma_len = sgx.dma_len;
934alloc_and_fill:
935 /* Allocate additional continuation packets? */
936 if (avail_dsds == 0) {
937 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
938 QLA_DSDS_PER_IOCB : used_dsds;
939 dsd_list_len = (avail_dsds + 1) * 12;
940 used_dsds -= avail_dsds;
941
942 /* allocate tracking DS */
943 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
944 if (!dsd_ptr)
945 return 1;
946
947 /* allocate new list */
948 dsd_ptr->dsd_addr = next_dsd =
949 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
950 &dsd_ptr->dsd_list_dma);
951
952 if (!next_dsd) {
953 /*
954 * Need to cleanup only this dsd_ptr, rest
955 * will be done by sp_free_dma()
956 */
957 kfree(dsd_ptr);
958 return 1;
959 }
960
f83adb61
QT
961 if (sp) {
962 list_add_tail(&dsd_ptr->list,
963 &((struct crc_context *)
964 sp->u.scmd.ctx)->dsd_list);
965
966 sp->flags |= SRB_CRC_CTX_DSD_VALID;
967 } else {
968 list_add_tail(&dsd_ptr->list,
969 &(tc->ctx->dsd_list));
be25152c 970 *tc->ctx_dsd_alloced = 1;
f83adb61 971 }
8cb2049c 972
8cb2049c
AE
973
974 /* add new list to cmd iocb or last list */
975 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
976 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
977 *cur_dsd++ = dsd_list_len;
978 cur_dsd = (uint32_t *)next_dsd;
979 }
980 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
981 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
982 *cur_dsd++ = cpu_to_le32(sle_dma_len);
983 avail_dsds--;
984
985 if (partial == 0) {
986 /* Got a full protection interval */
987 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
988 sle_dma_len = 8;
bad75002 989
8cb2049c
AE
990 tot_prot_dma_len += sle_dma_len;
991 if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
992 tot_prot_dma_len = 0;
993 sg_prot = sg_next(sg_prot);
994 }
995
996 partial = 1; /* So as to not re-enter this block */
997 goto alloc_and_fill;
998 }
999 }
1000 /* Null termination */
1001 *cur_dsd++ = 0;
1002 *cur_dsd++ = 0;
1003 *cur_dsd++ = 0;
1004 return 0;
1005}

int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	uint32_t *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	int i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	struct scsi_qla_host *vha;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe021,
	    "%s: enter\n", __func__);

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Protection options to pass to the firmware
 */
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t *cur_dsd, *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	uint8_t *clr_ptr;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;
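	/*
	 * Standard T10 DIF attaches one 8-byte protection tuple (guard,
	 * app tag, ref tag) per logical block, hence the "* 8" above.
	 */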

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
			tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}
1747
d7459527
MH
1748/**
1749 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1750 * @sp: command to send to the ISP
1751 *
1752 * Returns non-zero if a failure occurred, else zero.
1753 */
1754static int
1755qla2xxx_start_scsi_mq(srb_t *sp)
68ca949c 1756{
d7459527
MH
1757 int nseg;
1758 unsigned long flags;
1759 uint32_t *clr_ptr;
1760 uint32_t index;
1761 uint32_t handle;
1762 struct cmd_type_7 *cmd_pkt;
1763 uint16_t cnt;
1764 uint16_t req_cnt;
1765 uint16_t tot_dsds;
1766 struct req_que *req = NULL;
1767 struct rsp_que *rsp = NULL;
9ba56b95 1768 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
d7459527
MH
1769 struct scsi_qla_host *vha = sp->fcport->vha;
1770 struct qla_hw_data *ha = vha->hw;
1771 struct qla_qpair *qpair = sp->qpair;
1772
1773 /* Setup qpair pointers */
1774 rsp = qpair->rsp;
1775 req = qpair->req;
1776
1777 /* So we know we haven't pci_map'ed anything yet */
1778 tot_dsds = 0;
1779
1780 /* Send marker if required */
1781 if (vha->marker_needed != 0) {
1782 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1783 QLA_SUCCESS)
1784 return QLA_FUNCTION_FAILED;
1785 vha->marker_needed = 0;
1786 }
1787
1788 /* Acquire qpair specific lock */
1789 spin_lock_irqsave(&qpair->qp_lock, flags);
1790
1791 /* Check for room in outstanding command list. */
1792 handle = req->current_outstanding_cmd;
1793 for (index = 1; index < req->num_outstanding_cmds; index++) {
1794 handle++;
1795 if (handle == req->num_outstanding_cmds)
1796 handle = 1;
1797 if (!req->outstanding_cmds[handle])
1798 break;
1799 }
1800 if (index == req->num_outstanding_cmds)
1801 goto queuing_error;
1802
1803 /* Map the sg table so we have an accurate count of sg entries needed */
1804 if (scsi_sg_count(cmd)) {
1805 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1806 scsi_sg_count(cmd), cmd->sc_data_direction);
1807 if (unlikely(!nseg))
1808 goto queuing_error;
1809 } else
1810 nseg = 0;
1811
1812 tot_dsds = nseg;
1813 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1814 if (req->cnt < (req_cnt + 2)) {
1815 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1816 RD_REG_DWORD_RELAXED(req->req_q_out);
1817 if (req->ring_index < cnt)
1818 req->cnt = cnt - req->ring_index;
1819 else
1820 req->cnt = req->length -
1821 (req->ring_index - cnt);
1822 if (req->cnt < (req_cnt + 2))
1823 goto queuing_error;
1824 }
1825
1826 /* Build command packet. */
1827 req->current_outstanding_cmd = handle;
1828 req->outstanding_cmds[handle] = sp;
1829 sp->handle = handle;
1830 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1831 req->cnt -= req_cnt;
1832
1833 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1834 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1835
1836 /* Zero out remaining portion of packet. */
1837 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1838 clr_ptr = (uint32_t *)cmd_pkt + 2;
1839 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1840 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1841
1842 /* Set NPORT-ID and LUN number*/
1843 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1844 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1845 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1846 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1847 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1848
1849 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1850 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1851
1852 cmd_pkt->task = TSK_SIMPLE;
1853
1854 /* Load SCSI command packet. */
1855 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1856 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1857
1858 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1859
1860 /* Build IOCB segments */
1861 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1862
1863 /* Set total data segment count. */
1864 cmd_pkt->entry_count = (uint8_t)req_cnt;
1865 wmb();
1866 /* Adjust ring index. */
1867 req->ring_index++;
1868 if (req->ring_index == req->length) {
1869 req->ring_index = 0;
1870 req->ring_ptr = req->ring;
1871 } else
1872 req->ring_ptr++;
1873
1874 sp->flags |= SRB_DMA_VALID;
1875
1876 /* Set chip new ring index. */
1877 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1878
1879 /* Manage unprocessed RIO/ZIO commands in response queue. */
1880 if (vha->flags.process_response_queue &&
1881 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1882 qla24xx_process_response_queue(vha, rsp);
1883
1884 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1885 return QLA_SUCCESS;
1886
1887queuing_error:
1888 if (tot_dsds)
1889 scsi_dma_unmap(cmd);
1890
1891 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1892
1893 return QLA_FUNCTION_FAILED;
1894}
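
/*
 * Illustrative sketch, not part of the driver: the circular scan for a
 * free slot in req->outstanding_cmds[] used by the start_scsi routines
 * above.  Handle 0 is reserved, so the search wraps from the last slot
 * back to 1.  The helper name is hypothetical; it returns 0 when no
 * slot is free.
 */
static inline uint32_t example_find_free_handle(struct req_que *req)
{
	uint32_t index, handle = req->current_outstanding_cmd;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			return handle;
	}
	return 0;	/* array full */
}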
1895
1896
1897/**
1898 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1899 * @sp: command to send to the ISP
1900 *
1901 * Returns non-zero if a failure occurred, else zero.
1902 */
1903int
1904qla2xxx_dif_start_scsi_mq(srb_t *sp)
1905{
1906 int nseg;
1907 unsigned long flags;
1908 uint32_t *clr_ptr;
1909 uint32_t index;
1910 uint32_t handle;
1911 uint16_t cnt;
1912 uint16_t req_cnt = 0;
1913 uint16_t tot_dsds;
1914 uint16_t tot_prot_dsds;
1915 uint16_t fw_prot_opts = 0;
1916 struct req_que *req = NULL;
1917 struct rsp_que *rsp = NULL;
1918 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1919 struct scsi_qla_host *vha = sp->fcport->vha;
1920 struct qla_hw_data *ha = vha->hw;
1921 struct cmd_type_crc_2 *cmd_pkt;
1922 uint32_t status = 0;
1923 struct qla_qpair *qpair = sp->qpair;
1924
1925#define QDSS_GOT_Q_SPACE BIT_0
1926
1927 /* Check for host side state */
1928 if (!qpair->online) {
1929 cmd->result = DID_NO_CONNECT << 16;
1930 return QLA_INTERFACE_ERROR;
1931 }
1932
1933 if (!qpair->difdix_supported &&
1934 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1935 cmd->result = DID_NO_CONNECT << 16;
1936 return QLA_INTERFACE_ERROR;
1937 }
1938
 1939	/* Only process protection ops or CDBs longer than 16 bytes here */
1940 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1941 if (cmd->cmd_len <= 16)
1942 return qla2xxx_start_scsi_mq(sp);
1943 }
1944
1945 /* Setup qpair pointers */
1946 rsp = qpair->rsp;
1947 req = qpair->req;
1948
1949 /* So we know we haven't pci_map'ed anything yet */
1950 tot_dsds = 0;
1951
1952 /* Send marker if required */
1953 if (vha->marker_needed != 0) {
1954 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1955 QLA_SUCCESS)
1956 return QLA_FUNCTION_FAILED;
1957 vha->marker_needed = 0;
1958 }
1959
1960 /* Acquire ring specific lock */
1961 spin_lock_irqsave(&qpair->qp_lock, flags);
1962
1963 /* Check for room in outstanding command list. */
1964 handle = req->current_outstanding_cmd;
1965 for (index = 1; index < req->num_outstanding_cmds; index++) {
1966 handle++;
1967 if (handle == req->num_outstanding_cmds)
1968 handle = 1;
1969 if (!req->outstanding_cmds[handle])
1970 break;
1971 }
1972
1973 if (index == req->num_outstanding_cmds)
1974 goto queuing_error;
1975
1976 /* Compute number of required data segments */
1977 /* Map the sg table so we have an accurate count of sg entries needed */
1978 if (scsi_sg_count(cmd)) {
1979 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1980 scsi_sg_count(cmd), cmd->sc_data_direction);
1981 if (unlikely(!nseg))
1982 goto queuing_error;
1983 else
1984 sp->flags |= SRB_DMA_VALID;
1985
1986 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1987 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1988 struct qla2_sgx sgx;
1989 uint32_t partial;
1990
1991 memset(&sgx, 0, sizeof(struct qla2_sgx));
1992 sgx.tot_bytes = scsi_bufflen(cmd);
1993 sgx.cur_sg = scsi_sglist(cmd);
1994 sgx.sp = sp;
1995
1996 nseg = 0;
1997 while (qla24xx_get_one_block_sg(
1998 cmd->device->sector_size, &sgx, &partial))
1999 nseg++;
2000 }
2001 } else
2002 nseg = 0;
2003
2004 /* number of required data segments */
2005 tot_dsds = nseg;
2006
2007 /* Compute number of required protection segments */
2008 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2009 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2010 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2011 if (unlikely(!nseg))
2012 goto queuing_error;
2013 else
2014 sp->flags |= SRB_CRC_PROT_DMA_VALID;
2015
2016 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2017 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2018 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2019 }
2020 } else {
2021 nseg = 0;
2022 }
2023
2024 req_cnt = 1;
2025 /* Total Data and protection sg segment(s) */
2026 tot_prot_dsds = nseg;
2027 tot_dsds += nseg;
2028 if (req->cnt < (req_cnt + 2)) {
2029 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2030 RD_REG_DWORD_RELAXED(req->req_q_out);
2031 if (req->ring_index < cnt)
2032 req->cnt = cnt - req->ring_index;
2033 else
2034 req->cnt = req->length -
2035 (req->ring_index - cnt);
2036 if (req->cnt < (req_cnt + 2))
2037 goto queuing_error;
2038 }
2039
2040 status |= QDSS_GOT_Q_SPACE;
2041
2042 /* Build header part of command packet (excluding the OPCODE). */
2043 req->current_outstanding_cmd = handle;
2044 req->outstanding_cmds[handle] = sp;
2045 sp->handle = handle;
2046 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2047 req->cnt -= req_cnt;
2048
2049 /* Fill-in common area */
2050 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2051 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2052
2053 clr_ptr = (uint32_t *)cmd_pkt + 2;
2054 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2055
2056 /* Set NPORT-ID and LUN number*/
2057 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2058 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2059 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2060 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
68ca949c 2061
2062 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2063 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2064
2065 /* Total Data and protection segment(s) */
2066 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2067
2068 /* Build IOCB segments and adjust for data protection segments */
2069 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2070 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2071 QLA_SUCCESS)
2072 goto queuing_error;
2073
2074 cmd_pkt->entry_count = (uint8_t)req_cnt;
2075 cmd_pkt->timeout = cpu_to_le16(0);
2076 wmb();
2077
2078 /* Adjust ring index. */
2079 req->ring_index++;
2080 if (req->ring_index == req->length) {
2081 req->ring_index = 0;
2082 req->ring_ptr = req->ring;
2083 } else
2084 req->ring_ptr++;
2085
2086 /* Set chip new ring index. */
2087 WRT_REG_DWORD(req->req_q_in, req->ring_index);
2088
2089 /* Manage unprocessed RIO/ZIO commands in response queue. */
2090 if (vha->flags.process_response_queue &&
2091 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2092 qla24xx_process_response_queue(vha, rsp);
2093
2094 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2095
2096 return QLA_SUCCESS;
2097
2098queuing_error:
2099 if (status & QDSS_GOT_Q_SPACE) {
2100 req->outstanding_cmds[handle] = NULL;
2101 req->cnt += req_cnt;
2102 }
2103 /* Cleanup will be performed by the caller (queuecommand) */
2104
2105 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2106 return QLA_FUNCTION_FAILED;
68ca949c 2107}
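
/*
 * Illustrative sketch, not part of the driver: for the DIF "insert" and
 * "strip" operations handled above, the HBA adds or removes one 8-byte
 * protection tuple per logical block, so the data buffer is carved into
 * one DSD per block (the qla24xx_get_one_block_sg() loop).  Hypothetical
 * helper showing the resulting DSD count, e.g. 4096 bytes at a 512-byte
 * sector size yields 8 DSDs.
 */
static inline unsigned int example_dif_block_dsds(unsigned int bufflen,
    unsigned int sector_size)
{
	return bufflen / sector_size;	/* one DSD per logical block */
}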
2108
2109/* Generic Control-SRB manipulation functions. */
2110
2111/* hardware_lock assumed to be held. */
b6a029e1 2112
d94d10e7 2113void *
82de802a 2114__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
ac280b67 2115{
82de802a 2116 scsi_qla_host_t *vha = qpair->vha;
ac280b67 2117 struct qla_hw_data *ha = vha->hw;
82de802a 2118 struct req_que *req = qpair->req;
118e2ef9 2119 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2120 uint32_t index, handle;
2121 request_t *pkt;
2122 uint16_t cnt, req_cnt;
2123
2124 pkt = NULL;
2125 req_cnt = 1;
2126 handle = 0;
2127
2128 if (!sp)
2129 goto skip_cmd_array;
2130
2131 /* Check for room in outstanding command list. */
2132 handle = req->current_outstanding_cmd;
4b4f30cc 2133 for (index = 1; index < req->num_outstanding_cmds; index++) {
ac280b67 2134 handle++;
8d93f550 2135 if (handle == req->num_outstanding_cmds)
2136 handle = 1;
2137 if (!req->outstanding_cmds[handle])
2138 break;
2139 }
8d93f550 2140 if (index == req->num_outstanding_cmds) {
7c3df132 2141 ql_log(ql_log_warn, vha, 0x700b,
d6a03581 2142 "No room on outstanding cmd array.\n");
ac280b67 2143 goto queuing_error;
7c3df132 2144 }
ac280b67 2145
2146 /* Prep command array. */
2147 req->current_outstanding_cmd = handle;
2148 req->outstanding_cmds[handle] = sp;
2149 sp->handle = handle;
2150
5780790e 2151 /* Adjust entry-counts as needed. */
2152 if (sp->type != SRB_SCSI_CMD)
2153 req_cnt = sp->iocbs;
5780790e 2154
d94d10e7 2155skip_cmd_array:
ac280b67 2156 /* Check for room on request queue. */
94007037 2157 if (req->cnt < req_cnt + 2) {
f73cb695 2158 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
ac280b67 2159 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
7ec0effd 2160 else if (IS_P3P_TYPE(ha))
d94d10e7 2161 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2162 else if (IS_FWI2_CAPABLE(ha))
2163 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2164 else if (IS_QLAFX00(ha))
2165 cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2166 else
2167 cnt = qla2x00_debounce_register(
2168 ISP_REQ_Q_OUT(ha, &reg->isp));
2169
2170 if (req->ring_index < cnt)
2171 req->cnt = cnt - req->ring_index;
2172 else
2173 req->cnt = req->length -
2174 (req->ring_index - cnt);
2175 }
94007037 2176 if (req->cnt < req_cnt + 2)
2177 goto queuing_error;
2178
2179 /* Prep packet */
ac280b67 2180 req->cnt -= req_cnt;
2181 pkt = req->ring_ptr;
2182 memset(pkt, 0, REQUEST_ENTRY_SIZE);
8ae6d9c7 2183 if (IS_QLAFX00(ha)) {
2184 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2185 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2186 } else {
2187 pkt->entry_count = req_cnt;
2188 pkt->handle = handle;
2189 }
 2190
	return pkt;

 2191queuing_error:
60a9eadb 2192 qpair->tgt_counters.num_alloc_iocb_failed++;
2193 return pkt;
2194}
2195
2196void *
2197qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2198{
2199 scsi_qla_host_t *vha = qpair->vha;
2200
2201 if (qla2x00_reset_active(vha))
2202 return NULL;
2203
2204 return __qla2x00_alloc_iocbs(qpair, sp);
2205}
2206
2207void *
2208qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2209{
2210 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2211}
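
/*
 * Illustrative usage sketch, not from the driver source: callers are
 * expected to hold the hardware lock across allocation, IOCB fill-in and
 * the doorbell write, as qla2x00_start_sp() below does:
 *
 *	spin_lock_irqsave(&ha->hardware_lock, flags);
 *	pkt = qla2x00_alloc_iocbs(vha, sp);
 *	if (pkt) {
 *		... fill in the IOCB ...
 *		wmb();
 *		qla2x00_start_iocbs(vha, ha->req_q_map[0]);
 *	}
 *	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 */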
2212
2213static void
2214qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2215{
2216 struct srb_iocb *lio = &sp->u.iocb_cmd;
2217
2218 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2219 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2220 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
2221 logio->control_flags |= LCF_NVME_PRLI;
2222
2223 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2224 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2225 logio->port_id[1] = sp->fcport->d_id.b.area;
2226 logio->port_id[2] = sp->fcport->d_id.b.domain;
2227 logio->vp_index = sp->vha->vp_idx;
2228}
2229
2230static void
2231qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2232{
9ba56b95 2233 struct srb_iocb *lio = &sp->u.iocb_cmd;
2234
2235 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2236 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
a5d42f4c 2237
4916392b 2238 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
ac280b67 2239 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
4916392b 2240 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2241 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2242 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2243 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2244 logio->port_id[1] = sp->fcport->d_id.b.area;
2245 logio->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 2246 logio->vp_index = sp->vha->vp_idx;
2247}
2248
2249static void
2250qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2251{
25ff6af1 2252 struct qla_hw_data *ha = sp->vha->hw;
9ba56b95 2253 struct srb_iocb *lio = &sp->u.iocb_cmd;
2254 uint16_t opts;
2255
b963752f 2256 mbx->entry_type = MBX_IOCB_TYPE;
2257 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2258 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2259 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2260 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2261 if (HAS_EXTENDED_IDS(ha)) {
2262 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2263 mbx->mb10 = cpu_to_le16(opts);
2264 } else {
2265 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2266 }
2267 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2268 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2269 sp->fcport->d_id.b.al_pa);
25ff6af1 2270 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2271}
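
/*
 * Illustrative note, not from the driver source: on adapters without
 * extended IDs the loop ID and the option bits share mailbox 1, as
 * packed above.  Hypothetical example values:
 *
 *	loop_id = 0x81, opts = BIT_0  ->  mb1 = (0x81 << 8) | 0x01 = 0x8101
 */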
2272
2273static void
2274qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2275{
2276 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2277 logio->control_flags =
2278 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
726b8548 2279 if (!sp->fcport->se_sess ||
5d964837 2280 !sp->fcport->keep_nport_handle)
a6ca8878 2281 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2282 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2283 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2284 logio->port_id[1] = sp->fcport->d_id.b.area;
2285 logio->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 2286 logio->vp_index = sp->vha->vp_idx;
2287}
2288
2289static void
2290qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2291{
25ff6af1 2292 struct qla_hw_data *ha = sp->vha->hw;
ac280b67 2293
b963752f 2294 mbx->entry_type = MBX_IOCB_TYPE;
2295 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2296 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2297 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2298 cpu_to_le16(sp->fcport->loop_id):
2299 cpu_to_le16(sp->fcport->loop_id << 8);
2300 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2301 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2302 sp->fcport->d_id.b.al_pa);
25ff6af1 2303 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2304 /* Implicit: mbx->mbx10 = 0. */
2305}
2306
2307static void
2308qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2309{
2310 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2311 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2312 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
25ff6af1 2313 logio->vp_index = sp->vha->vp_idx;
2314}
2315
2316static void
2317qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2318{
25ff6af1 2319 struct qla_hw_data *ha = sp->vha->hw;
2320
2321 mbx->entry_type = MBX_IOCB_TYPE;
2322 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2323 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2324 if (HAS_EXTENDED_IDS(ha)) {
2325 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2326 mbx->mb10 = cpu_to_le16(BIT_0);
2327 } else {
2328 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2329 }
2330 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2331 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2332 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2333 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
25ff6af1 2334 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2335}
2336
2337static void
2338qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2339{
2340 uint32_t flags;
9cb78c16 2341 uint64_t lun;
2342 struct fc_port *fcport = sp->fcport;
2343 scsi_qla_host_t *vha = fcport->vha;
2344 struct qla_hw_data *ha = vha->hw;
9ba56b95 2345 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2346 struct req_que *req = vha->req;
2347
2348 flags = iocb->u.tmf.flags;
2349 lun = iocb->u.tmf.lun;
2350
2351 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2352 tsk->entry_count = 1;
2353 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2354 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2355 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2356 tsk->control_flags = cpu_to_le32(flags);
2357 tsk->port_id[0] = fcport->d_id.b.al_pa;
2358 tsk->port_id[1] = fcport->d_id.b.area;
2359 tsk->port_id[2] = fcport->d_id.b.domain;
c6d39e23 2360 tsk->vp_index = fcport->vha->vp_idx;
2361
2362 if (flags == TCF_LUN_RESET) {
2363 int_to_scsilun(lun, &tsk->lun);
2364 host_to_fcp_swap((uint8_t *)&tsk->lun,
2365 sizeof(tsk->lun));
2366 }
2367}
2368
6eb54715 2369static void
25ff6af1 2370qla2x00_els_dcmd_sp_free(void *data)
6eb54715 2371{
25ff6af1 2372 srb_t *sp = data;
2373 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2374
2375 kfree(sp->fcport);
2376
2377 if (elsio->u.els_logo.els_logo_pyld)
25ff6af1 2378 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2379 elsio->u.els_logo.els_logo_pyld,
2380 elsio->u.els_logo.els_logo_pyld_dma);
2381
2382 del_timer(&elsio->timer);
25ff6af1 2383 qla2x00_rel_sp(sp);
2384}
2385
2386static void
2387qla2x00_els_dcmd_iocb_timeout(void *data)
2388{
25ff6af1 2389 srb_t *sp = data;
6eb54715 2390 fc_port_t *fcport = sp->fcport;
25ff6af1 2391 struct scsi_qla_host *vha = sp->vha;
6eb54715 2392 struct qla_hw_data *ha = vha->hw;
25ff6af1 2393 struct srb_iocb *lio = &sp->u.iocb_cmd;
2394 unsigned long flags = 0;
2395
2396 ql_dbg(ql_dbg_io, vha, 0x3069,
2397 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2398 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2399 fcport->d_id.b.al_pa);
2400
2401 /* Abort the exchange */
2402 spin_lock_irqsave(&ha->hardware_lock, flags);
2403 if (ha->isp_ops->abort_command(sp)) {
2404 ql_dbg(ql_dbg_io, vha, 0x3070,
2405 "mbx abort_command failed.\n");
2406 } else {
2407 ql_dbg(ql_dbg_io, vha, 0x3071,
2408 "mbx abort_command success.\n");
2409 }
2410 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2411
2412 complete(&lio->u.els_logo.comp);
2413}
2414
2415static void
25ff6af1 2416qla2x00_els_dcmd_sp_done(void *ptr, int res)
6eb54715 2417{
25ff6af1 2418 srb_t *sp = ptr;
2419 fc_port_t *fcport = sp->fcport;
2420 struct srb_iocb *lio = &sp->u.iocb_cmd;
25ff6af1 2421 struct scsi_qla_host *vha = sp->vha;
2422
2423 ql_dbg(ql_dbg_io, vha, 0x3072,
2424 "%s hdl=%x, portid=%02x%02x%02x done\n",
2425 sp->name, sp->handle, fcport->d_id.b.domain,
2426 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2427
2428 complete(&lio->u.els_logo.comp);
2429}
2430
2431int
2432qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2433 port_id_t remote_did)
2434{
2435 srb_t *sp;
2436 fc_port_t *fcport = NULL;
2437 struct srb_iocb *elsio = NULL;
2438 struct qla_hw_data *ha = vha->hw;
2439 struct els_logo_payload logo_pyld;
2440 int rval = QLA_SUCCESS;
2441
2442 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2443 if (!fcport) {
2444 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2445 return -ENOMEM;
2446 }
2447
2448 /* Alloc SRB structure */
2449 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2450 if (!sp) {
2451 kfree(fcport);
2452 ql_log(ql_log_info, vha, 0x70e6,
2453 "SRB allocation failed\n");
2454 return -ENOMEM;
2455 }
2456
2457 elsio = &sp->u.iocb_cmd;
2458 fcport->loop_id = 0xFFFF;
2459 fcport->d_id.b.domain = remote_did.b.domain;
2460 fcport->d_id.b.area = remote_did.b.area;
2461 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2462
2463 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2464 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2465
2466 sp->type = SRB_ELS_DCMD;
2467 sp->name = "ELS_DCMD";
2468 sp->fcport = fcport;
2469 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2470 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2471 sp->done = qla2x00_els_dcmd_sp_done;
2472 sp->free = qla2x00_els_dcmd_sp_free;
2473
2474 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2475 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2476 GFP_KERNEL);
2477
2478 if (!elsio->u.els_logo.els_logo_pyld) {
25ff6af1 2479 sp->free(sp);
2480 return QLA_FUNCTION_FAILED;
2481 }
2482
2483 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2484
2485 elsio->u.els_logo.els_cmd = els_opcode;
2486 logo_pyld.opcode = els_opcode;
2487 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2488 logo_pyld.s_id[1] = vha->d_id.b.area;
2489 logo_pyld.s_id[2] = vha->d_id.b.domain;
2490 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2491 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2492
2493 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2494 sizeof(struct els_logo_payload));
2495
2496 rval = qla2x00_start_sp(sp);
2497 if (rval != QLA_SUCCESS) {
25ff6af1 2498 sp->free(sp);
2499 return QLA_FUNCTION_FAILED;
2500 }
2501
2502 ql_dbg(ql_dbg_io, vha, 0x3074,
2503 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2504 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2505 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2506
2507 wait_for_completion(&elsio->u.els_logo.comp);
2508
25ff6af1 2509 sp->free(sp);
2510 return rval;
2511}
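
/*
 * Illustrative usage sketch, not from the driver source: issuing a
 * driver-initiated LOGO to a remote port.  The opcode constant is
 * assumed to be the ELS_DCMD_LOGO value defined alongside this code;
 * the call blocks until qla2x00_els_dcmd_sp_done() completes it.
 *
 *	port_id_t did = { ... };		(remote port D_ID)
 *	qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
 */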
2512
2513static void
2514qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2515{
25ff6af1 2516 scsi_qla_host_t *vha = sp->vha;
2517 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2518
2519 els_iocb->entry_type = ELS_IOCB_TYPE;
2520 els_iocb->entry_count = 1;
2521 els_iocb->sys_define = 0;
2522 els_iocb->entry_status = 0;
2523 els_iocb->handle = sp->handle;
2524 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2525 els_iocb->tx_dsd_count = 1;
2526 els_iocb->vp_index = vha->vp_idx;
2527 els_iocb->sof_type = EST_SOFI3;
2528 els_iocb->rx_dsd_count = 0;
2529 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2530
2531 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2532 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2533 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2534 els_iocb->control_flags = 0;
2535
2536 els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2537 els_iocb->tx_address[0] =
2538 cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2539 els_iocb->tx_address[1] =
2540 cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2541 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2542
2543 els_iocb->rx_byte_count = 0;
2544 els_iocb->rx_address[0] = 0;
2545 els_iocb->rx_address[1] = 0;
2546 els_iocb->rx_len = 0;
2547
25ff6af1 2548 sp->vha->qla_stats.control_requests++;
2549}
2550
2551static void
2552qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2553{
75cc8cfc 2554 struct bsg_job *bsg_job = sp->u.bsg_job;
01e0e15c 2555 struct fc_bsg_request *bsg_request = bsg_job->request;
2556
2557 els_iocb->entry_type = ELS_IOCB_TYPE;
2558 els_iocb->entry_count = 1;
2559 els_iocb->sys_define = 0;
2560 els_iocb->entry_status = 0;
2561 els_iocb->handle = sp->handle;
2562 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
ad950360 2563 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
25ff6af1 2564 els_iocb->vp_index = sp->vha->vp_idx;
9a069e19 2565 els_iocb->sof_type = EST_SOFI3;
ad950360 2566 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
9a069e19 2567
4916392b 2568 els_iocb->opcode =
9ba56b95 2569 sp->type == SRB_ELS_CMD_RPT ?
2570 bsg_request->rqst_data.r_els.els_code :
2571 bsg_request->rqst_data.h_els.command_code;
2572 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2573 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2574 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2575 els_iocb->control_flags = 0;
2576 els_iocb->rx_byte_count =
2577 cpu_to_le32(bsg_job->reply_payload.payload_len);
2578 els_iocb->tx_byte_count =
2579 cpu_to_le32(bsg_job->request_payload.payload_len);
2580
2581 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2582 (bsg_job->request_payload.sg_list)));
2583 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2584 (bsg_job->request_payload.sg_list)));
2585 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2586 (bsg_job->request_payload.sg_list));
2587
2588 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2589 (bsg_job->reply_payload.sg_list)));
2590 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2591 (bsg_job->reply_payload.sg_list)));
2592 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2593 (bsg_job->reply_payload.sg_list));
fabbb8df 2594
25ff6af1 2595 sp->vha->qla_stats.control_requests++;
2596}
2597
2598static void
2599qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2600{
2601 uint16_t avail_dsds;
2602 uint32_t *cur_dsd;
2603 struct scatterlist *sg;
2604 int index;
2605 uint16_t tot_dsds;
25ff6af1 2606 scsi_qla_host_t *vha = sp->vha;
9bc4f4fb 2607 struct qla_hw_data *ha = vha->hw;
75cc8cfc 2608 struct bsg_job *bsg_job = sp->u.bsg_job;
9bc4f4fb 2609	int loop_iteration = 0;
2610 int entry_count = 1;
2611
2612 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2613 ct_iocb->entry_type = CT_IOCB_TYPE;
2614 ct_iocb->entry_status = 0;
2615 ct_iocb->handle1 = sp->handle;
2616 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2617 ct_iocb->status = cpu_to_le16(0);
2618 ct_iocb->control_flags = cpu_to_le16(0);
2619 ct_iocb->timeout = 0;
2620 ct_iocb->cmd_dsd_count =
ad950360 2621 cpu_to_le16(bsg_job->request_payload.sg_cnt);
9bc4f4fb 2622 ct_iocb->total_dsd_count =
ad950360 2623 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2624 ct_iocb->req_bytecount =
2625 cpu_to_le32(bsg_job->request_payload.payload_len);
2626 ct_iocb->rsp_bytecount =
2627 cpu_to_le32(bsg_job->reply_payload.payload_len);
2628
2629 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2630 (bsg_job->request_payload.sg_list)));
2631 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2632 (bsg_job->request_payload.sg_list)));
2633 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2634
2635 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2636 (bsg_job->reply_payload.sg_list)));
2637 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2638 (bsg_job->reply_payload.sg_list)));
2639 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2640
2641 avail_dsds = 1;
2642 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2643 index = 0;
2644 tot_dsds = bsg_job->reply_payload.sg_cnt;
2645
2646 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2647 dma_addr_t sle_dma;
2648 cont_a64_entry_t *cont_pkt;
2649
2650 /* Allocate additional continuation packets? */
2651 if (avail_dsds == 0) {
2652 /*
2653 * Five DSDs are available in the Cont.
2654 * Type 1 IOCB.
2655 */
2656 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2657 vha->hw->req_q_map[0]);
2658 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2659 avail_dsds = 5;
2660 entry_count++;
2661 }
2662
2663 sle_dma = sg_dma_address(sg);
2664 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2665 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2666 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
 2667		loop_iteration++;
2668 avail_dsds--;
2669 }
2670 ct_iocb->entry_count = entry_count;
fabbb8df 2671
25ff6af1 2672 sp->vha->qla_stats.control_requests++;
2673}
2674
2675static void
2676qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2677{
2678 uint16_t avail_dsds;
2679 uint32_t *cur_dsd;
2680 struct scatterlist *sg;
2681 int index;
2682 uint16_t tot_dsds;
25ff6af1 2683 scsi_qla_host_t *vha = sp->vha;
0d2aa38e 2684 struct qla_hw_data *ha = vha->hw;
75cc8cfc 2685 struct bsg_job *bsg_job = sp->u.bsg_job;
9a069e19 2686	int loop_iteration = 0;
2687 int entry_count = 1;
2688
2689 ct_iocb->entry_type = CT_IOCB_TYPE;
2690 ct_iocb->entry_status = 0;
2691 ct_iocb->sys_define = 0;
2692 ct_iocb->handle = sp->handle;
2693
2694 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
25ff6af1 2695 ct_iocb->vp_index = sp->vha->vp_idx;
ad950360 2696 ct_iocb->comp_status = cpu_to_le16(0);
2697
2698 ct_iocb->cmd_dsd_count =
ad950360 2699 cpu_to_le16(bsg_job->request_payload.sg_cnt);
2700 ct_iocb->timeout = 0;
2701 ct_iocb->rsp_dsd_count =
ad950360 2702 cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2703 ct_iocb->rsp_byte_count =
2704 cpu_to_le32(bsg_job->reply_payload.payload_len);
2705 ct_iocb->cmd_byte_count =
2706 cpu_to_le32(bsg_job->request_payload.payload_len);
2707 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2708 (bsg_job->request_payload.sg_list)));
2709 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2710 (bsg_job->request_payload.sg_list)));
2711 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2712 (bsg_job->request_payload.sg_list));
2713
2714 avail_dsds = 1;
2715 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2716 index = 0;
2717 tot_dsds = bsg_job->reply_payload.sg_cnt;
2718
2719 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2720 dma_addr_t sle_dma;
2721 cont_a64_entry_t *cont_pkt;
2722
2723 /* Allocate additional continuation packets? */
2724 if (avail_dsds == 0) {
2725 /*
2726 * Five DSDs are available in the Cont.
2727 * Type 1 IOCB.
2728 */
2729 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2730 ha->req_q_map[0]);
2731 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2732 avail_dsds = 5;
2733 entry_count++;
2734 }
2735
2736 sle_dma = sg_dma_address(sg);
2737 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2738 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2739 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
 2740		loop_iteration++;
2741 avail_dsds--;
2742 }
2743 ct_iocb->entry_count = entry_count;
2744}
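
/*
 * Illustrative sketch, not part of the driver: entry-count arithmetic for
 * the CT pass-through builders above.  The command IOCB itself carries one
 * response DSD; each Continuation Type 1 IOCB carries up to five more.
 * The helper name is hypothetical.
 */
static inline int example_ct_entry_count(uint16_t rsp_sg_cnt)
{
	if (rsp_sg_cnt <= 1)
		return 1;
	return 1 + DIV_ROUND_UP(rsp_sg_cnt - 1, 5);
}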
2745
2746/*
2747 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2748 * @sp: command to send to the ISP
2749 *
2750 * Returns non-zero if a failure occurred, else zero.
2751 */
2752int
2753qla82xx_start_scsi(srb_t *sp)
2754{
52c82823 2755 int nseg;
2756 unsigned long flags;
2757 struct scsi_cmnd *cmd;
2758 uint32_t *clr_ptr;
2759 uint32_t index;
2760 uint32_t handle;
2761 uint16_t cnt;
2762 uint16_t req_cnt;
2763 uint16_t tot_dsds;
2764 struct device_reg_82xx __iomem *reg;
2765 uint32_t dbval;
2766 uint32_t *fcp_dl;
2767 uint8_t additional_cdb_len;
2768 struct ct6_dsd *ctx;
25ff6af1 2769 struct scsi_qla_host *vha = sp->vha;
2770 struct qla_hw_data *ha = vha->hw;
2771 struct req_que *req = NULL;
2772 struct rsp_que *rsp = NULL;
2773
2774 /* Setup device pointers. */
5162cf0c 2775 reg = &ha->iobase->isp82;
9ba56b95 2776 cmd = GET_CMD_SP(sp);
2777 req = vha->req;
2778 rsp = ha->rsp_q_map[0];
2779
2780 /* So we know we haven't pci_map'ed anything yet */
2781 tot_dsds = 0;
2782
2783 dbval = 0x04 | (ha->portnum << 5);
2784
2785 /* Send marker if required */
2786 if (vha->marker_needed != 0) {
2787 if (qla2x00_marker(vha, req,
2788 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2789 ql_log(ql_log_warn, vha, 0x300c,
2790 "qla2x00_marker failed for cmd=%p.\n", cmd);
2791 return QLA_FUNCTION_FAILED;
2792 }
2793 vha->marker_needed = 0;
2794 }
2795
2796 /* Acquire ring specific lock */
2797 spin_lock_irqsave(&ha->hardware_lock, flags);
2798
2799 /* Check for room in outstanding command list. */
2800 handle = req->current_outstanding_cmd;
8d93f550 2801 for (index = 1; index < req->num_outstanding_cmds; index++) {
5162cf0c 2802 handle++;
8d93f550 2803 if (handle == req->num_outstanding_cmds)
2804 handle = 1;
2805 if (!req->outstanding_cmds[handle])
2806 break;
2807 }
8d93f550 2808 if (index == req->num_outstanding_cmds)
2809 goto queuing_error;
2810
2811 /* Map the sg table so we have an accurate count of sg entries needed */
2812 if (scsi_sg_count(cmd)) {
2813 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2814 scsi_sg_count(cmd), cmd->sc_data_direction);
2815 if (unlikely(!nseg))
2816 goto queuing_error;
2817 } else
2818 nseg = 0;
2819
2820 tot_dsds = nseg;
2821
2822 if (tot_dsds > ql2xshiftctondsd) {
2823 struct cmd_type_6 *cmd_pkt;
2824 uint16_t more_dsd_lists = 0;
2825 struct dsd_dma *dsd_ptr;
2826 uint16_t i;
2827
2828 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2829 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2830 ql_dbg(ql_dbg_io, vha, 0x300d,
2831 "Num of DSD list %d is than %d for cmd=%p.\n",
2832 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2833 cmd);
2834 goto queuing_error;
2835 }
2836
2837 if (more_dsd_lists <= ha->gbl_dsd_avail)
2838 goto sufficient_dsds;
2839 else
2840 more_dsd_lists -= ha->gbl_dsd_avail;
2841
2842 for (i = 0; i < more_dsd_lists; i++) {
2843 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2844 if (!dsd_ptr) {
2845 ql_log(ql_log_fatal, vha, 0x300e,
2846 "Failed to allocate memory for dsd_dma "
2847 "for cmd=%p.\n", cmd);
2848 goto queuing_error;
2849 }
2850
2851 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2852 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2853 if (!dsd_ptr->dsd_addr) {
2854 kfree(dsd_ptr);
2855 ql_log(ql_log_fatal, vha, 0x300f,
2856 "Failed to allocate memory for dsd_addr "
2857 "for cmd=%p.\n", cmd);
2858 goto queuing_error;
2859 }
2860 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2861 ha->gbl_dsd_avail++;
2862 }
2863
2864sufficient_dsds:
2865 req_cnt = 1;
2866
2867 if (req->cnt < (req_cnt + 2)) {
2868 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2869 &reg->req_q_out[0]);
2870 if (req->ring_index < cnt)
2871 req->cnt = cnt - req->ring_index;
2872 else
2873 req->cnt = req->length -
2874 (req->ring_index - cnt);
2875 if (req->cnt < (req_cnt + 2))
2876 goto queuing_error;
2877 }
2878
2879 ctx = sp->u.scmd.ctx =
2880 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2881 if (!ctx) {
2882 ql_log(ql_log_fatal, vha, 0x3010,
2883 "Failed to allocate ctx for cmd=%p.\n", cmd);
2884 goto queuing_error;
2885 }
9ba56b95 2886
2887 memset(ctx, 0, sizeof(struct ct6_dsd));
2888 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2889 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2890 if (!ctx->fcp_cmnd) {
2891 ql_log(ql_log_fatal, vha, 0x3011,
2892 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
841f97bf 2893 goto queuing_error;
2894 }
2895
2896 /* Initialize the DSD list and dma handle */
2897 INIT_LIST_HEAD(&ctx->dsd_list);
2898 ctx->dsd_use_cnt = 0;
2899
2900 if (cmd->cmd_len > 16) {
2901 additional_cdb_len = cmd->cmd_len - 16;
2902 if ((cmd->cmd_len % 4) != 0) {
 2903			/* SCSI commands bigger than 16 bytes must be
 2904			 * a multiple of 4
 2905			 */
2906 ql_log(ql_log_warn, vha, 0x3012,
2907 "scsi cmd len %d not multiple of 4 "
2908 "for cmd=%p.\n", cmd->cmd_len, cmd);
2909 goto queuing_error_fcp_cmnd;
2910 }
2911 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2912 } else {
2913 additional_cdb_len = 0;
2914 ctx->fcp_cmnd_len = 12 + 16 + 4;
2915 }
2916
2917 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2918 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2919
2920 /* Zero out remaining portion of packet. */
2921 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2922 clr_ptr = (uint32_t *)cmd_pkt + 2;
2923 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2924 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2925
2926 /* Set NPORT-ID and LUN number*/
2927 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2928 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2929 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2930 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 2931 cmd_pkt->vp_index = sp->vha->vp_idx;
2932
2933 /* Build IOCB segments */
2934 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2935 goto queuing_error_fcp_cmnd;
2936
9ba56b95 2937 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2938 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2939
2940 /* build FCP_CMND IU */
2941 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
9ba56b95 2942 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2943 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2944
2945 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2946 ctx->fcp_cmnd->additional_cdb_len |= 1;
2947 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2948 ctx->fcp_cmnd->additional_cdb_len |= 2;
2949
2950 /* Populate the FCP_PRIO. */
2951 if (ha->flags.fcp_prio_enabled)
2952 ctx->fcp_cmnd->task_attribute |=
2953 sp->fcport->fcp_prio << 3;
2954
2955 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2956
2957 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2958 additional_cdb_len);
2959 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2960
2961 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2962 cmd_pkt->fcp_cmnd_dseg_address[0] =
2963 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2964 cmd_pkt->fcp_cmnd_dseg_address[1] =
2965 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2966
2967 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2968 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2969 /* Set total data segment count. */
2970 cmd_pkt->entry_count = (uint8_t)req_cnt;
2971 /* Specify response queue number where
2972 * completion should happen
2973 */
2974 cmd_pkt->entry_status = (uint8_t) rsp->id;
2975 } else {
2976 struct cmd_type_7 *cmd_pkt;
2977 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2978 if (req->cnt < (req_cnt + 2)) {
2979 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2980 &reg->req_q_out[0]);
2981 if (req->ring_index < cnt)
2982 req->cnt = cnt - req->ring_index;
2983 else
2984 req->cnt = req->length -
2985 (req->ring_index - cnt);
2986 }
2987 if (req->cnt < (req_cnt + 2))
2988 goto queuing_error;
2989
2990 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2991 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2992
2993 /* Zero out remaining portion of packet. */
2994 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2995 clr_ptr = (uint32_t *)cmd_pkt + 2;
2996 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2997 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2998
2999 /* Set NPORT-ID and LUN number*/
3000 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3001 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3002 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3003 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 3004 cmd_pkt->vp_index = sp->vha->vp_idx;
5162cf0c 3005
9ba56b95 3006 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
5162cf0c 3007 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
9ba56b95 3008 sizeof(cmd_pkt->lun));
5162cf0c 3009
3010 /* Populate the FCP_PRIO. */
3011 if (ha->flags.fcp_prio_enabled)
3012 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3013
3014 /* Load SCSI command packet. */
3015 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3016 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3017
3018 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3019
3020 /* Build IOCB segments */
d7459527 3021 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3022
3023 /* Set total data segment count. */
3024 cmd_pkt->entry_count = (uint8_t)req_cnt;
3025 /* Specify response queue number where
3026 * completion should happen.
3027 */
3028 cmd_pkt->entry_status = (uint8_t) rsp->id;
3029
3030 }
3031 /* Build command packet. */
3032 req->current_outstanding_cmd = handle;
3033 req->outstanding_cmds[handle] = sp;
3034 sp->handle = handle;
9ba56b95 3035 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3036 req->cnt -= req_cnt;
3037 wmb();
3038
3039 /* Adjust ring index. */
3040 req->ring_index++;
3041 if (req->ring_index == req->length) {
3042 req->ring_index = 0;
3043 req->ring_ptr = req->ring;
3044 } else
3045 req->ring_ptr++;
3046
3047 sp->flags |= SRB_DMA_VALID;
3048
3049 /* Set chip new ring index. */
3050 /* write, read and verify logic */
3051 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3052 if (ql2xdbwr)
8dfa4b5a 3053 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
5162cf0c 3054 else {
8dfa4b5a 3055 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
5162cf0c 3056 wmb();
3057 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3058 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3059 wmb();
3060 }
3061 }
3062
3063 /* Manage unprocessed RIO/ZIO commands in response queue. */
3064 if (vha->flags.process_response_queue &&
3065 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3066 qla24xx_process_response_queue(vha, rsp);
3067
3068 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3069 return QLA_SUCCESS;
3070
3071queuing_error_fcp_cmnd:
3072 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3073queuing_error:
3074 if (tot_dsds)
3075 scsi_dma_unmap(cmd);
3076
3077 if (sp->u.scmd.ctx) {
3078 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3079 sp->u.scmd.ctx = NULL;
3080 }
3081 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3082
3083 return QLA_FUNCTION_FAILED;
3084}
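
/*
 * Illustrative sketch, not part of the driver: composition of the ISP82xx
 * doorbell word written above.  The field boundaries are inferred from
 * the shifts in qla82xx_start_scsi(); the helper name is hypothetical.
 *
 *	bits  0-4	request type (0x04)
 *	bits  5-7	ha->portnum
 *	bits  8-15	req->id
 *	bits 16-31	req->ring_index
 */
static inline uint32_t example_nx_dbval(uint8_t portnum, uint8_t req_id,
    uint16_t ring_index)
{
	return 0x04 | (portnum << 5) | (req_id << 8) |
	    ((uint32_t)ring_index << 16);
}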
3085
6d78e557 3086static void
3087qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3088{
3089 struct srb_iocb *aio = &sp->u.iocb_cmd;
25ff6af1 3090 scsi_qla_host_t *vha = sp->vha;
3091 struct req_que *req = vha->req;
3092
3093 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3094 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3095 abt_iocb->entry_count = 1;
3096 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3097 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3098 abt_iocb->handle_to_abort =
3099 cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
3100 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3101 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3102 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3103 abt_iocb->vp_index = vha->vp_idx;
3104 abt_iocb->req_que_no = cpu_to_le16(req->id);
3105 /* Send the command to the firmware */
3106 wmb();
3107}
3108
3109static void
3110qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3111{
3112 int i, sz;
3113
3114 mbx->entry_type = MBX_IOCB_TYPE;
3115 mbx->handle = sp->handle;
3116 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3117
3118 for (i = 0; i < sz; i++)
3119 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3120}
3121
3122static void
3123qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3124{
3125 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3126 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3127 ct_pkt->handle = sp->handle;
3128}
3129
3130static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3131 struct nack_to_isp *nack)
3132{
3133 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3134
3135 nack->entry_type = NOTIFY_ACK_TYPE;
3136 nack->entry_count = 1;
3137 nack->ox_id = ntfy->ox_id;
3138
3139 nack->u.isp24.handle = sp->handle;
3140 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3141 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3142 nack->u.isp24.flags = ntfy->u.isp24.flags &
3143 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3144 }
3145 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3146 nack->u.isp24.status = ntfy->u.isp24.status;
3147 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3148 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3149 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3150 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3151 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3152 nack->u.isp24.srr_flags = 0;
3153 nack->u.isp24.srr_reject_code = 0;
3154 nack->u.isp24.srr_reject_code_expl = 0;
3155 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3156}
3157
3158/*
3159 * Build NVME LS request
3160 */
3161static int
3162qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3163{
3164 struct srb_iocb *nvme;
3165 int rval = QLA_SUCCESS;
3166
3167 nvme = &sp->u.iocb_cmd;
3168 cmd_pkt->entry_type = PT_LS4_REQUEST;
3169 cmd_pkt->entry_count = 1;
3170 cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3171
3172 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3173 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3174 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3175
3176 cmd_pkt->tx_dseg_count = 1;
3177 cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3178 cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
3179 cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
3180 cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3181
3182 cmd_pkt->rx_dseg_count = 1;
3183 cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3184 cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
3185 cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
3186 cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
3187
3188 return rval;
3189}
3190
3191int
3192qla2x00_start_sp(srb_t *sp)
3193{
3194 int rval;
25ff6af1 3195 scsi_qla_host_t *vha = sp->vha;
726b8548 3196 struct qla_hw_data *ha = vha->hw;
ac280b67 3197 void *pkt;
3198 unsigned long flags;
3199
3200 rval = QLA_FUNCTION_FAILED;
3201 spin_lock_irqsave(&ha->hardware_lock, flags);
726b8548 3202 pkt = qla2x00_alloc_iocbs(vha, sp);
7c3df132 3203 if (!pkt) {
726b8548 3204 ql_log(ql_log_warn, vha, 0x700c,
7c3df132 3205 "qla2x00_alloc_iocbs failed.\n");
ac280b67 3206 goto done;
7c3df132 3207 }
3208
3209 rval = QLA_SUCCESS;
9ba56b95 3210 switch (sp->type) {
3211 case SRB_LOGIN_CMD:
3212 IS_FWI2_CAPABLE(ha) ?
5ff1d584 3213 qla24xx_login_iocb(sp, pkt) :
3214 qla2x00_login_iocb(sp, pkt);
3215 break;
3216 case SRB_PRLI_CMD:
3217 qla24xx_prli_iocb(sp, pkt);
3218 break;
3219 case SRB_LOGOUT_CMD:
3220 IS_FWI2_CAPABLE(ha) ?
5ff1d584 3221 qla24xx_logout_iocb(sp, pkt) :
3222 qla2x00_logout_iocb(sp, pkt);
3223 break;
3224 case SRB_ELS_CMD_RPT:
3225 case SRB_ELS_CMD_HST:
3226 qla24xx_els_iocb(sp, pkt);
3227 break;
3228 case SRB_CT_CMD:
9bc4f4fb 3229 IS_FWI2_CAPABLE(ha) ?
3230 qla24xx_ct_iocb(sp, pkt) :
3231 qla2x00_ct_iocb(sp, pkt);
9a069e19 3232 break;
3233 case SRB_ADISC_CMD:
3234 IS_FWI2_CAPABLE(ha) ?
3235 qla24xx_adisc_iocb(sp, pkt) :
3236 qla2x00_adisc_iocb(sp, pkt);
3237 break;
3822263e 3238 case SRB_TM_CMD:
3239 IS_QLAFX00(ha) ?
3240 qlafx00_tm_iocb(sp, pkt) :
3241 qla24xx_tm_iocb(sp, pkt);
3242 break;
3243 case SRB_FXIOCB_DCMD:
3244 case SRB_FXIOCB_BCMD:
3245 qlafx00_fxdisc_iocb(sp, pkt);
3246 break;
3247 case SRB_NVME_LS:
3248 qla_nvme_ls(sp, pkt);
3249 break;
8ae6d9c7 3250 case SRB_ABT_CMD:
3251 IS_QLAFX00(ha) ?
3252 qlafx00_abort_iocb(sp, pkt) :
3253 qla24xx_abort_iocb(sp, pkt);
3822263e 3254 break;
3255 case SRB_ELS_DCMD:
3256 qla24xx_els_logo_iocb(sp, pkt);
3257 break;
3258 case SRB_CT_PTHRU_CMD:
3259 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3260 break;
3261 case SRB_MB_IOCB:
3262 qla2x00_mb_iocb(sp, pkt);
3263 break;
3264 case SRB_NACK_PLOGI:
3265 case SRB_NACK_PRLI:
3266 case SRB_NACK_LOGO:
3267 qla2x00_send_notify_ack_iocb(sp, pkt);
3268 break;
3269 default:
3270 break;
3271 }
3272
3273 wmb();
726b8548 3274 qla2x00_start_iocbs(vha, ha->req_q_map[0]);
3275done:
3276 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3277 return rval;
3278}
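
/*
 * Illustrative usage sketch, not from the driver source: an async IOCB is
 * issued by allocating an srb, setting its type and per-type payload, and
 * handing it to qla2x00_start_sp(), which selects the builder in the
 * switch above.  A minimal, hypothetical login submission:
 *
 *	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 *	sp->type = SRB_LOGIN_CMD;
 *	sp->name = "login";
 *	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
 *	sp->done = ...completion callback...;
 *	if (qla2x00_start_sp(sp) != QLA_SUCCESS)
 *		sp->free(sp);
 */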
3279
3280static void
3281qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3282 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3283{
3284 uint16_t avail_dsds;
3285 uint32_t *cur_dsd;
3286 uint32_t req_data_len = 0;
3287 uint32_t rsp_data_len = 0;
3288 struct scatterlist *sg;
3289 int index;
3290 int entry_count = 1;
75cc8cfc 3291 struct bsg_job *bsg_job = sp->u.bsg_job;
3292
 3293	/* Update entry type to indicate bidir command */
3294 *((uint32_t *)(&cmd_pkt->entry_type)) =
ad950360 3295 cpu_to_le32(COMMAND_BIDIRECTIONAL);
3296
 3297	/* Set the transfer direction; in this case set both flags.
 3298	 * Also set the BD_WRAP_BACK flag; the firmware takes care of
 3299	 * assigning DID=SID for outgoing pkts.
 3300	 */
3301 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3302 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
ad950360 3303 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3304 BD_WRAP_BACK);
3305
3306 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3307 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3308 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3309 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3310
3311 vha->bidi_stats.transfer_bytes += req_data_len;
3312 vha->bidi_stats.io_count++;
3313
3314 vha->qla_stats.output_bytes += req_data_len;
3315 vha->qla_stats.output_requests++;
3316
 3317	/* Only one DSD is available in the bidirectional IOCB; the remaining
 3318	 * DSDs are bundled in continuation IOCBs.
 3319	 */
3320 avail_dsds = 1;
3321 cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3322
3323 index = 0;
3324
3325 for_each_sg(bsg_job->request_payload.sg_list, sg,
3326 bsg_job->request_payload.sg_cnt, index) {
3327 dma_addr_t sle_dma;
3328 cont_a64_entry_t *cont_pkt;
3329
3330 /* Allocate additional continuation packets */
3331 if (avail_dsds == 0) {
 3332			/* A Continuation Type 1 IOCB can accommodate
 3333			 * 5 DSDs
 3334			 */
3335 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3336 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3337 avail_dsds = 5;
3338 entry_count++;
3339 }
3340 sle_dma = sg_dma_address(sg);
3341 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3342 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3343 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3344 avail_dsds--;
3345 }
 3346	/* For a read request the DSDs always go into a continuation IOCB,
 3347	 * following the write DSDs. If there is room on the current IOCB
 3348	 * they are added to that IOCB, else a new continuation IOCB is
 3349	 * allocated.
 3350	 */
3351 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3352 bsg_job->reply_payload.sg_cnt, index) {
3353 dma_addr_t sle_dma;
3354 cont_a64_entry_t *cont_pkt;
3355
3356 /* Allocate additional continuation packets */
3357 if (avail_dsds == 0) {
 3358			/* A Continuation Type 1 IOCB can accommodate
 3359			 * 5 DSDs
 3360			 */
3361 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3362 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3363 avail_dsds = 5;
3364 entry_count++;
3365 }
3366 sle_dma = sg_dma_address(sg);
3367 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3368 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3369 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3370 avail_dsds--;
3371 }
3372 /* This value should be same as number of IOCB required for this cmd */
3373 cmd_pkt->entry_count = entry_count;
3374}
3375
3376int
3377qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3378{
3379
3380 struct qla_hw_data *ha = vha->hw;
3381 unsigned long flags;
3382 uint32_t handle;
3383 uint32_t index;
3384 uint16_t req_cnt;
3385 uint16_t cnt;
3386 uint32_t *clr_ptr;
3387 struct cmd_bidir *cmd_pkt = NULL;
3388 struct rsp_que *rsp;
3389 struct req_que *req;
3390 int rval = EXT_STATUS_OK;
3391
3392 rval = QLA_SUCCESS;
3393
3394 rsp = ha->rsp_q_map[0];
3395 req = vha->req;
3396
3397 /* Send marker if required */
3398 if (vha->marker_needed != 0) {
3399 if (qla2x00_marker(vha, req,
3400 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3401 return EXT_STATUS_MAILBOX;
3402 vha->marker_needed = 0;
3403 }
3404
3405 /* Acquire ring specific lock */
3406 spin_lock_irqsave(&ha->hardware_lock, flags);
3407
3408 /* Check for room in outstanding command list. */
3409 handle = req->current_outstanding_cmd;
8d93f550 3410 for (index = 1; index < req->num_outstanding_cmds; index++) {
a9b6f722 3411 handle++;
8d2b21db
BVA
3412 if (handle == req->num_outstanding_cmds)
3413 handle = 1;
3414 if (!req->outstanding_cmds[handle])
3415 break;
3416 }
3417
8d93f550 3418 if (index == req->num_outstanding_cmds) {
3419 rval = EXT_STATUS_BUSY;
3420 goto queuing_error;
3421 }
3422
3423 /* Calculate number of IOCB required */
3424 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3425
3426 /* Check for room on request queue. */
3427 if (req->cnt < req_cnt + 2) {
3428 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3429 RD_REG_DWORD_RELAXED(req->req_q_out);
3430 if (req->ring_index < cnt)
3431 req->cnt = cnt - req->ring_index;
3432 else
3433 req->cnt = req->length -
3434 (req->ring_index - cnt);
3435 }
3436 if (req->cnt < req_cnt + 2) {
3437 rval = EXT_STATUS_BUSY;
3438 goto queuing_error;
3439 }
3440
3441 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3442 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3443
3444 /* Zero out remaining portion of packet. */
3445 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3446 clr_ptr = (uint32_t *)cmd_pkt + 2;
3447 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3448
3449 /* Set NPORT-ID (of vha)*/
3450 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3451 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3452 cmd_pkt->port_id[1] = vha->d_id.b.area;
3453 cmd_pkt->port_id[2] = vha->d_id.b.domain;
3454
3455 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3456 cmd_pkt->entry_status = (uint8_t) rsp->id;
3457 /* Build command packet. */
3458 req->current_outstanding_cmd = handle;
3459 req->outstanding_cmds[handle] = sp;
3460 sp->handle = handle;
3461 req->cnt -= req_cnt;
3462
3463 /* Send the command to the firmware */
3464 wmb();
3465 qla2x00_start_iocbs(vha, req);
3466queuing_error:
3467 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3468 return rval;
3469}
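
/*
 * Illustrative note, not from the driver source: qla2x00_start_bidir()
 * reports EXT_STATUS_* codes back to the bsg layer - EXT_STATUS_OK on
 * success, EXT_STATUS_BUSY when no handle or ring slot is free, and
 * EXT_STATUS_MAILBOX when the synchronization marker cannot be sent.
 */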