scsi: qla2xxx: Update driver version to 9.01.00.00-k
[linux-2.6-block.git] / drivers / scsi / qla2xxx / qla_iocb.c
CommitLineData
fa90c54f
AV
1/*
2 * QLogic Fibre Channel HBA Driver
bd21eaf9 3 * Copyright (c) 2003-2014 QLogic Corporation
1da177e4 4 *
fa90c54f
AV
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
1da177e4 7#include "qla_def.h"
2d70c103 8#include "qla_target.h"
1da177e4
LT
9
10#include <linux/blkdev.h>
11#include <linux/delay.h>
12
13#include <scsi/scsi_tcq.h>
14
1da177e4
LT
15/**
16 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
17 * @cmd: SCSI command
18 *
19 * Returns the proper CF_* direction based on CDB.
20 */
21static inline uint16_t
49fd462a 22qla2x00_get_cmd_direction(srb_t *sp)
1da177e4
LT
23{
24 uint16_t cflags;
9ba56b95 25 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
25ff6af1 26 struct scsi_qla_host *vha = sp->vha;
1da177e4
LT
27
28 cflags = 0;
29
30 /* Set transfer direction */
9ba56b95 31 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1da177e4 32 cflags = CF_WRITE;
2be21fa2 33 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
fabbb8df 34 vha->qla_stats.output_requests++;
9ba56b95 35 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1da177e4 36 cflags = CF_READ;
2be21fa2 37 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
fabbb8df 38 vha->qla_stats.input_requests++;
49fd462a 39 }
1da177e4
LT
40 return (cflags);
41}
42
43/**
44 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
45 * Continuation Type 0 IOCBs to allocate.
46 *
47 * @dsds: number of data segment decriptors needed
48 *
49 * Returns the number of IOCB entries needed to store @dsds.
50 */
51uint16_t
52qla2x00_calc_iocbs_32(uint16_t dsds)
53{
54 uint16_t iocbs;
55
56 iocbs = 1;
57 if (dsds > 3) {
58 iocbs += (dsds - 3) / 7;
59 if ((dsds - 3) % 7)
60 iocbs++;
61 }
62 return (iocbs);
63}
64
65/**
66 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
67 * Continuation Type 1 IOCBs to allocate.
68 *
69 * @dsds: number of data segment decriptors needed
70 *
71 * Returns the number of IOCB entries needed to store @dsds.
72 */
73uint16_t
74qla2x00_calc_iocbs_64(uint16_t dsds)
75{
76 uint16_t iocbs;
77
78 iocbs = 1;
79 if (dsds > 2) {
80 iocbs += (dsds - 2) / 5;
81 if ((dsds - 2) % 5)
82 iocbs++;
83 }
84 return (iocbs);
85}
86
87/**
88 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
89 * @ha: HA context
90 *
91 * Returns a pointer to the Continuation Type 0 IOCB packet.
92 */
93static inline cont_entry_t *
67c2e93a 94qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
1da177e4
LT
95{
96 cont_entry_t *cont_pkt;
67c2e93a 97 struct req_que *req = vha->req;
1da177e4 98 /* Adjust ring index. */
e315cd28
AC
99 req->ring_index++;
100 if (req->ring_index == req->length) {
101 req->ring_index = 0;
102 req->ring_ptr = req->ring;
1da177e4 103 } else {
e315cd28 104 req->ring_ptr++;
1da177e4
LT
105 }
106
e315cd28 107 cont_pkt = (cont_entry_t *)req->ring_ptr;
1da177e4
LT
108
109 /* Load packet defaults. */
ad950360 110 *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
1da177e4
LT
111
112 return (cont_pkt);
113}
114
115/**
116 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
117 * @ha: HA context
118 *
119 * Returns a pointer to the continuation type 1 IOCB packet.
120 */
121static inline cont_a64_entry_t *
0d2aa38e 122qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
1da177e4
LT
123{
124 cont_a64_entry_t *cont_pkt;
125
126 /* Adjust ring index. */
e315cd28
AC
127 req->ring_index++;
128 if (req->ring_index == req->length) {
129 req->ring_index = 0;
130 req->ring_ptr = req->ring;
1da177e4 131 } else {
e315cd28 132 req->ring_ptr++;
1da177e4
LT
133 }
134
e315cd28 135 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
1da177e4
LT
136
137 /* Load packet defaults. */
8ae6d9c7 138 *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
ad950360
BVA
139 cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
140 cpu_to_le32(CONTINUE_A64_TYPE);
1da177e4
LT
141
142 return (cont_pkt);
143}
144
d7459527 145inline int
bad75002
AE
146qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
147{
9ba56b95
GM
148 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
149 uint8_t guard = scsi_host_get_guard(cmd->device->host);
bad75002 150
bad75002
AE
151 /* We always use DIFF Bundling for best performance */
152 *fw_prot_opts = 0;
153
154 /* Translate SCSI opcode to a protection opcode */
9ba56b95 155 switch (scsi_get_prot_op(cmd)) {
bad75002
AE
156 case SCSI_PROT_READ_STRIP:
157 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
158 break;
159 case SCSI_PROT_WRITE_INSERT:
160 *fw_prot_opts |= PO_MODE_DIF_INSERT;
161 break;
162 case SCSI_PROT_READ_INSERT:
163 *fw_prot_opts |= PO_MODE_DIF_INSERT;
164 break;
165 case SCSI_PROT_WRITE_STRIP:
166 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
167 break;
168 case SCSI_PROT_READ_PASS:
bad75002 169 case SCSI_PROT_WRITE_PASS:
9e522cd8
AE
170 if (guard & SHOST_DIX_GUARD_IP)
171 *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
172 else
173 *fw_prot_opts |= PO_MODE_DIF_PASS;
bad75002
AE
174 break;
175 default: /* Normal Request */
176 *fw_prot_opts |= PO_MODE_DIF_PASS;
177 break;
178 }
179
9ba56b95 180 return scsi_prot_sg_count(cmd);
bad75002
AE
181}
182
183/*
1da177e4
LT
184 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
185 * capable IOCB types.
186 *
187 * @sp: SRB command to process
188 * @cmd_pkt: Command type 2 IOCB
189 * @tot_dsds: Total number of segments to transfer
190 */
191void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
192 uint16_t tot_dsds)
193{
194 uint16_t avail_dsds;
195 uint32_t *cur_dsd;
e315cd28 196 scsi_qla_host_t *vha;
1da177e4 197 struct scsi_cmnd *cmd;
385d70b4
FT
198 struct scatterlist *sg;
199 int i;
1da177e4 200
9ba56b95 201 cmd = GET_CMD_SP(sp);
1da177e4
LT
202
203 /* Update entry type to indicate Command Type 2 IOCB */
204 *((uint32_t *)(&cmd_pkt->entry_type)) =
ad950360 205 cpu_to_le32(COMMAND_TYPE);
1da177e4
LT
206
207 /* No data transfer */
385d70b4 208 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
ad950360 209 cmd_pkt->byte_count = cpu_to_le32(0);
1da177e4
LT
210 return;
211 }
212
25ff6af1 213 vha = sp->vha;
49fd462a 214 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
1da177e4
LT
215
216 /* Three DSDs are available in the Command Type 2 IOCB */
217 avail_dsds = 3;
218 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
219
220 /* Load data segments */
385d70b4
FT
221 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
222 cont_entry_t *cont_pkt;
223
224 /* Allocate additional continuation packets? */
225 if (avail_dsds == 0) {
226 /*
227 * Seven DSDs are available in the Continuation
228 * Type 0 IOCB.
229 */
67c2e93a 230 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
385d70b4
FT
231 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
232 avail_dsds = 7;
1da177e4 233 }
385d70b4
FT
234
235 *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
236 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
237 avail_dsds--;
1da177e4
LT
238 }
239}
240
241/**
242 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
243 * capable IOCB types.
244 *
245 * @sp: SRB command to process
246 * @cmd_pkt: Command type 3 IOCB
247 * @tot_dsds: Total number of segments to transfer
248 */
249void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
250 uint16_t tot_dsds)
251{
252 uint16_t avail_dsds;
253 uint32_t *cur_dsd;
e315cd28 254 scsi_qla_host_t *vha;
1da177e4 255 struct scsi_cmnd *cmd;
385d70b4
FT
256 struct scatterlist *sg;
257 int i;
1da177e4 258
9ba56b95 259 cmd = GET_CMD_SP(sp);
1da177e4
LT
260
261 /* Update entry type to indicate Command Type 3 IOCB */
ad950360 262 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
1da177e4
LT
263
264 /* No data transfer */
385d70b4 265 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
ad950360 266 cmd_pkt->byte_count = cpu_to_le32(0);
1da177e4
LT
267 return;
268 }
269
25ff6af1 270 vha = sp->vha;
49fd462a 271 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
1da177e4
LT
272
273 /* Two DSDs are available in the Command Type 3 IOCB */
274 avail_dsds = 2;
275 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
276
277 /* Load data segments */
385d70b4
FT
278 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
279 dma_addr_t sle_dma;
280 cont_a64_entry_t *cont_pkt;
281
282 /* Allocate additional continuation packets? */
283 if (avail_dsds == 0) {
284 /*
285 * Five DSDs are available in the Continuation
286 * Type 1 IOCB.
287 */
0d2aa38e 288 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
385d70b4
FT
289 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
290 avail_dsds = 5;
1da177e4 291 }
385d70b4
FT
292
293 sle_dma = sg_dma_address(sg);
294 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
295 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
296 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
297 avail_dsds--;
1da177e4
LT
298 }
299}
300
301/**
302 * qla2x00_start_scsi() - Send a SCSI command to the ISP
303 * @sp: command to send to the ISP
304 *
cc3ef7bc 305 * Returns non-zero if a failure occurred, else zero.
1da177e4
LT
306 */
307int
308qla2x00_start_scsi(srb_t *sp)
309{
52c82823 310 int nseg;
1da177e4 311 unsigned long flags;
e315cd28 312 scsi_qla_host_t *vha;
1da177e4
LT
313 struct scsi_cmnd *cmd;
314 uint32_t *clr_ptr;
315 uint32_t index;
316 uint32_t handle;
317 cmd_entry_t *cmd_pkt;
1da177e4
LT
318 uint16_t cnt;
319 uint16_t req_cnt;
320 uint16_t tot_dsds;
3d71644c 321 struct device_reg_2xxx __iomem *reg;
e315cd28
AC
322 struct qla_hw_data *ha;
323 struct req_que *req;
73208dfd 324 struct rsp_que *rsp;
1da177e4
LT
325
326 /* Setup device pointers. */
25ff6af1 327 vha = sp->vha;
e315cd28 328 ha = vha->hw;
3d71644c 329 reg = &ha->iobase->isp;
9ba56b95 330 cmd = GET_CMD_SP(sp);
73208dfd
AC
331 req = ha->req_q_map[0];
332 rsp = ha->rsp_q_map[0];
83021920 333 /* So we know we haven't pci_map'ed anything yet */
334 tot_dsds = 0;
1da177e4
LT
335
336 /* Send marker if required */
e315cd28 337 if (vha->marker_needed != 0) {
7c3df132
SK
338 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
339 QLA_SUCCESS) {
1da177e4 340 return (QLA_FUNCTION_FAILED);
7c3df132 341 }
e315cd28 342 vha->marker_needed = 0;
1da177e4
LT
343 }
344
345 /* Acquire ring specific lock */
c9c5ced9 346 spin_lock_irqsave(&ha->hardware_lock, flags);
1da177e4
LT
347
348 /* Check for room in outstanding command list. */
e315cd28 349 handle = req->current_outstanding_cmd;
8d93f550 350 for (index = 1; index < req->num_outstanding_cmds; index++) {
1da177e4 351 handle++;
8d93f550 352 if (handle == req->num_outstanding_cmds)
1da177e4 353 handle = 1;
e315cd28 354 if (!req->outstanding_cmds[handle])
1da177e4
LT
355 break;
356 }
8d93f550 357 if (index == req->num_outstanding_cmds)
1da177e4
LT
358 goto queuing_error;
359
83021920 360 /* Map the sg table so we have an accurate count of sg entries needed */
2c3dfe3f
SJ
361 if (scsi_sg_count(cmd)) {
362 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
363 scsi_sg_count(cmd), cmd->sc_data_direction);
364 if (unlikely(!nseg))
365 goto queuing_error;
366 } else
367 nseg = 0;
368
385d70b4 369 tot_dsds = nseg;
83021920 370
1da177e4 371 /* Calculate the number of request entries needed. */
fd34f556 372 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
e315cd28 373 if (req->cnt < (req_cnt + 2)) {
1da177e4 374 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
e315cd28
AC
375 if (req->ring_index < cnt)
376 req->cnt = cnt - req->ring_index;
1da177e4 377 else
e315cd28
AC
378 req->cnt = req->length -
379 (req->ring_index - cnt);
a6eb3c9f
CL
380 /* If still no head room then bail out */
381 if (req->cnt < (req_cnt + 2))
382 goto queuing_error;
1da177e4 383 }
1da177e4 384
1da177e4 385 /* Build command packet */
e315cd28
AC
386 req->current_outstanding_cmd = handle;
387 req->outstanding_cmds[handle] = sp;
cf53b069 388 sp->handle = handle;
9ba56b95 389 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
e315cd28 390 req->cnt -= req_cnt;
1da177e4 391
e315cd28 392 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
1da177e4
LT
393 cmd_pkt->handle = handle;
394 /* Zero out remaining portion of packet. */
395 clr_ptr = (uint32_t *)cmd_pkt + 2;
396 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
397 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
398
bdf79621 399 /* Set target ID and LUN number*/
400 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
9ba56b95 401 cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
ad950360 402 cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
1da177e4 403
1da177e4
LT
404 /* Load SCSI command packet. */
405 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
385d70b4 406 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1da177e4
LT
407
408 /* Build IOCB segments */
fd34f556 409 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
1da177e4
LT
410
411 /* Set total data segment count. */
412 cmd_pkt->entry_count = (uint8_t)req_cnt;
413 wmb();
414
415 /* Adjust ring index. */
e315cd28
AC
416 req->ring_index++;
417 if (req->ring_index == req->length) {
418 req->ring_index = 0;
419 req->ring_ptr = req->ring;
1da177e4 420 } else
e315cd28 421 req->ring_ptr++;
1da177e4 422
1da177e4 423 sp->flags |= SRB_DMA_VALID;
1da177e4
LT
424
425 /* Set chip new ring index. */
e315cd28 426 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
1da177e4
LT
427 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
428
4fdfefe5 429 /* Manage unprocessed RIO/ZIO commands in response queue. */
e315cd28 430 if (vha->flags.process_response_queue &&
73208dfd
AC
431 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
432 qla2x00_process_response_queue(rsp);
4fdfefe5 433
c9c5ced9 434 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1da177e4
LT
435 return (QLA_SUCCESS);
436
437queuing_error:
385d70b4
FT
438 if (tot_dsds)
439 scsi_dma_unmap(cmd);
440
c9c5ced9 441 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1da177e4
LT
442
443 return (QLA_FUNCTION_FAILED);
444}
445
5162cf0c
GM
446/**
447 * qla2x00_start_iocbs() - Execute the IOCB command
448 */
2d70c103 449void
5162cf0c
GM
450qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
451{
452 struct qla_hw_data *ha = vha->hw;
118e2ef9 453 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
5162cf0c 454
7ec0effd 455 if (IS_P3P_TYPE(ha)) {
5162cf0c
GM
456 qla82xx_start_iocbs(vha);
457 } else {
458 /* Adjust ring index. */
459 req->ring_index++;
460 if (req->ring_index == req->length) {
461 req->ring_index = 0;
462 req->ring_ptr = req->ring;
463 } else
464 req->ring_ptr++;
465
466 /* Set chip new ring index. */
d63b328f
QT
467 if (ha->mqenable || IS_QLA27XX(ha)) {
468 WRT_REG_DWORD(req->req_q_in, req->ring_index);
469 } else if (IS_QLA83XX(ha)) {
6246b8a1 470 WRT_REG_DWORD(req->req_q_in, req->ring_index);
98878a16 471 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
8ae6d9c7
GM
472 } else if (IS_QLAFX00(ha)) {
473 WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
474 RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
475 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
5162cf0c
GM
476 } else if (IS_FWI2_CAPABLE(ha)) {
477 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
478 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
479 } else {
480 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
481 req->ring_index);
482 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
483 }
484 }
485}
486
1da177e4
LT
487/**
488 * qla2x00_marker() - Send a marker IOCB to the firmware.
489 * @ha: HA context
490 * @loop_id: loop ID
491 * @lun: LUN
492 * @type: marker modifier
493 *
494 * Can be called from both normal and interrupt context.
495 *
cc3ef7bc 496 * Returns non-zero if a failure occurred, else zero.
1da177e4 497 */
3dbe756a 498static int
73208dfd
AC
499__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
500 struct rsp_que *rsp, uint16_t loop_id,
9cb78c16 501 uint64_t lun, uint8_t type)
1da177e4 502{
2b6c0cee 503 mrk_entry_t *mrk;
8ae6d9c7 504 struct mrk_entry_24xx *mrk24 = NULL;
8ae6d9c7 505
e315cd28
AC
506 struct qla_hw_data *ha = vha->hw;
507 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1da177e4 508
99b8212c 509 req = ha->req_q_map[0];
fa492630 510 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
2b6c0cee 511 if (mrk == NULL) {
7c3df132
SK
512 ql_log(ql_log_warn, base_vha, 0x3026,
513 "Failed to allocate Marker IOCB.\n");
1da177e4
LT
514
515 return (QLA_FUNCTION_FAILED);
516 }
517
2b6c0cee
AV
518 mrk->entry_type = MARKER_TYPE;
519 mrk->modifier = type;
1da177e4 520 if (type != MK_SYNC_ALL) {
bfd7334e 521 if (IS_FWI2_CAPABLE(ha)) {
2b6c0cee
AV
522 mrk24 = (struct mrk_entry_24xx *) mrk;
523 mrk24->nport_handle = cpu_to_le16(loop_id);
9cb78c16 524 int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
b797b6de 525 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
e315cd28 526 mrk24->vp_index = vha->vp_idx;
2afa19a9 527 mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
2b6c0cee
AV
528 } else {
529 SET_TARGET_ID(ha, mrk->target, loop_id);
9cb78c16 530 mrk->lun = cpu_to_le16((uint16_t)lun);
2b6c0cee 531 }
1da177e4
LT
532 }
533 wmb();
534
5162cf0c 535 qla2x00_start_iocbs(vha, req);
1da177e4
LT
536
537 return (QLA_SUCCESS);
538}
539
fa2a1ce5 540int
73208dfd 541qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
9cb78c16 542 struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
73208dfd 543 uint8_t type)
1da177e4
LT
544{
545 int ret;
546 unsigned long flags = 0;
547
73208dfd
AC
548 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
549 ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
550 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
1da177e4
LT
551
552 return (ret);
553}
554
2d70c103
NB
555/*
556 * qla2x00_issue_marker
557 *
558 * Issue marker
559 * Caller CAN have hardware lock held as specified by ha_locked parameter.
560 * Might release it, then reaquire.
561 */
562int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
563{
564 if (ha_locked) {
565 if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
566 MK_SYNC_ALL) != QLA_SUCCESS)
567 return QLA_FUNCTION_FAILED;
568 } else {
569 if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
570 MK_SYNC_ALL) != QLA_SUCCESS)
571 return QLA_FUNCTION_FAILED;
572 }
573 vha->marker_needed = 0;
574
575 return QLA_SUCCESS;
576}
577
5162cf0c
GM
578static inline int
579qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
580 uint16_t tot_dsds)
581{
582 uint32_t *cur_dsd = NULL;
583 scsi_qla_host_t *vha;
584 struct qla_hw_data *ha;
585 struct scsi_cmnd *cmd;
586 struct scatterlist *cur_seg;
587 uint32_t *dsd_seg;
588 void *next_dsd;
589 uint8_t avail_dsds;
590 uint8_t first_iocb = 1;
591 uint32_t dsd_list_len;
592 struct dsd_dma *dsd_ptr;
593 struct ct6_dsd *ctx;
1da177e4 594
9ba56b95 595 cmd = GET_CMD_SP(sp);
a9083016 596
5162cf0c 597 /* Update entry type to indicate Command Type 3 IOCB */
ad950360 598 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
5162cf0c
GM
599
600 /* No data transfer */
601 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
ad950360 602 cmd_pkt->byte_count = cpu_to_le32(0);
5162cf0c
GM
603 return 0;
604 }
605
25ff6af1 606 vha = sp->vha;
5162cf0c
GM
607 ha = vha->hw;
608
609 /* Set transfer direction */
610 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
ad950360 611 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
2be21fa2 612 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
fabbb8df 613 vha->qla_stats.output_requests++;
5162cf0c 614 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
ad950360 615 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
2be21fa2 616 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
fabbb8df 617 vha->qla_stats.input_requests++;
5162cf0c
GM
618 }
619
620 cur_seg = scsi_sglist(cmd);
9ba56b95 621 ctx = GET_CMD_CTX_SP(sp);
5162cf0c
GM
622
623 while (tot_dsds) {
624 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
625 QLA_DSDS_PER_IOCB : tot_dsds;
626 tot_dsds -= avail_dsds;
627 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
628
629 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
630 struct dsd_dma, list);
631 next_dsd = dsd_ptr->dsd_addr;
632 list_del(&dsd_ptr->list);
633 ha->gbl_dsd_avail--;
634 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
635 ctx->dsd_use_cnt++;
636 ha->gbl_dsd_inuse++;
637
638 if (first_iocb) {
639 first_iocb = 0;
640 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
641 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
642 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
643 cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
73208dfd 644 } else {
5162cf0c
GM
645 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
646 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
647 *cur_dsd++ = cpu_to_le32(dsd_list_len);
648 }
649 cur_dsd = (uint32_t *)next_dsd;
650 while (avail_dsds) {
651 dma_addr_t sle_dma;
652
653 sle_dma = sg_dma_address(cur_seg);
654 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
655 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
656 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
657 cur_seg = sg_next(cur_seg);
658 avail_dsds--;
73208dfd 659 }
2b6c0cee
AV
660 }
661
5162cf0c
GM
662 /* Null termination */
663 *cur_dsd++ = 0;
664 *cur_dsd++ = 0;
665 *cur_dsd++ = 0;
666 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
667 return 0;
2b6c0cee
AV
668}
669
5162cf0c
GM
670/*
671 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
672 * for Command Type 6.
2b6c0cee
AV
673 *
674 * @dsds: number of data segment decriptors needed
675 *
5162cf0c 676 * Returns the number of dsd list needed to store @dsds.
2b6c0cee 677 */
2374dd23 678static inline uint16_t
5162cf0c 679qla24xx_calc_dsd_lists(uint16_t dsds)
2b6c0cee 680{
5162cf0c 681 uint16_t dsd_lists = 0;
2b6c0cee 682
5162cf0c
GM
683 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
684 if (dsds % QLA_DSDS_PER_IOCB)
685 dsd_lists++;
686 return dsd_lists;
2b6c0cee
AV
687}
688
5162cf0c 689
2b6c0cee
AV
690/**
691 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
692 * IOCB types.
693 *
694 * @sp: SRB command to process
695 * @cmd_pkt: Command type 3 IOCB
696 * @tot_dsds: Total number of segments to transfer
d7459527 697 * @req: pointer to request queue
2b6c0cee 698 */
d7459527 699inline void
2b6c0cee 700qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
d7459527 701 uint16_t tot_dsds, struct req_que *req)
2b6c0cee
AV
702{
703 uint16_t avail_dsds;
704 uint32_t *cur_dsd;
e315cd28 705 scsi_qla_host_t *vha;
2b6c0cee 706 struct scsi_cmnd *cmd;
385d70b4
FT
707 struct scatterlist *sg;
708 int i;
2b6c0cee 709
9ba56b95 710 cmd = GET_CMD_SP(sp);
2b6c0cee
AV
711
712 /* Update entry type to indicate Command Type 3 IOCB */
ad950360 713 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
2b6c0cee
AV
714
715 /* No data transfer */
385d70b4 716 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
ad950360 717 cmd_pkt->byte_count = cpu_to_le32(0);
2b6c0cee
AV
718 return;
719 }
720
25ff6af1 721 vha = sp->vha;
2b6c0cee
AV
722
723 /* Set transfer direction */
49fd462a 724 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
ad950360 725 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
2be21fa2 726 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
fabbb8df 727 vha->qla_stats.output_requests++;
49fd462a 728 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
ad950360 729 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
2be21fa2 730 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
fabbb8df 731 vha->qla_stats.input_requests++;
49fd462a 732 }
2b6c0cee
AV
733
734 /* One DSD is available in the Command Type 3 IOCB */
735 avail_dsds = 1;
736 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
737
738 /* Load data segments */
385d70b4
FT
739
740 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
741 dma_addr_t sle_dma;
742 cont_a64_entry_t *cont_pkt;
743
744 /* Allocate additional continuation packets? */
745 if (avail_dsds == 0) {
746 /*
747 * Five DSDs are available in the Continuation
748 * Type 1 IOCB.
749 */
d7459527 750 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
385d70b4
FT
751 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
752 avail_dsds = 5;
2b6c0cee 753 }
385d70b4
FT
754
755 sle_dma = sg_dma_address(sg);
756 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
757 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
758 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
759 avail_dsds--;
2b6c0cee
AV
760 }
761}
762
bad75002
AE
763struct fw_dif_context {
764 uint32_t ref_tag;
765 uint16_t app_tag;
766 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
767 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
768};
769
770/*
771 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
772 *
773 */
774static inline void
e02587d7 775qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
bad75002
AE
776 unsigned int protcnt)
777{
9ba56b95 778 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
bad75002
AE
779
780 switch (scsi_get_prot_type(cmd)) {
bad75002 781 case SCSI_PROT_DIF_TYPE0:
8cb2049c
AE
782 /*
783 * No check for ql2xenablehba_err_chk, as it would be an
784 * I/O error if hba tag generation is not done.
785 */
786 pkt->ref_tag = cpu_to_le32((uint32_t)
787 (0xffffffff & scsi_get_lba(cmd)));
e02587d7
AE
788
789 if (!qla2x00_hba_err_chk_enabled(sp))
790 break;
791
8cb2049c
AE
792 pkt->ref_tag_mask[0] = 0xff;
793 pkt->ref_tag_mask[1] = 0xff;
794 pkt->ref_tag_mask[2] = 0xff;
795 pkt->ref_tag_mask[3] = 0xff;
bad75002
AE
796 break;
797
798 /*
799 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
800 * match LBA in CDB + N
801 */
802 case SCSI_PROT_DIF_TYPE2:
ad950360 803 pkt->app_tag = cpu_to_le16(0);
e02587d7
AE
804 pkt->app_tag_mask[0] = 0x0;
805 pkt->app_tag_mask[1] = 0x0;
0c470874
AE
806
807 pkt->ref_tag = cpu_to_le32((uint32_t)
808 (0xffffffff & scsi_get_lba(cmd)));
809
e02587d7
AE
810 if (!qla2x00_hba_err_chk_enabled(sp))
811 break;
812
0c470874
AE
813 /* enable ALL bytes of the ref tag */
814 pkt->ref_tag_mask[0] = 0xff;
815 pkt->ref_tag_mask[1] = 0xff;
816 pkt->ref_tag_mask[2] = 0xff;
817 pkt->ref_tag_mask[3] = 0xff;
bad75002
AE
818 break;
819
820 /* For Type 3 protection: 16 bit GUARD only */
821 case SCSI_PROT_DIF_TYPE3:
822 pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
823 pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
824 0x00;
825 break;
826
827 /*
828 * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
829 * 16 bit app tag.
830 */
831 case SCSI_PROT_DIF_TYPE1:
e02587d7
AE
832 pkt->ref_tag = cpu_to_le32((uint32_t)
833 (0xffffffff & scsi_get_lba(cmd)));
ad950360 834 pkt->app_tag = cpu_to_le16(0);
e02587d7
AE
835 pkt->app_tag_mask[0] = 0x0;
836 pkt->app_tag_mask[1] = 0x0;
837
838 if (!qla2x00_hba_err_chk_enabled(sp))
bad75002
AE
839 break;
840
bad75002
AE
841 /* enable ALL bytes of the ref tag */
842 pkt->ref_tag_mask[0] = 0xff;
843 pkt->ref_tag_mask[1] = 0xff;
844 pkt->ref_tag_mask[2] = 0xff;
845 pkt->ref_tag_mask[3] = 0xff;
846 break;
847 }
bad75002
AE
848}
849
d7459527 850int
8cb2049c
AE
851qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
852 uint32_t *partial)
853{
854 struct scatterlist *sg;
855 uint32_t cumulative_partial, sg_len;
856 dma_addr_t sg_dma_addr;
857
858 if (sgx->num_bytes == sgx->tot_bytes)
859 return 0;
860
861 sg = sgx->cur_sg;
862 cumulative_partial = sgx->tot_partial;
863
864 sg_dma_addr = sg_dma_address(sg);
865 sg_len = sg_dma_len(sg);
866
867 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
868
869 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
870 sgx->dma_len = (blk_sz - cumulative_partial);
871 sgx->tot_partial = 0;
872 sgx->num_bytes += blk_sz;
873 *partial = 0;
874 } else {
875 sgx->dma_len = sg_len - sgx->bytes_consumed;
876 sgx->tot_partial += sgx->dma_len;
877 *partial = 1;
878 }
879
880 sgx->bytes_consumed += sgx->dma_len;
881
882 if (sg_len == sgx->bytes_consumed) {
883 sg = sg_next(sg);
884 sgx->num_sg++;
885 sgx->cur_sg = sg;
886 sgx->bytes_consumed = 0;
887 }
888
889 return 1;
890}
891
f83adb61 892int
8cb2049c 893qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
be25152c 894 uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
8cb2049c
AE
895{
896 void *next_dsd;
897 uint8_t avail_dsds = 0;
898 uint32_t dsd_list_len;
899 struct dsd_dma *dsd_ptr;
900 struct scatterlist *sg_prot;
901 uint32_t *cur_dsd = dsd;
902 uint16_t used_dsds = tot_dsds;
f83adb61 903 uint32_t prot_int; /* protection interval */
8cb2049c
AE
904 uint32_t partial;
905 struct qla2_sgx sgx;
906 dma_addr_t sle_dma;
907 uint32_t sle_dma_len, tot_prot_dma_len = 0;
f83adb61 908 struct scsi_cmnd *cmd;
8cb2049c
AE
909
910 memset(&sgx, 0, sizeof(struct qla2_sgx));
f83adb61 911 if (sp) {
f83adb61
QT
912 cmd = GET_CMD_SP(sp);
913 prot_int = cmd->device->sector_size;
914
915 sgx.tot_bytes = scsi_bufflen(cmd);
916 sgx.cur_sg = scsi_sglist(cmd);
917 sgx.sp = sp;
918
919 sg_prot = scsi_prot_sglist(cmd);
920 } else if (tc) {
f83adb61
QT
921 prot_int = tc->blk_sz;
922 sgx.tot_bytes = tc->bufflen;
923 sgx.cur_sg = tc->sg;
924 sg_prot = tc->prot_sg;
925 } else {
926 BUG();
927 return 1;
928 }
8cb2049c
AE
929
930 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
931
932 sle_dma = sgx.dma_addr;
933 sle_dma_len = sgx.dma_len;
934alloc_and_fill:
935 /* Allocate additional continuation packets? */
936 if (avail_dsds == 0) {
937 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
938 QLA_DSDS_PER_IOCB : used_dsds;
939 dsd_list_len = (avail_dsds + 1) * 12;
940 used_dsds -= avail_dsds;
941
942 /* allocate tracking DS */
943 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
944 if (!dsd_ptr)
945 return 1;
946
947 /* allocate new list */
948 dsd_ptr->dsd_addr = next_dsd =
949 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
950 &dsd_ptr->dsd_list_dma);
951
952 if (!next_dsd) {
953 /*
954 * Need to cleanup only this dsd_ptr, rest
955 * will be done by sp_free_dma()
956 */
957 kfree(dsd_ptr);
958 return 1;
959 }
960
f83adb61
QT
961 if (sp) {
962 list_add_tail(&dsd_ptr->list,
963 &((struct crc_context *)
964 sp->u.scmd.ctx)->dsd_list);
965
966 sp->flags |= SRB_CRC_CTX_DSD_VALID;
967 } else {
968 list_add_tail(&dsd_ptr->list,
969 &(tc->ctx->dsd_list));
be25152c 970 *tc->ctx_dsd_alloced = 1;
f83adb61 971 }
8cb2049c 972
8cb2049c
AE
973
974 /* add new list to cmd iocb or last list */
975 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
976 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
977 *cur_dsd++ = dsd_list_len;
978 cur_dsd = (uint32_t *)next_dsd;
979 }
980 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
981 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
982 *cur_dsd++ = cpu_to_le32(sle_dma_len);
983 avail_dsds--;
984
985 if (partial == 0) {
986 /* Got a full protection interval */
987 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
988 sle_dma_len = 8;
bad75002 989
8cb2049c
AE
990 tot_prot_dma_len += sle_dma_len;
991 if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
992 tot_prot_dma_len = 0;
993 sg_prot = sg_next(sg_prot);
994 }
995
996 partial = 1; /* So as to not re-enter this block */
997 goto alloc_and_fill;
998 }
999 }
1000 /* Null termination */
1001 *cur_dsd++ = 0;
1002 *cur_dsd++ = 0;
1003 *cur_dsd++ = 0;
1004 return 0;
1005}
5162cf0c 1006
f83adb61 1007int
bad75002 1008qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
be25152c 1009 uint16_t tot_dsds, struct qla_tc_param *tc)
bad75002
AE
1010{
1011 void *next_dsd;
1012 uint8_t avail_dsds = 0;
1013 uint32_t dsd_list_len;
1014 struct dsd_dma *dsd_ptr;
f83adb61 1015 struct scatterlist *sg, *sgl;
bad75002
AE
1016 uint32_t *cur_dsd = dsd;
1017 int i;
1018 uint16_t used_dsds = tot_dsds;
f83adb61 1019 struct scsi_cmnd *cmd;
f83adb61
QT
1020
1021 if (sp) {
1022 cmd = GET_CMD_SP(sp);
1023 sgl = scsi_sglist(cmd);
f83adb61
QT
1024 } else if (tc) {
1025 sgl = tc->sg;
f83adb61
QT
1026 } else {
1027 BUG();
1028 return 1;
1029 }
bad75002 1030
f83adb61
QT
1031
1032 for_each_sg(sgl, sg, tot_dsds, i) {
bad75002
AE
1033 dma_addr_t sle_dma;
1034
1035 /* Allocate additional continuation packets? */
1036 if (avail_dsds == 0) {
1037 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1038 QLA_DSDS_PER_IOCB : used_dsds;
1039 dsd_list_len = (avail_dsds + 1) * 12;
1040 used_dsds -= avail_dsds;
1041
1042 /* allocate tracking DS */
1043 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1044 if (!dsd_ptr)
1045 return 1;
1046
1047 /* allocate new list */
1048 dsd_ptr->dsd_addr = next_dsd =
1049 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1050 &dsd_ptr->dsd_list_dma);
1051
1052 if (!next_dsd) {
1053 /*
1054 * Need to cleanup only this dsd_ptr, rest
1055 * will be done by sp_free_dma()
1056 */
1057 kfree(dsd_ptr);
1058 return 1;
1059 }
1060
f83adb61
QT
1061 if (sp) {
1062 list_add_tail(&dsd_ptr->list,
1063 &((struct crc_context *)
1064 sp->u.scmd.ctx)->dsd_list);
bad75002 1065
f83adb61
QT
1066 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1067 } else {
1068 list_add_tail(&dsd_ptr->list,
1069 &(tc->ctx->dsd_list));
be25152c 1070 *tc->ctx_dsd_alloced = 1;
f83adb61 1071 }
bad75002
AE
1072
1073 /* add new list to cmd iocb or last list */
1074 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1075 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1076 *cur_dsd++ = dsd_list_len;
1077 cur_dsd = (uint32_t *)next_dsd;
1078 }
1079 sle_dma = sg_dma_address(sg);
9e522cd8 1080
bad75002
AE
1081 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1082 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1083 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1084 avail_dsds--;
1085
bad75002
AE
1086 }
1087 /* Null termination */
1088 *cur_dsd++ = 0;
1089 *cur_dsd++ = 0;
1090 *cur_dsd++ = 0;
1091 return 0;
1092}
1093
f83adb61 1094int
bad75002 1095qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
be25152c 1096 uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
bad75002
AE
1097{
1098 void *next_dsd;
1099 uint8_t avail_dsds = 0;
1100 uint32_t dsd_list_len;
1101 struct dsd_dma *dsd_ptr;
f83adb61 1102 struct scatterlist *sg, *sgl;
bad75002
AE
1103 int i;
1104 struct scsi_cmnd *cmd;
1105 uint32_t *cur_dsd = dsd;
f83adb61
QT
1106 uint16_t used_dsds = tot_dsds;
1107 struct scsi_qla_host *vha;
1108
1109 if (sp) {
1110 cmd = GET_CMD_SP(sp);
1111 sgl = scsi_prot_sglist(cmd);
25ff6af1 1112 vha = sp->vha;
f83adb61
QT
1113 } else if (tc) {
1114 vha = tc->vha;
1115 sgl = tc->prot_sg;
1116 } else {
1117 BUG();
1118 return 1;
1119 }
bad75002 1120
f83adb61
QT
1121 ql_dbg(ql_dbg_tgt, vha, 0xe021,
1122 "%s: enter\n", __func__);
1123
1124 for_each_sg(sgl, sg, tot_dsds, i) {
bad75002
AE
1125 dma_addr_t sle_dma;
1126
1127 /* Allocate additional continuation packets? */
1128 if (avail_dsds == 0) {
1129 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1130 QLA_DSDS_PER_IOCB : used_dsds;
1131 dsd_list_len = (avail_dsds + 1) * 12;
1132 used_dsds -= avail_dsds;
1133
1134 /* allocate tracking DS */
1135 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1136 if (!dsd_ptr)
1137 return 1;
1138
1139 /* allocate new list */
1140 dsd_ptr->dsd_addr = next_dsd =
1141 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1142 &dsd_ptr->dsd_list_dma);
1143
1144 if (!next_dsd) {
1145 /*
1146 * Need to cleanup only this dsd_ptr, rest
1147 * will be done by sp_free_dma()
1148 */
1149 kfree(dsd_ptr);
1150 return 1;
1151 }
1152
f83adb61
QT
1153 if (sp) {
1154 list_add_tail(&dsd_ptr->list,
1155 &((struct crc_context *)
1156 sp->u.scmd.ctx)->dsd_list);
bad75002 1157
f83adb61
QT
1158 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1159 } else {
1160 list_add_tail(&dsd_ptr->list,
1161 &(tc->ctx->dsd_list));
be25152c 1162 *tc->ctx_dsd_alloced = 1;
f83adb61 1163 }
bad75002
AE
1164
1165 /* add new list to cmd iocb or last list */
1166 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1167 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1168 *cur_dsd++ = dsd_list_len;
1169 cur_dsd = (uint32_t *)next_dsd;
1170 }
1171 sle_dma = sg_dma_address(sg);
9e522cd8 1172
bad75002
AE
1173 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1174 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1175 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1176
bad75002
AE
1177 avail_dsds--;
1178 }
1179 /* Null termination */
1180 *cur_dsd++ = 0;
1181 *cur_dsd++ = 0;
1182 *cur_dsd++ = 0;
1183 return 0;
1184}
1185
1186/**
1187 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1188 * Type 6 IOCB types.
1189 *
1190 * @sp: SRB command to process
1191 * @cmd_pkt: Command type 3 IOCB
1192 * @tot_dsds: Total number of segments to transfer
1193 */
d7459527 1194inline int
bad75002
AE
1195qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1196 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1197{
1198 uint32_t *cur_dsd, *fcp_dl;
1199 scsi_qla_host_t *vha;
1200 struct scsi_cmnd *cmd;
8cb2049c 1201 uint32_t total_bytes = 0;
bad75002
AE
1202 uint32_t data_bytes;
1203 uint32_t dif_bytes;
1204 uint8_t bundling = 1;
1205 uint16_t blk_size;
1206 uint8_t *clr_ptr;
1207 struct crc_context *crc_ctx_pkt = NULL;
1208 struct qla_hw_data *ha;
1209 uint8_t additional_fcpcdb_len;
1210 uint16_t fcp_cmnd_len;
1211 struct fcp_cmnd *fcp_cmnd;
1212 dma_addr_t crc_ctx_dma;
1213
9ba56b95 1214 cmd = GET_CMD_SP(sp);
bad75002 1215
bad75002 1216 /* Update entry type to indicate Command Type CRC_2 IOCB */
ad950360 1217 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
bad75002 1218
25ff6af1 1219 vha = sp->vha;
7c3df132
SK
1220 ha = vha->hw;
1221
bad75002
AE
1222 /* No data transfer */
1223 data_bytes = scsi_bufflen(cmd);
1224 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
ad950360 1225 cmd_pkt->byte_count = cpu_to_le32(0);
bad75002
AE
1226 return QLA_SUCCESS;
1227 }
1228
25ff6af1 1229 cmd_pkt->vp_index = sp->vha->vp_idx;
bad75002
AE
1230
1231 /* Set transfer direction */
1232 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1233 cmd_pkt->control_flags =
ad950360 1234 cpu_to_le16(CF_WRITE_DATA);
bad75002
AE
1235 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1236 cmd_pkt->control_flags =
ad950360 1237 cpu_to_le16(CF_READ_DATA);
bad75002
AE
1238 }
1239
9ba56b95
GM
1240 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1241 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1242 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1243 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
bad75002
AE
1244 bundling = 0;
1245
1246 /* Allocate CRC context from global pool */
9ba56b95
GM
1247 crc_ctx_pkt = sp->u.scmd.ctx =
1248 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
bad75002
AE
1249
1250 if (!crc_ctx_pkt)
1251 goto crc_queuing_error;
1252
1253 /* Zero out CTX area. */
1254 clr_ptr = (uint8_t *)crc_ctx_pkt;
1255 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1256
1257 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1258
1259 sp->flags |= SRB_CRC_CTX_DMA_VALID;
1260
1261 /* Set handle */
1262 crc_ctx_pkt->handle = cmd_pkt->handle;
1263
1264 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1265
e02587d7 1266 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
bad75002
AE
1267 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1268
1269 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1270 cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1271 cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1272
1273 /* Determine SCSI command length -- align to 4 byte boundary */
1274 if (cmd->cmd_len > 16) {
bad75002
AE
1275 additional_fcpcdb_len = cmd->cmd_len - 16;
1276 if ((cmd->cmd_len % 4) != 0) {
1277 /* SCSI cmd > 16 bytes must be multiple of 4 */
1278 goto crc_queuing_error;
1279 }
1280 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1281 } else {
1282 additional_fcpcdb_len = 0;
1283 fcp_cmnd_len = 12 + 16 + 4;
1284 }
1285
1286 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1287
1288 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1289 if (cmd->sc_data_direction == DMA_TO_DEVICE)
1290 fcp_cmnd->additional_cdb_len |= 1;
1291 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1292 fcp_cmnd->additional_cdb_len |= 2;
1293
9ba56b95 1294 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
bad75002
AE
1295 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1296 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1297 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1298 LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1299 cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1300 MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
65155b37 1301 fcp_cmnd->task_management = 0;
50668633 1302 fcp_cmnd->task_attribute = TSK_SIMPLE;
ff2fc42e 1303
bad75002
AE
1304 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1305
bad75002 1306 /* Compute dif len and adjust data len to incude protection */
bad75002
AE
1307 dif_bytes = 0;
1308 blk_size = cmd->device->sector_size;
8cb2049c
AE
1309 dif_bytes = (data_bytes / blk_size) * 8;
1310
9ba56b95 1311 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
8cb2049c
AE
1312 case SCSI_PROT_READ_INSERT:
1313 case SCSI_PROT_WRITE_STRIP:
1314 total_bytes = data_bytes;
1315 data_bytes += dif_bytes;
1316 break;
1317
1318 case SCSI_PROT_READ_STRIP:
1319 case SCSI_PROT_WRITE_INSERT:
1320 case SCSI_PROT_READ_PASS:
1321 case SCSI_PROT_WRITE_PASS:
1322 total_bytes = data_bytes + dif_bytes;
1323 break;
1324 default:
1325 BUG();
bad75002
AE
1326 }
1327
e02587d7 1328 if (!qla2x00_hba_err_chk_enabled(sp))
bad75002 1329 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
9e522cd8
AE
1330 /* HBA error checking enabled */
1331 else if (IS_PI_UNINIT_CAPABLE(ha)) {
1332 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1333 || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1334 SCSI_PROT_DIF_TYPE2))
1335 fw_prot_opts |= BIT_10;
1336 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1337 SCSI_PROT_DIF_TYPE3)
1338 fw_prot_opts |= BIT_11;
1339 }
bad75002
AE
1340
1341 if (!bundling) {
1342 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1343 } else {
1344 /*
1345 * Configure Bundling if we need to fetch interlaving
1346 * protection PCI accesses
1347 */
1348 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1349 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1350 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1351 tot_prot_dsds);
1352 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1353 }
1354
1355 /* Finish the common fields of CRC pkt */
1356 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1357 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1358 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
ad950360 1359 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
bad75002
AE
1360 /* Fibre channel byte count */
1361 cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1362 fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1363 additional_fcpcdb_len);
1364 *fcp_dl = htonl(total_bytes);
1365
0c470874 1366 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
ad950360 1367 cmd_pkt->byte_count = cpu_to_le32(0);
0c470874
AE
1368 return QLA_SUCCESS;
1369 }
bad75002
AE
1370 /* Walks data segments */
1371
ad950360 1372 cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
8cb2049c
AE
1373
1374 if (!bundling && tot_prot_dsds) {
1375 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
f83adb61 1376 cur_dsd, tot_dsds, NULL))
8cb2049c
AE
1377 goto crc_queuing_error;
1378 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
f83adb61 1379 (tot_dsds - tot_prot_dsds), NULL))
bad75002
AE
1380 goto crc_queuing_error;
1381
1382 if (bundling && tot_prot_dsds) {
1383 /* Walks dif segments */
ad950360 1384 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
bad75002
AE
1385 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1386 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
f83adb61 1387 tot_prot_dsds, NULL))
bad75002
AE
1388 goto crc_queuing_error;
1389 }
1390 return QLA_SUCCESS;
1391
1392crc_queuing_error:
bad75002
AE
1393 /* Cleanup will be performed by the caller */
1394
1395 return QLA_FUNCTION_FAILED;
1396}
2b6c0cee
AV
1397
1398/**
1399 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1400 * @sp: command to send to the ISP
1401 *
cc3ef7bc 1402 * Returns non-zero if a failure occurred, else zero.
2b6c0cee
AV
1403 */
1404int
1405qla24xx_start_scsi(srb_t *sp)
1406{
52c82823 1407 int nseg;
2b6c0cee 1408 unsigned long flags;
2b6c0cee
AV
1409 uint32_t *clr_ptr;
1410 uint32_t index;
1411 uint32_t handle;
1412 struct cmd_type_7 *cmd_pkt;
2b6c0cee
AV
1413 uint16_t cnt;
1414 uint16_t req_cnt;
1415 uint16_t tot_dsds;
73208dfd
AC
1416 struct req_que *req = NULL;
1417 struct rsp_que *rsp = NULL;
9ba56b95 1418 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
25ff6af1 1419 struct scsi_qla_host *vha = sp->vha;
73208dfd 1420 struct qla_hw_data *ha = vha->hw;
2b6c0cee
AV
1421
1422 /* Setup device pointers. */
59e0b8b0 1423 req = vha->req;
d7459527 1424 rsp = req->rsp;
73208dfd 1425
2b6c0cee
AV
1426 /* So we know we haven't pci_map'ed anything yet */
1427 tot_dsds = 0;
1428
1429 /* Send marker if required */
e315cd28 1430 if (vha->marker_needed != 0) {
7c3df132
SK
1431 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1432 QLA_SUCCESS)
2b6c0cee 1433 return QLA_FUNCTION_FAILED;
e315cd28 1434 vha->marker_needed = 0;
2b6c0cee
AV
1435 }
1436
1437 /* Acquire ring specific lock */
e315cd28 1438 spin_lock_irqsave(&ha->hardware_lock, flags);
2b6c0cee
AV
1439
1440 /* Check for room in outstanding command list. */
e315cd28 1441 handle = req->current_outstanding_cmd;
8d93f550 1442 for (index = 1; index < req->num_outstanding_cmds; index++) {
2b6c0cee 1443 handle++;
8d93f550 1444 if (handle == req->num_outstanding_cmds)
2b6c0cee 1445 handle = 1;
e315cd28 1446 if (!req->outstanding_cmds[handle])
2b6c0cee
AV
1447 break;
1448 }
8d93f550 1449 if (index == req->num_outstanding_cmds)
2b6c0cee
AV
1450 goto queuing_error;
1451
1452 /* Map the sg table so we have an accurate count of sg entries needed */
2c3dfe3f
SJ
1453 if (scsi_sg_count(cmd)) {
1454 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1455 scsi_sg_count(cmd), cmd->sc_data_direction);
1456 if (unlikely(!nseg))
2b6c0cee 1457 goto queuing_error;
2c3dfe3f
SJ
1458 } else
1459 nseg = 0;
1460
385d70b4 1461 tot_dsds = nseg;
7c3df132 1462 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
e315cd28 1463 if (req->cnt < (req_cnt + 2)) {
7c6300e3
JC
1464 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1465 RD_REG_DWORD_RELAXED(req->req_q_out);
e315cd28
AC
1466 if (req->ring_index < cnt)
1467 req->cnt = cnt - req->ring_index;
2b6c0cee 1468 else
e315cd28
AC
1469 req->cnt = req->length -
1470 (req->ring_index - cnt);
a6eb3c9f
CL
1471 if (req->cnt < (req_cnt + 2))
1472 goto queuing_error;
2b6c0cee 1473 }
2b6c0cee
AV
1474
1475 /* Build command packet. */
e315cd28
AC
1476 req->current_outstanding_cmd = handle;
1477 req->outstanding_cmds[handle] = sp;
cf53b069 1478 sp->handle = handle;
9ba56b95 1479 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
e315cd28 1480 req->cnt -= req_cnt;
2b6c0cee 1481
e315cd28 1482 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2afa19a9 1483 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2b6c0cee
AV
1484
1485 /* Zero out remaining portion of packet. */
72df8325 1486 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2b6c0cee
AV
1487 clr_ptr = (uint32_t *)cmd_pkt + 2;
1488 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1489 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1490
1491 /* Set NPORT-ID and LUN number*/
1492 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1493 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1494 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1495 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
25ff6af1 1496 cmd_pkt->vp_index = sp->vha->vp_idx;
2b6c0cee 1497
9ba56b95 1498 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
0d4be124 1499 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2b6c0cee 1500
50668633 1501 cmd_pkt->task = TSK_SIMPLE;
ff2fc42e 1502
2b6c0cee
AV
1503 /* Load SCSI command packet. */
1504 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1505 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1506
385d70b4 1507 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2b6c0cee
AV
1508
1509 /* Build IOCB segments */
d7459527 1510 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2b6c0cee
AV
1511
1512 /* Set total data segment count. */
1513 cmd_pkt->entry_count = (uint8_t)req_cnt;
1514 wmb();
2b6c0cee 1515 /* Adjust ring index. */
e315cd28
AC
1516 req->ring_index++;
1517 if (req->ring_index == req->length) {
1518 req->ring_index = 0;
1519 req->ring_ptr = req->ring;
2b6c0cee 1520 } else
e315cd28 1521 req->ring_ptr++;
2b6c0cee
AV
1522
1523 sp->flags |= SRB_DMA_VALID;
2b6c0cee
AV
1524
1525 /* Set chip new ring index. */
08029990
AV
1526 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1527 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
2b6c0cee 1528
4fdfefe5 1529 /* Manage unprocessed RIO/ZIO commands in response queue. */
e315cd28 1530 if (vha->flags.process_response_queue &&
73208dfd 1531 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2afa19a9 1532 qla24xx_process_response_queue(vha, rsp);
4fdfefe5 1533
e315cd28 1534 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2b6c0cee
AV
1535 return QLA_SUCCESS;
1536
1537queuing_error:
385d70b4
FT
1538 if (tot_dsds)
1539 scsi_dma_unmap(cmd);
1540
e315cd28 1541 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2b6c0cee
AV
1542
1543 return QLA_FUNCTION_FAILED;
1da177e4 1544}
68ca949c 1545
bad75002
AE
1546/**
1547 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1548 * @sp: command to send to the ISP
1549 *
1550 * Returns non-zero if a failure occurred, else zero.
1551 */
1552int
1553qla24xx_dif_start_scsi(srb_t *sp)
1554{
1555 int nseg;
1556 unsigned long flags;
1557 uint32_t *clr_ptr;
1558 uint32_t index;
1559 uint32_t handle;
1560 uint16_t cnt;
1561 uint16_t req_cnt = 0;
1562 uint16_t tot_dsds;
1563 uint16_t tot_prot_dsds;
1564 uint16_t fw_prot_opts = 0;
1565 struct req_que *req = NULL;
1566 struct rsp_que *rsp = NULL;
9ba56b95 1567 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
25ff6af1 1568 struct scsi_qla_host *vha = sp->vha;
bad75002
AE
1569 struct qla_hw_data *ha = vha->hw;
1570 struct cmd_type_crc_2 *cmd_pkt;
1571 uint32_t status = 0;
1572
1573#define QDSS_GOT_Q_SPACE BIT_0
1574
0c470874
AE
1575 /* Only process protection or >16 cdb in this routine */
1576 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1577 if (cmd->cmd_len <= 16)
1578 return qla24xx_start_scsi(sp);
1579 }
bad75002
AE
1580
1581 /* Setup device pointers. */
bad75002 1582 req = vha->req;
d7459527 1583 rsp = req->rsp;
bad75002
AE
1584
1585 /* So we know we haven't pci_map'ed anything yet */
1586 tot_dsds = 0;
1587
1588 /* Send marker if required */
1589 if (vha->marker_needed != 0) {
1590 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1591 QLA_SUCCESS)
1592 return QLA_FUNCTION_FAILED;
1593 vha->marker_needed = 0;
1594 }
1595
1596 /* Acquire ring specific lock */
1597 spin_lock_irqsave(&ha->hardware_lock, flags);
1598
1599 /* Check for room in outstanding command list. */
1600 handle = req->current_outstanding_cmd;
8d93f550 1601 for (index = 1; index < req->num_outstanding_cmds; index++) {
bad75002 1602 handle++;
8d93f550 1603 if (handle == req->num_outstanding_cmds)
bad75002
AE
1604 handle = 1;
1605 if (!req->outstanding_cmds[handle])
1606 break;
1607 }
1608
8d93f550 1609 if (index == req->num_outstanding_cmds)
bad75002
AE
1610 goto queuing_error;
1611
1612 /* Compute number of required data segments */
1613 /* Map the sg table so we have an accurate count of sg entries needed */
1614 if (scsi_sg_count(cmd)) {
1615 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1616 scsi_sg_count(cmd), cmd->sc_data_direction);
1617 if (unlikely(!nseg))
1618 goto queuing_error;
1619 else
1620 sp->flags |= SRB_DMA_VALID;
8cb2049c
AE
1621
1622 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1623 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1624 struct qla2_sgx sgx;
1625 uint32_t partial;
1626
1627 memset(&sgx, 0, sizeof(struct qla2_sgx));
1628 sgx.tot_bytes = scsi_bufflen(cmd);
1629 sgx.cur_sg = scsi_sglist(cmd);
1630 sgx.sp = sp;
1631
1632 nseg = 0;
1633 while (qla24xx_get_one_block_sg(
1634 cmd->device->sector_size, &sgx, &partial))
1635 nseg++;
1636 }
bad75002
AE
1637 } else
1638 nseg = 0;
1639
1640 /* number of required data segments */
1641 tot_dsds = nseg;
1642
1643 /* Compute number of required protection segments */
1644 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1645 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1646 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1647 if (unlikely(!nseg))
1648 goto queuing_error;
1649 else
1650 sp->flags |= SRB_CRC_PROT_DMA_VALID;
8cb2049c
AE
1651
1652 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1653 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1654 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1655 }
bad75002
AE
1656 } else {
1657 nseg = 0;
1658 }
1659
1660 req_cnt = 1;
1661 /* Total Data and protection sg segment(s) */
1662 tot_prot_dsds = nseg;
1663 tot_dsds += nseg;
1664 if (req->cnt < (req_cnt + 2)) {
7c6300e3
JC
1665 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1666 RD_REG_DWORD_RELAXED(req->req_q_out);
bad75002
AE
1667 if (req->ring_index < cnt)
1668 req->cnt = cnt - req->ring_index;
1669 else
1670 req->cnt = req->length -
1671 (req->ring_index - cnt);
a6eb3c9f
CL
1672 if (req->cnt < (req_cnt + 2))
1673 goto queuing_error;
bad75002
AE
1674 }
1675
bad75002
AE
1676 status |= QDSS_GOT_Q_SPACE;
1677
1678 /* Build header part of command packet (excluding the OPCODE). */
1679 req->current_outstanding_cmd = handle;
1680 req->outstanding_cmds[handle] = sp;
8cb2049c 1681 sp->handle = handle;
9ba56b95 1682 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
bad75002
AE
1683 req->cnt -= req_cnt;
1684
1685 /* Fill-in common area */
1686 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1687 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1688
1689 clr_ptr = (uint32_t *)cmd_pkt + 2;
1690 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1691
1692 /* Set NPORT-ID and LUN number*/
1693 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1694 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1695 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1696 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1697
9ba56b95 1698 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
bad75002
AE
1699 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1700
1701 /* Total Data and protection segment(s) */
1702 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1703
1704 /* Build IOCB segments and adjust for data protection segments */
1705 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1706 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1707 QLA_SUCCESS)
1708 goto queuing_error;
1709
1710 cmd_pkt->entry_count = (uint8_t)req_cnt;
1711 /* Specify response queue number where completion should happen */
1712 cmd_pkt->entry_status = (uint8_t) rsp->id;
ad950360 1713 cmd_pkt->timeout = cpu_to_le16(0);
bad75002
AE
1714 wmb();
1715
1716 /* Adjust ring index. */
1717 req->ring_index++;
1718 if (req->ring_index == req->length) {
1719 req->ring_index = 0;
1720 req->ring_ptr = req->ring;
1721 } else
1722 req->ring_ptr++;
1723
1724 /* Set chip new ring index. */
1725 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1726 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1727
1728 /* Manage unprocessed RIO/ZIO commands in response queue. */
1729 if (vha->flags.process_response_queue &&
1730 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1731 qla24xx_process_response_queue(vha, rsp);
1732
1733 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1734
1735 return QLA_SUCCESS;
1736
1737queuing_error:
1738 if (status & QDSS_GOT_Q_SPACE) {
1739 req->outstanding_cmds[handle] = NULL;
1740 req->cnt += req_cnt;
1741 }
1742 /* Cleanup will be performed by the caller (queuecommand) */
1743
1744 spin_unlock_irqrestore(&ha->hardware_lock, flags);
bad75002
AE
1745 return QLA_FUNCTION_FAILED;
1746}

/**
 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla2xxx_start_scsi_mq(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}
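
/*
 * Illustrative note on the handle scan above: probing starts one past
 * current_outstanding_cmd and wraps from num_outstanding_cmds back to 1,
 * so slot 0 is never used.  Starting from handle 5, for example:
 *
 *	probe order: 6, 7, ..., num_outstanding_cmds - 1, 1, 2, ..., 5
 *
 * A handle of 0 can therefore double as "free", and a full table is the
 * case where all num_outstanding_cmds - 1 probes find occupied slots.
 */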

/**
 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2xxx_dif_start_scsi_mq(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;
	struct qla_qpair *qpair = sp->qpair;

#define QDSS_GOT_Q_SPACE BIT_0

	/* Check for host side state */
	if (!qpair->online) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	if (!qpair->difdix_supported &&
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla2xxx_start_scsi_mq(sp);
	}

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return QLA_FUNCTION_FAILED;
}
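
/*
 * Illustrative note: for SCSI_PROT_READ_INSERT / SCSI_PROT_WRITE_STRIP the
 * HBA inserts or strips the protection data itself, so the protection
 * segment count is recomputed in whole logical blocks instead of being
 * taken from a protection scatter-gather mapping.  For a 32 KiB transfer
 * on a 512-byte-sector device:
 *
 *	nseg = scsi_bufflen(cmd) / cmd->device->sector_size
 *	     = 32768 / 512 = 64 blocks,
 *
 * matching the one-block-at-a-time walk qla24xx_get_one_block_sg() does on
 * the data side.
 */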

/* Generic Control-SRB manipulation functions. */

/* hardware_lock assumed to be held. */
void *
__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = qpair->req;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (!sp)
		goto skip_cmd_array;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x700b,
		    "No room on outstanding cmd array.\n");
		goto queuing_error;
	}

	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

	/* Adjust entry-counts as needed. */
	if (sp->type != SRB_SCSI_CMD)
		req_cnt = sp->iocbs;

skip_cmd_array:
	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2)
		goto queuing_error;

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	if (IS_QLAFX00(ha)) {
		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
	} else {
		pkt->entry_count = req_cnt;
		pkt->handle = handle;
	}

	/* Success path: return without bumping the failure counter below. */
	return pkt;

queuing_error:
	qpair->tgt_counters.num_alloc_iocb_failed++;
	return pkt;
}

void *
qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;

	if (qla2x00_reset_active(vha))
		return NULL;

	return __qla2x00_alloc_iocbs(qpair, sp);
}

void *
qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
{
	return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
}
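
/*
 * Minimal usage sketch (error handling elided; this mirrors what
 * qla2x00_start_sp() below actually does).  __qla2x00_alloc_iocbs()
 * assumes the caller already holds the lock serializing the request ring:
 *
 *	spin_lock_irqsave(&ha->hardware_lock, flags);
 *	pkt = qla2x00_alloc_iocbs(vha, sp);
 *	if (pkt) {
 *		... fill in the IOCB-type specific fields ...
 *		wmb();
 *		qla2x00_start_iocbs(vha, ha->req_q_map[0]);
 *	}
 *	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 *
 * qla2x00_alloc_iocbs_ready() is the variant to use when a reset may be
 * in progress: it returns NULL instead of touching the ring.
 */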

static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}

static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	if (!sp->fcport->se_sess ||
	    !sp->fcport->keep_nport_handle)
		logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id) :
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}

static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}

static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
	uint32_t flags;
	uint64_t lun;
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;
	struct req_que *req = vha->req;

	flags = iocb->u.tmf.flags;
	lun = iocb->u.tmf.lun;

	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->entry_count = 1;
	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->control_flags = cpu_to_le32(flags);
	tsk->port_id[0] = fcport->d_id.b.al_pa;
	tsk->port_id[1] = fcport->d_id.b.area;
	tsk->port_id[2] = fcport->d_id.b.domain;
	tsk->vp_index = fcport->vha->vp_idx;

	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,
		    sizeof(tsk->lun));
	}
}
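
/*
 * Illustrative sketch of issuing a LUN reset through the builder above
 * (hypothetical caller; the real entry points live elsewhere in the
 * driver).  sp->type routes to qla24xx_tm_iocb() in qla2x00_start_sp():
 *
 *	sp->type = SRB_TM_CMD;
 *	sp->u.iocb_cmd.u.tmf.flags = TCF_LUN_RESET;
 *	sp->u.iocb_cmd.u.tmf.lun = lun;
 *	rval = qla2x00_start_sp(sp);
 *
 * Note only TCF_LUN_RESET fills in and FCP-swaps the LUN field; other task
 * management flags leave it zero.
 */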

static void
qla2x00_els_dcmd_sp_free(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *elsio = &sp->u.iocb_cmd;

	kfree(sp->fcport);

	if (elsio->u.els_logo.els_logo_pyld)
		dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
		    elsio->u.els_logo.els_logo_pyld,
		    elsio->u.els_logo.els_logo_pyld_dma);

	del_timer(&elsio->timer);
	qla2x00_rel_sp(sp);
}

static void
qla2x00_els_dcmd_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	unsigned long flags = 0;

	ql_dbg(ql_dbg_io, vha, 0x3069,
	    "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
	    sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	/* Abort the exchange */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (ha->isp_ops->abort_command(sp)) {
		ql_dbg(ql_dbg_io, vha, 0x3070,
		    "mbx abort_command failed.\n");
	} else {
		ql_dbg(ql_dbg_io, vha, 0x3071,
		    "mbx abort_command success.\n");
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	complete(&lio->u.els_logo.comp);
}

static void
qla2x00_els_dcmd_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = sp->vha;

	ql_dbg(ql_dbg_io, vha, 0x3072,
	    "%s hdl=%x, portid=%02x%02x%02x done\n",
	    sp->name, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	complete(&lio->u.els_logo.comp);
}

int
qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
    port_id_t remote_did)
{
	srb_t *sp;
	fc_port_t *fcport = NULL;
	struct srb_iocb *elsio = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct els_logo_payload logo_pyld;
	int rval = QLA_SUCCESS;

	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
		return -ENOMEM;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		kfree(fcport);
		ql_log(ql_log_info, vha, 0x70e6,
		    "SRB allocation failed\n");
		return -ENOMEM;
	}

	elsio = &sp->u.iocb_cmd;
	fcport->loop_id = 0xFFFF;
	fcport->d_id.b.domain = remote_did.b.domain;
	fcport->d_id.b.area = remote_did.b.area;
	fcport->d_id.b.al_pa = remote_did.b.al_pa;

	ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	sp->type = SRB_ELS_DCMD;
	sp->name = "ELS_DCMD";
	sp->fcport = fcport;
	qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
	elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
	sp->done = qla2x00_els_dcmd_sp_done;
	sp->free = qla2x00_els_dcmd_sp_free;

	elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
	    DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
	    GFP_KERNEL);

	if (!elsio->u.els_logo.els_logo_pyld) {
		sp->free(sp);
		return QLA_FUNCTION_FAILED;
	}

	memset(&logo_pyld, 0, sizeof(struct els_logo_payload));

	elsio->u.els_logo.els_cmd = els_opcode;
	logo_pyld.opcode = els_opcode;
	logo_pyld.s_id[0] = vha->d_id.b.al_pa;
	logo_pyld.s_id[1] = vha->d_id.b.area;
	logo_pyld.s_id[2] = vha->d_id.b.domain;
	host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
	memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);

	memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
	    sizeof(struct els_logo_payload));

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		sp->free(sp);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_io, vha, 0x3074,
	    "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
	    sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	wait_for_completion(&elsio->u.els_logo.comp);

	sp->free(sp);
	return rval;
}
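
/*
 * Illustrative call of the synchronous helper above; ELS_DCMD_LOGO is an
 * assumed opcode constant (the LOGO ELS command a caller would pass as
 * els_opcode), named here only for the sketch:
 *
 *	rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, remote_did);
 *
 * The helper allocates a temporary fc_port and SRB, DMA-maps the LOGO
 * payload, queues the IOCB via qla2x00_start_sp() and then blocks in
 * wait_for_completion() until either qla2x00_els_dcmd_sp_done() or the
 * timeout path qla2x00_els_dcmd_iocb_timeout() completes
 * lio->u.els_logo.comp.
 */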

static void
qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	scsi_qla_host_t *vha = sp->vha;
	struct srb_iocb *elsio = &sp->u.iocb_cmd;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = 1;
	els_iocb->vp_index = vha->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = 0;
	els_iocb->opcode = elsio->u.els_logo.els_cmd;

	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;

	els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
	els_iocb->tx_address[0] =
	    cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
	els_iocb->tx_address[1] =
	    cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
	els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));

	els_iocb->rx_byte_count = 0;
	els_iocb->rx_address[0] = 0;
	els_iocb->rx_address[1] = 0;
	els_iocb->rx_len = 0;

	sp->vha->qla_stats.control_requests++;
}

static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->vha->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    sp->type == SRB_ELS_CMD_RPT ?
	    bsg_request->rqst_data.r_els.els_code :
	    bsg_request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));

	sp->vha->qla_stats.control_requests++;
}

static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iteration = 0;
	int entry_count = 1;

	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->handle1 = sp->handle;
	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
	ct_iocb->status = cpu_to_le16(0);
	ct_iocb->control_flags = cpu_to_le16(0);
	ct_iocb->timeout = 0;
	ct_iocb->cmd_dsd_count =
	    cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->total_dsd_count =
	    cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
	ct_iocb->req_bytecount =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->rsp_bytecount =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);

	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    vha->hw->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;

	sp->vha->qla_stats.control_requests++;
}
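
/*
 * Illustrative DSD arithmetic for the CT and ELS builders: the base IOCB
 * carries one reply DSD and each Continuation Type 1 IOCB carries five,
 * so the ring-entry count works out to
 *
 *	entry_count = 1 + DIV_ROUND_UP(tot_dsds - 1, 5);
 *
 * e.g. a reply payload with 11 scatter-gather segments needs
 * 1 + DIV_ROUND_UP(10, 5) = 3 entries, which is what the
 * avail_dsds/entry_count bookkeeping in the loop above produces.
 */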

static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iteration = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->vha->vp_idx;
	ct_iocb->comp_status = cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    ha->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}

/*
 * qla82xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla82xx_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_82xx __iomem *reg;
	uint32_t dbval;
	uint32_t *fcp_dl;
	uint8_t additional_cdb_len;
	struct ct6_dsd *ctx;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;

	/* Setup device pointers. */
	reg = &ha->iobase->isp82;
	cmd = GET_CMD_SP(sp);
	req = vha->req;
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	dbval = 0x04 | (ha->portnum << 5);

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
		    rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}

sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}

		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;

		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/* SCSI command bigger than 16 bytes must be
				 * a multiple of 4
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}

		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->vha->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* build FCP_CMND IU */
		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;

		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->vha->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	}
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	/* write, read and verify logic */
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
		wmb();
		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
			wmb();
		}
	}

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
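
/*
 * Illustrative FCP_CMND sizing for the Command Type 6 path above: the IU
 * is a 12-byte header, then the CDB, then the 4-byte FCP_DL field.  For a
 * 32-byte CDB:
 *
 *	additional_cdb_len = 32 - 16 = 16
 *	ctx->fcp_cmnd_len  = 12 + 32 + 4 = 48
 *
 * and fcp_dl lands at cdb + 16 + additional_cdb_len.  The multiple-of-4
 * requirement exists because the two low bits of additional_cdb_len are
 * reused as the read/write flags (the |= 1 and |= 2 above), leaving the
 * upper bits to encode the extra CDB length in 4-byte words.
 */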

static void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
	struct srb_iocb *aio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->vha;
	struct req_que *req = vha->req;

	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
	abt_iocb->entry_type = ABORT_IOCB_TYPE;
	abt_iocb->entry_count = 1;
	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	abt_iocb->handle_to_abort =
	    cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	abt_iocb->vp_index = vha->vp_idx;
	abt_iocb->req_que_no = cpu_to_le16(req->id);
	/* Send the command to the firmware */
	wmb();
}

static void
qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
{
	int i, sz;

	mbx->entry_type = MBX_IOCB_TYPE;
	mbx->handle = sp->handle;
	sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));

	for (i = 0; i < sz; i++)
		mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
}

static void
qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
{
	sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
	qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
	ct_pkt->handle = sp->handle;
}

static void qla2x00_send_notify_ack_iocb(srb_t *sp,
    struct nack_to_isp *nack)
{
	struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;

	nack->entry_type = NOTIFY_ACK_TYPE;
	nack->entry_count = 1;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = sp->handle;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = 0;
	nack->u.isp24.srr_reject_code = 0;
	nack->u.isp24.srr_reject_code_expl = 0;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
}

int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	void *pkt;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	rval = QLA_SUCCESS;
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_tm_iocb(sp, pkt) :
		    qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_FXIOCB_DCMD:
	case SRB_FXIOCB_BCMD:
		qlafx00_fxdisc_iocb(sp, pkt);
		break;
	case SRB_ABT_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_abort_iocb(sp, pkt) :
		    qla24xx_abort_iocb(sp, pkt);
		break;
	case SRB_ELS_DCMD:
		qla24xx_els_logo_iocb(sp, pkt);
		break;
	case SRB_CT_PTHRU_CMD:
		qla2x00_ctpthru_cmd_iocb(sp, pkt);
		break;
	case SRB_MB_IOCB:
		qla2x00_mb_iocb(sp, pkt);
		break;
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
		qla2x00_send_notify_ack_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
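
/*
 * Minimal sketch of driving a control SRB through qla2x00_start_sp(),
 * patterned on qla24xx_els_dcmd_iocb() above (my_done_cb/my_free_cb are
 * hypothetical callbacks):
 *
 *	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
 *	sp->type = SRB_LOGIN_CMD;	// any case handled by the switch
 *	sp->name = "login";
 *	sp->done = my_done_cb;
 *	sp->free = my_free_cb;
 *	rval = qla2x00_start_sp(sp);
 *
 * The switch only formats the IOCB; the single wmb() and
 * qla2x00_start_iocbs() call at the end post whatever was built to
 * request queue 0.
 */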

static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
    struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct bsg_job *bsg_job = sp->u.bsg_job;

	/* Update entry type to indicate bidir command */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_BIDIRECTIONAL);

	/* Set the transfer direction: both flags are set here.
	 * Also set the BD_WRAP_BACK flag; firmware will take care of
	 * assigning DID=SID for outgoing pkts.
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
	    BD_WRAP_BACK);

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;

	vha->qla_stats.output_bytes += req_data_len;
	vha->qla_stats.output_requests++;

	/* Only one DSD is available in the bidirectional IOCB; remaining
	 * DSDs are bundled in continuation IOCBs.
	 */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;

	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
	    bsg_job->request_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation type 1 IOCB can accommodate
			 * 5 DSDs
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Read DSDs always follow the write DSDs.  If there is room in the
	 * current IOCB they are added to it, else a new continuation IOCB
	 * is allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
	    bsg_job->reply_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation type 1 IOCB can accommodate
			 * 5 DSDs
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* This value should be same as number of IOCB required for this cmd */
	cmd_pkt->entry_count = entry_count;
}
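
/*
 * Illustrative entry count for the bidirectional IOCB above: one DSD lives
 * in the base packet and each continuation holds five, with the read DSDs
 * simply continuing where the write DSDs stop.  For 3 write + 4 read
 * segments:
 *
 *	tot_dsds    = 3 + 4 = 7
 *	entry_count = 1 + DIV_ROUND_UP(7 - 1, 5) = 3
 *
 * i.e. the base IOCB plus two Continuation Type 1 IOCBs.
 */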

int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint32_t index;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rval = QLA_SUCCESS;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
		    rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	/* Calculate number of IOCB required */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID (of vha) */
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;

	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}