[SCSI] qla2xxx: fix compile warning
drivers/scsi/qla2xxx/qla_iocb.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2005 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd);
static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *);
static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *);
static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @cmd: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(struct scsi_cmnd *cmd)
{
        uint16_t cflags;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                cflags = CF_WRITE;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                cflags = CF_READ;
        return (cflags);
}
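
/*
 * Illustration (not from the original source): a WRITE(10) reaches the
 * driver with sc_data_direction == DMA_TO_DEVICE and maps to CF_WRITE,
 * a READ(10) arrives as DMA_FROM_DEVICE and maps to CF_READ, and a
 * command that moves no data (e.g. TEST UNIT READY) falls through with
 * cflags == 0.
 */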

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
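
/*
 * Worked example (a sanity check of the arithmetic above, added for
 * illustration): with dsds = 17, the command IOCB holds 3 segments and
 * the remaining 14 fill two Continuation Type 0 IOCBs of 7 segments
 * each, so iocbs = 1 + 14/7 = 3.
 */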

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
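
/*
 * Worked example (for illustration): with dsds = 10, the command IOCB
 * holds 2 segments and the remaining 8 span two Continuation Type 1
 * IOCBs (8 = 5 + 3), so iocbs = 1 + 8/5 + 1 = 3.
 */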

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
{
        cont_entry_t *cont_pkt;

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else {
                ha->request_ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)ha->request_ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @ha: HA context
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else {
                ha->request_ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        ha = sp->ha;

        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        if (cmd->use_sg != 0) {
                struct scatterlist *cur_seg;
                struct scatterlist *end_seg;

                cur_seg = (struct scatterlist *)cmd->request_buffer;
                end_seg = cur_seg + tot_dsds;
                while (cur_seg < end_seg) {
                        cont_entry_t *cont_pkt;

                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                /*
                                 * Seven DSDs are available in the Continuation
                                 * Type 0 IOCB.
                                 */
                                cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
                                cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                                avail_dsds = 7;
                        }

                        *cur_dsd++ = cpu_to_le32(sg_dma_address(cur_seg));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        avail_dsds--;

                        cur_seg++;
                }
        } else {
                *cur_dsd++ = cpu_to_le32(sp->dma_handle);
                *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
        }
}
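
/*
 * Layout note (a sketch inferred from the stores above, not from the
 * firmware spec): each 32-bit DSD is two 32-bit words -- DMA address,
 * then length -- matching the budget of 3 DSDs per Command Type 2 IOCB
 * and 7 per Continuation Type 0 IOCB used above.
 */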

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        ha = sp->ha;

        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        if (cmd->use_sg != 0) {
                struct scatterlist *cur_seg;
                struct scatterlist *end_seg;

                cur_seg = (struct scatterlist *)cmd->request_buffer;
                end_seg = cur_seg + tot_dsds;
                while (cur_seg < end_seg) {
                        dma_addr_t sle_dma;
                        cont_a64_entry_t *cont_pkt;

                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                /*
                                 * Five DSDs are available in the Continuation
                                 * Type 1 IOCB.
                                 */
                                cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
                                cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                                avail_dsds = 5;
                        }

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        avail_dsds--;

                        cur_seg++;
                }
        } else {
                *cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
                *cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
                *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
        }
}
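
/*
 * Layout note (inferred from the stores above): a 64-bit DSD is three
 * 32-bit words -- address LSD, address MSD, length -- so fewer fit per
 * entry: 2 in the Command Type 3 IOCB and 5 in each Continuation
 * Type 1 IOCB.
 */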

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int ret;
        unsigned long flags;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;
        uint32_t *clr_ptr;
        uint32_t index;
        uint32_t handle;
        cmd_entry_t *cmd_pkt;
        struct scatterlist *sg;
        uint16_t cnt;
        uint16_t req_cnt;
        uint16_t tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        char tag[2];

        /* Setup device pointers. */
        ret = 0;
        ha = sp->ha;
        reg = &ha->iobase->isp;
        cmd = sp->cmd;
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (ha->marker_needed != 0) {
                if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                ha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = ha->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (ha->outstanding_cmds[handle] == 0)
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (cmd->use_sg) {
                sg = (struct scatterlist *) cmd->request_buffer;
                tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
                    cmd->sc_data_direction);
                if (tot_dsds == 0)
                        goto queuing_error;
        } else if (cmd->request_bufflen) {
                dma_addr_t req_dma;

                req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
                    cmd->request_bufflen, cmd->sc_data_direction);
                if (dma_mapping_error(req_dma))
                        goto queuing_error;

                sp->dma_handle = req_dma;
                tot_dsds = 1;
        }

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
        if (ha->req_q_cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (ha->req_ring_index < cnt)
                        ha->req_q_cnt = cnt - ha->req_ring_index;
                else
                        ha->req_q_cnt = ha->request_q_length -
                            (ha->req_ring_index - cnt);
        }
        if (ha->req_q_cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet */
        ha->current_outstanding_cmd = handle;
        ha->outstanding_cmds[handle] = sp;
        sp->ha = ha;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        ha->req_q_cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

        /* Update tagged queuing modifier */
        cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case MSG_HEAD_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_HEAD_TAG);
                        break;
                case MSG_ORDERED_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_ORDERED_TAG);
                        break;
                }
        }

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

        /* Build IOCB segments */
        ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else
                ha->request_ring_ptr++;

        sp->flags |= SRB_DMA_VALID;
        sp->state = SRB_ACTIVE_STATE;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (ha->flags.process_response_queue &&
            ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(ha);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (cmd->use_sg && tot_dsds) {
                sg = (struct scatterlist *) cmd->request_buffer;
                pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
                    cmd->sc_data_direction);
        } else if (tot_dsds) {
                pci_unmap_single(ha->pdev, sp->dma_handle,
                    cmd->request_bufflen, cmd->sc_data_direction);
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @ha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24;

        mrk24 = NULL;
        mrk = (mrk_entry_t *)qla2x00_req_pkt(ha);
        if (mrk == NULL) {
                DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
                    __func__, ha->host_no));

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }
        wmb();

        qla2x00_isp_cmd(ha);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        ret = __qla2x00_marker(ha, loop_id, lun, type);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (ret);
}
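
/*
 * The pairing above follows the usual locked/unlocked kernel
 * convention: __qla2x00_marker() assumes the caller already holds
 * ha->hardware_lock, while qla2x00_marker() is the self-locking wrapper
 * used from contexts that do not, e.g. the start_scsi() routines, which
 * issue their marker before taking the lock.
 */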

/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(scsi_qla_host_t *ha)
{
        device_reg_t __iomem *reg = ha->iobase;
        request_t *pkt = NULL;
        uint16_t cnt;
        uint32_t *dword_ptr;
        uint32_t timer;
        uint16_t req_cnt = 1;

        /* Wait 1 second for slot. */
        for (timer = HZ; timer; timer--) {
                if ((req_cnt + 2) >= ha->req_q_cnt) {
                        /* Calculate number of free request entries. */
                        if (IS_QLA24XX(ha) || IS_QLA25XX(ha))
                                cnt = (uint16_t)RD_REG_DWORD(
                                    &reg->isp24.req_q_out);
                        else
                                cnt = qla2x00_debounce_register(
                                    ISP_REQ_Q_OUT(ha, &reg->isp));
                        if (ha->req_ring_index < cnt)
                                ha->req_q_cnt = cnt - ha->req_ring_index;
                        else
                                ha->req_q_cnt = ha->request_q_length -
                                    (ha->req_ring_index - cnt);
                }
                /* If room for request in request ring. */
                if ((req_cnt + 2) < ha->req_q_cnt) {
                        ha->req_q_cnt--;
                        pkt = ha->request_ring_ptr;

                        /* Zero out packet. */
                        dword_ptr = (uint32_t *)pkt;
                        for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
                                *dword_ptr++ = 0;

                        /* Set system defined field. */
                        pkt->sys_define = (uint8_t)ha->req_ring_index;

                        /* Set entry count. */
                        pkt->entry_count = 1;

                        break;
                }

                /* Release ring specific lock */
                spin_unlock(&ha->hardware_lock);

                udelay(2);      /* 2 us */

                /* Check for pending interrupts. */
                /* During init we issue marker directly */
                if (!ha->marker_needed)
                        qla2x00_poll(ha);

                spin_lock_irq(&ha->hardware_lock);
        }
        if (!pkt) {
                DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
        }

        return (pkt);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
void
qla2x00_isp_cmd(scsi_qla_host_t *ha)
{
        device_reg_t __iomem *reg = ha->iobase;

        DEBUG5(printk("%s(): IOCB data:\n", __func__));
        DEBUG5(qla2x00_dump_buffer(
            (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else
                ha->request_ring_ptr++;

        /* Set chip new ring index. */
        if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
                WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
                RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
        } else {
                WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
                RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
        }
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 1) {
                iocbs += (dsds - 1) / 5;
                if ((dsds - 1) % 5)
                        iocbs++;
        }
        return iocbs;
}
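
/*
 * Worked example (for illustration): with dsds = 7, the Command Type 7
 * IOCB holds a single segment and the remaining 6 span two Continuation
 * Type 1 IOCBs (6 = 5 + 1), so iocbs = 1 + 6/5 + 1 = 3.
 */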

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        ha = sp->ha;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        if (cmd->use_sg != 0) {
                struct scatterlist *cur_seg;
                struct scatterlist *end_seg;

                cur_seg = (struct scatterlist *)cmd->request_buffer;
                end_seg = cur_seg + tot_dsds;
                while (cur_seg < end_seg) {
                        dma_addr_t sle_dma;
                        cont_a64_entry_t *cont_pkt;

                        /* Allocate additional continuation packets? */
                        if (avail_dsds == 0) {
                                /*
                                 * Five DSDs are available in the Continuation
                                 * Type 1 IOCB.
                                 */
                                cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
                                cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                                avail_dsds = 5;
                        }

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        avail_dsds--;

                        cur_seg++;
                }
        } else {
                *cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
                *cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
                *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
        }
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
        int ret;
        unsigned long flags;
        scsi_qla_host_t *ha;
        struct scsi_cmnd *cmd;
        uint32_t *clr_ptr;
        uint32_t index;
        uint32_t handle;
        struct cmd_type_7 *cmd_pkt;
        struct scatterlist *sg;
        uint16_t cnt;
        uint16_t req_cnt;
        uint16_t tot_dsds;
        struct device_reg_24xx __iomem *reg;
        char tag[2];

        /* Setup device pointers. */
        ret = 0;
        ha = sp->ha;
        reg = &ha->iobase->isp24;
        cmd = sp->cmd;
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (ha->marker_needed != 0) {
                if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
                        return QLA_FUNCTION_FAILED;
                }
                ha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = ha->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (ha->outstanding_cmds[handle] == 0)
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (cmd->use_sg) {
                sg = (struct scatterlist *) cmd->request_buffer;
                tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
                    cmd->sc_data_direction);
                if (tot_dsds == 0)
                        goto queuing_error;
        } else if (cmd->request_bufflen) {
                dma_addr_t req_dma;

                req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
                    cmd->request_bufflen, cmd->sc_data_direction);
                if (dma_mapping_error(req_dma))
                        goto queuing_error;

                sp->dma_handle = req_dma;
                tot_dsds = 1;
        }

        req_cnt = qla24xx_calc_iocbs(tot_dsds);
        if (ha->req_q_cnt < (req_cnt + 2)) {
                cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
                if (ha->req_ring_index < cnt)
                        ha->req_q_cnt = cnt - ha->req_ring_index;
                else
                        ha->req_q_cnt = ha->request_q_length -
                            (ha->req_ring_index - cnt);
        }
        if (ha->req_q_cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet. */
        ha->current_outstanding_cmd = handle;
        ha->outstanding_cmds[handle] = sp;
        sp->ha = ha;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        ha->req_q_cnt -= req_cnt;

        cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
        cmd_pkt->handle = handle;

        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);

        /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case MSG_HEAD_TAG:
                        cmd_pkt->task = TSK_HEAD_OF_QUEUE;
                        break;
                case MSG_ORDERED_TAG:
                        cmd_pkt->task = TSK_ORDERED;
                        break;
                }
        }

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

        cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

        /* Build IOCB segments */
        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        ha->req_ring_index++;
        if (ha->req_ring_index == ha->request_q_length) {
                ha->req_ring_index = 0;
                ha->request_ring_ptr = ha->request_ring;
        } else
                ha->request_ring_ptr++;

        sp->flags |= SRB_DMA_VALID;
        sp->state = SRB_ACTIVE_STATE;

        /* Set chip new ring index. */
        WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
        RD_REG_DWORD_RELAXED(&reg->req_q_in);   /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (ha->flags.process_response_queue &&
            ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(ha);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_SUCCESS;

queuing_error:
        if (cmd->use_sg && tot_dsds) {
                sg = (struct scatterlist *) cmd->request_buffer;
                pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
                    cmd->sc_data_direction);
        } else if (tot_dsds) {
                pci_unmap_single(ha->pdev, sp->dma_handle,
                    cmd->request_bufflen, cmd->sc_data_direction);
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_FUNCTION_FAILED;
}