/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

int _dump_buf_done;
static char *dif_op_str[] = {
	"SCSI_PROT_NORMAL",
	"SCSI_PROT_READ_INSERT",
	"SCSI_PROT_WRITE_STRIP",
	"SCSI_PROT_READ_STRIP",
	"SCSI_PROT_WRITE_INSERT",
	"SCSI_PROT_READ_PASS",
	"SCSI_PROT_WRITE_PASS",
};
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}
static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}
/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
				struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;

	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}
/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
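
/*
 * Illustration of the bucket math above (values are hypothetical, not
 * driver defaults): with LPFC_LINEAR_BUCKET, bucket_base = 0 and
 * bucket_step = 10, a 25 ms completion lands in bucket
 * (25 + 10 - 1 - 0) / 10 = 3.  With the power-of-2 bucketing the same
 * latency is matched against base + (1 << i) * step, so the bucket
 * boundaries grow as 10, 20, 40, 80, ... and 25 ms falls in bucket i = 2.
 */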
/**
 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: LUN number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
		struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp,
		uint32_t lun,
		uint32_t old_val,
		uint32_t new_val)
{
	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;

	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
		FC_REG_SCSI_EVENT;
	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
		LPFC_EVENT_VARQUEDEPTH;

	/* Report all luns with change in queue depth */
	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
			&ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
	}

	fast_path_evt->un.queue_depth_evt.oldval = old_val;
	fast_path_evt->un.queue_depth_evt.newval = new_val;
	fast_path_evt->vport = vport;

	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);

	return;
}
/**
 * lpfc_change_queue_depth - Alter scsi device queue depth
 * @sdev: Pointer to the scsi device on which to change the queue depth.
 * @qdepth: New queue depth to set the sdev to.
 * @reason: The reason for the queue depth change.
 *
 * This function is called by the midlayer and the LLD to alter the queue
 * depth for a scsi device. This function sets the queue depth to the new
 * value and sends an event out to log the queue depth change.
 **/
static int
lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	unsigned long new_queue_depth, old_queue_depth;

	old_queue_depth = sdev->queue_depth;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	new_queue_depth = sdev->queue_depth;
	rdata = sdev->hostdata;
	if (rdata)
		lpfc_send_sdev_queuedepth_change_event(phba, vport,
						       rdata->pnode, sdev->lun,
						       old_queue_depth,
						       new_queue_depth);
	return sdev->queue_depth;
}
/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is resource error in driver or firmware.
 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
 * posts at most 1 event each second. This routine wakes up worker thread of
 * @phba to process the WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}
/**
 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
 * @vport: The virtual port for which this call is being executed.
 * @queue_depth: The current queue depth of the scsi device.
 *
 * This routine posts a WORKER_RAMP_UP_QUEUE event for the @vport's phba.
 * This routine posts at most 1 event every 5 minutes after last_ramp_up_time
 * or last_rsrc_error_time. This routine wakes up worker thread of the phba
 * to process the WORKER_RAMP_UP_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			uint32_t queue_depth)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;

	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (time_before(jiffies,
			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
	    time_before(jiffies,
			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}
/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. This routine reduces the queue depth for all scsi devices
 * on each vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				lpfc_change_queue_depth(sdev, new_queue_depth,
							SCSI_QDEPTH_DEFAULT);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
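
/*
 * Worked example of the proportional ramp-down above (hypothetical
 * counts): with queue_depth = 32, num_rsrc_err = 8 and
 * num_cmd_success = 24, the reduction is 32 * 8 / (8 + 24) = 8, so the
 * device is stepped down to a queue depth of 32 - 8 = 24.  A reduction
 * that rounds to zero still drops the depth by one.
 */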
/**
 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
 * worker thread. This routine increases the queue depth for all scsi devices
 * on each vport associated with @phba by 1. This routine also sets @phba
 * num_rsrc_err and num_cmd_success to zero.
 **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				lpfc_change_queue_depth(sdev,
							sdev->queue_depth+1,
							SCSI_QDEPTH_RAMP_UP);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking fc_remote_port_delete() routine. This function is invoked
 * from the EEH handler when a device's PCI slot has been permanently
 * disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt;

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
					psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_cmnd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1 = psb;
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}
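
/*
 * For reference, the single DMA buffer carved up above is laid out as
 * follows for SLI-3 (sizes come from the structure definitions, the data
 * BDE count from cfg_sg_dma_buf_size):
 *
 *   psb->dma_handle
 *   +-----------------+----------------+-----------------------------+
 *   | struct fcp_cmnd | struct fcp_rsp | BPL: 2 fixed BDEs followed  |
 *   |                 |                | by data BDEs filled in at   |
 *   |                 |                | queuecommand time           |
 *   +-----------------+----------------+-----------------------------+
 */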
/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->rdata && psb->rdata->pnode
			&& psb->rdata->pnode->vport == vport)
			psb->rdata = NULL;
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp)
				lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (pring->txq_cnt)
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost the Scsi buffers sgl pages as block
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * reposts them to the HBA by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb;
	int index, status, bcnt = 0, rcnt = 0, rc = 0;
	LIST_HEAD(sblist);

	for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
		psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
		if (psb) {
			/* Remove from SCSI buffer list */
			list_del(&psb->list);
			/* Add it to a local SCSI buffer list */
			list_add_tail(&psb->list, &sblist);
			if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				bcnt = rcnt;
				rcnt = 0;
			}
		} else
			/* A hole present in the XRI array, need to skip */
			bcnt = rcnt;

		if (index == phba->sli4_hba.scsi_xri_cnt - 1)
			/* End of XRI array for SCSI buffer, complete */
			bcnt = rcnt;

		/* Continue until collect up to a nembed page worth of sgls */
		if (bcnt == 0)
			continue;
		/* Now, post the SCSI buffer list sgls as a block */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
		/* Reset SCSI buffer count for next round of posting */
		bcnt = 0;
		while (!list_empty(&sblist)) {
			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
					 list);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
				rc++;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
		}
	}
	return rc;
}
/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-4 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
	uint16_t iotag, last_xritag = NO_XRI;
	int status = 0, index;
	int bcnt;
	int non_sequential_xri = 0;
	LIST_HEAD(sblist);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci bus
		 * space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
						GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
		if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		if (last_xritag != NO_XRI
			&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
			non_sequential_xri = 1;
		} else
			list_add_tail(&psb->list, &sblist);
		last_xritag = psb->cur_iocbq.sli4_xritag;

		index = phba->sli4_hba.scsi_xri_cnt++;
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));

		/* Initialize local short-hand pointers. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd =
			(psb->dma_handle + phba->cfg_sg_dma_buf_size)
			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* Setup the physical region for the FCP RSP */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
		/* setting the BLP size to 2 * sizeof BDE may not be correct.
		 * We are setting the bpl to point to our sgl. An sgl's
		 * entries are 16 bytes, a bpl's entries are 12 bytes.
		 */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		psb->cur_iocbq.context1 = psb;
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		psb->dma_phys_bpl = pdma_phys_bpl;
		phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
		if (non_sequential_xri) {
			status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
			break;
		}
	}
	if (bcnt) {
		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
		/* Reset SCSI buffer count for next round of posting */
		while (!list_empty(&sblist)) {
			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
				 list);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
		}
	}

	return bcnt + non_sequential_xri;
}
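
/*
 * Note that the SLI-4 layout built above is the mirror image of the
 * SLI-3 one: the SGL sits at the start of the DMA buffer and the FCP
 * command and response are packed at the very end, i.e.
 *
 *   psb->dma_handle                        ...cfg_sg_dma_buf_size
 *   +-----------------------------+-----------------+----------------+
 *   | SGL (cmd, rsp + data sges)  | struct fcp_cmnd | struct fcp_rsp |
 *   +-----------------------------+-----------------+----------------+
 */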
/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}
/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list
 * list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
		lpfc_cmd->prot_seg_cnt = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list
 * list and returns it to the caller, skipping buffers whose XRI still has
 * an active RRQ against @ndlp.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	unsigned long iflag = 0;
	int found = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
							list) {
		if (lpfc_test_rrq_active(phba, ndlp,
				 lpfc_cmd->cur_iocbq.sli4_xritag))
			continue;
		list_del(&lpfc_cmd->list);
		found = 1;
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
		lpfc_cmd->prot_seg_cnt = 0;
		break;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
						 iflag);
	if (!found)
		return NULL;
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list
 * list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	return phba->lpfc_get_scsi_buf(phba, ndlp);
}
/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}
/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list. For SLI4, XRIs are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if the exchange
 * was aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	if (psb->exch_busy) {
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
	} else {
		spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	}
}
/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	phba->lpfc_release_scsi_buf(phba, psb);
}
/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and formats the bde's. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}
/*
 * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 */
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
	uint8_t ret = 0;

	if (guard_type == SHOST_DIX_GUARD_IP) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9063 BLKGRD: Bad op/guard:%d/%d combination\n",
					scsi_get_prot_op(sc), guard_type);
			ret = 1;
			break;

		}
	} else if (guard_type == SHOST_DIX_GUARD_CRC) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*txop = BG_OP_IN_CRC_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9075 BLKGRD: Bad op/guard:%d/%d combination\n",
					scsi_get_prot_op(sc), guard_type);
			ret = 1;
			break;
		}
	} else {
		/* unsupported format */
		BUG();
	}

	return ret;
}
struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}
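
/*
 * Each logical block carries one 8-byte tuple of the form above, so, for
 * example, a 4 KB write of eight 512-byte blocks is accompanied by
 * 8 * 8 = 64 bytes of protection data.  For Type 1 protection the first
 * tuple's ref_tag is the lower 32 bits of the starting LBA.
 */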
/**
 * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
 * @sc:         in: SCSI command
 * @apptagmask: out: app tag mask
 * @apptagval:  out: app tag value
 * @reftag:     out: ref tag (reference tag)
 *
 * Description:
 *   Extract DIF parameters from the command if possible.  Otherwise,
 *   use default parameters.
 *
 **/
static inline void
lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
		uint16_t *apptagval, uint32_t *reftag)
{
	struct scsi_dif_tuple *spt;
	unsigned char op = scsi_get_prot_op(sc);
	unsigned int protcnt = scsi_prot_sg_count(sc);

	if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
	    op == SCSI_PROT_WRITE_PASS)) {

		spt = page_address(sg_page(scsi_prot_sglist(sc))) +
			scsi_prot_sglist(sc)[0].offset;

		/* Just use the first tuple's reftag */
		*reftag = be32_to_cpu(spt->ref_tag);
	} else {
		/* SBC defines ref tag to be lower 32bits of LBA */
		*reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
	}

	*apptagmask = 0;
	*apptagval = 0;
}
/*
 * This function sets up buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * Note: Data s/g buffers have been dma mapped
 */
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
	unsigned blksize;
	uint32_t reftag;
	uint16_t apptagmask, apptagval;
	uint8_t txop, rxop;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	blksize = lpfc_cmd_blksize(sc);
	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
	pde5->reftag = reftag;

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(pde5->reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);
	if (datadir == DMA_FROM_DEVICE) {
		bf_set(pde6_ce, pde6, 1);
		bf_set(pde6_re, pde6, 1);
		bf_set(pde6_ae, pde6, 1);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_apptagval, pde6, apptagval);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
/*
 * This function sets up buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF_BUF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |    PDE_7 (Prot BDE)     |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new prot group  -->     |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       dma mapped
 */
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct ulp_bde64 *prot_bde = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset, protgroup_len;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint16_t apptagmask, apptagval;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);

	split_offset = 0;
	do {
		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
		pde5->reftag = reftag;

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(pde5->reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);
		bf_set(pde6_ce, pde6, 1);
		bf_set(pde6_re, pde6, 1);
		bf_set(pde6_ae, pde6, 1);
		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_apptagval, pde6, apptagval);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		prot_bde = (struct ulp_bde64 *) bpl;
		protphysaddr = sg_dma_address(sgpe);
		prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
		protgroup_len = sg_dma_len(sgpe);

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		prot_bde->tus.f.bdeSize = protgroup_len;
		prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR;
		prot_bde->tus.w = le32_to_cpu(prot_bde->tus.w);

		curr_prot++;
		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);

		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:
	return num_bde;
}
/*
 * Given a SCSI command that supports DIF, determine composition of protection
 * groups involved in setting up buffer lists
 *
 * Returns:
 *	LPFC_PG_TYPE_NO_DIF  - the HBA generates/strips DIF on the wire;
 *	                       no separate protection buffer is supplied
 *	LPFC_PG_TYPE_DIF_BUF - protection buffers accompany the data buffers
 *	                       for DIF (for both read and write)
 *	LPFC_PG_TYPE_INVALID - unsupported protection operation
 */
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	int ret = LPFC_PG_TYPE_INVALID;
	unsigned char op = scsi_get_prot_op(sc);

	switch (op) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9021 Unsupported protection op:%d\n", op);
		break;
	}

	return ret;
}
/*
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here
 */
static int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int diflen, fcpdl;
	unsigned blksize;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
	 * fcp_rsp regions to the first data bde entry
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9067 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg.  Config %d, seg_cnt"
					" %d\n",
					__func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);
			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2)
				goto err;
			break;
		case LPFC_PG_TYPE_DIF_BUF:{
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			if (lpfc_cmd->prot_seg_cnt
			    > phba->cfg_prot_sg_seg_cnt) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9068 BLKGRD: %s: Too many prot sg "
					"segments from dma_map_sg.  Config %d,"
					"prot_seg_cnt %d\n", __func__,
					phba->cfg_prot_sg_seg_cnt,
					lpfc_cmd->prot_seg_cnt);
				dma_unmap_sg(&phba->pcidev->dev,
					     scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     datadir);
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);
			/* we should have 3 or more entries in buffer list */
			if (num_bde < 3)
				goto err;
			break;
		}
		case LPFC_PG_TYPE_INVALID:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = scsi_bufflen(scsi_cmnd);

	if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
		/*
		 * We are in DIF Type 1 mode
		 * Every data block has an 8 byte DIF (trailer)
		 * attached to it.  Must adjust FCP data length
		 */
		blksize = lpfc_cmd_blksize(scsi_cmnd);
		diflen = (fcpdl / blksize) * 8;
		fcpdl += diflen;
	}
	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	return 0;
err:
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9023 Could not setup all needed BDE's "
			"prot_group_type=%d, num_bde=%d\n",
			prot_group_type, num_bde);
	return 1;
}
/*
 * This function checks for BlockGuard errors detected by
 * the HBA.  In case of errors, the ASC/ASCQ fields in the
 * sense buffer will be set accordingly, paired with
 * ILLEGAL_REQUEST to signal to the kernel that the HBA
 * detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found
 * -1 - Internal error (bad profile, ...etc)
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
			struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
			" 0x%x lba 0x%llx blk cnt 0x%x "
			"bgstat=0x%x bghm=0x%x\n",
			cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
			blk_rq_sectors(cmd->request), bgstat, bghm);

	spin_lock(&_dump_buf_lock);
	if (!_dump_buf_done) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
			" Data for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
		lpfc_debug_save_data(phba, cmd);

		/* If we have a prot sgl, save the DIF buffer */
		if (lpfc_prot_group_type(phba, cmd) ==
				LPFC_PG_TYPE_DIF_BUF) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
				"Saving DIF for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
			lpfc_debug_save_dif(phba, cmd);
		}

		_dump_buf_done = 1;
	}
	spin_unlock(&_dump_buf_lock);

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
			" BlockGuard profile. bgstat:0x%x\n",
			bgstat);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
			"Invalid BlockGuard DIF Block. bgstat:0x%x\n",
			bgstat);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9055 BLKGRD: guard_tag error\n");
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9056 BLKGRD: ref_tag error\n");
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9061 BLKGRD: app_tag error\n");
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it
		 */
		cmd->sense_buffer[8] = 0;     /* Information */
		cmd->sense_buffer[9] = 0xa;   /* Add. length */
		bghm /= cmd->device->sector_size;

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9057 BLKGRD: no errors reported!\n");
	}

out:
	return ret;
}
1989 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
1990 * @phba: The Hba for which this call is being executed.
1991 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1993 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
1994 * field of @lpfc_cmd for device with SLI-4 interface spec.
2001 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
2003 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2004 struct scatterlist *sgel = NULL;
2005 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2006 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
2007 struct sli4_sge *first_data_sgl;
2008 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2009 dma_addr_t physaddr;
2010 uint32_t num_bde = 0;
2012 uint32_t dma_offset = 0;
2014 struct ulp_bde64 *bde;
2017 * There are three possibilities here - use scatter-gather segment, use
2018 * the single mapping, or neither. Start the lpfc command prep by
2019 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
2022 if (scsi_sg_count(scsi_cmnd)) {
 * The driver stores the segment count returned from pci_map_sg
 * because this is a count of dma-mappings used to map the use_sg
 * pages.  They are not guaranteed to be the same for those
 * architectures that implement an IOMMU.
2030 nseg = scsi_dma_map(scsi_cmnd);
2031 if (unlikely(!nseg))
2034 /* clear the last flag in the fcp_rsp map entry */
2035 sgl->word2 = le32_to_cpu(sgl->word2);
2036 bf_set(lpfc_sli4_sge_last, sgl, 0);
2037 sgl->word2 = cpu_to_le32(sgl->word2);
2039 first_data_sgl = sgl;
2040 lpfc_cmd->seg_cnt = nseg;
2041 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2042 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
2043 " %s: Too many sg segments from "
2044 "dma_map_sg. Config %d, seg_cnt %d\n",
2045 __func__, phba->cfg_sg_seg_cnt,
2047 scsi_dma_unmap(scsi_cmnd);
2052 * The driver established a maximum scatter-gather segment count
2053 * during probe that limits the number of sg elements in any
2054 * single scsi command. Just run through the seg_cnt and format
2056 * When using SLI-3 the driver will try to fit all the BDEs into
2057 * the IOCB. If it can't then the BDEs get added to a BPL as it
2058 * does for SLI-2 mode.
2060 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
2061 physaddr = sg_dma_address(sgel);
2062 dma_len = sg_dma_len(sgel);
2063 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2064 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2065 if ((num_bde + 1) == nseg)
2066 bf_set(lpfc_sli4_sge_last, sgl, 1);
2068 bf_set(lpfc_sli4_sge_last, sgl, 0);
2069 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2070 sgl->word2 = cpu_to_le32(sgl->word2);
2071 sgl->sge_len = cpu_to_le32(dma_len);
2072 dma_offset += dma_len;
2075 /* setup the performance hint (first data BDE) if enabled */
2076 if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
2077 bde = (struct ulp_bde64 *)
2078 &(iocb_cmd->unsli3.sli3Words[5]);
2079 bde->addrLow = first_data_sgl->addr_lo;
2080 bde->addrHigh = first_data_sgl->addr_hi;
2081 bde->tus.f.bdeSize =
2082 le32_to_cpu(first_data_sgl->sge_len);
2083 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2084 bde->tus.w = cpu_to_le32(bde->tus.w);
2088 /* clear the last flag in the fcp_rsp map entry */
2089 sgl->word2 = le32_to_cpu(sgl->word2);
2090 bf_set(lpfc_sli4_sge_last, sgl, 1);
2091 sgl->word2 = cpu_to_le32(sgl->word2);
 * Finish initializing those IOCB fields that are dependent on the
 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
 * explicitly reinitialized since all iocb memory resources are reused.
2100 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
2103 * Due to difference in data length between DIF/non-DIF paths,
2104 * we need to set word 4 of IOCB here
2106 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
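/*
 * Sketch (hypothetical helper, not driver code): each data SGE filled in
 * the loop above carries a 64-bit DMA address split across two
 * little-endian words, plus the byte count for that segment:
 */
static inline void lpfc_example_fill_data_sge(struct sli4_sge *sgl,
					      dma_addr_t physaddr,
					      uint32_t dma_len)
{
	sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));  /* bits 31:0  */
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); /* bits 63:32 */
	sgl->sge_len = cpu_to_le32(dma_len); /* segment length in bytes */
}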
2111 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
2112 * @phba: The Hba for which this call is being executed.
2113 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
2123 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
2125 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
2129 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
2130 * @phba: Pointer to hba context object.
2131 * @vport: Pointer to vport object.
2132 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
2133 * @rsp_iocb: Pointer to response iocb object which reported error.
2135 * This function posts an event when there is a SCSI command reporting
2136 * error from the scsi device.
2139 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
2140 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
2141 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
2142 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
2143 uint32_t resp_info = fcprsp->rspStatus2;
2144 uint32_t scsi_status = fcprsp->rspStatus3;
2145 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
2146 struct lpfc_fast_path_event *fast_path_evt = NULL;
2147 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
2148 unsigned long flags;
2150 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
	/* If there is a queue-full or busy condition, send a scsi event */
2154 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
2155 (cmnd->result == SAM_STAT_BUSY)) {
2156 fast_path_evt = lpfc_alloc_fast_evt(phba);
2159 fast_path_evt->un.scsi_evt.event_type =
2161 fast_path_evt->un.scsi_evt.subcategory =
2162 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
2163 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
2164 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
2165 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
2166 &pnode->nlp_portname, sizeof(struct lpfc_name));
2167 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
2168 &pnode->nlp_nodename, sizeof(struct lpfc_name));
2169 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
2170 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
2171 fast_path_evt = lpfc_alloc_fast_evt(phba);
2174 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
2176 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
2177 LPFC_EVENT_CHECK_COND;
2178 fast_path_evt->un.check_cond_evt.scsi_event.lun =
2180 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
2181 &pnode->nlp_portname, sizeof(struct lpfc_name));
2182 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
2183 &pnode->nlp_nodename, sizeof(struct lpfc_name));
2184 fast_path_evt->un.check_cond_evt.sense_key =
2185 cmnd->sense_buffer[2] & 0xf;
2186 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
2187 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
2188 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
2190 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
2191 ((scsi_status == SAM_STAT_GOOD) &&
2192 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
 * If the status is good, or the resid does not match fcpi_parm and
 * fcpi_parm is valid, then there is a read-check error
2197 fast_path_evt = lpfc_alloc_fast_evt(phba);
2200 fast_path_evt->un.read_check_error.header.event_type =
2201 FC_REG_FABRIC_EVENT;
2202 fast_path_evt->un.read_check_error.header.subcategory =
2203 LPFC_EVENT_FCPRDCHKERR;
2204 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
2205 &pnode->nlp_portname, sizeof(struct lpfc_name));
2206 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
2207 &pnode->nlp_nodename, sizeof(struct lpfc_name));
2208 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
2209 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
2210 fast_path_evt->un.read_check_error.fcpiparam =
2215 fast_path_evt->vport = vport;
2216 spin_lock_irqsave(&phba->hbalock, flags);
2217 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
2218 spin_unlock_irqrestore(&phba->hbalock, flags);
2219 lpfc_worker_wake_up(phba);
2224 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
2225 * @phba: The HBA for which this call is being executed.
2226 * @psb: The scsi buffer which is going to be un-mapped.
 * This routine does the DMA un-mapping of the scatter-gather list of the
 * scsi command field of @psb for devices with the SLI-3 interface spec.
2232 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2235 * There are only two special cases to consider. (1) the scsi command
2236 * requested scatter-gather usage or (2) the scsi command allocated
2237 * a request buffer, but did not request use_sg. There is a third
2238 * case, but it does not require resource deallocation.
2240 if (psb->seg_cnt > 0)
2241 scsi_dma_unmap(psb->pCmd);
2242 if (psb->prot_seg_cnt > 0)
2243 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
2244 scsi_prot_sg_count(psb->pCmd),
2245 psb->pCmd->sc_data_direction);
 * lpfc_handle_fcp_err - FCP response handler
2250 * @vport: The virtual port for which this call is being executed.
2251 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2252 * @rsp_iocb: The response IOCB which contains FCP error.
2254 * This routine is called to process response IOCB with status field
2255 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
2256 * based upon SCSI and FCP error.
2259 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2260 struct lpfc_iocbq *rsp_iocb)
2262 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
2263 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
2264 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
2265 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
2266 uint32_t resp_info = fcprsp->rspStatus2;
2267 uint32_t scsi_status = fcprsp->rspStatus3;
2269 uint32_t host_status = DID_OK;
2270 uint32_t rsplen = 0;
2271 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
 * If this is a task management command, there is no
 * scsi packet associated with this lpfc_cmd.  The driver consumes it.
2279 if (fcpcmd->fcpCntl2) {
2284 if (resp_info & RSP_LEN_VALID) {
2285 rsplen = be32_to_cpu(fcprsp->rspRspLen);
2286 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
2287 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2288 "2719 Invalid response length: "
2289 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
2291 cmnd->device->lun, cmnd->cmnd[0],
2293 host_status = DID_ERROR;
2296 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
2297 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2298 "2757 Protocol failure detected during "
2299 "processing of FCP I/O op: "
2300 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
2302 cmnd->device->lun, cmnd->cmnd[0],
2304 host_status = DID_ERROR;
2309 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
2310 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
2311 if (snslen > SCSI_SENSE_BUFFERSIZE)
2312 snslen = SCSI_SENSE_BUFFERSIZE;
2314 if (resp_info & RSP_LEN_VALID)
2315 rsplen = be32_to_cpu(fcprsp->rspRspLen);
2316 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
2318 lp = (uint32_t *)cmnd->sense_buffer;
2320 if (!scsi_status && (resp_info & RESID_UNDER))
2323 lpfc_printf_vlog(vport, KERN_WARNING, logit,
2324 "9024 FCP command x%x failed: x%x SNS x%x x%x "
2325 "Data: x%x x%x x%x x%x x%x\n",
2326 cmnd->cmnd[0], scsi_status,
2327 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
2328 be32_to_cpu(fcprsp->rspResId),
2329 be32_to_cpu(fcprsp->rspSnsLen),
2330 be32_to_cpu(fcprsp->rspRspLen),
2333 scsi_set_resid(cmnd, 0);
2334 if (resp_info & RESID_UNDER) {
2335 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
2337 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2338 "9025 FCP Read Underrun, expected %d, "
2339 "residual %d Data: x%x x%x x%x\n",
2340 be32_to_cpu(fcpcmd->fcpDl),
2341 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
 * If there is an underrun, check whether the underrun reported by
 * the storage array is the same as the underrun reported by the HBA.
 * If they are not the same, there is a dropped frame.
2349 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
2351 (scsi_get_resid(cmnd) != fcpi_parm)) {
2352 lpfc_printf_vlog(vport, KERN_WARNING,
2353 LOG_FCP | LOG_FCP_ERROR,
2354 "9026 FCP Read Check Error "
2355 "and Underrun Data: x%x x%x x%x x%x\n",
2356 be32_to_cpu(fcpcmd->fcpDl),
2357 scsi_get_resid(cmnd), fcpi_parm,
2359 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
2360 host_status = DID_ERROR;
 * The cmnd->underflow is the minimum number of bytes that must
 * be transferred for this command.  Provided a sense condition
 * is not present, make sure the actual amount transferred is at
 * least the underflow value or fail.
2368 if (!(resp_info & SNS_LEN_VALID) &&
2369 (scsi_status == SAM_STAT_GOOD) &&
2370 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
2371 < cmnd->underflow)) {
2372 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2373 "9027 FCP command x%x residual "
2374 "underrun converted to error "
2375 "Data: x%x x%x x%x\n",
2376 cmnd->cmnd[0], scsi_bufflen(cmnd),
2377 scsi_get_resid(cmnd), cmnd->underflow);
2378 host_status = DID_ERROR;
2380 } else if (resp_info & RESID_OVER) {
2381 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2382 "9028 FCP command x%x residual overrun error. "
2383 "Data: x%x x%x\n", cmnd->cmnd[0],
2384 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
2385 host_status = DID_ERROR;
2388 * Check SLI validation that all the transfer was actually done
2389 * (fcpi_parm should be zero). Apply check only to reads.
2391 } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
2392 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
2393 "9029 FCP Read Check Error Data: "
2394 "x%x x%x x%x x%x x%x\n",
2395 be32_to_cpu(fcpcmd->fcpDl),
2396 be32_to_cpu(fcprsp->rspResId),
2397 fcpi_parm, cmnd->cmnd[0], scsi_status);
2398 switch (scsi_status) {
2400 case SAM_STAT_CHECK_CONDITION:
2401 /* Fabric dropped a data frame. Fail any successful
2402 * command in which we detected dropped frames.
2403 * A status of good or some check conditions could
2404 * be considered a successful command.
2406 host_status = DID_ERROR;
2409 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
2413 cmnd->result = ScsiResult(host_status, scsi_status);
2414 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
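/*
 * Worked example (hypothetical numbers): if fcpDl is 4096 and the array
 * reports rspResId = 512 with RESID_UNDER set, scsi_set_resid() records
 * 512.  If the HBA's fcpi_parm disagrees with that residual on a read,
 * the dropped-frame path above fires and the command completes with
 * DID_ERROR rather than silently returning short data.
 */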
2418 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
2419 * @phba: The Hba for which this call is being executed.
2420 * @pIocbIn: The command IOCBQ for the scsi cmnd.
2421 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 * This routine assigns the scsi command result by examining the response
 * IOCB status field.  It also handles the QUEUE FULL condition by ramping
 * down the device queue depth.
2428 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2429 struct lpfc_iocbq *pIocbOut)
2431 struct lpfc_scsi_buf *lpfc_cmd =
2432 (struct lpfc_scsi_buf *) pIocbIn->context1;
2433 struct lpfc_vport *vport = pIocbIn->vport;
2434 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
2435 struct lpfc_nodelist *pnode = rdata->pnode;
2436 struct scsi_cmnd *cmd;
2438 struct scsi_device *tmp_sdev;
2440 unsigned long flags;
2441 struct lpfc_fast_path_event *fast_path_evt;
2442 struct Scsi_Host *shost;
2443 uint32_t queue_depth, scsi_id;
2445 /* Sanity check on return of outstanding command */
2446 if (!(lpfc_cmd->pCmd))
2448 cmd = lpfc_cmd->pCmd;
2449 shost = cmd->device->host;
2451 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
2452 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	/* pick up SLI4 exchange busy status from HBA */
2454 lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
2456 if (pnode && NLP_CHK_NODE_ACT(pnode))
2457 atomic_dec(&pnode->cmd_pending);
2459 if (lpfc_cmd->status) {
2460 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
2461 (lpfc_cmd->result & IOERR_DRVR_MASK))
2462 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2463 else if (lpfc_cmd->status >= IOSTAT_CNT)
2464 lpfc_cmd->status = IOSTAT_DEFAULT;
2466 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2467 "9030 FCP cmd x%x failed <%d/%d> "
2468 "status: x%x result: x%x Data: x%x x%x\n",
2470 cmd->device ? cmd->device->id : 0xffff,
2471 cmd->device ? cmd->device->lun : 0xffff,
2472 lpfc_cmd->status, lpfc_cmd->result,
2473 pIocbOut->iocb.ulpContext,
2474 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
2476 switch (lpfc_cmd->status) {
2477 case IOSTAT_FCP_RSP_ERROR:
2478 /* Call FCP RSP handler to determine result */
2479 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
2481 case IOSTAT_NPORT_BSY:
2482 case IOSTAT_FABRIC_BSY:
2483 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
2484 fast_path_evt = lpfc_alloc_fast_evt(phba);
2487 fast_path_evt->un.fabric_evt.event_type =
2488 FC_REG_FABRIC_EVENT;
2489 fast_path_evt->un.fabric_evt.subcategory =
2490 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
2491 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
2492 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2493 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
2494 &pnode->nlp_portname,
2495 sizeof(struct lpfc_name));
2496 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
2497 &pnode->nlp_nodename,
2498 sizeof(struct lpfc_name));
2500 fast_path_evt->vport = vport;
2501 fast_path_evt->work_evt.evt =
2502 LPFC_EVT_FASTPATH_MGMT_EVT;
2503 spin_lock_irqsave(&phba->hbalock, flags);
2504 list_add_tail(&fast_path_evt->work_evt.evt_listp,
2506 spin_unlock_irqrestore(&phba->hbalock, flags);
2507 lpfc_worker_wake_up(phba);
2509 case IOSTAT_LOCAL_REJECT:
2510 case IOSTAT_REMOTE_STOP:
2511 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
2513 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
2514 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
2516 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
2517 cmd->result = ScsiResult(DID_NO_CONNECT, 0);
2520 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
2521 lpfc_cmd->result == IOERR_NO_RESOURCES ||
2522 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
2523 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
2524 cmd->result = ScsiResult(DID_REQUEUE, 0);
2527 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
2528 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
2529 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
2530 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2532 * This is a response for a BG enabled
2533 * cmd. Parse BG error
2535 lpfc_parse_bg_err(phba, lpfc_cmd,
2539 lpfc_printf_vlog(vport, KERN_WARNING,
2541 "9031 non-zero BGSTAT "
2542 "on unprotected cmd\n");
2545 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
2546 && (phba->sli_rev == LPFC_SLI_REV4)
2547 && (pnode && NLP_CHK_NODE_ACT(pnode))) {
			/* This IO was aborted by the target; we don't
			 * know the rxid and, because we did not send the
			 * ABTS, we cannot generate an RRQ.
			 */
2552 lpfc_set_rrq_active(phba, pnode,
2553 lpfc_cmd->cur_iocbq.sli4_xritag,
2556 /* else: fall through */
2558 cmd->result = ScsiResult(DID_ERROR, 0);
2562 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
2563 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
2564 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
2567 cmd->result = ScsiResult(DID_OK, 0);
2569 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
2570 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
2572 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2573 "0710 Iodone <%d/%d> cmd %p, error "
2574 "x%x SNS x%x x%x Data: x%x x%x\n",
2575 cmd->device->id, cmd->device->lun, cmd,
2576 cmd->result, *lp, *(lp + 3), cmd->retries,
2577 scsi_get_resid(cmd));
2580 lpfc_update_stats(phba, lpfc_cmd);
2581 result = cmd->result;
2582 if (vport->cfg_max_scsicmpl_time &&
2583 time_after(jiffies, lpfc_cmd->start_time +
2584 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
2585 spin_lock_irqsave(shost->host_lock, flags);
2586 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2587 if (pnode->cmd_qdepth >
2588 atomic_read(&pnode->cmd_pending) &&
2589 (atomic_read(&pnode->cmd_pending) >
2590 LPFC_MIN_TGT_QDEPTH) &&
2591 ((cmd->cmnd[0] == READ_10) ||
2592 (cmd->cmnd[0] == WRITE_10)))
2594 atomic_read(&pnode->cmd_pending);
2596 pnode->last_change_time = jiffies;
2598 spin_unlock_irqrestore(shost->host_lock, flags);
2599 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2600 if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
2601 time_after(jiffies, pnode->last_change_time +
2602 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
2603 spin_lock_irqsave(shost->host_lock, flags);
2604 depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
2606 depth = depth ? depth : 1;
2607 pnode->cmd_qdepth += depth;
2608 if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
2609 pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
2610 pnode->last_change_time = jiffies;
2611 spin_unlock_irqrestore(shost->host_lock, flags);
2615 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
2617 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
2618 queue_depth = cmd->device->queue_depth;
2619 scsi_id = cmd->device->id;
2620 cmd->scsi_done(cmd);
2622 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2624 * If there is a thread waiting for command completion
2625 * wake up the thread.
2627 spin_lock_irqsave(shost->host_lock, flags);
2628 lpfc_cmd->pCmd = NULL;
2629 if (lpfc_cmd->waitq)
2630 wake_up(lpfc_cmd->waitq);
2631 spin_unlock_irqrestore(shost->host_lock, flags);
2632 lpfc_release_scsi_buf(phba, lpfc_cmd);
2637 lpfc_rampup_queue_depth(vport, queue_depth);
2640 * Check for queue full. If the lun is reporting queue full, then
2641 * back off the lun queue depth to prevent target overloads.
2643 if (result == SAM_STAT_TASK_SET_FULL && pnode &&
2644 NLP_CHK_NODE_ACT(pnode)) {
2645 shost_for_each_device(tmp_sdev, shost) {
2646 if (tmp_sdev->id != scsi_id)
2648 depth = scsi_track_queue_full(tmp_sdev,
2649 tmp_sdev->queue_depth-1);
2652 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2653 "0711 detected queue full - lun queue "
2654 "depth adjusted to %d.\n", depth);
2655 lpfc_send_sdev_queuedepth_change_event(phba, vport,
2663 * If there is a thread waiting for command completion
2664 * wake up the thread.
2666 spin_lock_irqsave(shost->host_lock, flags);
2667 lpfc_cmd->pCmd = NULL;
2668 if (lpfc_cmd->waitq)
2669 wake_up(lpfc_cmd->waitq);
2670 spin_unlock_irqrestore(shost->host_lock, flags);
2672 lpfc_release_scsi_buf(phba, lpfc_cmd);
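/*
 * Worked example (hypothetical values, and assuming the ramp-up step
 * divides by 100 as a percentage): with cmd_qdepth = 100 and
 * LPFC_TGTQ_RAMPUP_PCENT = 5, the queue depth above grows by
 * 100 * 5 / 100 = 5 commands per LPFC_TGTQ_INTERVAL, clamped to
 * vport->cfg_tgt_queue_depth.
 */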
2676 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
2677 * @data: A pointer to the immediate command data portion of the IOCB.
2678 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
2680 * The routine copies the entire FCP command from @fcp_cmnd to @data while
2681 * byte swapping the data to big endian format for transmission on the wire.
2684 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
2687 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
2688 i += sizeof(uint32_t), j++) {
2689 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
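/*
 * Usage (as in the callers below): copy the 32-byte FCP_CMND into the
 * IOCB immediate-data area, word-swapped to big endian so the payload
 * is wire-ready on little-endian hosts:
 *
 *	lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
 */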
 * lpfc_scsi_prep_cmnd - Wrapper function to convert a scsi cmnd to an FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to be sent.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes the fcp_cmnd and iocb data structures from the
 * scsi command, for transfer to a device with the SLI-3 interface spec.
2703 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2704 struct lpfc_nodelist *pnode)
2706 struct lpfc_hba *phba = vport->phba;
2707 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2708 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2709 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2710 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
2711 int datadir = scsi_cmnd->sc_data_direction;
2714 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
2717 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
2718 /* clear task management bits */
2719 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
2721 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
2722 &lpfc_cmd->fcp_cmnd->fcp_lun);
2724 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
2726 if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
2728 case HEAD_OF_QUEUE_TAG:
2729 fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
2731 case ORDERED_QUEUE_TAG:
2732 fcp_cmnd->fcpCntl1 = ORDERED_Q;
2735 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
2739 fcp_cmnd->fcpCntl1 = 0;
2742 * There are three possibilities here - use scatter-gather segment, use
2743 * the single mapping, or neither. Start the lpfc command prep by
 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
 * data bde entry.
2747 if (scsi_sg_count(scsi_cmnd)) {
2748 if (datadir == DMA_TO_DEVICE) {
2749 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
2750 if (phba->sli_rev < LPFC_SLI_REV4) {
2751 iocb_cmd->un.fcpi.fcpi_parm = 0;
2752 iocb_cmd->ulpPU = 0;
2754 iocb_cmd->ulpPU = PARM_READ_CHECK;
2755 fcp_cmnd->fcpCntl3 = WRITE_DATA;
2756 phba->fc4OutputRequests++;
2758 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
2759 iocb_cmd->ulpPU = PARM_READ_CHECK;
2760 fcp_cmnd->fcpCntl3 = READ_DATA;
2761 phba->fc4InputRequests++;
2764 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
2765 iocb_cmd->un.fcpi.fcpi_parm = 0;
2766 iocb_cmd->ulpPU = 0;
2767 fcp_cmnd->fcpCntl3 = 0;
2768 phba->fc4ControlRequests++;
2770 if (phba->sli_rev == 3 &&
2771 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
2772 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
2774 * Finish initializing those IOCB fields that are independent
2775 * of the scsi_cmnd request_buffer
2777 piocbq->iocb.ulpContext = pnode->nlp_rpi;
2778 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
2779 piocbq->iocb.ulpFCP2Rcvy = 1;
2781 piocbq->iocb.ulpFCP2Rcvy = 0;
2783 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
2784 piocbq->context1 = lpfc_cmd;
2785 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
2786 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
2787 piocbq->vport = vport;
2791 * lpfc_scsi_prep_task_mgmt_cmnd - Convert SLI3 scsi TM cmd to FCP info unit
2792 * @vport: The virtual port for which this call is being executed.
2793 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2794 * @lun: Logical unit number.
2795 * @task_mgmt_cmd: SCSI task management command.
 * This routine creates an FCP information unit corresponding to @task_mgmt_cmd,
 * for devices with the SLI-3 interface spec.
2805 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2806 struct lpfc_scsi_buf *lpfc_cmd,
2808 uint8_t task_mgmt_cmd)
2810 struct lpfc_iocbq *piocbq;
2812 struct fcp_cmnd *fcp_cmnd;
2813 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
2814 struct lpfc_nodelist *ndlp = rdata->pnode;
2816 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2817 ndlp->nlp_state != NLP_STE_MAPPED_NODE)
2820 piocbq = &(lpfc_cmd->cur_iocbq);
2821 piocbq->vport = vport;
2823 piocb = &piocbq->iocb;
2825 fcp_cmnd = lpfc_cmd->fcp_cmnd;
2826 /* Clear out any old data in the FCP command area */
2827 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2828 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
2829 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
2830 if (vport->phba->sli_rev == 3 &&
2831 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
2832 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
2833 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
2834 piocb->ulpContext = ndlp->nlp_rpi;
2835 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2836 piocb->ulpFCP2Rcvy = 1;
2838 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
2840 /* ulpTimeout is only one byte */
2841 if (lpfc_cmd->timeout > 0xff) {
2843 * Do not timeout the command at the firmware level.
2844 * The driver will provide the timeout mechanism.
2846 piocb->ulpTimeout = 0;
2848 piocb->ulpTimeout = lpfc_cmd->timeout;
2850 if (vport->phba->sli_rev == LPFC_SLI_REV4)
2851 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
 * lpfc_scsi_api_table_setup - Set up the scsi api function jump table
2858 * @phba: The hba struct for which this call is being executed.
2859 * @dev_grp: The HBA PCI-Device group number.
 * This routine sets up the SCSI interface API function jump table in the
 * @phba struct.
2863 * Returns: 0 - success, -ENODEV - failure.
2866 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2869 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
2870 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
2873 case LPFC_PCI_DEV_LP:
2874 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
2875 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
2876 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
2877 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
2879 case LPFC_PCI_DEV_OC:
2880 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
2881 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
2882 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
2883 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
2886 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2887 "1418 Invalid HBA PCI-device group: 0x%x\n",
2892 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
2893 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
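/*
 * Usage sketch (hypothetical call site; the real caller lives in the
 * driver's PCI probe path): select the jump table by device group and
 * fail the attach if the group is unknown:
 *
 *	if (lpfc_scsi_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;
 */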
 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
2899 * @phba: The Hba for which this call is being executed.
2900 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
2901 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 * This routine is the IOCB completion routine for the device reset and
 * target reset routines.  It releases the scsi buffer associated with lpfc_cmd.
2907 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
2908 struct lpfc_iocbq *cmdiocbq,
2909 struct lpfc_iocbq *rspiocbq)
2911 struct lpfc_scsi_buf *lpfc_cmd =
2912 (struct lpfc_scsi_buf *) cmdiocbq->context1;
2914 lpfc_release_scsi_buf(phba, lpfc_cmd);
2919 * lpfc_info - Info entry point of scsi_host_template data structure
2920 * @host: The scsi host for which this call is being executed.
 * This routine provides module information about the hba.
2925 * Pointer to char - Success.
2928 lpfc_info(struct Scsi_Host *host)
2930 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
2931 struct lpfc_hba *phba = vport->phba;
2933 static char lpfcinfobuf[384];
	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
2937 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
2938 len = strlen(lpfcinfobuf);
2939 snprintf(lpfcinfobuf + len,
2941 " on PCI bus %02x device %02x irq %d",
2942 phba->pcidev->bus->number,
2943 phba->pcidev->devfn,
2945 len = strlen(lpfcinfobuf);
2946 if (phba->Port[0]) {
2947 snprintf(lpfcinfobuf + len,
2952 len = strlen(lpfcinfobuf);
2953 if (phba->sli4_hba.link_state.logical_speed) {
2954 snprintf(lpfcinfobuf + len,
2956 " Logical Link Speed: %d Mbps",
2957 phba->sli4_hba.link_state.logical_speed * 10);
 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
2965 * @phba: The Hba for which this call is being executed.
2967 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
2968 * The default value of cfg_poll_tmo is 10 milliseconds.
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba *phba)
2972 unsigned long poll_tmo_expires =
2973 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
2975 if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
2976 mod_timer(&phba->fcp_poll_timer,
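/*
 * Example (hypothetical value): with cfg_poll_tmo = 10 the timer fires
 * roughly every 10ms, but only while the FCP ring still has commands on
 * its txcmplq; an idle ring simply lets the timer lapse.
 */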
2981 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
2982 * @phba: The Hba for which this call is being executed.
2984 * This routine starts the fcp_poll_timer of @phba.
2986 void lpfc_poll_start_timer(struct lpfc_hba * phba)
2988 lpfc_poll_rearm_timer(phba);
2992 * lpfc_poll_timeout - Restart polling timer
 * @ptr: Timer argument holding the lpfc_hba data structure pointer.
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP ring interrupt is disabled.
2999 void lpfc_poll_timeout(unsigned long ptr)
3001 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
3003 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3004 lpfc_sli_handle_fast_ring_event(phba,
3005 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
3007 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3008 lpfc_poll_rearm_timer(phba);
3013 * lpfc_queuecommand - scsi_host_template queuecommand entry point
3014 * @cmnd: Pointer to scsi_cmnd data structure.
3015 * @done: Pointer to done routine.
 * Driver registers this routine to the scsi midlayer to submit a @cmnd to
 * process.  This routine prepares an IOCB from the scsi command and provides
 * it to the firmware.  The @done callback is invoked after the driver has
 * finished processing the command.
 * Return value :
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
3026 lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
3028 struct Scsi_Host *shost = cmnd->device->host;
3029 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3030 struct lpfc_hba *phba = vport->phba;
3031 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3032 struct lpfc_nodelist *ndlp;
3033 struct lpfc_scsi_buf *lpfc_cmd;
3034 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
3037 err = fc_remote_port_chkready(rport);
3040 goto out_fail_command;
3042 ndlp = rdata->pnode;
3044 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
3045 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
3047 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
3048 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
3049 " op:%02x str=%s without registering for"
3050 " BlockGuard - Rejecting command\n",
3051 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
3052 dif_op_str[scsi_get_prot_op(cmnd)]);
3053 goto out_fail_command;
3057 * Catch race where our node has transitioned, but the
3058 * transport is still transitioning.
3060 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
3061 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
3062 goto out_fail_command;
3064 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
3067 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
3068 if (lpfc_cmd == NULL) {
3069 lpfc_rampdown_queue_depth(phba);
3071 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3072 "0707 driver's buffer pool is empty, "
3078 * Store the midlayer's command structure for the completion phase
3079 * and complete the command initialization.
3081 lpfc_cmd->pCmd = cmnd;
3082 lpfc_cmd->rdata = rdata;
3083 lpfc_cmd->timeout = 0;
3084 lpfc_cmd->start_time = jiffies;
3085 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
3086 cmnd->scsi_done = done;
3088 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
3089 if (vport->phba->cfg_enable_bg) {
3090 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
3091 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
3093 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
3094 dif_op_str[scsi_get_prot_op(cmnd)]);
3095 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
3096 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
3097 "%02x %02x %02x %02x %02x\n",
3098 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
3099 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
3100 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
3102 if (cmnd->cmnd[0] == READ_10)
3103 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
3104 "9035 BLKGRD: READ @ sector %llu, "
3106 (unsigned long long)scsi_get_lba(cmnd),
3107 blk_rq_sectors(cmnd->request));
3108 else if (cmnd->cmnd[0] == WRITE_10)
3109 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
3110 "9036 BLKGRD: WRITE @ sector %llu, "
3111 "count %u cmd=%p\n",
3112 (unsigned long long)scsi_get_lba(cmnd),
3113 blk_rq_sectors(cmnd->request),
3117 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3119 if (vport->phba->cfg_enable_bg) {
3120 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
3121 "9038 BLKGRD: rcvd unprotected cmd:"
3122 "%02x op:%02x str=%s\n",
3123 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
3124 dif_op_str[scsi_get_prot_op(cmnd)]);
3125 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
3126 "9039 BLKGRD: CDB: %02x %02x %02x "
3127 "%02x %02x %02x %02x %02x %02x %02x\n",
3128 cmnd->cmnd[0], cmnd->cmnd[1],
3129 cmnd->cmnd[2], cmnd->cmnd[3],
3130 cmnd->cmnd[4], cmnd->cmnd[5],
3131 cmnd->cmnd[6], cmnd->cmnd[7],
3132 cmnd->cmnd[8], cmnd->cmnd[9]);
3133 if (cmnd->cmnd[0] == READ_10)
3134 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
3135 "9040 dbg: READ @ sector %llu, "
3137 (unsigned long long)scsi_get_lba(cmnd),
3138 blk_rq_sectors(cmnd->request));
3139 else if (cmnd->cmnd[0] == WRITE_10)
3140 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
3141 "9041 dbg: WRITE @ sector %llu, "
3142 "count %u cmd=%p\n",
3143 (unsigned long long)scsi_get_lba(cmnd),
3144 blk_rq_sectors(cmnd->request), cmnd);
3146 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
3147 "9042 dbg: parser not implemented\n");
3149 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3153 goto out_host_busy_free_buf;
3155 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
3157 atomic_inc(&ndlp->cmd_pending);
3158 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
3159 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
3161 atomic_dec(&ndlp->cmd_pending);
3162 goto out_host_busy_free_buf;
3164 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3165 spin_unlock(shost->host_lock);
3166 lpfc_sli_handle_fast_ring_event(phba,
3167 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
3169 spin_lock(shost->host_lock);
3170 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3171 lpfc_poll_rearm_timer(phba);
3176 out_host_busy_free_buf:
3177 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
3178 lpfc_release_scsi_buf(phba, lpfc_cmd);
3180 return SCSI_MLQUEUE_HOST_BUSY;
3187 static DEF_SCSI_QCMD(lpfc_queuecommand)
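/*
 * DEF_SCSI_QCMD() expands to lpfc_queuecommand(), a wrapper that takes
 * the host lock and invokes lpfc_queuecommand_lck() with the midlayer's
 * scsi_done callback - the locking convention queuecommand entry points
 * used before the lockless conversion.
 */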
3190 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
3191 * @cmnd: Pointer to scsi_cmnd data structure.
 * This routine aborts the @cmnd pending in the base driver.
3200 lpfc_abort_handler(struct scsi_cmnd *cmnd)
3202 struct Scsi_Host *shost = cmnd->device->host;
3203 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3204 struct lpfc_hba *phba = vport->phba;
3205 struct lpfc_iocbq *iocb;
3206 struct lpfc_iocbq *abtsiocb;
3207 struct lpfc_scsi_buf *lpfc_cmd;
3210 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
3212 ret = fc_block_scsi_eh(cmnd);
3215 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
3217 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3218 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
3220 "LUN %d snum %#lx\n", ret, cmnd->device->id,
3221 cmnd->device->lun, cmnd->serial_number);
3226 * If pCmd field of the corresponding lpfc_scsi_buf structure
3227 * points to a different SCSI command, then the driver has
3228 * already completed this command, but the midlayer did not
 * see the completion before the eh fired.  Just return SUCCESS.
3232 iocb = &lpfc_cmd->cur_iocbq;
3233 if (lpfc_cmd->pCmd != cmnd)
3236 BUG_ON(iocb->context1 != lpfc_cmd);
3238 abtsiocb = lpfc_sli_get_iocbq(phba);
3239 if (abtsiocb == NULL) {
 * The scsi command cannot be in the txq; it is in flight because the
 * pCmd is still pointing at the SCSI command we have to abort.  There
 * is no need to search the txcmplq.  Just send an abort to the FW.
3251 icmd = &abtsiocb->iocb;
3252 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
3253 icmd->un.acxri.abortContextTag = cmd->ulpContext;
3254 if (phba->sli_rev == LPFC_SLI_REV4)
3255 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
3257 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
3260 icmd->ulpClass = cmd->ulpClass;
3262 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3263 abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
3264 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
3266 if (lpfc_is_link_up(phba))
3267 icmd->ulpCommand = CMD_ABORT_XRI_CN;
3269 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
3271 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
3272 abtsiocb->vport = vport;
3273 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
3275 lpfc_sli_release_iocbq(phba, abtsiocb);
3280 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3281 lpfc_sli_handle_fast_ring_event(phba,
3282 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
3284 lpfc_cmd->waitq = &waitq;
3285 /* Wait for abort to complete */
3286 wait_event_timeout(waitq,
3287 (lpfc_cmd->pCmd != cmnd),
3288 (2*vport->cfg_devloss_tmo*HZ));
3290 spin_lock_irq(shost->host_lock);
3291 lpfc_cmd->waitq = NULL;
3292 spin_unlock_irq(shost->host_lock);
3294 if (lpfc_cmd->pCmd == cmnd) {
3296 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3297 "0748 abort handler timed out waiting "
3298 "for abort to complete: ret %#x, ID %d, "
3299 "LUN %d, snum %#lx\n",
3300 ret, cmnd->device->id, cmnd->device->lun,
3301 cmnd->serial_number);
3305 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3306 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
3307 "LUN %d snum %#lx\n", ret, cmnd->device->id,
3308 cmnd->device->lun, cmnd->serial_number);
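/*
 * Timing example (assuming the default cfg_devloss_tmo of 30 seconds):
 * the wait_event_timeout() above gives the abort up to 2 * 30 * HZ
 * jiffies - one minute - to complete before the handler logs message
 * 0748 and reports failure.
 */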
3313 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
3315 switch (task_mgmt_cmd) {
3316 case FCP_ABORT_TASK_SET:
3317 return "ABORT_TASK_SET";
3318 case FCP_CLEAR_TASK_SET:
3319 return "FCP_CLEAR_TASK_SET";
	case FCP_BUS_RESET:
		return "FCP_BUS_RESET";
	case FCP_LUN_RESET:
		return "FCP_LUN_RESET";
3324 case FCP_TARGET_RESET:
3325 return "FCP_TARGET_RESET";
	case FCP_CLEAR_ACA:
		return "FCP_CLEAR_ACA";
3328 case FCP_TERMINATE_TASK:
3329 return "FCP_TERMINATE_TASK";
3336 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
3337 * @vport: The virtual port for which this call is being executed.
3338 * @rdata: Pointer to remote port local data
3339 * @tgt_id: Target ID of remote device.
3340 * @lun_id: Lun number for the TMF
3341 * @task_mgmt_cmd: type of TMF to send
 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
 * a remote port.
3351 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3352 unsigned tgt_id, unsigned int lun_id,
3353 uint8_t task_mgmt_cmd)
3355 struct lpfc_hba *phba = vport->phba;
3356 struct lpfc_scsi_buf *lpfc_cmd;
3357 struct lpfc_iocbq *iocbq;
3358 struct lpfc_iocbq *iocbqrsp;
3359 struct lpfc_nodelist *pnode = rdata->pnode;
3363 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3366 lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
3367 if (lpfc_cmd == NULL)
3369 lpfc_cmd->timeout = 60;
3370 lpfc_cmd->rdata = rdata;
3372 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
3375 lpfc_release_scsi_buf(phba, lpfc_cmd);
3379 iocbq = &lpfc_cmd->cur_iocbq;
3380 iocbqrsp = lpfc_sli_get_iocbq(phba);
3381 if (iocbqrsp == NULL) {
3382 lpfc_release_scsi_buf(phba, lpfc_cmd);
3386 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3387 "0702 Issue %s to TGT %d LUN %d "
3388 "rpi x%x nlp_flag x%x\n",
3389 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
3390 pnode->nlp_rpi, pnode->nlp_flag);
3392 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
3393 iocbq, iocbqrsp, lpfc_cmd->timeout);
3394 if (status != IOCB_SUCCESS) {
3395 if (status == IOCB_TIMEDOUT) {
3396 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
3397 ret = TIMEOUT_ERROR;
3400 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3401 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3402 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
3403 lpfc_taskmgmt_name(task_mgmt_cmd),
3404 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
3405 iocbqrsp->iocb.un.ulpWord[4]);
3406 } else if (status == IOCB_BUSY)
3411 lpfc_sli_release_iocbq(phba, iocbqrsp);
3413 if (ret != TIMEOUT_ERROR)
3414 lpfc_release_scsi_buf(phba, lpfc_cmd);
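/*
 * Usage sketch (mirrors the reset handlers below): the same helper
 * issues every TMF type, e.g.
 *
 *	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
 *				    FCP_LUN_RESET);
 *	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
 *				    FCP_TARGET_RESET);
 */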
 * lpfc_chk_tgt_mapped - Wait for the scsi target to become mapped
3421 * @vport: The virtual port to check on
3422 * @cmnd: Pointer to scsi_cmnd data structure.
3424 * This routine delays until the scsi target (aka rport) for the
3425 * command exists (is present and logged in) or we declare it non-existent.
3432 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
3434 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3435 struct lpfc_nodelist *pnode;
3436 unsigned long later;
3439 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3440 "0797 Tgt Map rport failure: rdata x%p\n", rdata);
3443 pnode = rdata->pnode;
3445 * If target is not in a MAPPED state, delay until
3446 * target is rediscovered or devloss timeout expires.
3448 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
3449 while (time_after(later, jiffies)) {
3450 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3452 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
3454 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
3455 rdata = cmnd->device->hostdata;
3458 pnode = rdata->pnode;
3460 if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
3461 (pnode->nlp_state != NLP_STE_MAPPED_NODE))
 * lpfc_reset_flush_io_context - Flush outstanding I/O after a reset TMF
3468 * @vport: The virtual port (scsi_host) for the flush context
 * @tgt_id: If aborting by Target context - specifies the target id
3470 * @lun_id: If aborting by Lun context - specifies the lun id
3471 * @context: specifies the context level to flush at.
3473 * After a reset condition via TMF, we need to flush orphaned i/o
3474 * contexts from the adapter. This routine aborts any contexts
3475 * outstanding, then waits for their completions. The wait is
3476 * bounded by devloss_tmo though.
3483 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
3484 uint64_t lun_id, lpfc_ctx_cmd context)
3486 struct lpfc_hba *phba = vport->phba;
3487 unsigned long later;
3490 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
3492 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
3493 tgt_id, lun_id, context);
3494 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
3495 while (time_after(later, jiffies) && cnt) {
3496 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
3497 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
3500 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3501 "0724 I/O flush failure for context %s : cnt x%x\n",
3502 ((context == LPFC_CTX_LUN) ? "LUN" :
3503 ((context == LPFC_CTX_TGT) ? "TGT" :
3504 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
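/*
 * Usage (as in the reset handlers below): the context argument selects
 * the flush scope -
 *
 *	lpfc_reset_flush_io_context(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 *	lpfc_reset_flush_io_context(vport, tgt_id, lun_id, LPFC_CTX_TGT);
 *	lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
 */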
3512 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
3513 * @cmnd: Pointer to scsi_cmnd data structure.
 * This routine does a device reset by sending a LUN_RESET task management
 * command.
3523 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
3525 struct Scsi_Host *shost = cmnd->device->host;
3526 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3527 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3528 struct lpfc_nodelist *pnode;
3529 unsigned tgt_id = cmnd->device->id;
3530 unsigned int lun_id = cmnd->device->lun;
3531 struct lpfc_scsi_event_header scsi_event;
3535 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3536 "0798 Device Reset rport failure: rdata x%p\n", rdata);
3539 pnode = rdata->pnode;
3540 status = fc_block_scsi_eh(cmnd);
3544 status = lpfc_chk_tgt_mapped(vport, cmnd);
3545 if (status == FAILED) {
3546 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3547 "0721 Device Reset rport failure: rdata x%p\n", rdata);
3551 scsi_event.event_type = FC_REG_SCSI_EVENT;
3552 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
3553 scsi_event.lun = lun_id;
3554 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3555 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3557 fc_host_post_vendor_event(shost, fc_get_event_number(),
3558 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3560 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
3563 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3564 "0713 SCSI layer issued Device Reset (%d, %d) "
3565 "return x%x\n", tgt_id, lun_id, status);
 * We have to clean up the i/o, as it may be orphaned by the TMF or,
 * if the TMF failed, left in an indeterminate state.
 * We will report success if all the i/o aborts successfully.
3573 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
3579 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
3580 * @cmnd: Pointer to scsi_cmnd data structure.
 * This routine does a target reset by sending a TARGET_RESET task management
 * command.
3590 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
3592 struct Scsi_Host *shost = cmnd->device->host;
3593 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3594 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
3595 struct lpfc_nodelist *pnode;
3596 unsigned tgt_id = cmnd->device->id;
3597 unsigned int lun_id = cmnd->device->lun;
3598 struct lpfc_scsi_event_header scsi_event;
3602 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3603 "0799 Target Reset rport failure: rdata x%p\n", rdata);
3606 pnode = rdata->pnode;
3607 status = fc_block_scsi_eh(cmnd);
3611 status = lpfc_chk_tgt_mapped(vport, cmnd);
3612 if (status == FAILED) {
3613 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3614 "0722 Target Reset rport failure: rdata x%p\n", rdata);
3618 scsi_event.event_type = FC_REG_SCSI_EVENT;
3619 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
3621 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
3622 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
3624 fc_host_post_vendor_event(shost, fc_get_event_number(),
3625 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3627 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
3630 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3631 "0723 SCSI layer issued Target Reset (%d, %d) "
3632 "return x%x\n", tgt_id, lun_id, status);
 * We have to clean up the i/o, as it may be orphaned by the TMF or,
 * if the TMF failed, left in an indeterminate state.
 * We will report success if all the i/o aborts successfully.
3640 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
3646 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
3647 * @cmnd: Pointer to scsi_cmnd data structure.
3649 * This routine does target reset to all targets on @cmnd->device->host.
3650 * This emulates Parallel SCSI Bus Reset Semantics.
3657 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3659 struct Scsi_Host *shost = cmnd->device->host;
3660 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3661 struct lpfc_nodelist *ndlp = NULL;
3662 struct lpfc_scsi_event_header scsi_event;
3664 int ret = SUCCESS, status, i;
3666 scsi_event.event_type = FC_REG_SCSI_EVENT;
3667 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
3669 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
3670 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
3672 fc_host_post_vendor_event(shost, fc_get_event_number(),
3673 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3675 ret = fc_block_scsi_eh(cmnd);
3680 * Since the driver manages a single bus device, reset all
3681 * targets known to the driver. Should any target reset
3682 * fail, this routine returns failure to the midlayer.
3684 for (i = 0; i < LPFC_MAX_TARGET; i++) {
3685 /* Search for mapped node by target ID */
3687 spin_lock_irq(shost->host_lock);
3688 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3689 if (!NLP_CHK_NODE_ACT(ndlp))
3691 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
3692 ndlp->nlp_sid == i &&
3698 spin_unlock_irq(shost->host_lock);
3702 status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
3703 i, 0, FCP_TARGET_RESET);
3705 if (status != SUCCESS) {
3706 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3707 "0700 Bus Reset on target %d failed\n",
 * We have to clean up the i/o, as it may be orphaned by the TMFs
 * above or, if any of the TMFs failed, left in an indeterminate state.
 * We will report success if all the i/o aborts successfully.
3719 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
3720 if (status != SUCCESS)
3723 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3724 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
3729 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
3730 * @sdev: Pointer to scsi_device.
 * This routine populates cmds_per_lun + 2 scsi_bufs into this host's
 * globally available list of scsi buffers.  This routine also makes sure
 * scsi buffers are not allocated beyond the HBA limit conveyed to the
 * midlayer.  This list of scsi buffers exists for the lifetime of the driver.
3742 lpfc_slave_alloc(struct scsi_device *sdev)
3744 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
3745 struct lpfc_hba *phba = vport->phba;
3746 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
3748 uint32_t num_to_alloc = 0;
3749 int num_allocated = 0;
3752 if (!rport || fc_remote_port_chkready(rport))
3755 sdev->hostdata = rport->dd_data;
3756 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
3759 * Populate the cmds_per_lun count scsi_bufs into this host's globally
3760 * available list of scsi buffers. Don't allocate more than the
3761 * HBA limit conveyed to the midlayer via the host structure. The
3762 * formula accounts for the lun_queue_depth + error handlers + 1
3763 * extra. This list of scsi bufs exists for the lifetime of the driver.
3765 total = phba->total_scsi_bufs;
3766 num_to_alloc = vport->cfg_lun_queue_depth + 2;
3768 /* If allocated buffers are enough do nothing */
3769 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
3772 /* Allow some exchanges to be available always to complete discovery */
3773 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
3774 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3775 "0704 At limitation of %d preallocated "
3776 "command buffers\n", total);
3778 /* Allow some exchanges to be available always to complete discovery */
3779 } else if (total + num_to_alloc >
3780 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
3781 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3782 "0705 Allocation request of %d "
3783 "command buffers will exceed max of %d. "
3784 "Reducing allocation request to %d.\n",
3785 num_to_alloc, phba->cfg_hba_queue_depth,
3786 (phba->cfg_hba_queue_depth - total));
3787 num_to_alloc = phba->cfg_hba_queue_depth - total;
3789 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
3790 if (num_to_alloc != num_allocated) {
3791 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3792 "0708 Allocation request of %d "
3793 "command buffers did not succeed. "
3794 "Allocated %d buffers.\n",
3795 num_to_alloc, num_allocated);
3797 if (num_allocated > 0)
3798 phba->total_scsi_bufs += num_allocated;
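/*
 * Worked example (hypothetical configuration): with
 * cfg_lun_queue_depth = 30, each new sdev requests 30 + 2 = 32 buffers;
 * with cfg_hba_queue_depth = 512 the pool stops growing once
 * total + 32 would pass 512 - LPFC_DISC_IOCB_BUFF_COUNT, keeping a few
 * exchanges free for discovery.
 */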
3803 * lpfc_slave_configure - scsi_host_template slave_configure entry point
3804 * @sdev: Pointer to scsi_device.
 * This routine configures the following items
3807 * - Tag command queuing support for @sdev if supported.
3808 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
3814 lpfc_slave_configure(struct scsi_device *sdev)
3816 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
3817 struct lpfc_hba *phba = vport->phba;
3819 if (sdev->tagged_supported)
3820 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
3822 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
3824 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3825 lpfc_sli_handle_fast_ring_event(phba,
3826 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
3827 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3828 lpfc_poll_rearm_timer(phba);
3835 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
3836 * @sdev: Pointer to scsi_device.
 * This routine sets the @sdev hostdata field to NULL.
3841 lpfc_slave_destroy(struct scsi_device *sdev)
3843 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
3844 struct lpfc_hba *phba = vport->phba;
3845 atomic_dec(&phba->sdev_cnt);
3846 sdev->hostdata = NULL;
3851 struct scsi_host_template lpfc_template = {
3852 .module = THIS_MODULE,
3853 .name = LPFC_DRIVER_NAME,
3855 .queuecommand = lpfc_queuecommand,
3856 .eh_abort_handler = lpfc_abort_handler,
3857 .eh_device_reset_handler = lpfc_device_reset_handler,
3858 .eh_target_reset_handler = lpfc_target_reset_handler,
3859 .eh_bus_reset_handler = lpfc_bus_reset_handler,
3860 .slave_alloc = lpfc_slave_alloc,
3861 .slave_configure = lpfc_slave_configure,
3862 .slave_destroy = lpfc_slave_destroy,
3863 .scan_finished = lpfc_scan_finished,
3865 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
3866 .cmd_per_lun = LPFC_CMD_PER_LUN,
3867 .use_clustering = ENABLE_CLUSTERING,
3868 .shost_attrs = lpfc_hba_attrs,
3869 .max_sectors = 0xFFFF,
3870 .vendor_id = LPFC_NL_VENDOR_ID,
3871 .change_queue_depth = lpfc_change_queue_depth,
3874 struct scsi_host_template lpfc_vport_template = {
3875 .module = THIS_MODULE,
3876 .name = LPFC_DRIVER_NAME,
3878 .queuecommand = lpfc_queuecommand,
3879 .eh_abort_handler = lpfc_abort_handler,
3880 .eh_device_reset_handler = lpfc_device_reset_handler,
3881 .eh_target_reset_handler = lpfc_target_reset_handler,
3882 .eh_bus_reset_handler = lpfc_bus_reset_handler,
3883 .slave_alloc = lpfc_slave_alloc,
3884 .slave_configure = lpfc_slave_configure,
3885 .slave_destroy = lpfc_slave_destroy,
3886 .scan_finished = lpfc_scan_finished,
3888 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
3889 .cmd_per_lun = LPFC_CMD_PER_LUN,
3890 .use_clustering = ENABLE_CLUSTERING,
3891 .shost_attrs = lpfc_vport_attrs,
3892 .max_sectors = 0xFFFF,
3893 .change_queue_depth = lpfc_change_queue_depth,