[SCSI] lpfc 8.3.0 : Fix issue with assumed Interrupt Enable Block support
[linux-2.6-block.git] / drivers/scsi/lpfc/lpfc_scsi.c
dea3101e 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
c44ce173 3 * Fibre Channel Host Bus Adapters. *
e47c9093 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
c44ce173 5 * EMULEX and SLI are trademarks of Emulex. *
dea3101e 6 * www.emulex.com *
c44ce173 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
dea3101e 8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
dea3101e 20 *******************************************************************/
21
dea3101e 22#include <linux/pci.h>
23#include <linux/interrupt.h>
a90f5684 24#include <linux/delay.h>
dea3101e 25
26#include <scsi/scsi.h>
27#include <scsi/scsi_device.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_tcq.h>
30#include <scsi/scsi_transport_fc.h>
31
32#include "lpfc_version.h"
33#include "lpfc_hw.h"
34#include "lpfc_sli.h"
ea2151b4 35#include "lpfc_nl.h"
dea3101e 36#include "lpfc_disc.h"
37#include "lpfc_scsi.h"
38#include "lpfc.h"
39#include "lpfc_logmsg.h"
40#include "lpfc_crtn.h"
92d7f7b0 41#include "lpfc_vport.h"
dea3101e 42
43#define LPFC_RESET_WAIT 2
44#define LPFC_ABORT_WAIT 2
45
46/**
47 * lpfc_update_stats: Update statistical data for the command completion.
48 * @phba: Pointer to HBA object.
49 * @lpfc_cmd: lpfc scsi command object pointer.
50 *
51 * This function is called on command completion and updates the
52 * latency statistics for the completed command.
53 **/
54static void
55lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
56{
57 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
58 struct lpfc_nodelist *pnode = rdata->pnode;
59 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
60 unsigned long flags;
61 struct Scsi_Host *shost = cmd->device->host;
62 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
63 unsigned long latency;
64 int i;
65
66 if (cmd->result)
67 return;
68
69 spin_lock_irqsave(shost->host_lock, flags);
70 if (!vport->stat_data_enabled ||
71 vport->stat_data_blocked ||
72 !pnode->lat_data ||
73 (phba->bucket_type == LPFC_NO_BUCKET)) {
74 spin_unlock_irqrestore(shost->host_lock, flags);
75 return;
76 }
77 latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);
78
79 if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
80 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
81 phba->bucket_step;
82 if (i >= LPFC_MAX_BUCKET_COUNT)
83 i = LPFC_MAX_BUCKET_COUNT - 1;
84 } else {
85 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
86 if (latency <= (phba->bucket_base +
87 ((1<<i)*phba->bucket_step)))
88 break;
89 }
90
91 pnode->lat_data[i].cmd_count++;
92 spin_unlock_irqrestore(shost->host_lock, flags);
93}
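/*
 * Worked example of the bucket selection above (values illustrative only):
 * with the linear bucket type, bucket_base = 0 and bucket_step = 50 ms, a
 * completion latency of 120 ms gives i = (120 + 50 - 1 - 0) / 50 = 3, the
 * bucket covering 101-150 ms. With the power-of-2 bucket type the loop
 * picks the first i with latency <= bucket_base + (1 << i) * bucket_step,
 * so the same 120 ms latency lands in i = 2 (boundary 200 ms).
 */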
94
95/**
96 * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
97 * event.
98 * @phba: Pointer to HBA context object.
99 * @vport: Pointer to vport object.
100 * @ndlp: Pointer to FC node associated with the target.
101 * @lun: Lun number of the scsi device.
102 * @old_val: Old value of the queue depth.
103 * @new_val: New value of the queue depth.
104 *
105 * This function sends an event to the mgmt application indicating
106 * there is a change in the scsi device queue depth.
107 **/
108static void
109lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
110 struct lpfc_vport *vport,
111 struct lpfc_nodelist *ndlp,
112 uint32_t lun,
113 uint32_t old_val,
114 uint32_t new_val)
115{
116 struct lpfc_fast_path_event *fast_path_evt;
117 unsigned long flags;
118
119 fast_path_evt = lpfc_alloc_fast_evt(phba);
120 if (!fast_path_evt)
121 return;
122
123 fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
124 FC_REG_SCSI_EVENT;
125 fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
126 LPFC_EVENT_VARQUEDEPTH;
127
128 /* Report all luns with change in queue depth */
129 fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
130 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
131 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
132 &ndlp->nlp_portname, sizeof(struct lpfc_name));
133 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
134 &ndlp->nlp_nodename, sizeof(struct lpfc_name));
135 }
136
137 fast_path_evt->un.queue_depth_evt.oldval = old_val;
138 fast_path_evt->un.queue_depth_evt.newval = new_val;
139 fast_path_evt->vport = vport;
140
141 fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
142 spin_lock_irqsave(&phba->hbalock, flags);
143 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
144 spin_unlock_irqrestore(&phba->hbalock, flags);
145 lpfc_worker_wake_up(phba);
146
147 return;
148}
149
150/**
151 * lpfc_adjust_queue_depth: Post RAMP_DOWN_QUEUE event for worker thread.
152 * @phba: The Hba for which this call is being executed.
153 *
154 * This routine is called when there is a resource error in the driver or
155 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most
156 * one event each second, and wakes up the worker thread of @phba to
157 * process the WORKER_RAMP_DOWN_QUEUE event.
158 *
159 * This routine should be called with no lock held.
160 **/
161void
162lpfc_adjust_queue_depth(struct lpfc_hba *phba)
163{
164 unsigned long flags;
5e9d9b82 165 uint32_t evt_posted;
166
167 spin_lock_irqsave(&phba->hbalock, flags);
168 atomic_inc(&phba->num_rsrc_err);
169 phba->last_rsrc_error_time = jiffies;
170
171 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
172 spin_unlock_irqrestore(&phba->hbalock, flags);
173 return;
174 }
175
176 phba->last_ramp_down_time = jiffies;
177
178 spin_unlock_irqrestore(&phba->hbalock, flags);
179
180 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
181 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
182 if (!evt_posted)
92d7f7b0 183 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
184 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
185
186 if (!evt_posted)
187 lpfc_worker_wake_up(phba);
188 return;
189}
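/*
 * The interval check above is what bounds event traffic: e.g. a burst of
 * out-of-resource completions within one QUEUE_RAMP_DOWN_INTERVAL still
 * increments num_rsrc_err on every call, but only the first caller past
 * the interval queues a WORKER_RAMP_DOWN_QUEUE event, and the evt_posted
 * test keeps a duplicate from being posted while one is already pending
 * on work_port_events.
 */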
190
191/**
192 * lpfc_rampup_queue_depth: Post RAMP_UP_QUEUE event for worker thread.
193 * @phba: The Hba for which this call is being executed.
194 *
195 * This routine posts a WORKER_RAMP_UP_QUEUE event for @phba vport, at most
196 * one event every 5 minutes after last_ramp_up_time or
197 * last_rsrc_error_time, and wakes up the worker thread of @phba
198 * to process the WORKER_RAMP_UP_QUEUE event.
199 *
200 * This routine should be called with no lock held.
201 **/
92d7f7b0 202static inline void
3de2a653 203lpfc_rampup_queue_depth(struct lpfc_vport *vport,
204 struct scsi_device *sdev)
205{
206 unsigned long flags;
3de2a653 207 struct lpfc_hba *phba = vport->phba;
5e9d9b82 208 uint32_t evt_posted;
209 atomic_inc(&phba->num_cmd_success);
210
3de2a653 211 if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
92d7f7b0 212 return;
213 spin_lock_irqsave(&phba->hbalock, flags);
214 if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
215 ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
216 spin_unlock_irqrestore(&phba->hbalock, flags);
217 return;
218 }
219 phba->last_ramp_up_time = jiffies;
220 spin_unlock_irqrestore(&phba->hbalock, flags);
221
222 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
223 evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
224 if (!evt_posted)
92d7f7b0 225 phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
226 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
227
228 if (!evt_posted)
229 lpfc_worker_wake_up(phba);
230 return;
231}
232
233/**
234 * lpfc_ramp_down_queue_handler: WORKER_RAMP_DOWN_QUEUE event handler.
235 * @phba: The Hba for which this call is being executed.
236 *
237 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event in the
238 * worker thread. It reduces the queue depth of every scsi device on each
239 * vport associated with @phba.
240 **/
241void
242lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
243{
244 struct lpfc_vport **vports;
245 struct Scsi_Host *shost;
92d7f7b0 246 struct scsi_device *sdev;
ea2151b4 247 unsigned long new_queue_depth, old_queue_depth;
92d7f7b0 248 unsigned long num_rsrc_err, num_cmd_success;
549e55cd 249 int i;
ea2151b4 250 struct lpfc_rport_data *rdata;
251
252 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
253 num_cmd_success = atomic_read(&phba->num_cmd_success);
254
255 vports = lpfc_create_vport_work_array(phba);
256 if (vports != NULL)
09372820 257 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
258 shost = lpfc_shost_from_vport(vports[i]);
259 shost_for_each_device(sdev, shost) {
92d7f7b0 260 new_queue_depth =
261 sdev->queue_depth * num_rsrc_err /
262 (num_rsrc_err + num_cmd_success);
263 if (!new_queue_depth)
264 new_queue_depth = sdev->queue_depth - 1;
265 else
266 new_queue_depth = sdev->queue_depth -
267 new_queue_depth;
ea2151b4 268 old_queue_depth = sdev->queue_depth;
269 if (sdev->ordered_tags)
270 scsi_adjust_queue_depth(sdev,
271 MSG_ORDERED_TAG,
272 new_queue_depth);
273 else
274 scsi_adjust_queue_depth(sdev,
275 MSG_SIMPLE_TAG,
276 new_queue_depth);
277 rdata = sdev->hostdata;
278 if (rdata)
279 lpfc_send_sdev_queuedepth_change_event(
280 phba, vports[i],
281 rdata->pnode,
282 sdev->lun, old_queue_depth,
283 new_queue_depth);
549e55cd 284 }
92d7f7b0 285 }
09372820 286 lpfc_destroy_vport_work_array(phba, vports);
287 atomic_set(&phba->num_rsrc_err, 0);
288 atomic_set(&phba->num_cmd_success, 0);
289}
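/*
 * Example of the scaling above (illustrative numbers): with
 * sdev->queue_depth = 30, num_rsrc_err = 10 and num_cmd_success = 90, the
 * scaled value is 30 * 10 / 100 = 3 and the new depth becomes 30 - 3 = 27,
 * i.e. the depth drops roughly in proportion to the fraction of commands
 * that saw resource errors. If the scaled value truncates to zero, the
 * depth still steps down by one.
 */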
290
291/**
292 * lpfc_ramp_up_queue_handler: WORKER_RAMP_UP_QUEUE event handler.
293 * @phba: The Hba for which this call is being executed.
294 *
295 * This routine is called to process the WORKER_RAMP_UP_QUEUE event in the
296 * worker thread. It increases the queue depth of every scsi device on each
297 * vport associated with @phba by 1. It also resets @phba num_rsrc_err and
298 * num_cmd_success to zero.
299 **/
300void
301lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
302{
303 struct lpfc_vport **vports;
304 struct Scsi_Host *shost;
92d7f7b0 305 struct scsi_device *sdev;
549e55cd 306 int i;
ea2151b4 307 struct lpfc_rport_data *rdata;
308
309 vports = lpfc_create_vport_work_array(phba);
310 if (vports != NULL)
09372820 311 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
312 shost = lpfc_shost_from_vport(vports[i]);
313 shost_for_each_device(sdev, shost) {
314 if (vports[i]->cfg_lun_queue_depth <=
315 sdev->queue_depth)
316 continue;
317 if (sdev->ordered_tags)
318 scsi_adjust_queue_depth(sdev,
319 MSG_ORDERED_TAG,
320 sdev->queue_depth+1);
321 else
322 scsi_adjust_queue_depth(sdev,
323 MSG_SIMPLE_TAG,
324 sdev->queue_depth+1);
325 rdata = sdev->hostdata;
326 if (rdata)
327 lpfc_send_sdev_queuedepth_change_event(
328 phba, vports[i],
329 rdata->pnode,
330 sdev->lun,
331 sdev->queue_depth - 1,
332 sdev->queue_depth);
549e55cd 333 }
92d7f7b0 334 }
09372820 335 lpfc_destroy_vport_work_array(phba, vports);
336 atomic_set(&phba->num_rsrc_err, 0);
337 atomic_set(&phba->num_cmd_success, 0);
338}
339
340/**
341 * lpfc_scsi_dev_block: set all scsi hosts to block state.
342 * @phba: Pointer to HBA context object.
343 *
344 * This function walks the vport list and sets each SCSI host to the block
345 * state by invoking the fc_remote_port_delete() routine. This function is
346 * invoked by EEH when the device's PCI slot has been permanently disabled.
347 **/
348void
349lpfc_scsi_dev_block(struct lpfc_hba *phba)
350{
351 struct lpfc_vport **vports;
352 struct Scsi_Host *shost;
353 struct scsi_device *sdev;
354 struct fc_rport *rport;
355 int i;
356
357 vports = lpfc_create_vport_work_array(phba);
358 if (vports != NULL)
359 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
360 shost = lpfc_shost_from_vport(vports[i]);
361 shost_for_each_device(sdev, shost) {
362 rport = starget_to_rport(scsi_target(sdev));
363 fc_remote_port_delete(rport);
364 }
365 }
366 lpfc_destroy_vport_work_array(phba, vports);
367}
368
369/**
370 * lpfc_new_scsi_buf: Scsi buffer allocator.
371 * @vport: The virtual port for which this call is being executed.
372 *
dea3101e 373 * This routine allocates a scsi buffer, which contains all the necessary
374 * information needed to initiate a SCSI I/O. The non-DMAable buffer region
375 * contains information to build the IOCB. The DMAable region contains
376 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
377 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
dea3101e 378 * and the BPL BDE is setup in the IOCB.
379 *
380 * Return codes:
381 * NULL - Error
382 * Pointer to lpfc_scsi_buf data structure - Success
383 **/
dea3101e 384static struct lpfc_scsi_buf *
2e0fef85 385lpfc_new_scsi_buf(struct lpfc_vport *vport)
dea3101e 386{
2e0fef85 387 struct lpfc_hba *phba = vport->phba;
dea3101e 388 struct lpfc_scsi_buf *psb;
389 struct ulp_bde64 *bpl;
390 IOCB_t *iocb;
391 dma_addr_t pdma_phys_fcp_cmd;
392 dma_addr_t pdma_phys_fcp_rsp;
393 dma_addr_t pdma_phys_bpl;
604a3e30 394 uint16_t iotag;
dea3101e 395
bbfbbbc1 396 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
dea3101e 397 if (!psb)
398 return NULL;
dea3101e 399
400 /*
401 * Get memory from the pci pool to map the virt space to pci bus space
402 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
403 * struct fcp_rsp and the number of bde's necessary to support the
404 * sg_tablesize.
405 */
406 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
407 &psb->dma_handle);
408 if (!psb->data) {
409 kfree(psb);
410 return NULL;
411 }
412
413 /* Initialize virtual ptrs to dma_buf region. */
414 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
415
416 /* Allocate iotag for psb->cur_iocbq. */
417 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
418 if (iotag == 0) {
419 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
420 psb->data, psb->dma_handle);
421 kfree (psb);
422 return NULL;
423 }
0bd4ca25 424 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
604a3e30 425
dea3101e 426 psb->fcp_cmnd = psb->data;
427 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
428 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
429 sizeof(struct fcp_rsp);
430
431 /* Initialize local short-hand pointers. */
432 bpl = psb->fcp_bpl;
433 pdma_phys_fcp_cmd = psb->dma_handle;
434 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
435 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
436 sizeof(struct fcp_rsp);
dea3101e 437
438 /*
439 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
440 * list bdes. Initialize the first two and leave the rest for
441 * queuecommand.
442 */
443 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
444 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
445 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
446 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
447 bpl[0].tus.w = le32_to_cpu(bpl->tus.w);
dea3101e 448
449 /* Setup the physical region for the FCP RSP */
450 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
451 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
452 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
453 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
454 bpl[1].tus.w = le32_to_cpu(bpl->tus.w);
dea3101e 455
456 /*
457 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
458 * initialize it with all known data now.
459 */
dea3101e 460 iocb = &psb->cur_iocbq.iocb;
461 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
462 if (phba->sli_rev == 3) {
463 /* fill in immediate fcp command BDE */
464 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
465 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
466 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
467 unsli3.fcp_ext.icd);
468 iocb->un.fcpi64.bdl.addrHigh = 0;
469 iocb->ulpBdeCount = 0;
470 iocb->ulpLe = 0;
471 /* fill in response BDE */
472 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
473 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
474 sizeof(struct fcp_rsp);
475 iocb->unsli3.fcp_ext.rbde.addrLow =
476 putPaddrLow(pdma_phys_fcp_rsp);
477 iocb->unsli3.fcp_ext.rbde.addrHigh =
478 putPaddrHigh(pdma_phys_fcp_rsp);
479 } else {
480 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
481 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
482 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
483 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
484 iocb->ulpBdeCount = 1;
485 iocb->ulpLe = 1;
486 }
dea3101e 487 iocb->ulpClass = CLASS3;
488
489 return psb;
490}
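/*
 * Layout of the single DMA allocation made above; the offsets follow
 * directly from the pointer arithmetic in this routine:
 *
 *	psb->data + 0                                    struct fcp_cmnd
 *	psb->data + sizeof(struct fcp_cmnd)              struct fcp_rsp
 *	psb->data + sizeof(struct fcp_cmnd)
 *		  + sizeof(struct fcp_rsp)               BPL (sg BDEs)
 *
 * The pdma_phys_* values mirror the same offsets from psb->dma_handle on
 * the bus-address side.
 */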
491
492/**
493 * lpfc_get_scsi_buf: Get a scsi buffer from the lpfc_scsi_buf_list of the Hba.
494 * @phba: The Hba for which this call is being executed.
495 *
496 * This routine removes a scsi buffer from the head of the @phba
497 * lpfc_scsi_buf_list and returns it to the caller.
498 *
499 * Return codes:
500 * NULL - Error
501 * Pointer to lpfc_scsi_buf - Success
502 **/
455c53ec 503static struct lpfc_scsi_buf*
875fbdfe 504lpfc_get_scsi_buf(struct lpfc_hba * phba)
dea3101e 505{
506 struct lpfc_scsi_buf * lpfc_cmd = NULL;
507 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
875fbdfe 508 unsigned long iflag = 0;
0bd4ca25 509
875fbdfe 510 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
0bd4ca25 511 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
512 if (lpfc_cmd) {
513 lpfc_cmd->seg_cnt = 0;
514 lpfc_cmd->nonsg_phys = 0;
515 }
875fbdfe 516 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
517 return lpfc_cmd;
518}
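/*
 * Callers pair this routine with lpfc_release_scsi_buf(), as
 * lpfc_queuecommand() does below (sketch):
 *
 *	lpfc_cmd = lpfc_get_scsi_buf(phba);
 *	if (!lpfc_cmd)
 *		return SCSI_MLQUEUE_HOST_BUSY;
 *	...
 *	lpfc_release_scsi_buf(phba, lpfc_cmd);
 */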
dea3101e 519
520/**
521 * lpfc_release_scsi_buf: Return a scsi buffer back to the hba lpfc_scsi_buf_list.
522 * @phba: The Hba for which this call is being executed.
523 * @psb: The scsi buffer which is being released.
524 *
525 * This routine releases @psb scsi buffer by adding it to tail of @phba
526 * lpfc_scsi_buf_list.
527 **/
0bd4ca25 528static void
92d7f7b0 529lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
0bd4ca25 530{
875fbdfe 531 unsigned long iflag = 0;
dea3101e 532
875fbdfe 533 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
0bd4ca25 534 psb->pCmd = NULL;
dea3101e 535 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
875fbdfe 536 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
dea3101e 537}
538
539/**
540 * lpfc_scsi_prep_dma_buf: Routine to do DMA mapping for scsi buffer.
541 * @phba: The Hba for which this call is being executed.
542 * @lpfc_cmd: The scsi buffer which is going to be mapped.
543 *
544 * This routine does the pci dma mapping for the scatter-gather list of the
545 * scsi cmnd field of @lpfc_cmd. It scans through the sg elements and formats
546 * the bdes. It also initializes all IOCB fields which are dependent on the
547 * scsi command request buffer.
548 *
549 * Return codes:
550 * 1 - Error
551 * 0 - Success
552 **/
dea3101e 553static int
92d7f7b0 554lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
dea3101e 555{
556 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
557 struct scatterlist *sgel = NULL;
558 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
559 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
560 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
34b02dcd 561 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
dea3101e 562 dma_addr_t physaddr;
34b02dcd 563 uint32_t num_bde = 0;
a0b4f78f 564 int nseg, datadir = scsi_cmnd->sc_data_direction;
dea3101e 565
566 /*
567 * There are three possibilities here - use scatter-gather segment, use
568 * the single mapping, or neither. Start the lpfc command prep by
569 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
570 * data bde entry.
571 */
572 bpl += 2;
c59fd9eb 573 if (scsi_sg_count(scsi_cmnd)) {
dea3101e 574 /*
575 * The driver stores the segment count returned from pci_map_sg
576 * because this a count of dma-mappings used to map the use_sg
577 * pages. They are not guaranteed to be the same for those
578 * architectures that implement an IOMMU.
579 */
dea3101e 580
581 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
582 scsi_sg_count(scsi_cmnd), datadir);
583 if (unlikely(!nseg))
584 return 1;
585
a0b4f78f 586 lpfc_cmd->seg_cnt = nseg;
dea3101e 587 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
588 printk(KERN_ERR "%s: Too many sg segments from "
589 "dma_map_sg. Config %d, seg_cnt %d",
cadbd4a5 590 __func__, phba->cfg_sg_seg_cnt,
dea3101e 591 lpfc_cmd->seg_cnt);
a0b4f78f 592 scsi_dma_unmap(scsi_cmnd);
dea3101e 593 return 1;
594 }
595
596 /*
597 * The driver established a maximum scatter-gather segment count
598 * during probe that limits the number of sg elements in any
599 * single scsi command. Just run through the seg_cnt and format
600 * the bde's.
601 * When using SLI-3 the driver will try to fit all the BDEs into
602 * the IOCB. If it can't then the BDEs get added to a BPL as it
603 * does for SLI-2 mode.
dea3101e 604 */
34b02dcd 605 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
dea3101e 606 physaddr = sg_dma_address(sgel);
607 if (phba->sli_rev == 3 &&
608 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
609 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
610 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
611 data_bde->addrLow = putPaddrLow(physaddr);
612 data_bde->addrHigh = putPaddrHigh(physaddr);
613 data_bde++;
614 } else {
615 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
616 bpl->tus.f.bdeSize = sg_dma_len(sgel);
617 bpl->tus.w = le32_to_cpu(bpl->tus.w);
618 bpl->addrLow =
619 le32_to_cpu(putPaddrLow(physaddr));
620 bpl->addrHigh =
621 le32_to_cpu(putPaddrHigh(physaddr));
622 bpl++;
623 }
dea3101e 624 }
c59fd9eb 625 }
dea3101e 626
627 /*
628 * Finish initializing those IOCB fields that are dependent on the
629 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
630 * explicitly reinitialized and for SLI-3 the extended bde count is
631 * explicitly reinitialized since all iocb memory resources are reused.
dea3101e 632 */
633 if (phba->sli_rev == 3) {
634 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
635 /*
636 * The extended IOCB format can only fit 3 BDE or a BPL.
637 * This I/O has more than 3 BDE so the 1st data bde will
638 * be a BPL that is filled in here.
639 */
640 physaddr = lpfc_cmd->dma_handle;
641 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
642 data_bde->tus.f.bdeSize = (num_bde *
643 sizeof(struct ulp_bde64));
644 physaddr += (sizeof(struct fcp_cmnd) +
645 sizeof(struct fcp_rsp) +
646 (2 * sizeof(struct ulp_bde64)));
647 data_bde->addrHigh = putPaddrHigh(physaddr);
648 data_bde->addrLow = putPaddrLow(physaddr);
649 /* ebde count includes the response bde and data bpl */
650 iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
651 } else {
652 /* ebde count includes the response bde and data bdes */
653 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
654 }
655 } else {
656 iocb_cmd->un.fcpi64.bdl.bdeSize =
657 ((num_bde + 2) * sizeof(struct ulp_bde64));
658 }
09372820 659 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
dea3101e 660 return 0;
661}
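/*
 * Example of the SLI-3 ebde_count rules above: a two-element scatterlist
 * fits in the extended IOCB, so ebde_count = 2 + 1 = 3 (the response BDE
 * plus two data BDEs); a five-element scatterlist exceeds
 * LPFC_EXT_DATA_BDE_COUNT, so the first data BDE becomes a BPL pointer
 * and ebde_count = 2 (the response BDE plus the BPL).
 */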
662
663/**
664 * lpfc_send_scsi_error_event: Posts an event when there is SCSI error.
665 * @phba: Pointer to hba context object.
666 * @vport: Pointer to vport object.
667 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
668 * @rsp_iocb: Pointer to response iocb object which reported error.
669 *
670 * This function posts an event when there is a SCSI command reporting
671 * error from the scsi device.
672 **/
673static void
674lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
675 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
676 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
677 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
678 uint32_t resp_info = fcprsp->rspStatus2;
679 uint32_t scsi_status = fcprsp->rspStatus3;
680 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
681 struct lpfc_fast_path_event *fast_path_evt = NULL;
682 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
683 unsigned long flags;
684
685 /* If there is queuefull or busy condition send a scsi event */
686 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
687 (cmnd->result == SAM_STAT_BUSY)) {
688 fast_path_evt = lpfc_alloc_fast_evt(phba);
689 if (!fast_path_evt)
690 return;
691 fast_path_evt->un.scsi_evt.event_type =
692 FC_REG_SCSI_EVENT;
693 fast_path_evt->un.scsi_evt.subcategory =
694 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
695 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
696 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
697 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
698 &pnode->nlp_portname, sizeof(struct lpfc_name));
699 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
700 &pnode->nlp_nodename, sizeof(struct lpfc_name));
701 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
702 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
703 fast_path_evt = lpfc_alloc_fast_evt(phba);
704 if (!fast_path_evt)
705 return;
706 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
707 FC_REG_SCSI_EVENT;
708 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
709 LPFC_EVENT_CHECK_COND;
710 fast_path_evt->un.check_cond_evt.scsi_event.lun =
711 cmnd->device->lun;
712 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
713 &pnode->nlp_portname, sizeof(struct lpfc_name));
714 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
715 &pnode->nlp_nodename, sizeof(struct lpfc_name));
716 fast_path_evt->un.check_cond_evt.sense_key =
717 cmnd->sense_buffer[2] & 0xf;
718 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
719 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
720 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
721 fcpi_parm &&
722 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
723 ((scsi_status == SAM_STAT_GOOD) &&
724 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
725 /*
726 * If status is good or resid does not match with fcp_param and
727 * there is valid fcpi_parm, then there is a read_check error
728 */
729 fast_path_evt = lpfc_alloc_fast_evt(phba);
730 if (!fast_path_evt)
731 return;
732 fast_path_evt->un.read_check_error.header.event_type =
733 FC_REG_FABRIC_EVENT;
734 fast_path_evt->un.read_check_error.header.subcategory =
735 LPFC_EVENT_FCPRDCHKERR;
736 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
737 &pnode->nlp_portname, sizeof(struct lpfc_name));
738 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
739 &pnode->nlp_nodename, sizeof(struct lpfc_name));
740 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
741 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
742 fast_path_evt->un.read_check_error.fcpiparam =
743 fcpi_parm;
744 } else
745 return;
746
747 fast_path_evt->vport = vport;
748 spin_lock_irqsave(&phba->hbalock, flags);
749 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
750 spin_unlock_irqrestore(&phba->hbalock, flags);
751 lpfc_worker_wake_up(phba);
752 return;
753}
754
755/**
756 * lpfc_scsi_unprep_dma_buf: Routine to un-map DMA mapping of scatter gather.
757 * @phba: The Hba for which this call is being executed.
758 * @psb: The scsi buffer which is going to be un-mapped.
759 *
760 * This routine does DMA un-mapping of the scatter gather list of the scsi
761 * command held in @psb.
762 **/
763static void
764lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
765{
766 /*
767 * There are only two special cases to consider. (1) the scsi command
768 * requested scatter-gather usage or (2) the scsi command allocated
769 * a request buffer, but did not request use_sg. There is a third
770 * case, but it does not require resource deallocation.
771 */
772 if (psb->seg_cnt > 0)
773 scsi_dma_unmap(psb->pCmd);
774}
775
776/**
777 * lpfc_handle_fcp_err: FCP response handler.
778 * @vport: The virtual port for which this call is being executed.
779 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
780 * @rsp_iocb: The response IOCB which contains FCP error.
781 *
782 * This routine is called to process response IOCB with status field
783 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
784 * based upon SCSI and FCP error.
785 **/
dea3101e 786static void
787lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
788 struct lpfc_iocbq *rsp_iocb)
dea3101e 789{
790 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
791 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
792 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
7054a606 793 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
dea3101e 794 uint32_t resp_info = fcprsp->rspStatus2;
795 uint32_t scsi_status = fcprsp->rspStatus3;
c7743956 796 uint32_t *lp;
dea3101e 797 uint32_t host_status = DID_OK;
798 uint32_t rsplen = 0;
c7743956 799 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
dea3101e 800
ea2151b4 801
dea3101e 802 /*
803 * If this is a task management command, there is no
804 * scsi packet associated with this lpfc_cmd. The driver
805 * consumes it.
806 */
807 if (fcpcmd->fcpCntl2) {
808 scsi_status = 0;
809 goto out;
810 }
811
812 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
813 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
814 if (snslen > SCSI_SENSE_BUFFERSIZE)
815 snslen = SCSI_SENSE_BUFFERSIZE;
816
817 if (resp_info & RSP_LEN_VALID)
818 rsplen = be32_to_cpu(fcprsp->rspRspLen);
819 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
820 }
821 lp = (uint32_t *)cmnd->sense_buffer;
822
823 if (!scsi_status && (resp_info & RESID_UNDER))
824 logit = LOG_FCP;
825
826 lpfc_printf_vlog(vport, KERN_WARNING, logit,
827 "0730 FCP command x%x failed: x%x SNS x%x x%x "
828 "Data: x%x x%x x%x x%x x%x\n",
829 cmnd->cmnd[0], scsi_status,
830 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
831 be32_to_cpu(fcprsp->rspResId),
832 be32_to_cpu(fcprsp->rspSnsLen),
833 be32_to_cpu(fcprsp->rspRspLen),
834 fcprsp->rspInfo3);
dea3101e 835
836 if (resp_info & RSP_LEN_VALID) {
837 rsplen = be32_to_cpu(fcprsp->rspRspLen);
838 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
839 (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
840 host_status = DID_ERROR;
841 goto out;
842 }
843 }
844
a0b4f78f 845 scsi_set_resid(cmnd, 0);
dea3101e 846 if (resp_info & RESID_UNDER) {
a0b4f78f 847 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
dea3101e 848
849 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
850 "0716 FCP Read Underrun, expected %d, "
851 "residual %d Data: x%x x%x x%x\n",
852 be32_to_cpu(fcpcmd->fcpDl),
853 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
854 cmnd->underflow);
dea3101e 855
856 /*
857 * If there is an under run check if under run reported by
858 * storage array is same as the under run reported by HBA.
859 * If this is not same, there is a dropped frame.
860 */
861 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
862 fcpi_parm &&
a0b4f78f 863 (scsi_get_resid(cmnd) != fcpi_parm)) {
864 lpfc_printf_vlog(vport, KERN_WARNING,
865 LOG_FCP | LOG_FCP_ERROR,
866 "0735 FCP Read Check Error "
867 "and Underrun Data: x%x x%x x%x x%x\n",
868 be32_to_cpu(fcpcmd->fcpDl),
869 scsi_get_resid(cmnd), fcpi_parm,
870 cmnd->cmnd[0]);
a0b4f78f 871 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
872 host_status = DID_ERROR;
873 }
dea3101e 874 /*
875 * The cmnd->underflow is the minimum number of bytes that must
876 * be transferred for this command. Provided a sense condition
877 * is not present, make sure the actual amount transferred is at
878 * least the underflow value or fail.
879 */
880 if (!(resp_info & SNS_LEN_VALID) &&
881 (scsi_status == SAM_STAT_GOOD) &&
a0b4f78f
FT
882 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
883 < cmnd->underflow)) {
884 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
885 "0717 FCP command x%x residual "
886 "underrun converted to error "
887 "Data: x%x x%x x%x\n",
66dbfbe6 888 cmnd->cmnd[0], scsi_bufflen(cmnd),
e8b62011 889 scsi_get_resid(cmnd), cmnd->underflow);
dea3101e 890 host_status = DID_ERROR;
891 }
892 } else if (resp_info & RESID_OVER) {
893 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
894 "0720 FCP command x%x residual overrun error. "
895 "Data: x%x x%x \n", cmnd->cmnd[0],
896 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
dea3101e 897 host_status = DID_ERROR;
898
899 /*
900 * Check SLI validation that all the transfer was actually done
901 * (fcpi_parm should be zero). Apply check only to reads.
902 */
903 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
904 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
905 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
906 "0734 FCP Read Check Error Data: "
907 "x%x x%x x%x x%x\n",
908 be32_to_cpu(fcpcmd->fcpDl),
909 be32_to_cpu(fcprsp->rspResId),
910 fcpi_parm, cmnd->cmnd[0]);
dea3101e 911 host_status = DID_ERROR;
a0b4f78f 912 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
dea3101e 913 }
914
915 out:
916 cmnd->result = ScsiResult(host_status, scsi_status);
ea2151b4 917 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
dea3101e 918}
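/*
 * Underrun example (illustrative): on a 4096-byte read the target reports
 * rspResId = 1024 with RESID_UNDER set, so resid is 1024 and 3072 bytes
 * arrived. The HBA counts the underrun independently in fcpi_parm; if it
 * reports anything other than 1024, a frame was lost in transit and the
 * command is failed with DID_ERROR instead of trusting the short transfer.
 */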
919
920/**
921 * lpfc_scsi_cmd_iocb_cmpl: Scsi cmnd IOCB completion routine.
922 * @phba: The Hba for which this call is being executed.
923 * @pIocbIn: The command IOCBQ for the scsi cmnd.
924 * @pIocbOut: The response IOCBQ for the scsi cmnd.
925 *
926 * This routine assigns scsi command result by looking into response IOCB
927 * status field appropriately. This routine handles QUEUE FULL condition as
928 * well by ramping down device queue depth.
929 **/
dea3101e 930static void
931lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
932 struct lpfc_iocbq *pIocbOut)
933{
934 struct lpfc_scsi_buf *lpfc_cmd =
935 (struct lpfc_scsi_buf *) pIocbIn->context1;
2e0fef85 936 struct lpfc_vport *vport = pIocbIn->vport;
dea3101e 937 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
938 struct lpfc_nodelist *pnode = rdata->pnode;
939 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
940 int result;
941 struct scsi_device *sdev, *tmp_sdev;
942 int depth = 0;
fa61a54e 943 unsigned long flags;
ea2151b4 944 struct lpfc_fast_path_event *fast_path_evt;
dea3101e 945
946 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
947 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
948 if (pnode && NLP_CHK_NODE_ACT(pnode))
949 atomic_dec(&pnode->cmd_pending);
dea3101e 950
951 if (lpfc_cmd->status) {
952 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
953 (lpfc_cmd->result & IOERR_DRVR_MASK))
954 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
955 else if (lpfc_cmd->status >= IOSTAT_CNT)
956 lpfc_cmd->status = IOSTAT_DEFAULT;
957
958 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
959 "0729 FCP cmd x%x failed <%d/%d> "
960 "status: x%x result: x%x Data: x%x x%x\n",
961 cmd->cmnd[0],
962 cmd->device ? cmd->device->id : 0xffff,
963 cmd->device ? cmd->device->lun : 0xffff,
964 lpfc_cmd->status, lpfc_cmd->result,
965 pIocbOut->iocb.ulpContext,
966 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
dea3101e 967
968 switch (lpfc_cmd->status) {
969 case IOSTAT_FCP_RSP_ERROR:
970 /* Call FCP RSP handler to determine result */
2e0fef85 971 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
dea3101e 972 break;
973 case IOSTAT_NPORT_BSY:
974 case IOSTAT_FABRIC_BSY:
0f1f53a7 975 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
976 fast_path_evt = lpfc_alloc_fast_evt(phba);
977 if (!fast_path_evt)
978 break;
979 fast_path_evt->un.fabric_evt.event_type =
980 FC_REG_FABRIC_EVENT;
981 fast_path_evt->un.fabric_evt.subcategory =
982 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
983 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
984 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
985 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
986 &pnode->nlp_portname,
987 sizeof(struct lpfc_name));
988 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
989 &pnode->nlp_nodename,
990 sizeof(struct lpfc_name));
991 }
992 fast_path_evt->vport = vport;
993 fast_path_evt->work_evt.evt =
994 LPFC_EVT_FASTPATH_MGMT_EVT;
995 spin_lock_irqsave(&phba->hbalock, flags);
996 list_add_tail(&fast_path_evt->work_evt.evt_listp,
997 &phba->work_list);
998 spin_unlock_irqrestore(&phba->hbalock, flags);
999 lpfc_worker_wake_up(phba);
dea3101e 1000 break;
92d7f7b0 1001 case IOSTAT_LOCAL_REJECT:
d7c255b2 1002 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
92d7f7b0 1003 lpfc_cmd->result == IOERR_NO_RESOURCES ||
d7c255b2 1004 lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
92d7f7b0 1005 cmd->result = ScsiResult(DID_REQUEUE, 0);
1006 break;
1007 } /* else: fall through */
dea3101e 1008 default:
1009 cmd->result = ScsiResult(DID_ERROR, 0);
1010 break;
1011 }
1012
58da1ffb 1013 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
19a7b4ae 1014 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
1015 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
1016 SAM_STAT_BUSY);
dea3101e 1017 } else {
1018 cmd->result = ScsiResult(DID_OK, 0);
1019 }
1020
1021 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
1022 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
1023
1024 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1025 "0710 Iodone <%d/%d> cmd %p, error "
1026 "x%x SNS x%x x%x Data: x%x x%x\n",
1027 cmd->device->id, cmd->device->lun, cmd,
1028 cmd->result, *lp, *(lp + 3), cmd->retries,
1029 scsi_get_resid(cmd));
dea3101e 1030 }
1031
ea2151b4 1032 lpfc_update_stats(phba, lpfc_cmd);
1033 result = cmd->result;
1034 sdev = cmd->device;
1035 if (vport->cfg_max_scsicmpl_time &&
1036 time_after(jiffies, lpfc_cmd->start_time +
1037 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
1038 spin_lock_irqsave(sdev->host->host_lock, flags);
1039 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1040 if (pnode->cmd_qdepth >
1041 atomic_read(&pnode->cmd_pending) &&
1042 (atomic_read(&pnode->cmd_pending) >
1043 LPFC_MIN_TGT_QDEPTH) &&
1044 ((cmd->cmnd[0] == READ_10) ||
1045 (cmd->cmnd[0] == WRITE_10)))
1046 pnode->cmd_qdepth =
1047 atomic_read(&pnode->cmd_pending);
1048
1049 pnode->last_change_time = jiffies;
1050 }
977b5a0a 1051 spin_unlock_irqrestore(sdev->host->host_lock, flags);
1052 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1053 if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
977b5a0a 1054 time_after(jiffies, pnode->last_change_time +
1055 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
1056 spin_lock_irqsave(sdev->host->host_lock, flags);
1057 pnode->cmd_qdepth += pnode->cmd_qdepth *
1058 LPFC_TGTQ_RAMPUP_PCENT / 100;
1059 if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
1060 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
1061 pnode->last_change_time = jiffies;
1062 spin_unlock_irqrestore(sdev->host->host_lock, flags);
1063 }
1064 }
1065
1dcb58e5 1066 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
1067 cmd->scsi_done(cmd);
1068
b808608b 1069 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1070 /*
1071 * If there is a thread waiting for command completion
1072 * wake up the thread.
1073 */
1074 spin_lock_irqsave(sdev->host->host_lock, flags);
495a714c 1075 lpfc_cmd->pCmd = NULL;
1076 if (lpfc_cmd->waitq)
1077 wake_up(lpfc_cmd->waitq);
1078 spin_unlock_irqrestore(sdev->host->host_lock, flags);
1079 lpfc_release_scsi_buf(phba, lpfc_cmd);
1080 return;
1081 }
1082
1083
1084 if (!result)
3de2a653 1085 lpfc_rampup_queue_depth(vport, sdev);
92d7f7b0 1086
58da1ffb 1087 if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
445cf4f4
JSEC
1088 ((jiffies - pnode->last_ramp_up_time) >
1089 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
1090 ((jiffies - pnode->last_q_full_time) >
1091 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
3de2a653 1092 (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
445cf4f4 1093 shost_for_each_device(tmp_sdev, sdev->host) {
3de2a653 1094 if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
1095 if (tmp_sdev->id != sdev->id)
1096 continue;
1097 if (tmp_sdev->ordered_tags)
1098 scsi_adjust_queue_depth(tmp_sdev,
1099 MSG_ORDERED_TAG,
1100 tmp_sdev->queue_depth+1);
1101 else
1102 scsi_adjust_queue_depth(tmp_sdev,
1103 MSG_SIMPLE_TAG,
1104 tmp_sdev->queue_depth+1);
1105
1106 pnode->last_ramp_up_time = jiffies;
1107 }
1108 }
1109 lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
1110 0xFFFFFFFF,
1111 sdev->queue_depth - 1, sdev->queue_depth);
1112 }
1113
1114 /*
1115 * Check for queue full. If the lun is reporting queue full, then
1116 * back off the lun queue depth to prevent target overloads.
1117 */
1118 if (result == SAM_STAT_TASK_SET_FULL && pnode &&
1119 NLP_CHK_NODE_ACT(pnode)) {
1120 pnode->last_q_full_time = jiffies;
1121
1122 shost_for_each_device(tmp_sdev, sdev->host) {
1123 if (tmp_sdev->id != sdev->id)
1124 continue;
1125 depth = scsi_track_queue_full(tmp_sdev,
1126 tmp_sdev->queue_depth - 1);
1127 }
1128 /*
2e0fef85 1129 * The queue depth cannot be lowered any more.
1130 * Modify the returned error code to store
1131 * the final depth value set by
1132 * scsi_track_queue_full.
1133 */
1134 if (depth == -1)
1135 depth = sdev->host->cmd_per_lun;
1136
1137 if (depth) {
1138 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1139 "0711 detected queue full - lun queue "
1140 "depth adjusted to %d.\n", depth);
1141 lpfc_send_sdev_queuedepth_change_event(phba, vport,
1142 pnode, 0xFFFFFFFF,
1143 depth+1, depth);
1144 }
1145 }
1146
1147 /*
1148 * If there is a thread waiting for command completion
1149 * wake up the thread.
1150 */
1151 spin_lock_irqsave(sdev->host->host_lock, flags);
495a714c 1152 lpfc_cmd->pCmd = NULL;
1153 if (lpfc_cmd->waitq)
1154 wake_up(lpfc_cmd->waitq);
1155 spin_unlock_irqrestore(sdev->host->host_lock, flags);
1156
0bd4ca25 1157 lpfc_release_scsi_buf(phba, lpfc_cmd);
dea3101e 1158}
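/*
 * Example of the queue-full handling above: if a target returns
 * SAM_STAT_TASK_SET_FULL while a lun sits at depth 32, every device on
 * that target is asked to track depth 31; the depth the midlayer settles
 * on (cmd_per_lun when scsi_track_queue_full() reports -1) is what the
 * 0711 message logs.
 */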
1159
1160/**
1161 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB.
1162 * @data: A pointer to the immediate command data portion of the IOCB.
1163 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
1164 *
1165 * The routine copies the entire FCP command from @fcp_cmnd to @data while
1166 * byte swapping the data to big endian format for transmission on the wire.
1167 **/
1168static void
1169lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1170{
1171 int i, j;
1172 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
1173 i += sizeof(uint32_t), j++) {
1174 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
1175 }
1176}
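/*
 * Example of the swap above: on a little-endian host
 * cpu_to_be32(0x12345678) stores the bytes 12 34 56 78 in wire order,
 * while on a big-endian host it is a no-op, so the FCP command image in
 * the IOCB is big endian either way.
 */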
1177
1178/**
1179 * lpfc_scsi_prep_cmnd: Routine to convert scsi cmnd to FCP information unit.
1180 * @vport: The virtual port for which this call is being executed.
1181 * @lpfc_cmd: The scsi command which needs to be sent.
1182 * @pnode: Pointer to lpfc_nodelist.
1183 *
1184 * This routine initializes the fcp_cmnd and iocb data structures from the
1185 * scsi command to be transferred.
1186 **/
dea3101e 1187static void
1188lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1189 struct lpfc_nodelist *pnode)
dea3101e 1190{
2e0fef85 1191 struct lpfc_hba *phba = vport->phba;
dea3101e 1192 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1193 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1194 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1195 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
1196 int datadir = scsi_cmnd->sc_data_direction;
7e2b19fb 1197 char tag[2];
dea3101e 1198
1199 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
1200 return;
1201
dea3101e 1202 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
1203 /* clear task management bits */
1204 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
dea3101e 1205
1206 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
1207 &lpfc_cmd->fcp_cmnd->fcp_lun);
dea3101e 1208
1209 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
1210
1211 if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
1212 switch (tag[0]) {
dea3101e 1213 case HEAD_OF_QUEUE_TAG:
1214 fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
1215 break;
1216 case ORDERED_QUEUE_TAG:
1217 fcp_cmnd->fcpCntl1 = ORDERED_Q;
1218 break;
1219 default:
1220 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
1221 break;
1222 }
1223 } else
1224 fcp_cmnd->fcpCntl1 = 0;
1225
1226 /*
1227 * There are three possibilities here - use scatter-gather segment, use
1228 * the single mapping, or neither. Start the lpfc command prep by
1229 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1230 * data bde entry.
1231 */
a0b4f78f 1232 if (scsi_sg_count(scsi_cmnd)) {
dea3101e 1233 if (datadir == DMA_TO_DEVICE) {
1234 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
1235 iocb_cmd->un.fcpi.fcpi_parm = 0;
1236 iocb_cmd->ulpPU = 0;
1237 fcp_cmnd->fcpCntl3 = WRITE_DATA;
1238 phba->fc4OutputRequests++;
1239 } else {
1240 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
1241 iocb_cmd->ulpPU = PARM_READ_CHECK;
a0b4f78f 1242 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
dea3101e 1243 fcp_cmnd->fcpCntl3 = READ_DATA;
1244 phba->fc4InputRequests++;
1245 }
1246 } else {
1247 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
1248 iocb_cmd->un.fcpi.fcpi_parm = 0;
1249 iocb_cmd->ulpPU = 0;
1250 fcp_cmnd->fcpCntl3 = 0;
1251 phba->fc4ControlRequests++;
1252 }
1253 if (phba->sli_rev == 3)
1254 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
dea3101e 1255 /*
1256 * Finish initializing those IOCB fields that are independent
1257 * of the scsi_cmnd request_buffer
1258 */
1259 piocbq->iocb.ulpContext = pnode->nlp_rpi;
1260 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
1261 piocbq->iocb.ulpFCP2Rcvy = 1;
1262 else
1263 piocbq->iocb.ulpFCP2Rcvy = 0;
dea3101e 1264
1265 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
1266 piocbq->context1 = lpfc_cmd;
1267 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
1268 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
2e0fef85 1269 piocbq->vport = vport;
dea3101e 1270}
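/*
 * Summary of the IOCB command selection above:
 *
 *	sg list + DMA_TO_DEVICE    CMD_FCP_IWRITE64_CR, fcpCntl3 = WRITE_DATA
 *	sg list + DMA_FROM_DEVICE  CMD_FCP_IREAD64_CR,  fcpCntl3 = READ_DATA,
 *	                           fcpi_parm = transfer length for read check
 *	no data transfer           CMD_FCP_ICMND64_CR,  fcpCntl3 = 0
 */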
1271
1272/**
1273 * lpfc_scsi_prep_task_mgmt_cmd: Convert scsi TM cmnd to FCP information unit.
1274 * @vport: The virtual port for which this call is being executed.
1275 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
1276 * @lun: Logical unit number.
1277 * @task_mgmt_cmd: SCSI task management command.
1278 *
1279 * This routine creates FCP information unit corresponding to @task_mgmt_cmd.
1280 *
1281 * Return codes:
1282 * 0 - Error
1283 * 1 - Success
1284 **/
dea3101e 1285static int
2e0fef85 1286lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
dea3101e 1287 struct lpfc_scsi_buf *lpfc_cmd,
420b630d 1288 unsigned int lun,
dea3101e 1289 uint8_t task_mgmt_cmd)
1290{
dea3101e 1291 struct lpfc_iocbq *piocbq;
1292 IOCB_t *piocb;
1293 struct fcp_cmnd *fcp_cmnd;
0b18ac42 1294 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
dea3101e 1295 struct lpfc_nodelist *ndlp = rdata->pnode;
1296
1297 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1298 ndlp->nlp_state != NLP_STE_MAPPED_NODE)
dea3101e 1299 return 0;
dea3101e 1300
dea3101e 1301 piocbq = &(lpfc_cmd->cur_iocbq);
1302 piocbq->vport = vport;
1303
dea3101e 1304 piocb = &piocbq->iocb;
1305
1306 fcp_cmnd = lpfc_cmd->fcp_cmnd;
1307 /* Clear out any old data in the FCP command area */
1308 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
1309 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
dea3101e 1310 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
1311 if (vport->phba->sli_rev == 3)
1312 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
dea3101e 1313 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
dea3101e 1314 piocb->ulpContext = ndlp->nlp_rpi;
1315 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
1316 piocb->ulpFCP2Rcvy = 1;
1317 }
1318 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
1319
1320 /* ulpTimeout is only one byte */
1321 if (lpfc_cmd->timeout > 0xff) {
1322 /*
1323 * Do not timeout the command at the firmware level.
1324 * The driver will provide the timeout mechanism.
1325 */
1326 piocb->ulpTimeout = 0;
1327 } else {
1328 piocb->ulpTimeout = lpfc_cmd->timeout;
1329 }
1330
2e0fef85 1331 return 1;
dea3101e 1332}
1333
1334/**
1335 * lpfc_tskmgmt_def_cmpl: IOCB completion routine for task management command.
1336 * @phba: The Hba for which this call is being executed.
1337 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
1338 * @rspiocbq: Pointer to lpfc_iocbq data structure.
1339 *
1340 * This routine is the IOCB completion routine for the device reset and
1341 * target reset routines. It releases the scsi buffer associated with lpfc_cmd.
1342 **/
1343static void
1344lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
1345 struct lpfc_iocbq *cmdiocbq,
1346 struct lpfc_iocbq *rspiocbq)
1347{
1348 struct lpfc_scsi_buf *lpfc_cmd =
1349 (struct lpfc_scsi_buf *) cmdiocbq->context1;
1350 if (lpfc_cmd)
1351 lpfc_release_scsi_buf(phba, lpfc_cmd);
1352 return;
1353}
1354
1355/**
1356 * lpfc_scsi_tgt_reset: Target reset handler.
1357 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
1358 * @vport: The virtual port for which this call is being executed.
1359 * @tgt_id: Target ID.
1360 * @lun: Lun number.
1361 * @rdata: Pointer to lpfc_rport_data.
1362 *
1363 * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
1364 *
1365 * Return Code:
1366 * 0x2003 - Error
1367 * 0x2002 - Success.
1368 **/
dea3101e 1369static int
2e0fef85 1370lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
1371 unsigned tgt_id, unsigned int lun,
1372 struct lpfc_rport_data *rdata)
dea3101e 1373{
2e0fef85 1374 struct lpfc_hba *phba = vport->phba;
dea3101e 1375 struct lpfc_iocbq *iocbq;
0bd4ca25 1376 struct lpfc_iocbq *iocbqrsp;
dea3101e 1377 int ret;
915caaaf 1378 int status;
dea3101e 1379
58da1ffb 1380 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
1381 return FAILED;
1382
0b18ac42 1383 lpfc_cmd->rdata = rdata;
915caaaf 1384 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
420b630d 1385 FCP_TARGET_RESET);
915caaaf 1386 if (!status)
dea3101e 1387 return FAILED;
1388
dea3101e 1389 iocbq = &lpfc_cmd->cur_iocbq;
1390 iocbqrsp = lpfc_sli_get_iocbq(phba);
1391
dea3101e 1392 if (!iocbqrsp)
1393 return FAILED;
dea3101e 1394
0b18ac42 1395 /* Issue Target Reset to TGT <num> */
1396 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1397 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
1398 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
915caaaf 1399 status = lpfc_sli_issue_iocb_wait(phba,
1400 &phba->sli.ring[phba->sli.fcp_ring],
1401 iocbq, iocbqrsp, lpfc_cmd->timeout);
1402 if (status != IOCB_SUCCESS) {
1403 if (status == IOCB_TIMEDOUT) {
7054a606 1404 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
915caaaf
JS
1405 ret = TIMEOUT_ERROR;
1406 } else
1407 ret = FAILED;
dea3101e 1408 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
dea3101e 1409 } else {
1410 ret = SUCCESS;
1411 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
1412 lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
1413 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
1414 (lpfc_cmd->result & IOERR_DRVR_MASK))
1415 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
1416 }
1417
604a3e30 1418 lpfc_sli_release_iocbq(phba, iocbqrsp);
dea3101e 1419 return ret;
1420}
1421
1422/**
1423 * lpfc_info: Info entry point of scsi_host_template data structure.
1424 * @host: The scsi host for which this call is being executed.
1425 *
1426 * This routine provides module information about the hba.
1427 *
1428 * Return code:
1429 * Pointer to char - Success.
1430 **/
dea3101e 1431const char *
1432lpfc_info(struct Scsi_Host *host)
1433{
1434 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
1435 struct lpfc_hba *phba = vport->phba;
dea3101e 1436 int len;
1437 static char lpfcinfobuf[384];
1438
1439 memset(lpfcinfobuf,0,384);
1440 if (phba && phba->pcidev){
1441 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
1442 len = strlen(lpfcinfobuf);
1443 snprintf(lpfcinfobuf + len,
1444 384-len,
1445 " on PCI bus %02x device %02x irq %d",
1446 phba->pcidev->bus->number,
1447 phba->pcidev->devfn,
1448 phba->pcidev->irq);
1449 len = strlen(lpfcinfobuf);
1450 if (phba->Port[0]) {
1451 snprintf(lpfcinfobuf + len,
1452 384-len,
1453 " port %s",
1454 phba->Port);
1455 }
1456 }
1457 return lpfcinfobuf;
1458}
1459
1460/**
1461 * lpfc_poll_rearm_timer: Routine to modify the fcp_poll timer of the hba.
1462 * @phba: The Hba for which this call is being executed.
1463 *
1464 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
1465 * The default value of cfg_poll_tmo is 10 milliseconds.
1466 **/
1467static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
1468{
1469 unsigned long poll_tmo_expires =
1470 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
1471
1472 if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
1473 mod_timer(&phba->fcp_poll_timer,
1474 poll_tmo_expires);
1475}
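/*
 * Example: with the default cfg_poll_tmo of 10 ms, the timer is pushed
 * out to jiffies + msecs_to_jiffies(10) on every call while the FCP ring
 * still has commands on its txcmplq; once the ring drains
 * (txcmplq_cnt == 0) the timer is simply not rearmed.
 */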
1476
1477/**
1478 * lpfc_poll_start_timer: Routine to start fcp_poll_timer of HBA.
1479 * @phba: The Hba for which this call is being executed.
1480 *
1481 * This routine starts the fcp_poll_timer of @phba.
1482 **/
1483void lpfc_poll_start_timer(struct lpfc_hba * phba)
1484{
1485 lpfc_poll_rearm_timer(phba);
1486}
1487
1488/**
1489 * lpfc_poll_timeout: Restart polling timer.
1490 * @ptr: Pointer to the lpfc_hba data structure.
1491 *
1492 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
1493 * and the FCP ring interrupt is disabled.
1494 **/
1495
1496void lpfc_poll_timeout(unsigned long ptr)
1497{
2e0fef85 1498 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
1499
1500 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1501 lpfc_sli_poll_fcp_ring (phba);
1502 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
1503 lpfc_poll_rearm_timer(phba);
1504 }
1505}
1506
1507/**
1508 * lpfc_queuecommand: Queuecommand entry point of Scsi Host Template data
1509 * structure.
1510 * @cmnd: Pointer to scsi_cmnd data structure.
1511 * @done: Pointer to done routine.
1512 *
1513 * The driver registers this routine with the scsi midlayer to submit a
1514 * @cmnd for processing. It prepares an IOCB from the scsi command, hands it
1515 * to the firmware, and invokes the @done callback when processing completes.
1516 *
1517 * Return value :
1518 * 0 - Success
1519 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
1520 **/
dea3101e 1521static int
1522lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1523{
1524 struct Scsi_Host *shost = cmnd->device->host;
1525 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1526 struct lpfc_hba *phba = vport->phba;
1527 struct lpfc_sli *psli = &phba->sli;
dea3101e 1528 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1529 struct lpfc_nodelist *ndlp = rdata->pnode;
0bd4ca25 1530 struct lpfc_scsi_buf *lpfc_cmd;
19a7b4ae 1531 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
19a7b4ae 1532 int err;
dea3101e 1533
1534 err = fc_remote_port_chkready(rport);
1535 if (err) {
1536 cmnd->result = err;
dea3101e 1537 goto out_fail_command;
1538 }
1539
1540 /*
1541 * Catch race where our node has transitioned, but the
1542 * transport is still transitioning.
dea3101e 1543 */
1544 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1545 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
1546 goto out_fail_command;
1547 }
1548 if (vport->cfg_max_scsicmpl_time &&
1549 (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
977b5a0a 1550 goto out_host_busy;
a93ce024 1551
ed957684 1552 lpfc_cmd = lpfc_get_scsi_buf(phba);
dea3101e 1553 if (lpfc_cmd == NULL) {
92d7f7b0
JS
1554 lpfc_adjust_queue_depth(phba);
1555
e8b62011
JS
1556 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1557 "0707 driver's buffer pool is empty, "
1558 "IO busied\n");
dea3101e 1559 goto out_host_busy;
1560 }
1561
ea2151b4 1562 lpfc_cmd->start_time = jiffies;
dea3101e 1563 /*
1564 * Store the midlayer's command structure for the completion phase
1565 * and complete the command initialization.
1566 */
1567 lpfc_cmd->pCmd = cmnd;
1568 lpfc_cmd->rdata = rdata;
1569 lpfc_cmd->timeout = 0;
dea3101e 1571 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
1572 cmnd->scsi_done = done;
1573
1574 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
1575 if (err)
1576 goto out_host_busy_free_buf;
1577
2e0fef85 1578 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
dea3101e 1579
977b5a0a 1580 atomic_inc(&ndlp->cmd_pending);
dea3101e 1581 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
92d7f7b0 1582 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
dea3101e 1583 if (err)
1584 goto out_host_busy_free_buf;
875fbdfe
JSEC
1585
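	/*
	 * In polled mode service the FCP ring immediately rather than waiting
	 * for an interrupt, then rearm the poll timer if the ring interrupt
	 * is disabled.
	 */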
1586 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1587 lpfc_sli_poll_fcp_ring(phba);
1588 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
1589 lpfc_poll_rearm_timer(phba);
1590 }
1591
dea3101e 1592 return 0;
1593
1594 out_host_busy_free_buf:
977b5a0a 1595 atomic_dec(&ndlp->cmd_pending);
bcf4dbfa 1596 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
0bd4ca25 1597 lpfc_release_scsi_buf(phba, lpfc_cmd);
dea3101e 1598 out_host_busy:
1599 return SCSI_MLQUEUE_HOST_BUSY;
1600
1601 out_fail_command:
1602 done(cmnd);
1603 return 0;
1604}
1605
9bad7671
JS
1606/**
1607 * lpfc_block_error_handler: Routine to block error handler.
1608 * @cmnd: Pointer to scsi_cmnd data structure.
1609 *
 1610 * This routine blocks execution until the fc_rport state is not FC_PORTSTATE_BLOCKED.
1611 **/
a90f5684
JS
1612static void
1613lpfc_block_error_handler(struct scsi_cmnd *cmnd)
1614{
1615 struct Scsi_Host *shost = cmnd->device->host;
1616 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1617
1618 spin_lock_irq(shost->host_lock);
1619 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
1620 spin_unlock_irq(shost->host_lock);
1621 msleep(1000);
1622 spin_lock_irq(shost->host_lock);
1623 }
1624 spin_unlock_irq(shost->host_lock);
1625 return;
1626}
63c59c3b 1627
9bad7671
JS
1628/**
 1629 * lpfc_abort_handler: eh_abort_handler entry point of Scsi Host Template data
 1630 * structure.
 1631 * @cmnd: Pointer to scsi_cmnd data structure.
 1632 *
 1633 * This routine aborts the @cmnd pending in the base driver.
1634 *
1635 * Return code :
1636 * 0x2003 - Error
1637 * 0x2002 - Success
1638 **/
dea3101e 1639static int
63c59c3b 1640lpfc_abort_handler(struct scsi_cmnd *cmnd)
dea3101e 1641{
2e0fef85
JS
1642 struct Scsi_Host *shost = cmnd->device->host;
1643 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1644 struct lpfc_hba *phba = vport->phba;
dea3101e 1645 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
0bd4ca25
JSEC
1646 struct lpfc_iocbq *iocb;
1647 struct lpfc_iocbq *abtsiocb;
dea3101e 1648 struct lpfc_scsi_buf *lpfc_cmd;
dea3101e 1649 IOCB_t *cmd, *icmd;
0bd4ca25 1650 int ret = SUCCESS;
fa61a54e 1651 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
dea3101e 1652
a90f5684 1653 lpfc_block_error_handler(cmnd);
0bd4ca25
JSEC
1654 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
1655 BUG_ON(!lpfc_cmd);
dea3101e 1656
0bd4ca25
JSEC
1657 /*
1658 * If pCmd field of the corresponding lpfc_scsi_buf structure
1659 * points to a different SCSI command, then the driver has
1660 * already completed this command, but the midlayer did not
1661 * see the completion before the eh fired. Just return
1662 * SUCCESS.
1663 */
1664 iocb = &lpfc_cmd->cur_iocbq;
1665 if (lpfc_cmd->pCmd != cmnd)
1666 goto out;
dea3101e 1667
0bd4ca25 1668 BUG_ON(iocb->context1 != lpfc_cmd);
dea3101e 1669
0bd4ca25
JSEC
1670 abtsiocb = lpfc_sli_get_iocbq(phba);
1671 if (abtsiocb == NULL) {
1672 ret = FAILED;
dea3101e 1673 goto out;
1674 }
1675
dea3101e 1676 /*
0bd4ca25
JSEC
 1677 * The scsi command cannot be in the txq; it is in flight because
 1678 * pCmd is still pointing at the SCSI command we have to abort. There
 1679 * is no need to search the txcmplq. Just send an abort to the FW.
dea3101e 1680 */
dea3101e 1681
0bd4ca25
JSEC
1682 cmd = &iocb->iocb;
1683 icmd = &abtsiocb->iocb;
1684 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
1685 icmd->un.acxri.abortContextTag = cmd->ulpContext;
1686 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
dea3101e 1687
0bd4ca25
JSEC
1688 icmd->ulpLe = 1;
1689 icmd->ulpClass = cmd->ulpClass;
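	/*
	 * With the link up a real ABTS can be sent on the wire (ABORT_XRI);
	 * with the link down the exchange can only be closed locally
	 * (CLOSE_XRI).
	 */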
2e0fef85 1690 if (lpfc_is_link_up(phba))
0bd4ca25
JSEC
1691 icmd->ulpCommand = CMD_ABORT_XRI_CN;
1692 else
1693 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
dea3101e 1694
0bd4ca25 1695 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2e0fef85 1696 abtsiocb->vport = vport;
0bd4ca25
JSEC
1697 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
1698 lpfc_sli_release_iocbq(phba, abtsiocb);
1699 ret = FAILED;
1700 goto out;
1701 }
dea3101e 1702
875fbdfe
JSEC
1703 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
1704 lpfc_sli_poll_fcp_ring (phba);
1705
fa61a54e 1706 lpfc_cmd->waitq = &waitq;
0bd4ca25 1707 /* Wait for abort to complete */
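	/*
	 * The IOCB completion path detaches pCmd and wakes this waitq; bound
	 * the wait at twice the devloss timeout.
	 */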
fa61a54e
JS
1708 wait_event_timeout(waitq,
1709 (lpfc_cmd->pCmd != cmnd),
1710 (2*vport->cfg_devloss_tmo*HZ));
875fbdfe 1711
fa61a54e
JS
1712 spin_lock_irq(shost->host_lock);
1713 lpfc_cmd->waitq = NULL;
1714 spin_unlock_irq(shost->host_lock);
dea3101e 1715
0bd4ca25
JSEC
1716 if (lpfc_cmd->pCmd == cmnd) {
1717 ret = FAILED;
e8b62011
JS
1718 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1719 "0748 abort handler timed out waiting "
1720 "for abort to complete: ret %#x, ID %d, "
1721 "LUN %d, snum %#lx\n",
1722 ret, cmnd->device->id, cmnd->device->lun,
1723 cmnd->serial_number);
dea3101e 1724 }
1725
1726 out:
e8b62011
JS
1727 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1728 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
1729 "LUN %d snum %#lx\n", ret, cmnd->device->id,
1730 cmnd->device->lun, cmnd->serial_number);
63c59c3b 1731 return ret;
8fa728a2
JG
1732}
1733
9bad7671
JS
1734/**
1735 * lpfc_device_reset_handler: eh_device_reset entry point of Scsi Host Template
 1736 * data structure.
1737 * @cmnd: Pointer to scsi_cmnd data structure.
1738 *
1739 * This routine does a device reset by sending a TARGET_RESET task management
1740 * command.
1741 *
1742 * Return code :
1743 * 0x2003 - Error
 1744 * 0x2002 - Success
1745 **/
dea3101e 1746static int
7054a606 1747lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
dea3101e 1748{
2e0fef85
JS
1749 struct Scsi_Host *shost = cmnd->device->host;
1750 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1751 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
1752 struct lpfc_scsi_buf *lpfc_cmd;
1753 struct lpfc_iocbq *iocbq, *iocbqrsp;
dea3101e 1754 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1755 struct lpfc_nodelist *pnode = rdata->pnode;
915caaaf
JS
1756 unsigned long later;
1757 int ret = SUCCESS;
1758 int status;
1759 int cnt;
ea2151b4 1760 struct lpfc_scsi_event_header scsi_event;
dea3101e 1761
a90f5684 1762 lpfc_block_error_handler(cmnd);
dea3101e 1763 /*
 1764 * If the target is not in a MAPPED state, delay the reset until the
c01f3208 1765 * target is rediscovered or the devloss timeout expires.
dea3101e 1766 */
915caaaf
JS
1767 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
1768 while (time_after(later, jiffies)) {
58da1ffb 1769 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
915caaaf 1770 return FAILED;
f5603511 1771 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
dea3101e 1772 break;
915caaaf
JS
1773 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1774 rdata = cmnd->device->hostdata;
1775 if (!rdata)
1776 break;
1777 pnode = rdata->pnode;
1778 }
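	/* Post a vendor-unique target-reset event to the FC transport. */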
ea2151b4
JS
1779
1780 scsi_event.event_type = FC_REG_SCSI_EVENT;
1781 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
1782 scsi_event.lun = 0;
1783 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
1784 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
1785
1786 fc_host_post_vendor_event(shost,
1787 fc_get_event_number(),
1788 sizeof(scsi_event),
1789 (char *)&scsi_event,
ddcc50f0 1790 LPFC_NL_VENDOR_ID);
ea2151b4 1791
915caaaf
JS
1792 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
1793 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1794 "0721 LUN Reset rport "
1795 "failure: msec x%x rdata x%p\n",
1796 jiffies_to_msecs(jiffies - later), rdata);
1797 return FAILED;
dea3101e 1798 }
2e0fef85 1799 lpfc_cmd = lpfc_get_scsi_buf(phba);
dea3101e 1800 if (lpfc_cmd == NULL)
915caaaf 1801 return FAILED;
dea3101e 1802 lpfc_cmd->timeout = 60;
0b18ac42 1803 lpfc_cmd->rdata = rdata;
dea3101e 1804
915caaaf
JS
1805 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
1806 cmnd->device->lun,
1807 FCP_TARGET_RESET);
1808 if (!status) {
1809 lpfc_release_scsi_buf(phba, lpfc_cmd);
1810 return FAILED;
1811 }
dea3101e 1812 iocbq = &lpfc_cmd->cur_iocbq;
1813
1814 /* get a buffer for this IOCB command response */
0bd4ca25 1815 iocbqrsp = lpfc_sli_get_iocbq(phba);
915caaaf
JS
1816 if (iocbqrsp == NULL) {
1817 lpfc_release_scsi_buf(phba, lpfc_cmd);
1818 return FAILED;
1819 }
e8b62011
JS
1820 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1821 "0703 Issue target reset to TGT %d LUN %d "
1822 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
1823 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
915caaaf
JS
1824 status = lpfc_sli_issue_iocb_wait(phba,
1825 &phba->sli.ring[phba->sli.fcp_ring],
1826 iocbq, iocbqrsp, lpfc_cmd->timeout);
1827 if (status == IOCB_TIMEDOUT) {
7054a606 1828 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
915caaaf
JS
1829 ret = TIMEOUT_ERROR;
1830 } else {
1831 if (status != IOCB_SUCCESS)
1832 ret = FAILED;
1833 lpfc_release_scsi_buf(phba, lpfc_cmd);
1834 }
1835 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1836 "0713 SCSI layer issued device reset (%d, %d) "
1837 "return x%x status x%x result x%x\n",
1838 cmnd->device->id, cmnd->device->lun, ret,
1839 iocbqrsp->iocb.ulpStatus,
1840 iocbqrsp->iocb.un.ulpWord[4]);
6175c02a 1841 lpfc_sli_release_iocbq(phba, iocbqrsp);
51ef4c26 1842 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
915caaaf 1843 LPFC_CTX_TGT);
6175c02a 1844 if (cnt)
51ef4c26 1845 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
6175c02a 1846 cmnd->device->id, cmnd->device->lun,
915caaaf
JS
1847 LPFC_CTX_TGT);
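	/*
	 * Poll every 20 ms, for at most twice the devloss timeout, for the
	 * aborted I/Os on this target to be flushed.
	 */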
1848 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
1849 while (time_after(later, jiffies) && cnt) {
1850 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
51ef4c26 1851 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
915caaaf 1852 cmnd->device->lun, LPFC_CTX_TGT);
dea3101e 1853 }
dea3101e 1854 if (cnt) {
e8b62011
JS
1855 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1856 "0719 device reset I/O flush failure: "
1857 "cnt x%x\n", cnt);
0bd4ca25 1858 ret = FAILED;
dea3101e 1859 }
dea3101e 1860 return ret;
1861}
1862
9bad7671
JS
1863/**
1864 * lpfc_bus_reset_handler: eh_bus_reset_handler entry point of Scsi Host
1865 * Template data structure.
1866 * @cmnd: Pointer to scsi_cmnd data structure.
1867 *
 1868 * This routine does a target reset to every target on @cmnd->device->host.
1869 *
1870 * Return Code:
1871 * 0x2003 - Error
1872 * 0x2002 - Success
1873 **/
94d0e7b8 1874static int
7054a606 1875lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
dea3101e 1876{
2e0fef85
JS
1877 struct Scsi_Host *shost = cmnd->device->host;
1878 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1879 struct lpfc_hba *phba = vport->phba;
dea3101e 1880 struct lpfc_nodelist *ndlp = NULL;
1881 int match;
d7c255b2 1882 int ret = SUCCESS, status = SUCCESS, i;
915caaaf 1883 int cnt;
0bd4ca25 1884 struct lpfc_scsi_buf * lpfc_cmd;
915caaaf 1885 unsigned long later;
ea2151b4
JS
1886 struct lpfc_scsi_event_header scsi_event;
1887
1888 scsi_event.event_type = FC_REG_SCSI_EVENT;
1889 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
1890 scsi_event.lun = 0;
1891 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
1892 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
1893
1894 fc_host_post_vendor_event(shost,
1895 fc_get_event_number(),
1896 sizeof(scsi_event),
1897 (char *)&scsi_event,
ddcc50f0 1898 LPFC_NL_VENDOR_ID);
dea3101e 1899
a90f5684 1900 lpfc_block_error_handler(cmnd);
dea3101e 1901 /*
1902 * Since the driver manages a single bus device, reset all
1903 * targets known to the driver. Should any target reset
1904 * fail, this routine returns failure to the midlayer.
1905 */
e17da18e 1906 for (i = 0; i < LPFC_MAX_TARGET; i++) {
685f0bf7 1907 /* Search for mapped node by target ID */
dea3101e 1908 match = 0;
2e0fef85
JS
1909 spin_lock_irq(shost->host_lock);
1910 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
e47c9093
JS
1911 if (!NLP_CHK_NODE_ACT(ndlp))
1912 continue;
685f0bf7 1913 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
915caaaf 1914 ndlp->nlp_sid == i &&
685f0bf7 1915 ndlp->rport) {
dea3101e 1916 match = 1;
1917 break;
1918 }
1919 }
2e0fef85 1920 spin_unlock_irq(shost->host_lock);
dea3101e 1921 if (!match)
1922 continue;
915caaaf
JS
1923 lpfc_cmd = lpfc_get_scsi_buf(phba);
1924 if (lpfc_cmd) {
1925 lpfc_cmd->timeout = 60;
1926 status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
1927 cmnd->device->lun,
1928 ndlp->rport->dd_data);
1929 if (status != TIMEOUT_ERROR)
1930 lpfc_release_scsi_buf(phba, lpfc_cmd);
1931 }
1932 if (!lpfc_cmd || status != SUCCESS) {
e8b62011
JS
1933 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1934 "0700 Bus Reset on target %d failed\n",
1935 i);
915caaaf 1936 ret = FAILED;
dea3101e 1937 }
1938 }
6175c02a
JSEC
1939 /*
1940 * All outstanding txcmplq I/Os should have been aborted by
1941 * the targets. Unfortunately, some targets do not abide by
 1942 * this, forcing the driver to double-check.
1943 */
51ef4c26 1944 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
6175c02a 1945 if (cnt)
51ef4c26
JS
1946 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
1947 0, 0, LPFC_CTX_HOST);
915caaaf
JS
1948 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
1949 while (time_after(later, jiffies) && cnt) {
1950 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
51ef4c26 1951 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
dea3101e 1952 }
dea3101e 1953 if (cnt) {
e8b62011
JS
1954 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1955 "0715 Bus Reset I/O flush failure: "
1956 "cnt x%x left x%x\n", cnt, i);
0bd4ca25 1957 ret = FAILED;
6175c02a 1958 }
e8b62011
JS
1959 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1960 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
dea3101e 1961 return ret;
1962}
1963
9bad7671
JS
1964/**
1965 * lpfc_slave_alloc: slave_alloc entry point of Scsi Host Template data
1966 * structure.
1967 * @sdev: Pointer to scsi_device.
1968 *
 1969 * This routine adds lun_queue_depth + 2 scsi_bufs to this host's globally
 1970 * available list of scsi buffers, making sure no more scsi buffers are
 1971 * allocated than the HBA limit conveyed to the midlayer. This list of
 1972 * scsi buffers exists for the lifetime of the driver.
1973 *
1974 * Return codes:
1975 * non-0 - Error
1976 * 0 - Success
1977 **/
dea3101e 1978static int
1979lpfc_slave_alloc(struct scsi_device *sdev)
1980{
2e0fef85
JS
1981 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
1982 struct lpfc_hba *phba = vport->phba;
dea3101e 1983 struct lpfc_scsi_buf *scsi_buf = NULL;
19a7b4ae 1984 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
dea3101e 1985 uint32_t total = 0, i;
1986 uint32_t num_to_alloc = 0;
1987 unsigned long flags;
dea3101e 1988
19a7b4ae 1989 if (!rport || fc_remote_port_chkready(rport))
dea3101e 1990 return -ENXIO;
1991
19a7b4ae 1992 sdev->hostdata = rport->dd_data;
dea3101e 1993
1994 /*
1995 * Populate the cmds_per_lun count scsi_bufs into this host's globally
1996 * available list of scsi buffers. Don't allocate more than the
a784efbf
JSEC
1997 * HBA limit conveyed to the midlayer via the host structure. The
1998 * formula accounts for the lun_queue_depth + error handlers + 1
1999 * extra. This list of scsi bufs exists for the lifetime of the driver.
dea3101e 2000 */
2001 total = phba->total_scsi_bufs;
3de2a653 2002 num_to_alloc = vport->cfg_lun_queue_depth + 2;
92d7f7b0
JS
2003
2004 /* Allow some exchanges to be available always to complete discovery */
2005 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
e8b62011
JS
2006 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2007 "0704 At limitation of %d preallocated "
2008 "command buffers\n", total);
dea3101e 2009 return 0;
92d7f7b0
JS
2010 /* Allow some exchanges to be available always to complete discovery */
2011 } else if (total + num_to_alloc >
2012 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
e8b62011
JS
2013 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2014 "0705 Allocation request of %d "
2015 "command buffers will exceed max of %d. "
2016 "Reducing allocation request to %d.\n",
2017 num_to_alloc, phba->cfg_hba_queue_depth,
2018 (phba->cfg_hba_queue_depth - total));
dea3101e 2019 num_to_alloc = phba->cfg_hba_queue_depth - total;
2020 }
2021
2022 for (i = 0; i < num_to_alloc; i++) {
2e0fef85 2023 scsi_buf = lpfc_new_scsi_buf(vport);
dea3101e 2024 if (!scsi_buf) {
e8b62011
JS
2025 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2026 "0706 Failed to allocate "
2027 "command buffer\n");
dea3101e 2028 break;
2029 }
2030
875fbdfe 2031 spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
dea3101e 2032 phba->total_scsi_bufs++;
2033 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
875fbdfe 2034 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
dea3101e 2035 }
2036 return 0;
2037}
2038
9bad7671
JS
2039/**
 2040 * lpfc_slave_configure: slave_configure entry point of Scsi Host Template data
2041 * structure.
2042 * @sdev: Pointer to scsi_device.
2043 *
 2044 * This routine configures the following items:
2045 * - Tag command queuing support for @sdev if supported.
2046 * - Dev loss time out value of fc_rport.
2047 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
2048 *
2049 * Return codes:
2050 * 0 - Success
2051 **/
dea3101e 2052static int
2053lpfc_slave_configure(struct scsi_device *sdev)
2054{
2e0fef85
JS
2055 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
2056 struct lpfc_hba *phba = vport->phba;
2057 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
dea3101e 2058
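	/*
	 * Size the device queue to the configured per-LUN depth whether or
	 * not the device supports tagged queuing.
	 */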
2059 if (sdev->tagged_supported)
3de2a653 2060 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
dea3101e 2061 else
3de2a653 2062 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
dea3101e 2063
2064 /*
2065 * Initialize the fc transport attributes for the target
2066 * containing this scsi device. Also note that the driver's
2067 * target pointer is stored in the starget_data for the
2068 * driver's sysfs entry point functions.
2069 */
3de2a653 2070 rport->dev_loss_tmo = vport->cfg_devloss_tmo;
dea3101e 2071
875fbdfe
JSEC
2072 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2073 lpfc_sli_poll_fcp_ring(phba);
2074 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2075 lpfc_poll_rearm_timer(phba);
2076 }
2077
dea3101e 2078 return 0;
2079}
2080
9bad7671
JS
2081/**
2082 * lpfc_slave_destroy: slave_destroy entry point of SHT data structure.
2083 * @sdev: Pointer to scsi_device.
2084 *
 2085 * This routine sets the @sdev hostdata field to null.
2086 **/
dea3101e 2087static void
2088lpfc_slave_destroy(struct scsi_device *sdev)
2089{
2090 sdev->hostdata = NULL;
2091 return;
2092}
2093
92d7f7b0 2094
dea3101e 2095struct scsi_host_template lpfc_template = {
2096 .module = THIS_MODULE,
2097 .name = LPFC_DRIVER_NAME,
2098 .info = lpfc_info,
2099 .queuecommand = lpfc_queuecommand,
2100 .eh_abort_handler = lpfc_abort_handler,
7054a606
JS
2101 .eh_device_reset_handler= lpfc_device_reset_handler,
2102 .eh_bus_reset_handler = lpfc_bus_reset_handler,
dea3101e 2103 .slave_alloc = lpfc_slave_alloc,
2104 .slave_configure = lpfc_slave_configure,
2105 .slave_destroy = lpfc_slave_destroy,
47a8617c 2106 .scan_finished = lpfc_scan_finished,
dea3101e 2107 .this_id = -1,
83108bd3 2108 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
dea3101e 2109 .cmd_per_lun = LPFC_CMD_PER_LUN,
2110 .use_clustering = ENABLE_CLUSTERING,
2e0fef85 2111 .shost_attrs = lpfc_hba_attrs,
564b2960 2112 .max_sectors = 0xFFFF,
dea3101e 2113};
3de2a653
JS
2114
2115struct scsi_host_template lpfc_vport_template = {
2116 .module = THIS_MODULE,
2117 .name = LPFC_DRIVER_NAME,
2118 .info = lpfc_info,
2119 .queuecommand = lpfc_queuecommand,
2120 .eh_abort_handler = lpfc_abort_handler,
2121 .eh_device_reset_handler= lpfc_device_reset_handler,
2122 .eh_bus_reset_handler = lpfc_bus_reset_handler,
2123 .slave_alloc = lpfc_slave_alloc,
2124 .slave_configure = lpfc_slave_configure,
2125 .slave_destroy = lpfc_slave_destroy,
2126 .scan_finished = lpfc_scan_finished,
2127 .this_id = -1,
83108bd3 2128 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
3de2a653
JS
2129 .cmd_per_lun = LPFC_CMD_PER_LUN,
2130 .use_clustering = ENABLE_CLUSTERING,
2131 .shost_attrs = lpfc_vport_attrs,
2132 .max_sectors = 0xFFFF,
2133};