/* drivers/scsi/lpfc/lpfc_scsi.c */
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

int _dump_buf_done;

static char *dif_op_str[] = {
	"SCSI_PROT_NORMAL",
	"SCSI_PROT_READ_INSERT",
	"SCSI_PROT_WRITE_STRIP",
	"SCSI_PROT_READ_STRIP",
	"SCSI_PROT_WRITE_INSERT",
	"SCSI_PROT_READ_PASS",
	"SCSI_PROT_WRITE_PASS",
};

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);

static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
				struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

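/*
 * Worked example for the bucketing above (illustrative numbers, not
 * driver defaults): with bucket_base = 0 and bucket_step = 10 ms, a
 * 27 ms completion lands in linear bucket i = (27 + 10 - 1 - 0) / 10 = 3,
 * while the power-of-2 path stops at the first i satisfying
 * 27 <= (1 << i) * 10, i.e. i = 2.
 */
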
/**
 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: Lun number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
		struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp,
		uint32_t lun,
		uint32_t old_val,
		uint32_t new_val)
{
	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;

	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
		FC_REG_SCSI_EVENT;
	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
		LPFC_EVENT_VARQUEDEPTH;

	/* Report all luns with change in queue depth */
	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
			&ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
	}

	fast_path_evt->un.queue_depth_evt.oldval = old_val;
	fast_path_evt->un.queue_depth_evt.newval = new_val;
	fast_path_evt->vport = vport;

	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);

	return;
}

/**
 * lpfc_change_queue_depth - Alter scsi device queue depth
 * @sdev: Pointer to the scsi device on which to change the queue depth.
 * @qdepth: New queue depth to set the sdev to.
 * @reason: The reason for the queue depth change.
 *
 * This function is called by the midlayer and the LLD to alter the queue
 * depth for a scsi device. This function sets the queue depth to the new
 * value and sends an event out to log the queue depth change.
 **/
int
lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	unsigned long new_queue_depth, old_queue_depth;

	old_queue_depth = sdev->queue_depth;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	new_queue_depth = sdev->queue_depth;
	rdata = sdev->hostdata;
	if (rdata)
		lpfc_send_sdev_queuedepth_change_event(phba, vport,
						       rdata->pnode, sdev->lun,
						       old_queue_depth,
						       new_queue_depth);
	return sdev->queue_depth;
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in driver or firmware.
 * This routine posts a WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
 * posts at most 1 event each second. This routine wakes up the worker thread
 * of @phba to process the WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
 * @vport: The virtual port for which this call is being executed.
 * @queue_depth: Current queue depth of the scsi device.
 *
 * This routine posts a WORKER_RAMP_UP_QUEUE event for @vport's @phba. This
 * routine posts at most 1 event every 5 minutes after last_ramp_up_time or
 * last_rsrc_error_time. This routine wakes up the worker thread of @phba
 * to process the WORKER_RAMP_UP_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			uint32_t queue_depth)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;
	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (time_before(jiffies,
			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
	    time_before(jiffies,
			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. This routine reduces the queue depth for all scsi devices on
 * each vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				lpfc_change_queue_depth(sdev, new_queue_depth,
							SCSI_QDEPTH_DEFAULT);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

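/*
 * Worked example for the ramp-down arithmetic above (illustrative
 * numbers): with sdev->queue_depth = 32, num_rsrc_err = 8 and
 * num_cmd_success = 24, the scaled cut is 32 * 8 / 32 = 8, so the
 * depth drops from 32 to 24. If the scaled cut rounds down to zero,
 * the depth still shrinks by one.
 */
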
/**
 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
 * worker thread. This routine increases the queue depth for all scsi devices
 * on each vport associated with @phba by 1. This routine also resets @phba
 * num_rsrc_err and num_cmd_success to zero.
 **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				lpfc_change_queue_depth(sdev,
							sdev->queue_depth+1,
							SCSI_QDEPTH_RAMP_UP);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * with EEH when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt;

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
					psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
							sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
						putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
						putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}

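/*
 * Layout sketch for the single DMA allocation built above (offsets
 * follow from the pointer math in the function, not from a separate
 * spec):
 *
 *   psb->data + 0                                        fcp_cmnd
 *   psb->data + sizeof(struct fcp_cmnd)                  fcp_rsp
 *   psb->data + sizeof(fcp_cmnd) + sizeof(fcp_rsp)       bpl[]
 *
 * bpl[0] and bpl[1] always describe the FCP_CMD and FCP_RSP regions;
 * queuecommand fills in the remaining data BDEs.
 */
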
/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_release_scsi_buf_s4(phba, psb);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

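/*
 * Note on the lookup order above (as implemented): the aborted XRI is
 * first matched against buffers already parked on the abort list;
 * failing that, the iotag table is scanned so an in-flight command can
 * at least have its exch_busy flag cleared.
 */
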
/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost the Scsi buffers sgl pages as block
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * reposts them to the HBA by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb;
	int index, status, bcnt = 0, rcnt = 0, rc = 0;
	LIST_HEAD(sblist);

	for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
		psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
		if (psb) {
			/* Remove from SCSI buffer list */
			list_del(&psb->list);
			/* Add it to a local SCSI buffer list */
			list_add_tail(&psb->list, &sblist);
			if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				bcnt = rcnt;
				rcnt = 0;
			}
		} else
			/* A hole present in the XRI array, need to skip */
			bcnt = rcnt;

		if (index == phba->sli4_hba.scsi_xri_cnt - 1)
			/* End of XRI array for SCSI buffer, complete */
			bcnt = rcnt;

		/* Continue until collect up to a nembed page worth of sgls */
		if (bcnt == 0)
			continue;
		/* Now, post the SCSI buffer list sgls as a block */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
		/* Reset SCSI buffer count for next round of posting */
		bcnt = 0;
		while (!list_empty(&sblist)) {
			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
					 list);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
				rc++;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
		}
	}
	return rc;
}

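/*
 * Batching note (from the loop above): SGLs are posted in blocks of up
 * to LPFC_NEMBED_MBOX_SGL_CNT entries; a hole in the XRI array or the
 * end of the array flushes whatever has been collected so far.
 */
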
/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-4 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
	uint16_t iotag, last_xritag = NO_XRI;
	int status = 0, index;
	int bcnt;
	int non_sequential_xri = 0;
	LIST_HEAD(sblist);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci bus
		 * space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
						GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
		if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		if (last_xritag != NO_XRI
			&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
			non_sequential_xri = 1;
		} else
			list_add_tail(&psb->list, &sblist);
		last_xritag = psb->cur_iocbq.sli4_xritag;

		index = phba->sli4_hba.scsi_xri_cnt++;
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));

		/* Initialize local short-hand pointers. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd =
			(psb->dma_handle + phba->cfg_sg_dma_buf_size)
			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* Setup the physical region for the FCP RSP */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
		/* setting the BLP size to 2 * sizeof BDE may not be correct.
		 * We are setting the bpl to point to our sgl. An sgl's
		 * entries are 16 bytes, a bpl entries are 12 bytes.
		 */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		psb->dma_phys_bpl = pdma_phys_bpl;
		phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
		if (non_sequential_xri) {
			status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
			break;
		}
	}
	if (bcnt) {
		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
		/* Reset SCSI buffer count for next round of posting */
		while (!list_empty(&sblist)) {
			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
				 list);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
		}
	}

	return bcnt + non_sequential_xri;
}

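/*
 * Layout note (inferred from the pointer math above): unlike the SLI-3
 * allocator, which places fcp_cmnd and fcp_rsp at the start of the DMA
 * buffer, the SLI-4 path puts the SGL first and packs fcp_cmnd and
 * fcp_rsp at the very end of the cfg_sg_dma_buf_size region.
 */
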
/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}

/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
		lpfc_cmd->prot_seg_cnt = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}

/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of @phba
 * lpfc_scsi_buf_list list. For SLI4, XRIs are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	if (psb->exch_busy) {
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
	} else {
		spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	}
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and formats the bdes. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}

/*
 * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 */
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
	uint8_t ret = 0;

	if (guard_type == SHOST_DIX_GUARD_IP) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9063 BLKGRD: Bad op/guard:%d/%d combination\n",
					scsi_get_prot_op(sc), guard_type);
			ret = 1;
			break;

		}
	} else if (guard_type == SHOST_DIX_GUARD_CRC) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*txop = BG_OP_IN_CRC_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9075 BLKGRD: Bad op/guard:%d/%d combination\n",
					scsi_get_prot_op(sc), guard_type);
			ret = 1;
			break;
		}
	} else {
		/* unsupported format */
		BUG();
	}

	return ret;
}

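/*
 * Quick reference for the mapping implemented above (derived from the
 * switch statements, not from a separate spec): with SHOST_DIX_GUARD_IP
 * the host-side guard is an IP checksum that the HBA converts to/from a
 * CRC on the wire; with SHOST_DIX_GUARD_CRC the host already supplies a
 * CRC, so READ_PASS/WRITE_PASS become CRC-in/CRC-out on both sides.
 */
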
struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}

/**
 * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
 * @sc: in: SCSI command
 * @apptagmask: out: app tag mask
 * @apptagval: out: app tag value
 * @reftag: out: ref tag (reference tag)
 *
 * Description:
 *   Extract DIF parameters from the command if possible.  Otherwise,
 *   use default parameters.
 *
 **/
static inline void
lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
		uint16_t *apptagval, uint32_t *reftag)
{
	struct scsi_dif_tuple *spt;
	unsigned char op = scsi_get_prot_op(sc);
	unsigned int protcnt = scsi_prot_sg_count(sc);
	static int cnt;

	if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
	    op == SCSI_PROT_WRITE_PASS)) {

		cnt++;
		spt = page_address(sg_page(scsi_prot_sglist(sc))) +
			scsi_prot_sglist(sc)[0].offset;
		*apptagmask = 0;
		*apptagval = 0;
		*reftag = cpu_to_be32(spt->ref_tag);

	} else {
		/* SBC defines ref tag to be lower 32bits of LBA */
		*reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
		*apptagmask = 0;
		*apptagval = 0;
	}
}

/*
 * This function sets up buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *				+-------------------------+
 *   start of prot group  -->	|          PDE_5          |
 *				+-------------------------+
 *				|          PDE_6          |
 *				+-------------------------+
 *				|         Data BDE        |
 *				+-------------------------+
 *				|more Data BDE's ... (opt)|
 *				+-------------------------+
 *
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * Note: Data s/g buffers have been dma mapped
 */
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
	unsigned blksize;
	uint32_t reftag;
	uint16_t apptagmask, apptagval;
	uint8_t txop, rxop;

	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde */
	blksize = lpfc_cmd_blksize(sc);
	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
	pde5->reftag = reftag;

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);
	if (datadir == DMA_FROM_DEVICE) {
		bf_set(pde6_ce, pde6, 1);
		bf_set(pde6_re, pde6, 1);
		bf_set(pde6_ae, pde6, 1);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_apptagval, pde6, apptagval);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}

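/*
 * Sizing sketch (follows directly from the code above): a command with
 * N mapped data segments consumes N + 2 BPL entries here -- one PDE_5,
 * one PDE_6, then one BDE per data segment.
 */
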
/*
 * This function sets up buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF_BUF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data.  The HBA can then be instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *					+-------------------------+
 *   start of first prot group  -->	|          PDE_5          |
 *					+-------------------------+
 *					|          PDE_6          |
 *					+-------------------------+
 *					|    PDE_7 (Prot BDE)     |
 *					+-------------------------+
 *					|        Data BDE         |
 *					+-------------------------+
 *					|more Data BDE's ... (opt)|
 *					+-------------------------+
 *   start of new prot group  -->	|          PDE_5          |
 *					+-------------------------+
 *					|          ...            |
 *					+-------------------------+
 *
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 */
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct ulp_bde64 *prot_bde = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset, protgroup_len;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint16_t apptagmask, apptagval;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);

	split_offset = 0;
	do {
		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
		pde5->reftag = reftag;

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);
		bf_set(pde6_ce, pde6, 1);
		bf_set(pde6_re, pde6, 1);
		bf_set(pde6_ae, pde6, 1);
		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_apptagval, pde6, apptagval);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		prot_bde = (struct ulp_bde64 *) bpl;
		protphysaddr = sg_dma_address(sgpe);
		prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
		protgroup_len = sg_dma_len(sgpe);

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		prot_bde->tus.f.bdeSize = protgroup_len;
		prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR;
		prot_bde->tus.w = le32_to_cpu(prot_bde->tus.w);

		curr_prot++;
		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:
	return num_bde;
}
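
/*
 * Worked example for the protection-group walk above (illustrative
 * numbers): with 512-byte blocks, a 32-byte protection segment covers
 * protgrp_blks = 32 / 8 = 4 blocks, i.e. protgrp_bytes = 2048 bytes of
 * data, so a 4096-byte data s/g entry is split across two protection
 * groups, with split_offset marking where the second group resumes
 * inside that same entry.
 */
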
/*
 * Given a SCSI command that supports DIF, determine the composition of
 * protection groups involved in setting up buffer lists
 *
 * Returns: one of the LPFC_PG_TYPE_* values (for both read and write)
 */
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	int ret = LPFC_PG_TYPE_INVALID;
	unsigned char op = scsi_get_prot_op(sc);

	switch (op) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9021 Unsupported protection op:%d\n", op);
		break;
	}

	return ret;
}

1595/*
1596 * This is the protection/DIF aware version of
1597 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
1598 * two functions eventually, but for now, it's here
1599 */
1600static int
1601lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1602 struct lpfc_scsi_buf *lpfc_cmd)
1603{
1604 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1605 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1606 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1607 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1608 uint32_t num_bde = 0;
1609 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
1610 int prot_group_type = 0;
1611 int diflen, fcpdl;
1612 unsigned blksize;
1613
1614 /*
1615 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
1616 * fcp_rsp regions to the first data bde entry
1617 */
1618 bpl += 2;
1619 if (scsi_sg_count(scsi_cmnd)) {
1620 /*
1621 * The driver stores the segment count returned from pci_map_sg
1622 * because this a count of dma-mappings used to map the use_sg
1623 * pages. They are not guaranteed to be the same for those
1624 * architectures that implement an IOMMU.
1625 */
1626 datasegcnt = dma_map_sg(&phba->pcidev->dev,
1627 scsi_sglist(scsi_cmnd),
1628 scsi_sg_count(scsi_cmnd), datadir);
1629 if (unlikely(!datasegcnt))
1630 return 1;
1631
1632 lpfc_cmd->seg_cnt = datasegcnt;
1633 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1634 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1635 "9067 BLKGRD: %s: Too many sg segments"
1636 " from dma_map_sg. Config %d, seg_cnt"
1637 " %d\n",
1638 __func__, phba->cfg_sg_seg_cnt,
1639 lpfc_cmd->seg_cnt);
1640 scsi_dma_unmap(scsi_cmnd);
1641 return 1;
1642 }
1643
1644 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
1645
1646 switch (prot_group_type) {
1647 case LPFC_PG_TYPE_NO_DIF:
1648 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
1649 datasegcnt);
c9404c9c 1650 /* we should have 2 or more entries in buffer list */
1651 if (num_bde < 2)
1652 goto err;
1653 break;
1654 case LPFC_PG_TYPE_DIF_BUF:{
1655 /*
1656 * This type indicates that protection buffers are
1657 * passed to the driver, so they need to be prepared
1658 * for DMA
1659 */
1660 protsegcnt = dma_map_sg(&phba->pcidev->dev,
1661 scsi_prot_sglist(scsi_cmnd),
1662 scsi_prot_sg_count(scsi_cmnd), datadir);
1663 if (unlikely(!protsegcnt)) {
1664 scsi_dma_unmap(scsi_cmnd);
1665 return 1;
1666 }
1667
1668 lpfc_cmd->prot_seg_cnt = protsegcnt;
1669 if (lpfc_cmd->prot_seg_cnt
1670 > phba->cfg_prot_sg_seg_cnt) {
1671 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1672 "9068 BLKGRD: %s: Too many prot sg "
1673 "segments from dma_map_sg. Config %d,"
e2a0a9d6
JS
1674 "prot_seg_cnt %d\n", __func__,
1675 phba->cfg_prot_sg_seg_cnt,
1676 lpfc_cmd->prot_seg_cnt);
1677 dma_unmap_sg(&phba->pcidev->dev,
1678 scsi_prot_sglist(scsi_cmnd),
1679 scsi_prot_sg_count(scsi_cmnd),
1680 datadir);
1681 scsi_dma_unmap(scsi_cmnd);
1682 return 1;
1683 }
1684
1685 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
1686 datasegcnt, protsegcnt);
c9404c9c 1687 /* we should have 3 or more entries in buffer list */
1688 if (num_bde < 3)
1689 goto err;
1690 break;
1691 }
1692 case LPFC_PG_TYPE_INVALID:
1693 default:
1694 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1695 "9022 Unexpected protection group %i\n",
1696 prot_group_type);
1697 return 1;
1698 }
1699 }
1700
1701 /*
1702 * Finish initializing those IOCB fields that are dependent on the
1703 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
1704 * reinitialized since all iocb memory resources are used many times
1705 * for transmit, receive, and continuation bpl's.
1706 */
1707 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
1708 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
1709 iocb_cmd->ulpBdeCount = 1;
1710 iocb_cmd->ulpLe = 1;
1711
1712 fcpdl = scsi_bufflen(scsi_cmnd);
1713
1714 if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
1715 /*
1716 * We are in DIF Type 1 mode
1717 * Every data block has an 8-byte DIF (trailer)
1718 * attached to it. Must adjust the FCP data length
1719 */
1720 blksize = lpfc_cmd_blksize(scsi_cmnd);
1721 diflen = (fcpdl / blksize) * 8;
1722 fcpdl += diflen;
1723 }
1724 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
1725
1726 /*
1727 * Due to difference in data length between DIF/non-DIF paths,
1728 * we need to set word 4 of IOCB here
1729 */
1730 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
1731
dea3101e 1732 return 0;
1733err:
1734 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1735 "9023 Could not setup all needed BDE's"
1736 "prot_group_type=%d, num_bde=%d\n",
1737 prot_group_type, num_bde);
1738 return 1;
1739}
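
/*
 * Illustrative sketch, not part of the driver: the DIF Type 1 length
 * adjustment above adds one 8-byte protection trailer per logical block
 * to the wire-level FCP data length. For example, a 4096-byte transfer
 * with 512-byte blocks becomes 4096 + (4096 / 512) * 8 = 4160 bytes.
 * The helper name below is hypothetical.
 */
static inline uint32_t
lpfc_example_dif_type1_fcpdl(uint32_t fcpdl, unsigned int blksize)
{
	/* one 8-byte DIF tuple is appended to every logical block */
	return fcpdl + (fcpdl / blksize) * 8;
}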
1740
1741/*
1742 * This function checks for BlockGuard errors detected by
1743 * the HBA. In case of errors, the ASC/ASCQ fields in the
1744 * sense buffer will be set accordingly, paired with
1745 * ILLEGAL_REQUEST to signal to the kernel that the HBA
1746 * detected corruption.
1747 *
1748 * Returns:
1749 * 0 - No error found
1750 * 1 - BlockGuard error found
1751 * -1 - Internal error (bad profile, ...etc)
1752 */
1753static int
1754lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1755 struct lpfc_iocbq *pIocbOut)
1756{
1757 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
1758 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
1759 int ret = 0;
1760 uint32_t bghm = bgf->bghm;
1761 uint32_t bgstat = bgf->bgstat;
1762 uint64_t failing_sector = 0;
1763
1764 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
1765 " 0x%x lba 0x%llx blk cnt 0x%x "
e2a0a9d6 1766 "bgstat=0x%x bghm=0x%x\n",
87b5c328 1767 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
83096ebf 1768 blk_rq_sectors(cmd->request), bgstat, bghm);
1769
1770 spin_lock(&_dump_buf_lock);
1771 if (!_dump_buf_done) {
1772 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
1773 " Data for %u blocks to debugfs\n",
e2a0a9d6 1774 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
6a9c52cf 1775 lpfc_debug_save_data(phba, cmd);
1776
1777 /* If we have a prot sgl, save the DIF buffer */
1778 if (lpfc_prot_group_type(phba, cmd) ==
1779 LPFC_PG_TYPE_DIF_BUF) {
1780 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
1781 "Saving DIF for %u blocks to debugfs\n",
1782 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1783 lpfc_debug_save_dif(phba, cmd);
1784 }
1785
1786 _dump_buf_done = 1;
1787 }
1788 spin_unlock(&_dump_buf_lock);
1789
1790 if (lpfc_bgs_get_invalid_prof(bgstat)) {
1791 cmd->result = ScsiResult(DID_ERROR, 0);
1792 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
1793 " BlockGuard profile. bgstat:0x%x\n",
1794 bgstat);
1795 ret = (-1);
1796 goto out;
1797 }
1798
1799 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
1800 cmd->result = ScsiResult(DID_ERROR, 0);
1801 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
1802 "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
1803 bgstat);
1804 ret = (-1);
1805 goto out;
1806 }
1807
1808 if (lpfc_bgs_get_guard_err(bgstat)) {
1809 ret = 1;
1810
1811 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1812 0x10, 0x1);
1c9fbafc 1813 cmd->result = DRIVER_SENSE << 24
1814 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1815 phba->bg_guard_err_cnt++;
1816 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1817 "9055 BLKGRD: guard_tag error\n");
1818 }
1819
1820 if (lpfc_bgs_get_reftag_err(bgstat)) {
1821 ret = 1;
1822
1823 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1824 0x10, 0x3);
1c9fbafc 1825 cmd->result = DRIVER_SENSE << 24
1826 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1827
1828 phba->bg_reftag_err_cnt++;
1829 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1830 "9056 BLKGRD: ref_tag error\n");
1831 }
1832
1833 if (lpfc_bgs_get_apptag_err(bgstat)) {
1834 ret = 1;
1835
1836 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1837 0x10, 0x2);
1c9fbafc 1838 cmd->result = DRIVER_SENSE << 24
1839 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1840
1841 phba->bg_apptag_err_cnt++;
1842 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1843 "9061 BLKGRD: app_tag error\n");
1844 }
1845
1846 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
1847 /*
1848 * setup sense data descriptor 0 per SPC-4 as an information
1849 * field, and put the failing LBA in it
1850 */
1851 cmd->sense_buffer[8] = 0; /* Information */
1852 cmd->sense_buffer[9] = 0xa; /* Add. length */
2344b5b6 1853 bghm /= cmd->device->sector_size;
1854
1855 failing_sector = scsi_get_lba(cmd);
1856 failing_sector += bghm;
1857
1858 put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
1859 }
1860
1861 if (!ret) {
1862 /* No error was reported - problem in FW? */
1863 cmd->result = ScsiResult(DID_ERROR, 0);
1864 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1865 "9057 BLKGRD: no errors reported!\n");
1866 }
1867
1868out:
1869 return ret;
dea3101e 1870}
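
/*
 * Sketch for illustration only (the helper name is hypothetical): the
 * sense data built above follows the T10-DIF convention of
 * ILLEGAL_REQUEST with ASC 0x10, where the ASCQ identifies which check
 * failed: 0x1 guard tag, 0x2 application tag, 0x3 reference tag.
 */
static inline uint8_t
lpfc_example_bg_err_ascq(uint32_t bgstat)
{
	if (lpfc_bgs_get_guard_err(bgstat))
		return 0x1;	/* LOGICAL BLOCK GUARD CHECK FAILED */
	if (lpfc_bgs_get_apptag_err(bgstat))
		return 0x2;	/* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
	if (lpfc_bgs_get_reftag_err(bgstat))
		return 0x3;	/* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
	return 0;
}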
1871
1872/**
1873 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
1874 * @phba: The Hba for which this call is being executed.
1875 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1876 *
1877 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
1878 * field of @lpfc_cmd for device with SLI-4 interface spec.
1879 *
1880 * Return codes:
1881 * 1 - Error
1882 * 0 - Success
1883 **/
1884static int
1885lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1886{
1887 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1888 struct scatterlist *sgel = NULL;
1889 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1890 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
1891 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1892 dma_addr_t physaddr;
1893 uint32_t num_bde = 0;
1894 uint32_t dma_len;
1895 uint32_t dma_offset = 0;
1896 int nseg;
1897
1898 /*
1899 * There are three possibilities here - use scatter-gather segment, use
1900 * the single mapping, or neither. Start the lpfc command prep by
1901 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1902 * data bde entry.
1903 */
1904 if (scsi_sg_count(scsi_cmnd)) {
1905 /*
1906 * The driver stores the segment count returned from pci_map_sg
1907 * because this is a count of dma-mappings used to map the use_sg
1908 * pages. They are not guaranteed to be the same for those
1909 * architectures that implement an IOMMU.
1910 */
1911
1912 nseg = scsi_dma_map(scsi_cmnd);
1913 if (unlikely(!nseg))
1914 return 1;
1915 sgl += 1;
1916 /* clear the last flag in the fcp_rsp map entry */
1917 sgl->word2 = le32_to_cpu(sgl->word2);
1918 bf_set(lpfc_sli4_sge_last, sgl, 0);
1919 sgl->word2 = cpu_to_le32(sgl->word2);
1920 sgl += 1;
1921
1922 lpfc_cmd->seg_cnt = nseg;
1923 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1924 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
1925 " %s: Too many sg segments from "
1926 "dma_map_sg. Config %d, seg_cnt %d\n",
1927 __func__, phba->cfg_sg_seg_cnt,
1928 lpfc_cmd->seg_cnt);
1929 scsi_dma_unmap(scsi_cmnd);
1930 return 1;
1931 }
1932
1933 /*
1934 * The driver established a maximum scatter-gather segment count
1935 * during probe that limits the number of sg elements in any
1936 * single scsi command. Just run through the seg_cnt and format
1937 * the sge's.
1938 * When using SLI-3 the driver will try to fit all the BDEs into
1939 * the IOCB. If it can't then the BDEs get added to a BPL as it
1940 * does for SLI-2 mode.
1941 */
1942 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
1943 physaddr = sg_dma_address(sgel);
1944 dma_len = sg_dma_len(sgel);
1945 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
1946 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
1947 if ((num_bde + 1) == nseg)
1948 bf_set(lpfc_sli4_sge_last, sgl, 1);
1949 else
1950 bf_set(lpfc_sli4_sge_last, sgl, 0);
1951 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1952 sgl->word2 = cpu_to_le32(sgl->word2);
28baac74 1953 sgl->sge_len = cpu_to_le32(dma_len);
1954 dma_offset += dma_len;
1955 sgl++;
1956 }
1957 } else {
1958 sgl += 1;
1959 /* clear the last flag in the fcp_rsp map entry */
1960 sgl->word2 = le32_to_cpu(sgl->word2);
1961 bf_set(lpfc_sli4_sge_last, sgl, 1);
1962 sgl->word2 = cpu_to_le32(sgl->word2);
1963 }
1964
1965 /*
1966 * Finish initializing those IOCB fields that are dependent on the
1967 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
1968 * explicitly reinitialized since all iocb memory resources are
1969 * reused.
1970 */
1971 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
1972
1973 /*
1974 * Due to difference in data length between DIF/non-DIF paths,
1975 * we need to set word 4 of IOCB here
1976 */
1977 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1978 return 0;
1979}
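
/*
 * Simplified sketch of the SGE "last"-flag pattern used above
 * (hypothetical helper, illustration only): word2 is kept little-endian
 * in memory, so each update swaps it to CPU order, sets or clears the
 * bit, and swaps it back; only the final entry may carry the last bit.
 */
static void
lpfc_example_mark_last_sge(struct sli4_sge *sgl, int nseg)
{
	int i;

	for (i = 0; i < nseg; i++, sgl++) {
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, (i + 1 == nseg) ? 1 : 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}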
1980
1981/**
1982 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
1983 * @phba: The Hba for which this call is being executed.
1984 * @lpfc_cmd: The scsi buffer which is going to be mapped.
1985 *
1986 * This routine wraps the actual DMA mapping function pointer from the
1987 * lpfc_hba struct.
1988 *
1989 * Return codes:
1990 * 1 - Error
1991 * 0 - Success
1992 **/
1993static inline int
1994lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1995{
1996 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
1997}
1998
ea2151b4 1999/**
3621a710 2000 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
2001 * @phba: Pointer to hba context object.
2002 * @vport: Pointer to vport object.
2003 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
2004 * @rsp_iocb: Pointer to response iocb object which reported error.
2005 *
2006 * This function posts an event when there is a SCSI command reporting
2007 * error from the scsi device.
2008 **/
2009static void
2010lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
2011 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
2012 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
2013 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
2014 uint32_t resp_info = fcprsp->rspStatus2;
2015 uint32_t scsi_status = fcprsp->rspStatus3;
2016 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
2017 struct lpfc_fast_path_event *fast_path_evt = NULL;
2018 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
2019 unsigned long flags;
2020
2021 /* If there is a queue full or busy condition, send a scsi event */
2022 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
2023 (cmnd->result == SAM_STAT_BUSY)) {
2024 fast_path_evt = lpfc_alloc_fast_evt(phba);
2025 if (!fast_path_evt)
2026 return;
2027 fast_path_evt->un.scsi_evt.event_type =
2028 FC_REG_SCSI_EVENT;
2029 fast_path_evt->un.scsi_evt.subcategory =
2030 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
2031 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
2032 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
2033 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
2034 &pnode->nlp_portname, sizeof(struct lpfc_name));
2035 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
2036 &pnode->nlp_nodename, sizeof(struct lpfc_name));
2037 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
2038 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
2039 fast_path_evt = lpfc_alloc_fast_evt(phba);
2040 if (!fast_path_evt)
2041 return;
2042 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
2043 FC_REG_SCSI_EVENT;
2044 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
2045 LPFC_EVENT_CHECK_COND;
2046 fast_path_evt->un.check_cond_evt.scsi_event.lun =
2047 cmnd->device->lun;
2048 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
2049 &pnode->nlp_portname, sizeof(struct lpfc_name));
2050 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
2051 &pnode->nlp_nodename, sizeof(struct lpfc_name));
2052 fast_path_evt->un.check_cond_evt.sense_key =
2053 cmnd->sense_buffer[2] & 0xf;
2054 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
2055 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
2056 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
2057 fcpi_parm &&
2058 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
2059 ((scsi_status == SAM_STAT_GOOD) &&
2060 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
2061 /*
2062 * If the status is good or the resid does not match fcpi_parm, and
2063 * there is a valid fcpi_parm, then there is a read_check error
2064 */
2065 fast_path_evt = lpfc_alloc_fast_evt(phba);
2066 if (!fast_path_evt)
2067 return;
2068 fast_path_evt->un.read_check_error.header.event_type =
2069 FC_REG_FABRIC_EVENT;
2070 fast_path_evt->un.read_check_error.header.subcategory =
2071 LPFC_EVENT_FCPRDCHKERR;
2072 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
2073 &pnode->nlp_portname, sizeof(struct lpfc_name));
2074 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
2075 &pnode->nlp_nodename, sizeof(struct lpfc_name));
2076 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
2077 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
2078 fast_path_evt->un.read_check_error.fcpiparam =
2079 fcpi_parm;
2080 } else
2081 return;
2082
2083 fast_path_evt->vport = vport;
2084 spin_lock_irqsave(&phba->hbalock, flags);
2085 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
2086 spin_unlock_irqrestore(&phba->hbalock, flags);
2087 lpfc_worker_wake_up(phba);
2088 return;
2089}
2090
2091/**
f1126688 2092 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3772a991 2093 * @phba: The HBA for which this call is being executed.
2094 * @psb: The scsi buffer which is going to be un-mapped.
2095 *
2096 * This routine does DMA un-mapping of the scatter gather list of the scsi
3772a991 2097 * command held in @psb, for a device with SLI-3 interface spec.
9bad7671 2098 **/
bcf4dbfa 2099static void
f1126688 2100lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
2101{
2102 /*
2103 * There are only two special cases to consider. (1) the scsi command
2104 * requested scatter-gather usage or (2) the scsi command allocated
2105 * a request buffer, but did not request use_sg. There is a third
2106 * case, but it does not require resource deallocation.
2107 */
2108 if (psb->seg_cnt > 0)
2109 scsi_dma_unmap(psb->pCmd);
2110 if (psb->prot_seg_cnt > 0)
2111 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
2112 scsi_prot_sg_count(psb->pCmd),
2113 psb->pCmd->sc_data_direction);
2114}
2115
9bad7671 2116/**
3621a710 2117 * lpfc_handle_fcp_err - FCP response handler
2118 * @vport: The virtual port for which this call is being executed.
2119 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2120 * @rsp_iocb: The response IOCB which contains FCP error.
2121 *
2122 * This routine is called to process response IOCB with status field
2123 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
2124 * based upon SCSI and FCP error.
2125 **/
dea3101e 2126static void
2127lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2128 struct lpfc_iocbq *rsp_iocb)
dea3101e 2129{
2130 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
2131 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
2132 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
7054a606 2133 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
dea3101e 2134 uint32_t resp_info = fcprsp->rspStatus2;
2135 uint32_t scsi_status = fcprsp->rspStatus3;
c7743956 2136 uint32_t *lp;
dea3101e 2137 uint32_t host_status = DID_OK;
2138 uint32_t rsplen = 0;
c7743956 2139 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
dea3101e 2140
ea2151b4 2141
dea3101e 2142 /*
2143 * If this is a task management command, there is no
2144 * scsi packet associated with this lpfc_cmd. The driver
2145 * consumes it.
2146 */
2147 if (fcpcmd->fcpCntl2) {
2148 scsi_status = 0;
2149 goto out;
2150 }
2151
2152 if (resp_info & RSP_LEN_VALID) {
2153 rsplen = be32_to_cpu(fcprsp->rspRspLen);
e40a02c1 2154 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
2155 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2156 "2719 Invalid response length: "
2157 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
2158 cmnd->device->id,
2159 cmnd->device->lun, cmnd->cmnd[0],
2160 rsplen);
2161 host_status = DID_ERROR;
2162 goto out;
2163 }
2164 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
2165 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2166 "2757 Protocol failure detected during "
2167 "processing of FCP I/O op: "
2168 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
2169 cmnd->device->id,
2170 cmnd->device->lun, cmnd->cmnd[0],
2171 fcprsp->rspInfo3);
2172 host_status = DID_ERROR;
2173 goto out;
2174 }
2175 }
2176
2177 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
2178 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
2179 if (snslen > SCSI_SENSE_BUFFERSIZE)
2180 snslen = SCSI_SENSE_BUFFERSIZE;
2181
2182 if (resp_info & RSP_LEN_VALID)
2183 rsplen = be32_to_cpu(fcprsp->rspRspLen);
2184 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
2185 }
2186 lp = (uint32_t *)cmnd->sense_buffer;
2187
2188 if (!scsi_status && (resp_info & RESID_UNDER))
2189 logit = LOG_FCP;
2190
e8b62011 2191 lpfc_printf_vlog(vport, KERN_WARNING, logit,
e2a0a9d6 2192 "9024 FCP command x%x failed: x%x SNS x%x x%x "
2193 "Data: x%x x%x x%x x%x x%x\n",
2194 cmnd->cmnd[0], scsi_status,
2195 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
2196 be32_to_cpu(fcprsp->rspResId),
2197 be32_to_cpu(fcprsp->rspSnsLen),
2198 be32_to_cpu(fcprsp->rspRspLen),
2199 fcprsp->rspInfo3);
dea3101e 2200
a0b4f78f 2201 scsi_set_resid(cmnd, 0);
dea3101e 2202 if (resp_info & RESID_UNDER) {
a0b4f78f 2203 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
dea3101e 2204
e8b62011 2205 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
e2a0a9d6 2206 "9025 FCP Read Underrun, expected %d, "
2207 "residual %d Data: x%x x%x x%x\n",
2208 be32_to_cpu(fcpcmd->fcpDl),
2209 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
2210 cmnd->underflow);
dea3101e 2211
2212 /*
2213 * If there is an underrun, check whether the underrun reported by
2214 * the storage array is the same as the underrun reported by the HBA.
2215 * If they are not the same, there is a dropped frame.
2216 */
2217 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
2218 fcpi_parm &&
a0b4f78f 2219 (scsi_get_resid(cmnd) != fcpi_parm)) {
2220 lpfc_printf_vlog(vport, KERN_WARNING,
2221 LOG_FCP | LOG_FCP_ERROR,
e2a0a9d6 2222 "9026 FCP Read Check Error "
2223 "and Underrun Data: x%x x%x x%x x%x\n",
2224 be32_to_cpu(fcpcmd->fcpDl),
2225 scsi_get_resid(cmnd), fcpi_parm,
2226 cmnd->cmnd[0]);
a0b4f78f 2227 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
2228 host_status = DID_ERROR;
2229 }
dea3101e 2230 /*
2231 * The cmnd->underflow is the minimum number of bytes that must
2232 * be transferred for this command. Provided a sense condition
2233 * is not present, make sure the actual amount transferred is at
2234 * least the underflow value or fail.
2235 */
2236 if (!(resp_info & SNS_LEN_VALID) &&
2237 (scsi_status == SAM_STAT_GOOD) &&
2238 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
2239 < cmnd->underflow)) {
e8b62011 2240 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
e2a0a9d6 2241 "9027 FCP command x%x residual "
2242 "underrun converted to error "
2243 "Data: x%x x%x x%x\n",
66dbfbe6 2244 cmnd->cmnd[0], scsi_bufflen(cmnd),
e8b62011 2245 scsi_get_resid(cmnd), cmnd->underflow);
dea3101e 2246 host_status = DID_ERROR;
2247 }
2248 } else if (resp_info & RESID_OVER) {
e8b62011 2249 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
e2a0a9d6 2250 "9028 FCP command x%x residual overrun error. "
e4e74273 2251 "Data: x%x x%x\n", cmnd->cmnd[0],
e8b62011 2252 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
dea3101e 2253 host_status = DID_ERROR;
2254
2255 /*
2256 * Check SLI validation that all the transfer was actually done
2257 * (fcpi_parm should be zero). Apply check only to reads.
2258 */
2259 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
2260 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
e8b62011 2261 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
e2a0a9d6 2262 "9029 FCP Read Check Error Data: "
2263 "x%x x%x x%x x%x\n",
2264 be32_to_cpu(fcpcmd->fcpDl),
2265 be32_to_cpu(fcprsp->rspResId),
2266 fcpi_parm, cmnd->cmnd[0]);
dea3101e 2267 host_status = DID_ERROR;
a0b4f78f 2268 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
dea3101e 2269 }
2270
2271 out:
2272 cmnd->result = ScsiResult(host_status, scsi_status);
ea2151b4 2273 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
dea3101e 2274}
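
/*
 * Worked example for the underrun checks above (numbers illustrative):
 * a READ with fcpDl = 4096 that the target completes with rspResId = 512
 * leaves scsi_get_resid(cmnd) = 512. If the residual the adapter reports
 * in fcpi_parm differs from that 512, a frame was dropped, so the
 * command is failed with DID_ERROR instead of returning a short read.
 */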
2275
9bad7671 2276/**
3621a710 2277 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
2278 * @phba: The Hba for which this call is being executed.
2279 * @pIocbIn: The command IOCBQ for the scsi cmnd.
3772a991 2280 * @pIocbOut: The response IOCBQ for the scsi cmnd.
2281 *
2282 * This routine assigns scsi command result by looking into response IOCB
2283 * status field appropriately. This routine handles QUEUE FULL condition as
2284 * well by ramping down device queue depth.
2285 **/
dea3101e 2286static void
2287lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2288 struct lpfc_iocbq *pIocbOut)
2289{
2290 struct lpfc_scsi_buf *lpfc_cmd =
2291 (struct lpfc_scsi_buf *) pIocbIn->context1;
2e0fef85 2292 struct lpfc_vport *vport = pIocbIn->vport;
dea3101e 2293 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
2294 struct lpfc_nodelist *pnode = rdata->pnode;
75baf696 2295 struct scsi_cmnd *cmd;
445cf4f4 2296 int result;
a257bf90 2297 struct scsi_device *tmp_sdev;
5ffc266e 2298 int depth;
fa61a54e 2299 unsigned long flags;
ea2151b4 2300 struct lpfc_fast_path_event *fast_path_evt;
75baf696 2301 struct Scsi_Host *shost;
a257bf90 2302 uint32_t queue_depth, scsi_id;
dea3101e 2303
2304 /* Sanity check on return of outstanding command */
2305 if (!(lpfc_cmd->pCmd))
2306 return;
2307 cmd = lpfc_cmd->pCmd;
2308 shost = cmd->device->host;
2309
dea3101e 2310 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
2311 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
2312 /* pick up SLI4 exchange busy status from HBA */
2313 lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
2314
2315 if (pnode && NLP_CHK_NODE_ACT(pnode))
2316 atomic_dec(&pnode->cmd_pending);
dea3101e 2317
2318 if (lpfc_cmd->status) {
2319 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
2320 (lpfc_cmd->result & IOERR_DRVR_MASK))
2321 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2322 else if (lpfc_cmd->status >= IOSTAT_CNT)
2323 lpfc_cmd->status = IOSTAT_DEFAULT;
2324
e8b62011 2325 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
e2a0a9d6 2326 "9030 FCP cmd x%x failed <%d/%d> "
2327 "status: x%x result: x%x Data: x%x x%x\n",
2328 cmd->cmnd[0],
2329 cmd->device ? cmd->device->id : 0xffff,
2330 cmd->device ? cmd->device->lun : 0xffff,
2331 lpfc_cmd->status, lpfc_cmd->result,
2332 pIocbOut->iocb.ulpContext,
2333 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
dea3101e 2334
2335 switch (lpfc_cmd->status) {
2336 case IOSTAT_FCP_RSP_ERROR:
2337 /* Call FCP RSP handler to determine result */
2e0fef85 2338 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
dea3101e 2339 break;
2340 case IOSTAT_NPORT_BSY:
2341 case IOSTAT_FABRIC_BSY:
0f1f53a7 2342 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
2343 fast_path_evt = lpfc_alloc_fast_evt(phba);
2344 if (!fast_path_evt)
2345 break;
2346 fast_path_evt->un.fabric_evt.event_type =
2347 FC_REG_FABRIC_EVENT;
2348 fast_path_evt->un.fabric_evt.subcategory =
2349 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
2350 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
2351 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2352 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
2353 &pnode->nlp_portname,
2354 sizeof(struct lpfc_name));
2355 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
2356 &pnode->nlp_nodename,
2357 sizeof(struct lpfc_name));
2358 }
2359 fast_path_evt->vport = vport;
2360 fast_path_evt->work_evt.evt =
2361 LPFC_EVT_FASTPATH_MGMT_EVT;
2362 spin_lock_irqsave(&phba->hbalock, flags);
2363 list_add_tail(&fast_path_evt->work_evt.evt_listp,
2364 &phba->work_list);
2365 spin_unlock_irqrestore(&phba->hbalock, flags);
2366 lpfc_worker_wake_up(phba);
dea3101e 2367 break;
92d7f7b0 2368 case IOSTAT_LOCAL_REJECT:
d7c255b2 2369 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
92d7f7b0 2370 lpfc_cmd->result == IOERR_NO_RESOURCES ||
2371 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
2372 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
92d7f7b0 2373 cmd->result = ScsiResult(DID_REQUEUE, 0);
58da1ffb 2374 break;
2375 }
2376
2377 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
2378 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
2379 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
2380 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2381 /*
2382 * This is a response for a BG enabled
2383 * cmd. Parse BG error
2384 */
2385 lpfc_parse_bg_err(phba, lpfc_cmd,
2386 pIocbOut);
2387 break;
2388 } else {
2389 lpfc_printf_vlog(vport, KERN_WARNING,
2390 LOG_BG,
2391 "9031 non-zero BGSTAT "
6a9c52cf 2392 "on unprotected cmd\n");
2393 }
2394 }
2395
2396 /* else: fall through */
dea3101e 2397 default:
2398 cmd->result = ScsiResult(DID_ERROR, 0);
2399 break;
2400 }
2401
58da1ffb 2402 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
19a7b4ae 2403 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
2404 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
2405 SAM_STAT_BUSY);
dea3101e 2406 } else {
2407 cmd->result = ScsiResult(DID_OK, 0);
2408 }
2409
2410 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
2411 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
2412
2413 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2414 "0710 Iodone <%d/%d> cmd %p, error "
2415 "x%x SNS x%x x%x Data: x%x x%x\n",
2416 cmd->device->id, cmd->device->lun, cmd,
2417 cmd->result, *lp, *(lp + 3), cmd->retries,
2418 scsi_get_resid(cmd));
dea3101e 2419 }
2420
ea2151b4 2421 lpfc_update_stats(phba, lpfc_cmd);
445cf4f4 2422 result = cmd->result;
2423 if (vport->cfg_max_scsicmpl_time &&
2424 time_after(jiffies, lpfc_cmd->start_time +
2425 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
a257bf90 2426 spin_lock_irqsave(shost->host_lock, flags);
2427 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2428 if (pnode->cmd_qdepth >
2429 atomic_read(&pnode->cmd_pending) &&
2430 (atomic_read(&pnode->cmd_pending) >
2431 LPFC_MIN_TGT_QDEPTH) &&
2432 ((cmd->cmnd[0] == READ_10) ||
2433 (cmd->cmnd[0] == WRITE_10)))
2434 pnode->cmd_qdepth =
2435 atomic_read(&pnode->cmd_pending);
2436
2437 pnode->last_change_time = jiffies;
2438 }
a257bf90 2439 spin_unlock_irqrestore(shost->host_lock, flags);
2440 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
2441 if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
977b5a0a 2442 time_after(jiffies, pnode->last_change_time +
109f6ed0 2443 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
a257bf90 2444 spin_lock_irqsave(shost->host_lock, flags);
2445 pnode->cmd_qdepth += pnode->cmd_qdepth *
2446 LPFC_TGTQ_RAMPUP_PCENT / 100;
2447 if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
2448 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
2449 pnode->last_change_time = jiffies;
a257bf90 2450 spin_unlock_irqrestore(shost->host_lock, flags);
109f6ed0 2451 }
2452 }
2453
1dcb58e5 2454 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
2455
2456 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
2457 queue_depth = cmd->device->queue_depth;
2458 scsi_id = cmd->device->id;
2459 cmd->scsi_done(cmd);
2460
b808608b 2461 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2462 /*
2463 * If there is a thread waiting for command completion
2464 * wake up the thread.
2465 */
a257bf90 2466 spin_lock_irqsave(shost->host_lock, flags);
495a714c 2467 lpfc_cmd->pCmd = NULL;
2468 if (lpfc_cmd->waitq)
2469 wake_up(lpfc_cmd->waitq);
a257bf90 2470 spin_unlock_irqrestore(shost->host_lock, flags);
2471 lpfc_release_scsi_buf(phba, lpfc_cmd);
2472 return;
2473 }
2474
92d7f7b0 2475 if (!result)
a257bf90 2476 lpfc_rampup_queue_depth(vport, queue_depth);
92d7f7b0 2477
2478 /*
2479 * Check for queue full. If the lun is reporting queue full, then
2480 * back off the lun queue depth to prevent target overloads.
2481 */
2482 if (result == SAM_STAT_TASK_SET_FULL && pnode &&
2483 NLP_CHK_NODE_ACT(pnode)) {
2484 shost_for_each_device(tmp_sdev, shost) {
2485 if (tmp_sdev->id != scsi_id)
2486 continue;
2487 depth = scsi_track_queue_full(tmp_sdev,
2488 tmp_sdev->queue_depth-1);
2489 if (depth <= 0)
2490 continue;
2491 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2492 "0711 detected queue full - lun queue "
2493 "depth adjusted to %d.\n", depth);
ea2151b4 2494 lpfc_send_sdev_queuedepth_change_event(phba, vport,
2495 pnode,
2496 tmp_sdev->lun,
2497 depth+1, depth);
2498 }
2499 }
2500
2501 /*
2502 * If there is a thread waiting for command completion
2503 * wake up the thread.
2504 */
a257bf90 2505 spin_lock_irqsave(shost->host_lock, flags);
495a714c 2506 lpfc_cmd->pCmd = NULL;
2507 if (lpfc_cmd->waitq)
2508 wake_up(lpfc_cmd->waitq);
a257bf90 2509 spin_unlock_irqrestore(shost->host_lock, flags);
fa61a54e 2510
0bd4ca25 2511 lpfc_release_scsi_buf(phba, lpfc_cmd);
dea3101e 2512}
2513
34b02dcd 2514/**
3621a710 2515 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
2516 * @data: A pointer to the immediate command data portion of the IOCB.
2517 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
2518 *
2519 * The routine copies the entire FCP command from @fcp_cmnd to @data while
2520 * byte swapping the data to big endian format for transmission on the wire.
2521 **/
2522static void
2523lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
2524{
2525 int i, j;
2526 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
2527 i += sizeof(uint32_t), j++) {
2528 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
2529 }
2530}
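
/*
 * Equivalent sketch (hypothetical variant, illustration only): because
 * struct fcp_cmnd is a whole number of 32-bit words, the loop above
 * behaves like a series of big-endian stores built on the
 * put_unaligned_be32() helper from <asm/unaligned.h>.
 */
static void
lpfc_example_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
	uint32_t *src = (uint32_t *)fcp_cmnd;
	int j;

	for (j = 0; j < sizeof(struct fcp_cmnd) / sizeof(uint32_t); j++)
		put_unaligned_be32(src[j], data + j * sizeof(uint32_t));
}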
2531
9bad7671 2532/**
f1126688 2533 * lpfc_scsi_prep_cmnd - Wrapper func for converting a scsi cmnd to an FCP info unit
2534 * @vport: The virtual port for which this call is being executed.
2535 * @lpfc_cmd: The scsi command which needs to be sent.
2536 * @pnode: Pointer to lpfc_nodelist.
2537 *
2538 * This routine initializes the fcp_cmnd and iocb data structures from the
3772a991 2539 * scsi command, for a device with SLI-3 interface spec.
9bad7671 2540 **/
dea3101e 2541static void
f1126688 2542lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2e0fef85 2543 struct lpfc_nodelist *pnode)
dea3101e 2544{
2e0fef85 2545 struct lpfc_hba *phba = vport->phba;
dea3101e 2546 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2547 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2548 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2549 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
2550 int datadir = scsi_cmnd->sc_data_direction;
7e2b19fb 2551 char tag[2];
dea3101e 2552
2553 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
2554 return;
2555
dea3101e 2556 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
2557 /* clear task management bits */
2558 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
dea3101e 2559
2560 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
2561 &lpfc_cmd->fcp_cmnd->fcp_lun);
dea3101e 2562
2563 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
2564
2565 if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
2566 switch (tag[0]) {
dea3101e 2567 case HEAD_OF_QUEUE_TAG:
2568 fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
2569 break;
2570 case ORDERED_QUEUE_TAG:
2571 fcp_cmnd->fcpCntl1 = ORDERED_Q;
2572 break;
2573 default:
2574 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
2575 break;
2576 }
2577 } else
2578 fcp_cmnd->fcpCntl1 = 0;
2579
2580 /*
2581 * There are three possibilities here - use scatter-gather segment, use
2582 * the single mapping, or neither. Start the lpfc command prep by
2583 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
2584 * data bde entry.
2585 */
a0b4f78f 2586 if (scsi_sg_count(scsi_cmnd)) {
dea3101e 2587 if (datadir == DMA_TO_DEVICE) {
2588 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
2589 if (phba->sli_rev < LPFC_SLI_REV4) {
2590 iocb_cmd->un.fcpi.fcpi_parm = 0;
2591 iocb_cmd->ulpPU = 0;
2592 } else
2593 iocb_cmd->ulpPU = PARM_READ_CHECK;
dea3101e 2594 fcp_cmnd->fcpCntl3 = WRITE_DATA;
2595 phba->fc4OutputRequests++;
2596 } else {
2597 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
2598 iocb_cmd->ulpPU = PARM_READ_CHECK;
dea3101e 2599 fcp_cmnd->fcpCntl3 = READ_DATA;
2600 phba->fc4InputRequests++;
2601 }
2602 } else {
2603 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
2604 iocb_cmd->un.fcpi.fcpi_parm = 0;
2605 iocb_cmd->ulpPU = 0;
2606 fcp_cmnd->fcpCntl3 = 0;
2607 phba->fc4ControlRequests++;
2608 }
2609 if (phba->sli_rev == 3 &&
2610 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
34b02dcd 2611 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
dea3101e 2612 /*
2613 * Finish initializing those IOCB fields that are independent
2614 * of the scsi_cmnd request_buffer
2615 */
2616 piocbq->iocb.ulpContext = pnode->nlp_rpi;
2617 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
2618 piocbq->iocb.ulpFCP2Rcvy = 1;
2619 else
2620 piocbq->iocb.ulpFCP2Rcvy = 0;
dea3101e 2621
2622 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
2623 piocbq->context1 = lpfc_cmd;
2624 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
2625 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
2e0fef85 2626 piocbq->vport = vport;
dea3101e 2627}
2628
da0436e9 2629/**
f1126688 2630 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
2631 * @vport: The virtual port for which this call is being executed.
2632 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2633 * @lun: Logical unit number.
2634 * @task_mgmt_cmd: SCSI task management command.
2635 *
2636 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
2637 * for device with SLI-3 interface spec.
2638 *
2639 * Return codes:
2640 * 0 - Error
2641 * 1 - Success
2642 **/
dea3101e 2643static int
f1126688 2644lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
dea3101e 2645 struct lpfc_scsi_buf *lpfc_cmd,
420b630d 2646 unsigned int lun,
dea3101e 2647 uint8_t task_mgmt_cmd)
2648{
dea3101e 2649 struct lpfc_iocbq *piocbq;
2650 IOCB_t *piocb;
2651 struct fcp_cmnd *fcp_cmnd;
0b18ac42 2652 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
dea3101e 2653 struct lpfc_nodelist *ndlp = rdata->pnode;
2654
2655 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2656 ndlp->nlp_state != NLP_STE_MAPPED_NODE)
dea3101e 2657 return 0;
dea3101e 2658
dea3101e 2659 piocbq = &(lpfc_cmd->cur_iocbq);
2660 piocbq->vport = vport;
2661
dea3101e 2662 piocb = &piocbq->iocb;
2663
2664 fcp_cmnd = lpfc_cmd->fcp_cmnd;
2665 /* Clear out any old data in the FCP command area */
2666 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2667 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
dea3101e 2668 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
2669 if (vport->phba->sli_rev == 3 &&
2670 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
34b02dcd 2671 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
dea3101e 2672 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
dea3101e 2673 piocb->ulpContext = ndlp->nlp_rpi;
2674 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2675 piocb->ulpFCP2Rcvy = 1;
2676 }
2677 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
2678
2679 /* ulpTimeout is only one byte */
2680 if (lpfc_cmd->timeout > 0xff) {
2681 /*
2682 * Do not timeout the command at the firmware level.
2683 * The driver will provide the timeout mechanism.
2684 */
2685 piocb->ulpTimeout = 0;
f1126688 2686 } else
dea3101e 2687 piocb->ulpTimeout = lpfc_cmd->timeout;
da0436e9 2688
2689 if (vport->phba->sli_rev == LPFC_SLI_REV4)
2690 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
3772a991 2691
f1126688 2692 return 1;
2693}
2694
2695/**
2696 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
2697 * @phba: The hba struct for which this call is being executed.
2698 * @dev_grp: The HBA PCI-Device group number.
2699 *
2700 * This routine sets up the SCSI interface API function jump table in @phba
2701 * struct.
2702 * Returns: 0 - success, -ENODEV - failure.
2703 **/
2704int
2705lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
2706{
2707
2708 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
2709 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
2710 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2711
2712 switch (dev_grp) {
2713 case LPFC_PCI_DEV_LP:
2714 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
2715 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
2716 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
2717 break;
2718 case LPFC_PCI_DEV_OC:
2719 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
2720 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
2721 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
2722 break;
2723 default:
2724 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2725 "1418 Invalid HBA PCI-device group: 0x%x\n",
2726 dev_grp);
2727 return -ENODEV;
2728 break;
2729 }
2730 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
2731 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
84d1b006 2732 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
2733 return 0;
2734}
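
/*
 * Usage sketch (illustration only): once the jump table is set up, the
 * SLI-rev specific routines are reached through the function pointers in
 * struct lpfc_hba rather than per-call branches, e.g. via the wrapper
 * defined above:
 *
 *	err = phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 *
 * so callers such as lpfc_queuecommand() stay identical for SLI-3
 * (LPFC_PCI_DEV_LP) and SLI-4 (LPFC_PCI_DEV_OC) adapters.
 */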
2735
9bad7671 2736/**
3621a710 2737 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
2738 * @phba: The Hba for which this call is being executed.
2739 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
2740 * @rspiocbq: Pointer to lpfc_iocbq data structure.
2741 *
2742 * This routine is the IOCB completion routine for device reset and target
2743 * reset. It releases the scsi buffer associated with lpfc_cmd.
2744 **/
2745static void
2746lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
2747 struct lpfc_iocbq *cmdiocbq,
2748 struct lpfc_iocbq *rspiocbq)
2749{
2750 struct lpfc_scsi_buf *lpfc_cmd =
2751 (struct lpfc_scsi_buf *) cmdiocbq->context1;
2752 if (lpfc_cmd)
2753 lpfc_release_scsi_buf(phba, lpfc_cmd);
2754 return;
2755}
2756
9bad7671 2757/**
3621a710 2758 * lpfc_info - Info entry point of scsi_host_template data structure
2759 * @host: The scsi host for which this call is being executed.
2760 *
2761 * This routine provides module information about hba.
2762 *
2763 * Return code:
2764 * Pointer to char - Success.
2765 **/
dea3101e 2766const char *
2767lpfc_info(struct Scsi_Host *host)
2768{
2769 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
2770 struct lpfc_hba *phba = vport->phba;
dea3101e 2771 int len;
2772 static char lpfcinfobuf[384];
2773
2774 memset(lpfcinfobuf,0,384);
2775 if (phba && phba->pcidev){
2776 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
2777 len = strlen(lpfcinfobuf);
2778 snprintf(lpfcinfobuf + len,
2779 384-len,
2780 " on PCI bus %02x device %02x irq %d",
2781 phba->pcidev->bus->number,
2782 phba->pcidev->devfn,
2783 phba->pcidev->irq);
2784 len = strlen(lpfcinfobuf);
2785 if (phba->Port[0]) {
2786 snprintf(lpfcinfobuf + len,
2787 384-len,
2788 " port %s",
2789 phba->Port);
2790 }
2791 len = strlen(lpfcinfobuf);
2792 if (phba->sli4_hba.link_state.logical_speed) {
2793 snprintf(lpfcinfobuf + len,
2794 384-len,
2795 " Logical Link Speed: %d Mbps",
2796 phba->sli4_hba.link_state.logical_speed * 10);
2797 }
dea3101e 2798 }
2799 return lpfcinfobuf;
2800}
2801
9bad7671 2802/**
3621a710 2803 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
2804 * @phba: The Hba for which this call is being executed.
2805 *
2806 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
2807 * The default value of cfg_poll_tmo is 10 milliseconds.
2808 **/
2809static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
2810{
2811 unsigned long poll_tmo_expires =
2812 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
2813
2814 if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
2815 mod_timer(&phba->fcp_poll_timer,
2816 poll_tmo_expires);
2817}
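
/*
 * Arithmetic example (illustration only, HZ = 250 assumed): with the
 * default cfg_poll_tmo of 10 milliseconds, msecs_to_jiffies(10) rounds
 * up to 3 jiffies, so while commands remain on the FCP txcmplq the poll
 * timer refires roughly every 12 ms.
 */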
2818
9bad7671 2819/**
3621a710 2820 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
2821 * @phba: The Hba for which this call is being executed.
2822 *
2823 * This routine starts the fcp_poll_timer of @phba.
2824 **/
2825void lpfc_poll_start_timer(struct lpfc_hba * phba)
2826{
2827 lpfc_poll_rearm_timer(phba);
2828}
2829
9bad7671 2830/**
3621a710 2831 * lpfc_poll_timeout - Restart polling timer
2832 * @ptr: Map to lpfc_hba data structure pointer.
2833 *
2834 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
2835 * and the FCP ring interrupt is disabled.
2836 **/
2837
2838void lpfc_poll_timeout(unsigned long ptr)
2839{
2e0fef85 2840 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
2841
2842 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2843 lpfc_sli_handle_fast_ring_event(phba,
2844 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
2845
2846 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2847 lpfc_poll_rearm_timer(phba);
2848 }
2849}
2850
9bad7671 2851/**
3621a710 2852 * lpfc_queuecommand - scsi_host_template queuecommand entry point
2853 * @cmnd: Pointer to scsi_cmnd data structure.
2854 * @done: Pointer to done routine.
2855 *
2856 * Driver registers this routine to scsi midlayer to submit a @cmd to process.
2857 * This routine prepares an IOCB from scsi command and provides to firmware.
2858 * The @done callback is invoked after driver finished processing the command.
2859 *
2860 * Return value :
2861 * 0 - Success
2862 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
2863 **/
dea3101e 2864static int
2865lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2866{
2867 struct Scsi_Host *shost = cmnd->device->host;
2868 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2869 struct lpfc_hba *phba = vport->phba;
dea3101e 2870 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1c6f4ef5 2871 struct lpfc_nodelist *ndlp;
0bd4ca25 2872 struct lpfc_scsi_buf *lpfc_cmd;
19a7b4ae 2873 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
19a7b4ae 2874 int err;
dea3101e 2875
2876 err = fc_remote_port_chkready(rport);
2877 if (err) {
2878 cmnd->result = err;
dea3101e 2879 goto out_fail_command;
2880 }
1c6f4ef5 2881 ndlp = rdata->pnode;
dea3101e 2882
2883 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
2884 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2885
2886 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2887 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
2888 " op:%02x str=%s without registering for"
2889 " BlockGuard - Rejecting command\n",
2890 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2891 dif_op_str[scsi_get_prot_op(cmnd)]);
2892 goto out_fail_command;
2893 }
2894
dea3101e 2895 /*
2896 * Catch race where our node has transitioned, but the
2897 * transport is still transitioning.
dea3101e 2898 */
2899 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
2900 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
2901 goto out_fail_command;
2902 }
2903 if (vport->cfg_max_scsicmpl_time &&
2904 (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
977b5a0a 2905 goto out_host_busy;
a93ce024 2906
ed957684 2907 lpfc_cmd = lpfc_get_scsi_buf(phba);
dea3101e 2908 if (lpfc_cmd == NULL) {
eaf15d5b 2909 lpfc_rampdown_queue_depth(phba);
92d7f7b0 2910
2911 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2912 "0707 driver's buffer pool is empty, "
2913 "IO busied\n");
dea3101e 2914 goto out_host_busy;
2915 }
2916
2917 /*
2918 * Store the midlayer's command structure for the completion phase
2919 * and complete the command initialization.
2920 */
2921 lpfc_cmd->pCmd = cmnd;
2922 lpfc_cmd->rdata = rdata;
2923 lpfc_cmd->timeout = 0;
977b5a0a 2924 lpfc_cmd->start_time = jiffies;
dea3101e 2925 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
2926 cmnd->scsi_done = done;
2927
e2a0a9d6 2928 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2929 if (vport->phba->cfg_enable_bg) {
2930 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2931 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
2932 "str=%s\n",
2933 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2934 dif_op_str[scsi_get_prot_op(cmnd)]);
6a9c52cf 2935 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
e2a0a9d6 2936 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
e4e74273 2937 "%02x %02x %02x %02x %02x\n",
2938 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2939 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2940 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2941 cmnd->cmnd[9]);
2942 if (cmnd->cmnd[0] == READ_10)
2943 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
e2a0a9d6 2944 "9035 BLKGRD: READ @ sector %llu, "
2945 "count %u\n",
2946 (unsigned long long)scsi_get_lba(cmnd),
2947 blk_rq_sectors(cmnd->request));
2948 else if (cmnd->cmnd[0] == WRITE_10)
2949 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
e2a0a9d6 2950 "9036 BLKGRD: WRITE @ sector %llu, "
83096ebf 2951 "count %u cmd=%p\n",
87b5c328 2952 (unsigned long long)scsi_get_lba(cmnd),
83096ebf 2953 blk_rq_sectors(cmnd->request),
e2a0a9d6 2954 cmnd);
6a9c52cf 2955 }
2956
2957 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
2958 } else {
6a9c52cf 2959 if (vport->phba->cfg_enable_bg) {
e2a0a9d6 2960 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2961 "9038 BLKGRD: rcvd unprotected cmd:"
2962 "%02x op:%02x str=%s\n",
2963 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2964 dif_op_str[scsi_get_prot_op(cmnd)]);
2965 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2966 "9039 BLKGRD: CDB: %02x %02x %02x "
2967 "%02x %02x %02x %02x %02x %02x %02x\n",
2968 cmnd->cmnd[0], cmnd->cmnd[1],
2969 cmnd->cmnd[2], cmnd->cmnd[3],
2970 cmnd->cmnd[4], cmnd->cmnd[5],
2971 cmnd->cmnd[6], cmnd->cmnd[7],
2972 cmnd->cmnd[8], cmnd->cmnd[9]);
2973 if (cmnd->cmnd[0] == READ_10)
2974 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2975 "9040 dbg: READ @ sector %llu, "
2976 "count %u\n",
2977 (unsigned long long)scsi_get_lba(cmnd),
83096ebf 2978 blk_rq_sectors(cmnd->request));
2979 else if (cmnd->cmnd[0] == WRITE_10)
2980 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
e2a0a9d6 2981 "9041 dbg: WRITE @ sector %llu, "
83096ebf 2982 "count %u cmd=%p\n",
87b5c328 2983 (unsigned long long)scsi_get_lba(cmnd),
83096ebf 2984 blk_rq_sectors(cmnd->request), cmnd);
2985 else
2986 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
e2a0a9d6 2987 "9042 dbg: parser not implemented\n");
6a9c52cf 2988 }
2989 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
2990 }
2991
dea3101e 2992 if (err)
2993 goto out_host_busy_free_buf;
2994
2e0fef85 2995 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
dea3101e 2996
977b5a0a 2997 atomic_inc(&ndlp->cmd_pending);
3772a991 2998 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
92d7f7b0 2999 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
3000 if (err) {
3001 atomic_dec(&ndlp->cmd_pending);
dea3101e 3002 goto out_host_busy_free_buf;
eaf15d5b 3003 }
875fbdfe 3004 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
3005 spin_unlock(shost->host_lock);
3006 lpfc_sli_handle_fast_ring_event(phba,
3007 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
3008
3009 spin_lock(shost->host_lock);
3010 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3011 lpfc_poll_rearm_timer(phba);
3012 }
3013
dea3101e 3014 return 0;
3015
3016 out_host_busy_free_buf:
bcf4dbfa 3017 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
0bd4ca25 3018 lpfc_release_scsi_buf(phba, lpfc_cmd);
dea3101e 3019 out_host_busy:
3020 return SCSI_MLQUEUE_HOST_BUSY;
3021
3022 out_fail_command:
3023 done(cmnd);
3024 return 0;
3025}
3026
9bad7671 3027/**
3621a710 3028 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
3029 * @cmnd: Pointer to scsi_cmnd data structure.
3030 *
3031 * This routine aborts @cmnd pending in base driver.
3032 *
3033 * Return code :
3034 * 0x2003 - Error
3035 * 0x2002 - Success
3036 **/
dea3101e 3037static int
63c59c3b 3038lpfc_abort_handler(struct scsi_cmnd *cmnd)
dea3101e 3039{
3040 struct Scsi_Host *shost = cmnd->device->host;
3041 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3042 struct lpfc_hba *phba = vport->phba;
3043 struct lpfc_iocbq *iocb;
3044 struct lpfc_iocbq *abtsiocb;
dea3101e 3045 struct lpfc_scsi_buf *lpfc_cmd;
dea3101e 3046 IOCB_t *cmd, *icmd;
0bd4ca25 3047 int ret = SUCCESS;
fa61a54e 3048 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
dea3101e 3049
65d430fa 3050 fc_block_scsi_eh(cmnd);
3051 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
3052 BUG_ON(!lpfc_cmd);
dea3101e 3053
3054 /*
3055 * If pCmd field of the corresponding lpfc_scsi_buf structure
3056 * points to a different SCSI command, then the driver has
3057 * already completed this command, but the midlayer did not
3058 * see the completion before the eh fired. Just return
3059 * SUCCESS.
3060 */
3061 iocb = &lpfc_cmd->cur_iocbq;
3062 if (lpfc_cmd->pCmd != cmnd)
3063 goto out;
dea3101e 3064
0bd4ca25 3065 BUG_ON(iocb->context1 != lpfc_cmd);
dea3101e 3066
3067 abtsiocb = lpfc_sli_get_iocbq(phba);
3068 if (abtsiocb == NULL) {
3069 ret = FAILED;
dea3101e 3070 goto out;
3071 }
3072
dea3101e 3073 /*
3074 * The scsi command cannot be in the txq, and it is in flight because
3075 * pCmd is still pointing at the SCSI command we have to abort. There
3076 * is no need to search the txcmplq. Just send an abort to the FW.
dea3101e 3077 */
dea3101e 3078
3079 cmd = &iocb->iocb;
3080 icmd = &abtsiocb->iocb;
3081 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
3082 icmd->un.acxri.abortContextTag = cmd->ulpContext;
3083 if (phba->sli_rev == LPFC_SLI_REV4)
3084 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
3085 else
3086 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
dea3101e 3087
3088 icmd->ulpLe = 1;
3089 icmd->ulpClass = cmd->ulpClass;
3090
3091 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
3092 abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
341af102 3093 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
5ffc266e 3094
2e0fef85 3095 if (lpfc_is_link_up(phba))
3096 icmd->ulpCommand = CMD_ABORT_XRI_CN;
3097 else
3098 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
dea3101e 3099
0bd4ca25 3100 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2e0fef85 3101 abtsiocb->vport = vport;
3102 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
3103 IOCB_ERROR) {
3104 lpfc_sli_release_iocbq(phba, abtsiocb);
3105 ret = FAILED;
3106 goto out;
3107 }
dea3101e 3108
875fbdfe 3109 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
3110 lpfc_sli_handle_fast_ring_event(phba,
3111 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
875fbdfe 3112
fa61a54e 3113 lpfc_cmd->waitq = &waitq;
0bd4ca25 3114 /* Wait for abort to complete */
3115 wait_event_timeout(waitq,
3116 (lpfc_cmd->pCmd != cmnd),
3117 (2*vport->cfg_devloss_tmo*HZ));
875fbdfe 3118
3119 spin_lock_irq(shost->host_lock);
3120 lpfc_cmd->waitq = NULL;
3121 spin_unlock_irq(shost->host_lock);
dea3101e 3122
3123 if (lpfc_cmd->pCmd == cmnd) {
3124 ret = FAILED;
3125 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3126 "0748 abort handler timed out waiting "
3127 "for abort to complete: ret %#x, ID %d, "
3128 "LUN %d, snum %#lx\n",
3129 ret, cmnd->device->id, cmnd->device->lun,
3130 cmnd->serial_number);
dea3101e 3131 }
3132
3133 out:
3134 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3135 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
3136 "LUN %d snum %#lx\n", ret, cmnd->device->id,
3137 cmnd->device->lun, cmnd->serial_number);
63c59c3b 3138 return ret;
3139}
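
/*
 * Timing note (illustration only, default cfg_devloss_tmo of 30 seconds
 * assumed): the wait above is bounded by twice the devloss timeout,
 * i.e. wait_event_timeout(..., 2 * 30 * HZ), giving the abort up to 60
 * seconds to complete and clear pCmd before the handler gives up.
 */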
3140
3141static char *
3142lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
3143{
3144 switch (task_mgmt_cmd) {
3145 case FCP_ABORT_TASK_SET:
3146 return "ABORT_TASK_SET";
3147 case FCP_CLEAR_TASK_SET:
3148 return "FCP_CLEAR_TASK_SET";
3149 case FCP_BUS_RESET:
3150 return "FCP_BUS_RESET";
3151 case FCP_LUN_RESET:
3152 return "FCP_LUN_RESET";
3153 case FCP_TARGET_RESET:
3154 return "FCP_TARGET_RESET";
3155 case FCP_CLEAR_ACA:
3156 return "FCP_CLEAR_ACA";
3157 case FCP_TERMINATE_TASK:
3158 return "FCP_TERMINATE_TASK";
3159 default:
3160 return "unknown";
3161 }
3162}
3163
9bad7671 3164/**
3165 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
3166 * @vport: The virtual port for which this call is being executed.
3167 * @rdata: Pointer to remote port local data
3168 * @tgt_id: Target ID of remote device.
3169 * @lun_id: Lun number for the TMF
3170 * @task_mgmt_cmd: type of TMF to send
9bad7671 3171 *
3172 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
3173 * a remote port.
9bad7671 3174 *
3175 * Return Code:
3176 * 0x2003 - Error
3177 * 0x2002 - Success.
9bad7671 3178 **/
dea3101e 3179static int
3180lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
3181 unsigned tgt_id, unsigned int lun_id,
3182 uint8_t task_mgmt_cmd)
dea3101e 3183{
2e0fef85 3184 struct lpfc_hba *phba = vport->phba;
0bd4ca25 3185 struct lpfc_scsi_buf *lpfc_cmd;
bbb9d180
JS
3186 struct lpfc_iocbq *iocbq;
3187 struct lpfc_iocbq *iocbqrsp;
3188 int ret;
915caaaf 3189 int status;
dea3101e 3190
bbb9d180 3191 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
915caaaf 3192 return FAILED;
bbb9d180 3193
2e0fef85 3194 lpfc_cmd = lpfc_get_scsi_buf(phba);
dea3101e 3195 if (lpfc_cmd == NULL)
915caaaf 3196 return FAILED;
dea3101e 3197 lpfc_cmd->timeout = 60;
0b18ac42 3198 lpfc_cmd->rdata = rdata;
dea3101e 3199
bbb9d180
JS
3200 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
3201 task_mgmt_cmd);
915caaaf
JS
3202 if (!status) {
3203 lpfc_release_scsi_buf(phba, lpfc_cmd);
3204 return FAILED;
3205 }
dea3101e 3206
bbb9d180 3207 iocbq = &lpfc_cmd->cur_iocbq;
0bd4ca25 3208 iocbqrsp = lpfc_sli_get_iocbq(phba);
915caaaf
JS
3209 if (iocbqrsp == NULL) {
3210 lpfc_release_scsi_buf(phba, lpfc_cmd);
3211 return FAILED;
3212 }
bbb9d180 3213
e8b62011 3214 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
bbb9d180
JS
3215 "0702 Issue %s to TGT %d LUN %d "
3216 "rpi x%x nlp_flag x%x\n",
3217 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
3218 rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
3219
3772a991 3220 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
915caaaf 3221 iocbq, iocbqrsp, lpfc_cmd->timeout);
bbb9d180
JS
3222 if (status != IOCB_SUCCESS) {
3223 if (status == IOCB_TIMEDOUT) {
3224 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
3225 ret = TIMEOUT_ERROR;
3226 } else
915caaaf 3227 ret = FAILED;
bbb9d180
JS
3228 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3229 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3230 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
3231 lpfc_taskmgmt_name(task_mgmt_cmd),
3232 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
915caaaf 3233 iocbqrsp->iocb.un.ulpWord[4]);
2a9bf3d0
JS
3234 } else if (status == IOCB_BUSY)
3235 ret = FAILED;
3236 else
bbb9d180
JS
3237 ret = SUCCESS;
3238
6175c02a 3239 lpfc_sli_release_iocbq(phba, iocbqrsp);
bbb9d180
JS
3240
3241 if (ret != TIMEOUT_ERROR)
3242 lpfc_release_scsi_buf(phba, lpfc_cmd);
3243
3244 return ret;
3245}

/**
 * lpfc_chk_tgt_mapped - Check whether the scsi target is mapped
 * @vport: The virtual port to check on
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine delays until the scsi target (aka rport) for the
 * command exists (is present and logged in) or we declare it non-existent.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned long later;

	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	/*
	 * If target is not in a MAPPED state, delay until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			return SUCCESS;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = cmnd->device->hostdata;
		if (!rdata)
			return FAILED;
		pnode = rdata->pnode;
	}
	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
		return FAILED;
	return SUCCESS;
}

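/*
 * Timing example (illustrative): with the driver default devloss_tmo of
 * 30 seconds, lpfc_chk_tgt_mapped() above re-checks the node state every
 * 500ms for up to 60 seconds before declaring the target non-existent.
 */
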
/**
 * lpfc_reset_flush_io_context - Flush outstanding i/o after a reset TMF
 * @vport: The virtual port (scsi_host) for the flush context
 * @tgt_id: If aborting by Target context - specifies the target id
 * @lun_id: If aborting by Lun context - specifies the lun id
 * @context: specifies the context level to flush at.
 *
 * After a reset condition via TMF, we need to flush orphaned i/o
 * contexts from the adapter. This routine aborts any contexts
 * outstanding, then waits for their completions. The wait is
 * bounded by devloss_tmo.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
			    uint64_t lun_id, lpfc_ctx_cmd context)
{
	struct lpfc_hba *phba = vport->phba;
	unsigned long later;
	int cnt;

	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    tgt_id, lun_id, context);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0724 I/O flush failure for context %s : cnt x%x\n",
			((context == LPFC_CTX_LUN) ? "LUN" :
			 ((context == LPFC_CTX_TGT) ? "TGT" :
			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
			cnt);
		return FAILED;
	}
	return SUCCESS;
}
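
/*
 * The three error-handler entry points below use this flush at successively
 * wider scopes: LPFC_CTX_LUN after a LUN reset, LPFC_CTX_TGT after a target
 * reset, and LPFC_CTX_HOST after a bus reset.
 */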

/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a LUN_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	unsigned int lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0798 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	fc_block_scsi_eh(cmnd);

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0721 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
	scsi_event.lun = lun_id;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
				    FCP_LUN_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued Device Reset (%d, %d) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o: it may be orphaned by the TMF, or, if the
	 * TMF failed, it may be in an indeterminate state. So, continue on.
	 * We will report success only if all the i/o aborts successfully.
	 */
	status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					     LPFC_CTX_LUN);
	return status;
}

/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset by sending a TARGET_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	unsigned int lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0799 Target Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	fc_block_scsi_eh(cmnd);

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0722 Target Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
				    FCP_TARGET_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0723 SCSI layer issued Target Reset (%d, %d) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o: it may be orphaned by the TMF, or, if the
	 * TMF failed, it may be in an indeterminate state. So, continue on.
	 * We will report success only if all the i/o aborts successfully.
	 */
	status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					     LPFC_CTX_TGT);
	return status;
}

/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset to all targets on @cmnd->device->host.
 * This emulates Parallel SCSI Bus Reset Semantics.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	fc_block_scsi_eh(cmnd);

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
					    i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * We have to clean up i/o: it may be orphaned by the TMFs above, or,
	 * if any of the TMFs failed, it may be in an indeterminate state.
	 * We will report success only if all the i/o aborts successfully.
	 */
	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}

/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine adds lun_queue_depth + 2 scsi_bufs to this host's globally
 * available list of scsi buffers, while making sure no more scsi buffers
 * are allocated than the HBA limit conveyed to the midlayer. The list of
 * scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *  non-0 - Error
 *  0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/*
	 * Populate this host's globally available list of scsi buffers.
	 * Don't allocate more than the HBA limit conveyed to the midlayer
	 * via the host structure.  The formula accounts for the
	 * lun_queue_depth + error handlers + 1 extra.  This list of scsi
	 * bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* If the allocated buffers are already enough, do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Always keep some exchanges available to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	} else if (total + num_to_alloc >
		   phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0708 Allocation request of %d "
				 "command buffers did not succeed.  "
				 "Allocated %d buffers.\n",
				 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}

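/*
 * Worked example (illustrative): with a lun_queue_depth of 30, each new
 * scsi_device asks for 30 + 2 = 32 buffers, and the early return above
 * skips allocation once sdev_cnt * 32 already fits within total_scsi_bufs,
 * so the pool only grows while it is actually short.
 */
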
/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items
 *  - Tag command queuing support for @sdev if supported.
 *  - Dev loss time out value of fc_rport.
 *  - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *  0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = vport->cfg_devloss_tmo;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to null.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;

	atomic_dec(&phba->sdev_cnt);
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= lpfc_change_queue_depth,
};

struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
	.change_queue_depth	= lpfc_change_queue_depth,
};
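
/*
 * Illustrative sketch only: how a scsi_host_template like the two above is
 * typically bound to a Scsi_Host.  lpfc's real attach path
 * (lpfc_create_port() in lpfc_init.c) performs considerably more setup;
 * the function name and pdev parameter here are hypothetical.
 */
static int example_template_attach(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	int error;

	/* Allocate a host carrying vport-sized private data. */
	shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
	if (!shost)
		return -ENOMEM;

	/* Register the host with the SCSI midlayer. */
	error = scsi_add_host(shost, &pdev->dev);
	if (error) {
		scsi_host_put(shost);
		return error;
	}

	/*
	 * Scanning invokes the slave_alloc/slave_configure entry points
	 * above for each LUN the midlayer discovers.
	 */
	scsi_scan_host(shost);
	return 0;
}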