[SCSI] lpfc 8.3.31: Fix log message for Mailbox command when no error is detected
linux-2.6-block.git: drivers/scsi/lpfc/lpfc_scsi.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2012 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

int _dump_buf_done;

static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

static char *dif_grd_str[] = {
	"NO_GUARD",
	"DIF_CRC",
	"DIX_IP",
};

struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};
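
/*
 * Layout note (standard T10 DIF, not lpfc-specific): this tuple is the
 * 8 bytes of protection information carried with each logical block -
 * a CRC-16 guard over the block's data, a 2-byte application tag that
 * is opaque to the transport, and a 4-byte reference tag that normally
 * carries the low 32 bits of the LBA.  lpfc_bg_err_inject() below
 * corrupts exactly these fields when error injection is armed.
 */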

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);

static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
			__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
			__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
			   struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    !pnode ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
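
/*
 * Worked example of the bucket math in lpfc_update_stats (illustrative
 * values only): with LPFC_LINEAR_BUCKET, bucket_base = 0 and
 * bucket_step = 50 ms, a 120 ms completion maps to slot
 * i = (120 + 50 - 1 - 0) / 50 = 3.  With the power-of-two buckets, the
 * same latency lands in the first slot i where 120 <= 0 + (1 << i) * 50,
 * i.e. i = 2.
 */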

/**
 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: Lun number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
		struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp,
		uint32_t lun,
		uint32_t old_val,
		uint32_t new_val)
{
	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;

	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
		FC_REG_SCSI_EVENT;
	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
		LPFC_EVENT_VARQUEDEPTH;

	/* Report all luns with change in queue depth */
	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
			&ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
	}

	fast_path_evt->un.queue_depth_evt.oldval = old_val;
	fast_path_evt->un.queue_depth_evt.newval = new_val;
	fast_path_evt->vport = vport;

	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);

	return;
}

/**
 * lpfc_change_queue_depth - Alter scsi device queue depth
 * @sdev: Pointer to the scsi device on which to change the queue depth.
 * @qdepth: New queue depth to set the sdev to.
 * @reason: The reason for the queue depth change.
 *
 * This function is called by the midlayer and the LLD to alter the queue
 * depth for a scsi device. This function sets the queue depth to the new
 * value and sends an event out to log the queue depth change.
 **/
int
lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	unsigned long new_queue_depth, old_queue_depth;

	old_queue_depth = sdev->queue_depth;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	new_queue_depth = sdev->queue_depth;
	rdata = sdev->hostdata;
	if (rdata)
		lpfc_send_sdev_queuedepth_change_event(phba, vport,
						       rdata->pnode, sdev->lun,
						       old_queue_depth,
						       new_queue_depth);
	return sdev->queue_depth;
}
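
/*
 * Usage sketch (the template itself lives elsewhere in this driver):
 * the midlayer reaches lpfc_change_queue_depth() through the SCSI host
 * template, roughly
 *
 *	struct scsi_host_template lpfc_template = {
 *		...
 *		.change_queue_depth	= lpfc_change_queue_depth,
 *	};
 *
 * so a sysfs write to a device's queue_depth attribute arrives here
 * with reason == SCSI_QDEPTH_DEFAULT.
 */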

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. This routine posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at
 * most 1 event each second, and wakes up the worker thread of @phba to
 * process the WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
 * @vport: The virtual port for which this call is being executed.
 * @queue_depth: The current queue depth of the scsi device.
 *
 * This routine posts a WORKER_RAMP_UP_QUEUE event for the @vport's phba, at
 * most 1 event every 5 minutes after last_ramp_up_time or
 * last_rsrc_error_time, and wakes up the worker thread of the phba to
 * process the WORKER_RAMP_UP_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			uint32_t queue_depth)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;
	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (time_before(jiffies,
			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
	    time_before(jiffies,
			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. This routine reduces the queue depth for all scsi devices
 * on each vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				lpfc_change_queue_depth(sdev, new_queue_depth,
							SCSI_QDEPTH_DEFAULT);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
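
/*
 * Illustrative numbers for the ramp-down math above: with
 * queue_depth = 32, num_rsrc_err = 1 and num_cmd_success = 3, the
 * proportional cut is 32 * 1 / (1 + 3) = 8, so the device is stepped
 * down to 24.  If the proportional cut rounds to zero, the depth still
 * drops by one.
 */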

/**
 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
 * worker thread. This routine increases the queue depth for all scsi devices
 * on each vport associated with @phba by 1. This routine also sets @phba
 * num_rsrc_err and num_cmd_success to zero.
 **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				lpfc_change_queue_depth(sdev,
							sdev->queue_depth+1,
							SCSI_QDEPTH_RAMP_UP);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * with EEH when a device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt;

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O. The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
					psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes. Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1 = psb;
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}
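
/*
 * Sketch of how lpfc_new_scsi_buf_s3() carves up its single DMA buffer
 * (layout inferred from the pointer math above):
 *
 *	psb->data --> +---------------------+ <- pdma_phys_fcp_cmd
 *	              | struct fcp_cmnd     |
 *	              +---------------------+ <- pdma_phys_fcp_rsp
 *	              | struct fcp_rsp      |
 *	              +---------------------+ <- pdma_phys_bpl
 *	              | bpl[0] -> fcp_cmnd  |
 *	              | bpl[1] -> fcp_rsp   |
 *	              | bpl[2..] data bdes  |
 *	              +---------------------+
 */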

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->rdata && psb->rdata->pnode
			&& psb->rdata->pnode->vport == vport)
			psb->rdata = NULL;
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (pring->txq_cnt)
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_sblist: pointer to the scsi buffer list.
 *
 * This routine walks a list of scsi buffers that was passed in. It attempts
 * to construct blocks of scsi buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For a single SCSI buffer sgl with a non-contiguous xri, if any, it shall
 * use the embedded SGL post mailbox command for posting. The @post_sblist
 * passed in must be a local list, thus no lock is needed when manipulating
 * the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
int
lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
			     struct list_head *post_sblist, int sb_count)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	int status;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_bpl1;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(blck_sblist);
	LIST_HEAD(scsi_sblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
		list_del_init(&psb->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sblist, &blck_sblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = psb->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for SCSI buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
					pdma_phys_bpl1 = psb->dma_phys_bpl +
							 SGL_PAGE_SIZE;
				else
					pdma_phys_bpl1 = 0;
				status = lpfc_sli4_post_sgl(phba,
						psb->dma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
				if (status) {
					/* failure, put on abort scsi list */
					psb->exch_busy = 1;
				} else {
					/* success, put on SCSI buffer list */
					psb->exch_busy = 0;
					psb->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on SCSI buffer sgl list */
				list_add_tail(&psb->list, &scsi_sblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of SCSI buffer list sgls */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
						       post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset SCSI buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */
		while (!list_empty(&blck_sblist)) {
			list_remove_head(&blck_sblist, psb,
					 struct lpfc_scsi_buf, list);
			if (status) {
				/* failure, put on abort scsi list */
				psb->exch_busy = 1;
			} else {
				/* success, put on SCSI buffer list */
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&psb->list, &scsi_sblist);
		}
	}
	/* Push SCSI buffers with sgl posted to the available list */
	while (!list_empty(&scsi_sblist)) {
		list_remove_head(&scsi_sblist, psb,
				 struct lpfc_scsi_buf, list);
		lpfc_release_scsi_buf_s4(phba, psb);
	}
	return num_posted;
}
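
/*
 * Example of the blocking behavior above (hypothetical xri values):
 * reposting buffers whose xris run 10, 11, 12, 20, 21 first splices
 * 10-12 into blck_sblist and posts them as one non-embedded block
 * when the hole at 20 is seen, then accumulates 20-21 and posts them
 * as a second block at the end-of-list condition.
 */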

/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_sblist);
	int num_posted, rc = 0;

	/* get all SCSI buffers need to repost to a local list */
	spin_lock(&phba->scsi_buf_list_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
	spin_unlock(&phba->scsi_buf_list_lock);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist)) {
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
						phba->sli4_hba.scsi_xri_cnt);
		/* failed to post any scsi buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}

/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for device with SLI-4 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. After allocating up to @num_to_alloc SCSI buffers and putting
 * them on a list, it posts them to the port by using SGL block post.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(post_sblist);
	LIST_HEAD(scsi_sblist);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes space
		 * for the struct fcp_cmnd, struct fcp_rsp and the number
		 * of bde's necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
						GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));

		/* Initialize local short-hand pointers. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd =
			(psb->dma_handle + phba->cfg_sg_dma_buf_size)
			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP.
		 * The balance are sg list bdes. Initialize the
		 * first two and leave the rest for queuecommand.
		 */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* Setup the physical region for the FCP RSP */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
		/* setting the BLP size to 2 * sizeof BDE may not be correct.
		 * We are setting the bpl to point to our sgl. An sgl's
		 * entries are 16 bytes, a bpl's entries are 12 bytes.
		 */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		psb->cur_iocbq.context1 = psb;
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		psb->dma_phys_bpl = pdma_phys_bpl;

		/* add the scsi buffer to a post list */
		list_add_tail(&psb->list, &post_sblist);
		spin_lock_irq(&phba->scsi_buf_list_lock);
		phba->sli4_hba.scsi_xri_cnt++;
		spin_unlock_irq(&phba->scsi_buf_list_lock);
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_BG,
			"3021 Allocate %d out of %d requested new SCSI "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist))
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
							  &post_sblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}
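
/*
 * For contrast with the SLI-3 allocator above, lpfc_new_scsi_buf_s4()
 * puts the sgl at the front of the DMA buffer and packs the FCP
 * command and response at the very end (layout inferred from the
 * pointer math):
 *
 *	psb->data --> +---------------------+ <- pdma_phys_bpl
 *	              | sgl[0] -> fcp_cmnd  |
 *	              | sgl[1] -> fcp_rsp   |
 *	              | sgl[2..] data sges  |
 *	              +---------------------+ <- pdma_phys_fcp_cmd
 *	              | struct fcp_cmnd     |
 *	              +---------------------+ <- pdma_phys_fcp_rsp
 *	              | struct fcp_rsp      |
 *	              +---------------------+
 */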

/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}
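
/*
 * Note on the _s3/_s4 split used throughout this file: wrappers such as
 * lpfc_new_scsi_buf() call through function pointers in struct lpfc_hba
 * that are bound once per SLI revision at setup time (in this driver
 * via lpfc_scsi_api_table_setup()), so the I/O path never has to
 * re-test phba->sli_rev.
 */
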
/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
		lpfc_cmd->prot_seg_cnt = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list, skipping entries whose XRI is still blocked by
 * an active RRQ for @ndlp, and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd;
	unsigned long iflag = 0;
	int found = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
							list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_xritag))
			continue;
		list_del(&lpfc_cmd->list);
		found = 1;
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
		lpfc_cmd->prot_seg_cnt = 0;
		break;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
						 iflag);
	if (!found)
		return NULL;
	else
		return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	return phba->lpfc_get_scsi_buf(phba, ndlp);
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to the tail of the
 * @phba lpfc_scsi_buf_list list. For SLI4, XRIs are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV after an abort, so an aborted
 * buffer goes on the abts list instead.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	if (psb->exch_busy) {
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
	} else {

		spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	}
}

/**
 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{

	phba->lpfc_release_scsi_buf(phba, psb);
}

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and formats the bdes. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages. They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
				"dma_map_sg. Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command. Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/* Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for error injection */
#define BG_ERR_CHECK	0x20
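
/*
 * The masks above combine: a WRITE_PASS reftag injection below, for
 * example, returns (BG_ERR_TGT | BG_ERR_CHECK), meaning the corrupted
 * tag is sent on the wire and local Guard/Ref/App checking is disabled
 * so the Target, not the Initiator, is the one expected to catch it.
 */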

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	sector_t lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);
	lba = scsi_get_lba(sc);

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if ((phba->lpfc_injerr_lba < lba) ||
			(phba->lpfc_injerr_lba >= (lba + numblks)))
			return 0;
		if (sgpe) {
			blockoff = phba->lpfc_injerr_lba - lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = sc->device->hostdata;
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid &&
			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
				sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				/* Drop thru */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				/* Drop thru */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}
1664
acd6859b 1665
f9bb2da1 1666 /* Should we change the Guard Tag */
acd6859b
JS
1667 if (new_guard) {
1668 if (phba->lpfc_injerr_wgrd_cnt) {
1669 switch (op) {
1670 case SCSI_PROT_WRITE_PASS:
9a6b09c0 1671 rc = BG_ERR_CHECK;
acd6859b 1672 /* Drop thru */
9a6b09c0
JS
1673
1674 case SCSI_PROT_WRITE_INSERT:
acd6859b 1675 /*
9a6b09c0
JS
1676 * For WRITE_INSERT, force the
1677 * error to be sent on the wire. It should be
1678 * detected by the Target.
acd6859b
JS
1679 */
1680 phba->lpfc_injerr_wgrd_cnt--;
4ac9b226
JS
1681 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1682 phba->lpfc_injerr_nportid = 0;
1683 phba->lpfc_injerr_lba =
1684 LPFC_INJERR_LBA_OFF;
1685 memset(&phba->lpfc_injerr_wwpn,
1686 0, sizeof(struct lpfc_name));
1687 }
f9bb2da1 1688
9a6b09c0 1689 rc |= BG_ERR_TGT | BG_ERR_SWAP;
acd6859b 1690 /* Signals the caller to swap CRC->CSUM */
f9bb2da1 1691
acd6859b 1692 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
9a6b09c0 1693 "0817 BLKGRD: Injecting guard error: "
acd6859b
JS
1694 "write lba x%lx\n", (unsigned long)lba);
1695 break;
9a6b09c0 1696 case SCSI_PROT_WRITE_STRIP:
acd6859b 1697 /*
9a6b09c0
JS
1698 * For WRITE_STRIP and WRITE_PASS,
1699 * force the error on data
1700 * being copied from SLI-Host to SLI-Port.
acd6859b
JS
1701 */
1702 phba->lpfc_injerr_wgrd_cnt--;
4ac9b226
JS
1703 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1704 phba->lpfc_injerr_nportid = 0;
1705 phba->lpfc_injerr_lba =
1706 LPFC_INJERR_LBA_OFF;
1707 memset(&phba->lpfc_injerr_wwpn,
1708 0, sizeof(struct lpfc_name));
1709 }
f9bb2da1 1710
9a6b09c0 1711 rc = BG_ERR_INIT | BG_ERR_SWAP;
acd6859b
JS
1712 /* Signals the caller to swap CRC->CSUM */
1713
1714 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
9a6b09c0 1715 "0816 BLKGRD: Injecting guard error: "
acd6859b
JS
1716 "write lba x%lx\n", (unsigned long)lba);
1717 break;
1718 }
1719 }
1720 if (phba->lpfc_injerr_rgrd_cnt) {
1721 switch (op) {
1722 case SCSI_PROT_READ_INSERT:
acd6859b
JS
1723 case SCSI_PROT_READ_STRIP:
1724 case SCSI_PROT_READ_PASS:
1725 /*
1726 * For READ_STRIP and READ_PASS, force the
1727 * error on data being read off the wire. It
1728 * should force an IO error to the driver.
1729 */
acd6859b 1730 phba->lpfc_injerr_rgrd_cnt--;
4ac9b226
JS
1731 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1732 phba->lpfc_injerr_nportid = 0;
1733 phba->lpfc_injerr_lba =
1734 LPFC_INJERR_LBA_OFF;
1735 memset(&phba->lpfc_injerr_wwpn,
1736 0, sizeof(struct lpfc_name));
1737 }
acd6859b 1738
9a6b09c0 1739 rc = BG_ERR_INIT | BG_ERR_SWAP;
acd6859b
JS
1740 /* Signals the caller to swap CRC->CSUM */
1741
1742 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1743 "0818 BLKGRD: Injecting guard error: "
1744 "read lba x%lx\n", (unsigned long)lba);
1745 }
f9bb2da1
JS
1746 }
1747 }
acd6859b 1748
f9bb2da1
JS
1749 return rc;
1750}
1751#endif

/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
 * the specified SCSI command.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
	uint8_t ret = 0;

	if (guard_type == SHOST_DIX_GUARD_IP) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
					scsi_get_prot_op(sc));
			ret = 1;
			break;
		}
	}

	return ret;
}
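
/*
 * Example (illustrative only, not driver logic): for a READ_PASS
 * command on a host using the IP-checksum DIX guard, the table above
 * resolves to
 *
 *	lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
 *	rxop == BG_OP_IN_CRC_OUT_CSUM	(wire CRC in, IP checksum out)
 *	txop == BG_OP_IN_CSUM_OUT_CRC	(IP checksum in, wire CRC out)
 */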

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
 * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
 *
 **/
static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
	uint8_t ret = 0;

	if (guard_type == SHOST_DIX_GUARD_IP) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;

		}
	} else {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
			*txop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CSUM;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			break;

		case SCSI_PROT_NORMAL:
		default:
			break;
		}
	}

	return ret;
}
#endif
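
/*
 * Note (illustrative): the error path above intentionally returns the
 * opposite guard flavor from lpfc_sc_to_bg_opcodes. For example, with
 * an IP guard a READ_PASS normally uses BG_OP_IN_CRC_OUT_CSUM /
 * BG_OP_IN_CSUM_OUT_CRC, but here it gets BG_OP_IN_CSUM_OUT_CRC /
 * BG_OP_IN_CRC_OUT_CSUM, so the computed guard tag should mismatch
 * and force the guard error being injected.
 */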

/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	unsigned blksize;
	uint8_t txop, rxop;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);
	if (datadir == DMA_FROM_DEVICE) {
		bf_set(pde6_ce, pde6, checking);
		bf_set(pde6_re, pde6, checking);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
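
/*
 * Example (illustrative): a command with three dma-mapped data
 * segments yields a BPL of PDE5 + PDE6 + three data BDEs, so this
 * routine returns num_bde == 5.
 */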

/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream. For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |      PDE_7 (Prot BDE)   |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new prot group  -->     |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of BDEs added to the BPL.
 **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);
		bf_set(pde6_ce, pde6, checking);
		bf_set(pde6_re, pde6, checking);
		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if this pde is crossing the 4K boundary; if so split */
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);

		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			bpl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	return num_bde;
}
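
/*
 * Worked example (illustrative, assuming 512-byte blocks): a 64-byte
 * protection segment carries 64 / 8 = 8 DIF tuples, so one protection
 * group spans protgrp_bytes = 8 * 512 = 4096 bytes of data, and
 * reftag is advanced by 8 before the next group is built.
 */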

/**
 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |         DI_SEED         |
 *                                +-------------------------+
 *                                |         Data SGE        |
 *                                +-------------------------+
 *                                |more Data SGE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of SGEs added to the SGL.
 **/
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t physaddr;
	int i = 0, num_sge = 0, status;
	int datadir = sc->sc_data_direction;
	uint32_t reftag;
	unsigned blksize;
	uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_len;
	uint32_t dma_offset = 0;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup DISEED with what we have */
	diseed = (struct sli4_sge_diseed *) sgl;
	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

	/* Endianness conversion if necessary */
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	/* setup DISEED with the rest of the info */
	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
	if (datadir == DMA_FROM_DEVICE) {
		bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
	}
	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

	/* Endianness conversion if necessary for DISEED */
	diseed->word2 = cpu_to_le32(diseed->word2);
	diseed->word3 = cpu_to_le32(diseed->word3);

	/* advance bpl and increment sge count */
	num_sge++;
	sgl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		dma_len = sg_dma_len(sgde);
		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
		if ((i + 1) == datasegcnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		else
			bf_set(lpfc_sli4_sge_last, sgl, 0);
		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

		sgl->sge_len = cpu_to_le32(dma_len);
		dma_offset += dma_len;

		sgl++;
		num_sge++;
	}

out:
	return num_sge;
}
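
/*
 * Example (illustrative): for N mapped data segments the SGL built
 * above is one DISEED SGE followed by N data SGEs (num_sge == N + 1),
 * with the "last" bit set only on the final data SGE.
 */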

/**
 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then be instructed
 * to place the DIFs in the outgoing stream. For read operations,
 * the HBA could extract the DIFs and place them in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |         DISEED          |
 *                                    +-------------------------+
 *                                    |      DIF (Prot SGE)     |
 *                                    +-------------------------+
 *                                    |        Data SGE         |
 *                                    +-------------------------+
 *                                    |more Data SGE's ... (opt)|
 *                                    +-------------------------+
 *   start of new prot group  -->     |         DISEED          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of SGEs added to the SGL.
 **/
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint8_t txop, rxop;
	uint32_t dma_len;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_offset = 0;
	int num_sge = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* setup DISEED with what we have */
		diseed = (struct sli4_sge_diseed *) sgl;
		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

		/* Endianness conversion if necessary */
		diseed->ref_tag = cpu_to_le32(reftag);
		diseed->ref_tag_tran = diseed->ref_tag;

		/* setup DISEED with the rest of the info */
		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
		bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

		/* Endianness conversion if necessary for DISEED */
		diseed->word2 = cpu_to_le32(diseed->word2);
		diseed->word3 = cpu_to_le32(diseed->word3);

		/* advance sgl and increment bde count */
		num_sge++;
		sgl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		/* Now setup DIF SGE */
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
		sgl->word2 = cpu_to_le32(sgl->word2);

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if DIF SGE is crossing the 4K boundary; if so split */
		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_sge++;

		/* setup SGE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9086 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			sgl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				dma_len = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				dma_len = protgrp_bytes - subtotal;
				split_offset += dma_len;
			}

			subtotal += dma_len;

			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
			bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;

			num_sge++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			sgl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			bf_set(lpfc_sli4_sge_last, sgl, 1);
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			sgl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9085 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:

	return num_sge;
}
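
/*
 * Worked example of the 4K split above (illustrative): a DIF SGE whose
 * low address bits are 0xf80 with protgroup_len 0x100 (32 tuples)
 * would cross the 4K boundary, so protgroup_remainder becomes
 * 0x1000 - 0xf80 = 0x80; this group then covers 0x80 / 8 = 16 blocks
 * and the remaining 16 tuples start a new protection group at
 * protgroup_offset 0x80.
 */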

/**
 * lpfc_prot_group_type - Get protection group type of SCSI command
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 *
 * Given a SCSI command that supports DIF, determine composition of protection
 * groups involved in setting up buffer lists
 *
 * Returns: Protection group type (with or without DIF)
 *
 **/
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	int ret = LPFC_PG_TYPE_INVALID;
	unsigned char op = scsi_get_prot_op(sc);

	switch (op) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9021 Unsupported protection op:%d\n", op);
		break;
	}

	return ret;
}
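
/*
 * Example (illustrative): READ_STRIP and WRITE_INSERT map to
 * LPFC_PG_TYPE_NO_DIF because the HBA inserts or strips the DIF
 * itself and no separate protection buffers accompany the command;
 * READ_INSERT, WRITE_STRIP and the two PASS operations map to
 * LPFC_PG_TYPE_DIF_BUF, where protection buffers are passed to the
 * driver and must be dma mapped as well.
 */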

/**
 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int diflen, fcpdl;
	unsigned blksize;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
	 * fcp_rsp regions to the first data bde entry
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages. They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9067 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg. Config %d, seg_cnt"
					" %d\n",
					__func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);
			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2)
				goto err;
			break;
		case LPFC_PG_TYPE_DIF_BUF:{
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			if (lpfc_cmd->prot_seg_cnt
			    > phba->cfg_prot_sg_seg_cnt) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9068 BLKGRD: %s: Too many prot sg "
					"segments from dma_map_sg. Config %d, "
					"prot_seg_cnt %d\n", __func__,
					phba->cfg_prot_sg_seg_cnt,
					lpfc_cmd->prot_seg_cnt);
				dma_unmap_sg(&phba->pcidev->dev,
					     scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     datadir);
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);
			/* we should have 3 or more entries in buffer list */
			if (num_bde < 3)
				goto err;
			break;
		}
		case LPFC_PG_TYPE_INVALID:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = scsi_bufflen(scsi_cmnd);

	if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
		/*
		 * We are in DIF Type 1 mode
		 * Every data block has an 8 byte DIF (trailer)
		 * attached to it. Must adjust the FCP data length.
		 */
		blksize = lpfc_cmd_blksize(scsi_cmnd);
		diflen = (fcpdl / blksize) * 8;
		fcpdl += diflen;
	}
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	return 0;
err:
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9023 Could not setup all needed BDE's "
			"prot_group_type=%d, num_bde=%d\n",
			prot_group_type, num_bde);
	return 1;
}
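
/*
 * Worked example (illustrative, assuming DIF Type 1 and 512-byte
 * blocks): a 4096-byte write gains diflen = (4096 / 512) * 8 = 64
 * bytes of DIF trailers on the wire, so fcpDl/fcpi_parm are set to
 * 4160, and the BPL size works out to
 * (2 + num_bde) * sizeof(struct ulp_bde64).
 */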

/*
 * This function checks for BlockGuard errors detected by
 * the HBA.  In case of errors, the ASC/ASCQ fields in the
 * sense buffer will be set accordingly, paired with
 * ILLEGAL_REQUEST to signal to the kernel that the HBA
 * detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found
 * -1 - Internal error (bad profile, ...etc)
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
			struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
			" 0x%x lba 0x%llx blk cnt 0x%x "
			"bgstat=0x%x bghm=0x%x\n",
			cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
			blk_rq_sectors(cmd->request), bgstat, bghm);

	spin_lock(&_dump_buf_lock);
	if (!_dump_buf_done) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
			" Data for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
		lpfc_debug_save_data(phba, cmd);

		/* If we have a prot sgl, save the DIF buffer */
		if (lpfc_prot_group_type(phba, cmd) ==
				LPFC_PG_TYPE_DIF_BUF) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
				"Saving DIF for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
			lpfc_debug_save_dif(phba, cmd);
		}

		_dump_buf_done = 1;
	}
	spin_unlock(&_dump_buf_lock);

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
			" BlockGuard profile. bgstat:0x%x\n",
			bgstat);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
			"Invalid BlockGuard DIF Block. bgstat:0x%x\n",
			bgstat);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9055 BLKGRD: guard_tag error\n");
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9056 BLKGRD: ref_tag error\n");
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9061 BLKGRD: app_tag error\n");
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it.
		 * This code assumes there was also a guard/app/ref tag error
		 * indication.
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm is an "on the wire" FC frame based count */
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			bghm /= (cmd->device->sector_size +
				sizeof(struct scsi_dif_tuple));
			break;
		}

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9057 BLKGRD: Unknown error reported!\n");
	}

out:
	return ret;
}
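
/*
 * Worked example for the bghm conversion above (illustrative,
 * assuming 512-byte sectors): on READ_PASS both data and DIF travel
 * on the wire, so bghm == 2080 converts to 2080 / (512 + 8) = 4
 * blocks, and the failing sector reported in the sense data is
 * scsi_get_lba(cmd) + 4.
 */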

/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-4 interface spec.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	struct sli4_sge *first_data_sgl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg;
	struct ulp_bde64 *bde;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages. They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(!nseg))
			return 1;
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;
		first_data_sgl = sgl;
		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
				" %s: Too many sg segments from "
				"dma_map_sg. Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the sge's.
		 * When using SLI-3 the driver will try to fit all the BDEs
		 * into the IOCB. If it can't then the BDEs get added to a
		 * BPL as it does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			dma_len = sg_dma_len(sgel);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;
			sgl++;
		}
		/* setup the performance hint (first data BDE) if enabled */
		if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
			bde = (struct ulp_bde64 *)
					&(iocb_cmd->unsli3.sli3Words[5]);
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
					le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
		}
	} else {
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized; all iocb memory resources are reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}

/**
 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
 *
 * Adjust the data length to account for how much data
 * is actually on the wire.
 *
 * returns the adjusted data length
 **/
static int
lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
	int diflen, fcpdl;
	unsigned blksize;

	fcpdl = scsi_bufflen(sc);

	/* Check if there is protection data on the wire */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		/* Read */
		if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
			return fcpdl;

	} else {
		/* Write */
		if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
			return fcpdl;
	}

	/* If protection data on the wire, adjust the count accordingly */
	blksize = lpfc_cmd_blksize(sc);
	diflen = (fcpdl / blksize) * 8;
	fcpdl += diflen;
	return fcpdl;
}
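
/*
 * Example (illustrative, assuming 512-byte blocks): a 4096-byte
 * READ_PASS carries DIF on the wire, so the adjusted length is
 * 4096 + (4096 / 512) * 8 = 4160; a READ_INSERT of the same size is
 * returned unchanged since no protection data crosses the wire for
 * that op.
 */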

/**
 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;

	/*
	 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
	 * fcp_rsp regions to the first data bde entry
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages. They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);

		sgl += 1;
		lpfc_cmd->seg_cnt = datasegcnt;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9087 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg. Config %d, seg_cnt"
					" %d\n",
					__func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
					datasegcnt);
			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2)
				goto err;
			break;
		case LPFC_PG_TYPE_DIF_BUF:{
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			if (lpfc_cmd->prot_seg_cnt
			    > phba->cfg_prot_sg_seg_cnt) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9088 BLKGRD: %s: Too many prot sg "
					"segments from dma_map_sg. Config %d, "
					"prot_seg_cnt %d\n", __func__,
					phba->cfg_prot_sg_seg_cnt,
					lpfc_cmd->prot_seg_cnt);
				dma_unmap_sg(&phba->pcidev->dev,
					     scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     datadir);
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
					datasegcnt, protsegcnt);
			/* we should have 3 or more entries in buffer list */
			if (num_bde < 3)
				goto err;
			break;
		}
		case LPFC_PG_TYPE_INVALID:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9083 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);

	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
	lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF;

	return 0;
err:
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9084 Could not setup all needed BDE's "
			"prot_group_type=%d, num_bde=%d\n",
			prot_group_type, num_bde);
	return 1;
}

/**
 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}

/**
 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * using BlockGuard.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static inline int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
}
3283
ea2151b4 3284/**
3621a710 3285 * lpfc_send_scsi_error_event - Posts an event when there is a SCSI error
ea2151b4
JS
3286 * @phba: Pointer to hba context object.
3287 * @vport: Pointer to vport object.
3288 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3289 * @rsp_iocb: Pointer to response iocb object which reported error.
3290 *
3291 * This function posts an event when a SCSI command reports an
3292 * error from the scsi device.
3293 **/
3294static void
3295lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3296 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
3297 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3298 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3299 uint32_t resp_info = fcprsp->rspStatus2;
3300 uint32_t scsi_status = fcprsp->rspStatus3;
3301 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3302 struct lpfc_fast_path_event *fast_path_evt = NULL;
3303 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3304 unsigned long flags;
3305
5989b8d4
JS
3306 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3307 return;
3308
ea2151b4
JS
3309 /* If there is queuefull or busy condition send a scsi event */
3310 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3311 (cmnd->result == SAM_STAT_BUSY)) {
3312 fast_path_evt = lpfc_alloc_fast_evt(phba);
3313 if (!fast_path_evt)
3314 return;
3315 fast_path_evt->un.scsi_evt.event_type =
3316 FC_REG_SCSI_EVENT;
3317 fast_path_evt->un.scsi_evt.subcategory =
3318 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3319 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3320 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3321 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3322 &pnode->nlp_portname, sizeof(struct lpfc_name));
3323 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3324 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3325 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3326 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3327 fast_path_evt = lpfc_alloc_fast_evt(phba);
3328 if (!fast_path_evt)
3329 return;
3330 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3331 FC_REG_SCSI_EVENT;
3332 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3333 LPFC_EVENT_CHECK_COND;
3334 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3335 cmnd->device->lun;
3336 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3337 &pnode->nlp_portname, sizeof(struct lpfc_name));
3338 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3339 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3340 fast_path_evt->un.check_cond_evt.sense_key =
3341 cmnd->sense_buffer[2] & 0xf;
3342 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3343 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3344 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3345 fcpi_parm &&
3346 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3347 ((scsi_status == SAM_STAT_GOOD) &&
3348 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3349 /*
3350 * If fcpi_parm is valid and either the resid does not match it or
3351 * the status is good with no residual reported, it is a read_check error.
3352 */
3353 fast_path_evt = lpfc_alloc_fast_evt(phba);
3354 if (!fast_path_evt)
3355 return;
3356 fast_path_evt->un.read_check_error.header.event_type =
3357 FC_REG_FABRIC_EVENT;
3358 fast_path_evt->un.read_check_error.header.subcategory =
3359 LPFC_EVENT_FCPRDCHKERR;
3360 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3361 &pnode->nlp_portname, sizeof(struct lpfc_name));
3362 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3363 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3364 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3365 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3366 fast_path_evt->un.read_check_error.fcpiparam =
3367 fcpi_parm;
3368 } else
3369 return;
3370
3371 fast_path_evt->vport = vport;
3372 spin_lock_irqsave(&phba->hbalock, flags);
3373 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3374 spin_unlock_irqrestore(&phba->hbalock, flags);
3375 lpfc_worker_wake_up(phba);
3376 return;
3377}
9bad7671
JS
3378
3379/**
f1126688 3380 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3772a991 3381 * @phba: The HBA for which this call is being executed.
9bad7671
JS
3382 * @psb: The scsi buffer which is going to be un-mapped.
3383 *
3384 * This routine does DMA un-mapping of the scatter-gather list of the scsi
3772a991 3385 * command held in @psb for devices with the SLI-3 interface spec.
9bad7671 3386 **/
bcf4dbfa 3387static void
f1126688 3388lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
bcf4dbfa
JS
3389{
3390 /*
3391 * There are only two special cases to consider. (1) the scsi command
3392 * requested scatter-gather usage or (2) the scsi command allocated
3393 * a request buffer, but did not request use_sg. There is a third
3394 * case, but it does not require resource deallocation.
3395 */
a0b4f78f
FT
3396 if (psb->seg_cnt > 0)
3397 scsi_dma_unmap(psb->pCmd);
e2a0a9d6
JS
3398 if (psb->prot_seg_cnt > 0)
3399 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3400 scsi_prot_sg_count(psb->pCmd),
3401 psb->pCmd->sc_data_direction);
bcf4dbfa
JS
3402}
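/*
 * Editor's note (hedged): scsi_dma_unmap() is the midlayer helper that
 * undoes scsi_dma_map() on the data scatterlist; the protection
 * scatterlist, mapped directly with dma_map_sg() in the BlockGuard
 * prep path, must be unmapped with dma_unmap_sg() as above. seg_cnt
 * and prot_seg_cnt double as "was mapped" flags for the two lists.
 */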
3403
9bad7671 3404/**
3621a710 3405 * lpfc_handle_fcp_err - FCP response handler
9bad7671
JS
3406 * @vport: The virtual port for which this call is being executed.
3407 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
3408 * @rsp_iocb: The response IOCB which contains FCP error.
3409 *
3410 * This routine is called to process response IOCB with status field
3411 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3412 * based upon SCSI and FCP error.
3413 **/
dea3101e 3414static void
2e0fef85
JS
3415lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3416 struct lpfc_iocbq *rsp_iocb)
dea3101e 3417{
3418 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3419 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3420 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
7054a606 3421 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
dea3101e 3422 uint32_t resp_info = fcprsp->rspStatus2;
3423 uint32_t scsi_status = fcprsp->rspStatus3;
c7743956 3424 uint32_t *lp;
dea3101e 3425 uint32_t host_status = DID_OK;
3426 uint32_t rsplen = 0;
c7743956 3427 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
dea3101e 3428
ea2151b4 3429
dea3101e 3430 /*
3431 * If this is a task management command, there is no
3432 * scsi packet associated with this lpfc_cmd. The driver
3433 * consumes it.
3434 */
3435 if (fcpcmd->fcpCntl2) {
3436 scsi_status = 0;
3437 goto out;
3438 }
3439
6a9c52cf
JS
3440 if (resp_info & RSP_LEN_VALID) {
3441 rsplen = be32_to_cpu(fcprsp->rspRspLen);
e40a02c1 3442 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
6a9c52cf
JS
3443 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3444 "2719 Invalid response length: "
3445 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
3446 cmnd->device->id,
3447 cmnd->device->lun, cmnd->cmnd[0],
3448 rsplen);
3449 host_status = DID_ERROR;
3450 goto out;
3451 }
e40a02c1
JS
3452 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3453 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3454 "2757 Protocol failure detected during "
3455 "processing of FCP I/O op: "
3456 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
3457 cmnd->device->id,
3458 cmnd->device->lun, cmnd->cmnd[0],
3459 fcprsp->rspInfo3);
3460 host_status = DID_ERROR;
3461 goto out;
3462 }
6a9c52cf
JS
3463 }
3464
c7743956
JS
3465 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3466 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3467 if (snslen > SCSI_SENSE_BUFFERSIZE)
3468 snslen = SCSI_SENSE_BUFFERSIZE;
3469
3470 if (resp_info & RSP_LEN_VALID)
3471 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3472 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3473 }
3474 lp = (uint32_t *)cmnd->sense_buffer;
3475
73d91e50
JS
3476 if (!scsi_status && (resp_info & RESID_UNDER) &&
3477 vport->cfg_log_verbose & LOG_FCP_UNDER)
3478 logit = LOG_FCP_UNDER;
c7743956 3479
e8b62011 3480 lpfc_printf_vlog(vport, KERN_WARNING, logit,
e2a0a9d6 3481 "9024 FCP command x%x failed: x%x SNS x%x x%x "
e8b62011
JS
3482 "Data: x%x x%x x%x x%x x%x\n",
3483 cmnd->cmnd[0], scsi_status,
3484 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3485 be32_to_cpu(fcprsp->rspResId),
3486 be32_to_cpu(fcprsp->rspSnsLen),
3487 be32_to_cpu(fcprsp->rspRspLen),
3488 fcprsp->rspInfo3);
dea3101e 3489
a0b4f78f 3490 scsi_set_resid(cmnd, 0);
dea3101e 3491 if (resp_info & RESID_UNDER) {
a0b4f78f 3492 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
dea3101e 3493
73d91e50 3494 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
e2a0a9d6 3495 "9025 FCP Read Underrun, expected %d, "
e8b62011
JS
3496 "residual %d Data: x%x x%x x%x\n",
3497 be32_to_cpu(fcpcmd->fcpDl),
3498 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3499 cmnd->underflow);
dea3101e 3500
7054a606
JS
3501 /*
3502 * If there is an underrun, check whether the underrun reported
3503 * by the storage array matches the underrun reported by the HBA.
3504 * If they differ, a frame was dropped.
3505 */
3506 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3507 fcpi_parm &&
a0b4f78f 3508 (scsi_get_resid(cmnd) != fcpi_parm)) {
e8b62011
JS
3509 lpfc_printf_vlog(vport, KERN_WARNING,
3510 LOG_FCP | LOG_FCP_ERROR,
e2a0a9d6 3511 "9026 FCP Read Check Error "
e8b62011
JS
3512 "and Underrun Data: x%x x%x x%x x%x\n",
3513 be32_to_cpu(fcpcmd->fcpDl),
3514 scsi_get_resid(cmnd), fcpi_parm,
3515 cmnd->cmnd[0]);
a0b4f78f 3516 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
7054a606
JS
3517 host_status = DID_ERROR;
3518 }
dea3101e 3519 /*
3520 * The cmnd->underflow is the minimum number of bytes that must
25985edc 3521 * be transferred for this command. Provided a sense condition
dea3101e 3522 * is not present, make sure the actual amount transferred is at
3523 * least the underflow value or fail.
3524 */
3525 if (!(resp_info & SNS_LEN_VALID) &&
3526 (scsi_status == SAM_STAT_GOOD) &&
a0b4f78f
FT
3527 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3528 < cmnd->underflow)) {
e8b62011 3529 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
e2a0a9d6 3530 "9027 FCP command x%x residual "
e8b62011
JS
3531 "underrun converted to error "
3532 "Data: x%x x%x x%x\n",
66dbfbe6 3533 cmnd->cmnd[0], scsi_bufflen(cmnd),
e8b62011 3534 scsi_get_resid(cmnd), cmnd->underflow);
dea3101e 3535 host_status = DID_ERROR;
3536 }
3537 } else if (resp_info & RESID_OVER) {
e8b62011 3538 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
e2a0a9d6 3539 "9028 FCP command x%x residual overrun error. "
e4e74273 3540 "Data: x%x x%x\n", cmnd->cmnd[0],
e8b62011 3541 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
dea3101e 3542 host_status = DID_ERROR;
3543
3544 /*
3545 * Check SLI validation that all the transfer was actually done
3546 * (fcpi_parm should be zero). Apply check only to reads.
3547 */
eee8877e 3548 } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
e8b62011 3549 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
e2a0a9d6 3550 "9029 FCP Read Check Error Data: "
eee8877e 3551 "x%x x%x x%x x%x x%x\n",
e8b62011
JS
3552 be32_to_cpu(fcpcmd->fcpDl),
3553 be32_to_cpu(fcprsp->rspResId),
eee8877e
JS
3554 fcpi_parm, cmnd->cmnd[0], scsi_status);
3555 switch (scsi_status) {
3556 case SAM_STAT_GOOD:
3557 case SAM_STAT_CHECK_CONDITION:
3558 /* Fabric dropped a data frame. Fail any successful
3559 * command in which we detected dropped frames.
3560 * A status of good or some check conditions could
3561 * be considered a successful command.
3562 */
3563 host_status = DID_ERROR;
3564 break;
3565 }
a0b4f78f 3566 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
dea3101e 3567 }
3568
3569 out:
3570 cmnd->result = ScsiResult(host_status, scsi_status);
ea2151b4 3571 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
dea3101e 3572}
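/*
 * Editor's worked example (hedged): consider a 64 KB read where the
 * target reports RESID_UNDER with rspResId = 4096 while the HBA's
 * fcpi_parm says 8192 bytes never arrived. The residuals disagree, so
 * the routine above declares a dropped frame: resid is forced to the
 * full buffer length and host_status becomes DID_ERROR, letting the
 * midlayer retry instead of consuming short data.
 */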
3573
9bad7671 3574/**
3621a710 3575 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
9bad7671
JS
3576 * @phba: The Hba for which this call is being executed.
3577 * @pIocbIn: The command IOCBQ for the scsi cmnd.
3772a991 3578 * @pIocbOut: The response IOCBQ for the scsi cmnd.
9bad7671
JS
3579 *
3580 * This routine assigns scsi command result by looking into response IOCB
3581 * status field appropriately. This routine handles QUEUE FULL condition as
3582 * well by ramping down device queue depth.
3583 **/
dea3101e 3584static void
3585lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3586 struct lpfc_iocbq *pIocbOut)
3587{
3588 struct lpfc_scsi_buf *lpfc_cmd =
3589 (struct lpfc_scsi_buf *) pIocbIn->context1;
2e0fef85 3590 struct lpfc_vport *vport = pIocbIn->vport;
dea3101e 3591 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
3592 struct lpfc_nodelist *pnode = rdata->pnode;
75baf696 3593 struct scsi_cmnd *cmd;
445cf4f4 3594 int result;
a257bf90 3595 struct scsi_device *tmp_sdev;
5ffc266e 3596 int depth;
fa61a54e 3597 unsigned long flags;
ea2151b4 3598 struct lpfc_fast_path_event *fast_path_evt;
75baf696 3599 struct Scsi_Host *shost;
a257bf90 3600 uint32_t queue_depth, scsi_id;
73d91e50 3601 uint32_t logit = LOG_FCP;
dea3101e 3602
75baf696
JS
3603 /* Sanity check on return of outstanding command */
3604 if (!(lpfc_cmd->pCmd))
3605 return;
3606 cmd = lpfc_cmd->pCmd;
3607 shost = cmd->device->host;
3608
dea3101e 3609 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
3610 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
341af102
JS
3611 /* pick up SLI4 exchange busy status from HBA */
3612 lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
3613
9a6b09c0
JS
3614#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3615 if (lpfc_cmd->prot_data_type) {
3616 struct scsi_dif_tuple *src = NULL;
3617
3618 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
3619 /*
3620 * Used to restore any changes to protection
3621 * data for error injection.
3622 */
3623 switch (lpfc_cmd->prot_data_type) {
3624 case LPFC_INJERR_REFTAG:
3625 src->ref_tag =
3626 lpfc_cmd->prot_data;
3627 break;
3628 case LPFC_INJERR_APPTAG:
3629 src->app_tag =
3630 (uint16_t)lpfc_cmd->prot_data;
3631 break;
3632 case LPFC_INJERR_GUARD:
3633 src->guard_tag =
3634 (uint16_t)lpfc_cmd->prot_data;
3635 break;
3636 default:
3637 break;
3638 }
3639
3640 lpfc_cmd->prot_data = 0;
3641 lpfc_cmd->prot_data_type = 0;
3642 lpfc_cmd->prot_data_segment = NULL;
3643 }
3644#endif
109f6ed0
JS
3645 if (pnode && NLP_CHK_NODE_ACT(pnode))
3646 atomic_dec(&pnode->cmd_pending);
dea3101e 3647
3648 if (lpfc_cmd->status) {
3649 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
3650 (lpfc_cmd->result & IOERR_DRVR_MASK))
3651 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3652 else if (lpfc_cmd->status >= IOSTAT_CNT)
3653 lpfc_cmd->status = IOSTAT_DEFAULT;
73d91e50
JS
3654 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR
3655 && !lpfc_cmd->fcp_rsp->rspStatus3
3656 && (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER)
3657 && !(phba->cfg_log_verbose & LOG_FCP_UNDER))
3658 logit = 0;
3659 else
3660 logit = LOG_FCP | LOG_FCP_UNDER;
3661 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3662 "9030 FCP cmd x%x failed <%d/%d> "
5a0d80fc
JS
3663 "status: x%x result: x%x "
3664 "sid: x%x did: x%x oxid: x%x "
3665 "Data: x%x x%x\n",
73d91e50
JS
3666 cmd->cmnd[0],
3667 cmd->device ? cmd->device->id : 0xffff,
3668 cmd->device ? cmd->device->lun : 0xffff,
3669 lpfc_cmd->status, lpfc_cmd->result,
5a0d80fc
JS
3670 vport->fc_myDID, pnode->nlp_DID,
3671 phba->sli_rev == LPFC_SLI_REV4 ?
3672 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
73d91e50
JS
3673 pIocbOut->iocb.ulpContext,
3674 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
dea3101e 3675
3676 switch (lpfc_cmd->status) {
3677 case IOSTAT_FCP_RSP_ERROR:
3678 /* Call FCP RSP handler to determine result */
2e0fef85 3679 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
dea3101e 3680 break;
3681 case IOSTAT_NPORT_BSY:
3682 case IOSTAT_FABRIC_BSY:
0f1f53a7 3683 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
ea2151b4
JS
3684 fast_path_evt = lpfc_alloc_fast_evt(phba);
3685 if (!fast_path_evt)
3686 break;
3687 fast_path_evt->un.fabric_evt.event_type =
3688 FC_REG_FABRIC_EVENT;
3689 fast_path_evt->un.fabric_evt.subcategory =
3690 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
3691 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
3692 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3693 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
3694 &pnode->nlp_portname,
3695 sizeof(struct lpfc_name));
3696 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
3697 &pnode->nlp_nodename,
3698 sizeof(struct lpfc_name));
3699 }
3700 fast_path_evt->vport = vport;
3701 fast_path_evt->work_evt.evt =
3702 LPFC_EVT_FASTPATH_MGMT_EVT;
3703 spin_lock_irqsave(&phba->hbalock, flags);
3704 list_add_tail(&fast_path_evt->work_evt.evt_listp,
3705 &phba->work_list);
3706 spin_unlock_irqrestore(&phba->hbalock, flags);
3707 lpfc_worker_wake_up(phba);
dea3101e 3708 break;
92d7f7b0 3709 case IOSTAT_LOCAL_REJECT:
1151e3ec 3710 case IOSTAT_REMOTE_STOP:
ab56dc2e
JS
3711 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
3712 lpfc_cmd->result ==
3713 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
3714 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
3715 lpfc_cmd->result ==
3716 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
3717 cmd->result = ScsiResult(DID_NO_CONNECT, 0);
3718 break;
3719 }
d7c255b2 3720 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
92d7f7b0 3721 lpfc_cmd->result == IOERR_NO_RESOURCES ||
b92938b4
JS
3722 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
3723 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
92d7f7b0 3724 cmd->result = ScsiResult(DID_REQUEUE, 0);
58da1ffb 3725 break;
e2a0a9d6 3726 }
e2a0a9d6
JS
3727 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
3728 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
3729 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
3730 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
3731 /*
3732 * This is a response for a BG enabled
3733 * cmd. Parse BG error
3734 */
3735 lpfc_parse_bg_err(phba, lpfc_cmd,
3736 pIocbOut);
3737 break;
3738 } else {
3739 lpfc_printf_vlog(vport, KERN_WARNING,
3740 LOG_BG,
3741 "9031 non-zero BGSTAT "
6a9c52cf 3742 "on unprotected cmd\n");
e2a0a9d6
JS
3743 }
3744 }
1151e3ec
JS
3745 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
3746 && (phba->sli_rev == LPFC_SLI_REV4)
3747 && (pnode && NLP_CHK_NODE_ACT(pnode))) {
3748 /* This IO was aborted by the target; we don't
3749 * know the rxid, and because we did not send the
3750 * ABTS we cannot generate an RRQ.
3751 */
3752 lpfc_set_rrq_active(phba, pnode,
3753 lpfc_cmd->cur_iocbq.sli4_xritag,
3754 0, 0);
3755 }
e2a0a9d6 3756 /* else: fall through */
dea3101e 3757 default:
3758 cmd->result = ScsiResult(DID_ERROR, 0);
3759 break;
3760 }
3761
58da1ffb 3762 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
19a7b4ae 3763 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
0f1f53a7
JS
3764 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
3765 SAM_STAT_BUSY);
ab56dc2e 3766 } else
dea3101e 3767 cmd->result = ScsiResult(DID_OK, 0);
dea3101e 3768
3769 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
3770 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
3771
e8b62011
JS
3772 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3773 "0710 Iodone <%d/%d> cmd %p, error "
3774 "x%x SNS x%x x%x Data: x%x x%x\n",
3775 cmd->device->id, cmd->device->lun, cmd,
3776 cmd->result, *lp, *(lp + 3), cmd->retries,
3777 scsi_get_resid(cmd));
dea3101e 3778 }
3779
ea2151b4 3780 lpfc_update_stats(phba, lpfc_cmd);
445cf4f4 3781 result = cmd->result;
977b5a0a
JS
3782 if (vport->cfg_max_scsicmpl_time &&
3783 time_after(jiffies, lpfc_cmd->start_time +
3784 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
a257bf90 3785 spin_lock_irqsave(shost->host_lock, flags);
109f6ed0
JS
3786 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3787 if (pnode->cmd_qdepth >
3788 atomic_read(&pnode->cmd_pending) &&
3789 (atomic_read(&pnode->cmd_pending) >
3790 LPFC_MIN_TGT_QDEPTH) &&
3791 ((cmd->cmnd[0] == READ_10) ||
3792 (cmd->cmnd[0] == WRITE_10)))
3793 pnode->cmd_qdepth =
3794 atomic_read(&pnode->cmd_pending);
3795
3796 pnode->last_change_time = jiffies;
3797 }
a257bf90 3798 spin_unlock_irqrestore(shost->host_lock, flags);
109f6ed0 3799 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
7dc517df 3800 if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
977b5a0a 3801 time_after(jiffies, pnode->last_change_time +
109f6ed0 3802 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
a257bf90 3803 spin_lock_irqsave(shost->host_lock, flags);
7dc517df
JS
3804 depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
3805 / 100;
3806 depth = depth ? depth : 1;
3807 pnode->cmd_qdepth += depth;
3808 if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
3809 pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
109f6ed0 3810 pnode->last_change_time = jiffies;
a257bf90 3811 spin_unlock_irqrestore(shost->host_lock, flags);
109f6ed0 3812 }
977b5a0a
JS
3813 }
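/*
 * Editor's worked example (hedged; the actual value of
 * LPFC_TGTQ_RAMPUP_PCENT lives elsewhere in the driver): with
 * cmd_qdepth = 32 and a ramp-up percentage of 5, the block above
 * computes depth = 32 * 5 / 100 = 1, so the target queue depth
 * creeps to 33, capped at vport->cfg_tgt_queue_depth and
 * rate-limited to one step per LPFC_TGTQ_INTERVAL.
 */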
3814
1dcb58e5 3815 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
a257bf90
JS
3816
3817 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
3818 queue_depth = cmd->device->queue_depth;
3819 scsi_id = cmd->device->id;
0bd4ca25
JSEC
3820 cmd->scsi_done(cmd);
3821
b808608b 3822 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
fa61a54e
JS
3823 /*
3824 * If there is a thread waiting for command completion
3825 * wake up the thread.
3826 */
a257bf90 3827 spin_lock_irqsave(shost->host_lock, flags);
495a714c 3828 lpfc_cmd->pCmd = NULL;
fa61a54e
JS
3829 if (lpfc_cmd->waitq)
3830 wake_up(lpfc_cmd->waitq);
a257bf90 3831 spin_unlock_irqrestore(shost->host_lock, flags);
b808608b
JW
3832 lpfc_release_scsi_buf(phba, lpfc_cmd);
3833 return;
3834 }
3835
92d7f7b0 3836 if (!result)
a257bf90 3837 lpfc_rampup_queue_depth(vport, queue_depth);
92d7f7b0 3838
445cf4f4
JSEC
3839 /*
3840 * Check for queue full. If the lun is reporting queue full, then
3841 * back off the lun queue depth to prevent target overloads.
3842 */
58da1ffb
JS
3843 if (result == SAM_STAT_TASK_SET_FULL && pnode &&
3844 NLP_CHK_NODE_ACT(pnode)) {
a257bf90
JS
3845 shost_for_each_device(tmp_sdev, shost) {
3846 if (tmp_sdev->id != scsi_id)
445cf4f4
JSEC
3847 continue;
3848 depth = scsi_track_queue_full(tmp_sdev,
5ffc266e
JS
3849 tmp_sdev->queue_depth-1);
3850 if (depth <= 0)
3851 continue;
e8b62011
JS
3852 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3853 "0711 detected queue full - lun queue "
3854 "depth adjusted to %d.\n", depth);
ea2151b4 3855 lpfc_send_sdev_queuedepth_change_event(phba, vport,
5ffc266e
JS
3856 pnode,
3857 tmp_sdev->lun,
3858 depth+1, depth);
445cf4f4
JSEC
3859 }
3860 }
3861
fa61a54e
JS
3862 /*
3863 * If there is a thread waiting for command completion
3864 * wake up the thread.
3865 */
a257bf90 3866 spin_lock_irqsave(shost->host_lock, flags);
495a714c 3867 lpfc_cmd->pCmd = NULL;
fa61a54e
JS
3868 if (lpfc_cmd->waitq)
3869 wake_up(lpfc_cmd->waitq);
a257bf90 3870 spin_unlock_irqrestore(shost->host_lock, flags);
fa61a54e 3871
0bd4ca25 3872 lpfc_release_scsi_buf(phba, lpfc_cmd);
dea3101e 3873}
3874
34b02dcd 3875/**
3621a710 3876 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
34b02dcd
JS
3877 * @data: A pointer to the immediate command data portion of the IOCB.
3878 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
3879 *
3880 * The routine copies the entire FCP command from @fcp_cmnd to @data while
3881 * byte swapping the data to big endian format for transmission on the wire.
3882 **/
3883static void
3884lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
3885{
3886 int i, j;
3887 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
3888 i += sizeof(uint32_t), j++) {
3889 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
3890 }
3891}
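/*
 * Editor's sketch (hedged, not driver code): the copy above is the
 * word-wise byte swap shown by this hypothetical, self-contained
 * helper; sizeof(struct fcp_cmnd) is assumed to be a multiple of 4.
 */
static inline void example_copy_to_be32(void *dst, const void *src,
					size_t len)
{
	const uint32_t *s = src;
	uint32_t *d = dst;
	size_t i;

	/* swap each 32-bit word into big-endian wire order */
	for (i = 0; i < len / sizeof(uint32_t); i++)
		d[i] = cpu_to_be32(s[i]);
}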
3892
9bad7671 3893/**
f1126688 3894 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
9bad7671
JS
3895 * @vport: The virtual port for which this call is being executed.
3896 * @lpfc_cmd: The scsi command which needs to be sent.
3897 * @pnode: Pointer to lpfc_nodelist.
3898 *
3899 * This routine initializes the fcp_cmnd and iocb data structures from the scsi
3772a991 3900 * command for transfer on a device with the SLI-3 interface spec.
9bad7671 3901 **/
dea3101e 3902static void
f1126688 3903lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2e0fef85 3904 struct lpfc_nodelist *pnode)
dea3101e 3905{
2e0fef85 3906 struct lpfc_hba *phba = vport->phba;
dea3101e 3907 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3908 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3909 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3910 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
3911 int datadir = scsi_cmnd->sc_data_direction;
7e2b19fb 3912 char tag[2];
dea3101e 3913
58da1ffb
JS
3914 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3915 return;
3916
dea3101e 3917 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
69859dc4
JSEC
3918 /* clear task management bits */
3919 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
dea3101e 3920
91886523
JSEC
3921 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
3922 &lpfc_cmd->fcp_cmnd->fcp_lun);
dea3101e 3923
df9e1b59
JS
3924 memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN);
3925 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
7e2b19fb
JS
3926 if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
3927 switch (tag[0]) {
dea3101e 3928 case HEAD_OF_QUEUE_TAG:
3929 fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
3930 break;
3931 case ORDERED_QUEUE_TAG:
3932 fcp_cmnd->fcpCntl1 = ORDERED_Q;
3933 break;
3934 default:
3935 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
3936 break;
3937 }
3938 } else
3939 fcp_cmnd->fcpCntl1 = 0;
3940
3941 /*
3942 * There are three possibilities here - use scatter-gather segment, use
3943 * the single mapping, or neither. Start the lpfc command prep by
3944 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3945 * data bde entry.
3946 */
a0b4f78f 3947 if (scsi_sg_count(scsi_cmnd)) {
dea3101e 3948 if (datadir == DMA_TO_DEVICE) {
3949 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
3772a991
JS
3950 if (phba->sli_rev < LPFC_SLI_REV4) {
3951 iocb_cmd->un.fcpi.fcpi_parm = 0;
3952 iocb_cmd->ulpPU = 0;
3953 } else
3954 iocb_cmd->ulpPU = PARM_READ_CHECK;
dea3101e 3955 fcp_cmnd->fcpCntl3 = WRITE_DATA;
3956 phba->fc4OutputRequests++;
3957 } else {
3958 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
3959 iocb_cmd->ulpPU = PARM_READ_CHECK;
dea3101e 3960 fcp_cmnd->fcpCntl3 = READ_DATA;
3961 phba->fc4InputRequests++;
3962 }
3963 } else {
3964 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
3965 iocb_cmd->un.fcpi.fcpi_parm = 0;
3966 iocb_cmd->ulpPU = 0;
3967 fcp_cmnd->fcpCntl3 = 0;
3968 phba->fc4ControlRequests++;
3969 }
e2a0a9d6
JS
3970 if (phba->sli_rev == 3 &&
3971 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
34b02dcd 3972 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
dea3101e 3973 /*
3974 * Finish initializing those IOCB fields that are independent
3975 * of the scsi_cmnd request_buffer
3976 */
3977 piocbq->iocb.ulpContext = pnode->nlp_rpi;
6d368e53
JS
3978 if (phba->sli_rev == LPFC_SLI_REV4)
3979 piocbq->iocb.ulpContext =
3980 phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
dea3101e 3981 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
3982 piocbq->iocb.ulpFCP2Rcvy = 1;
09372820
JS
3983 else
3984 piocbq->iocb.ulpFCP2Rcvy = 0;
dea3101e 3985
3986 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
3987 piocbq->context1 = lpfc_cmd;
3988 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
3989 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
2e0fef85 3990 piocbq->vport = vport;
dea3101e 3991}
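/*
 * Editor's note (hedged): int_to_scsilun() above encodes the
 * midlayer's flat LUN into the 8-byte SAM LUN field of the FCP_CMND
 * IU; for a small LUN such as 5 (peripheral device addressing) the
 * encoded bytes are 00 05 00 00 00 00 00 00.
 */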
3992
da0436e9 3993/**
6d368e53 3994 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
9bad7671
JS
3995 * @vport: The virtual port for which this call is being executed.
3996 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
3997 * @lun: Logical unit number.
3998 * @task_mgmt_cmd: SCSI task management command.
3999 *
3772a991
JS
4000 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4001 * for device with SLI-3 interface spec.
9bad7671
JS
4002 *
4003 * Return codes:
4004 * 0 - Error
4005 * 1 - Success
4006 **/
dea3101e 4007static int
f1126688 4008lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
dea3101e 4009 struct lpfc_scsi_buf *lpfc_cmd,
420b630d 4010 unsigned int lun,
dea3101e 4011 uint8_t task_mgmt_cmd)
4012{
dea3101e 4013 struct lpfc_iocbq *piocbq;
4014 IOCB_t *piocb;
4015 struct fcp_cmnd *fcp_cmnd;
0b18ac42 4016 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
dea3101e 4017 struct lpfc_nodelist *ndlp = rdata->pnode;
4018
58da1ffb
JS
4019 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
4020 ndlp->nlp_state != NLP_STE_MAPPED_NODE)
dea3101e 4021 return 0;
dea3101e 4022
dea3101e 4023 piocbq = &(lpfc_cmd->cur_iocbq);
2e0fef85
JS
4024 piocbq->vport = vport;
4025
dea3101e 4026 piocb = &piocbq->iocb;
4027
4028 fcp_cmnd = lpfc_cmd->fcp_cmnd;
34b02dcd
JS
4029 /* Clear out any old data in the FCP command area */
4030 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4031 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
dea3101e 4032 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
e2a0a9d6
JS
4033 if (vport->phba->sli_rev == 3 &&
4034 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
34b02dcd 4035 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
dea3101e 4036 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
dea3101e 4037 piocb->ulpContext = ndlp->nlp_rpi;
6d368e53
JS
4038 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4039 piocb->ulpContext =
4040 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4041 }
dea3101e 4042 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
4043 piocb->ulpFCP2Rcvy = 1;
4044 }
4045 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4046
4047 /* ulpTimeout is only one byte */
4048 if (lpfc_cmd->timeout > 0xff) {
4049 /*
4050 * Do not timeout the command at the firmware level.
4051 * The driver will provide the timeout mechanism.
4052 */
4053 piocb->ulpTimeout = 0;
f1126688 4054 } else
dea3101e 4055 piocb->ulpTimeout = lpfc_cmd->timeout;
da0436e9 4056
f1126688
JS
4057 if (vport->phba->sli_rev == LPFC_SLI_REV4)
4058 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
3772a991 4059
f1126688 4060 return 1;
3772a991
JS
4061}
4062
4063/**
25985edc 4064 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
3772a991
JS
4065 * @phba: The hba struct for which this call is being executed.
4066 * @dev_grp: The HBA PCI-Device group number.
4067 *
4068 * This routine sets up the SCSI interface API function jump table in @phba
4069 * struct.
4070 * Returns: 0 - success, -ENODEV - failure.
4071 **/
4072int
4073lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4074{
4075
f1126688
JS
4076 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4077 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
f1126688 4078
3772a991
JS
4079 switch (dev_grp) {
4080 case LPFC_PCI_DEV_LP:
4081 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
4082 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
acd6859b 4083 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
3772a991 4084 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
19ca7609 4085 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
3772a991 4086 break;
da0436e9
JS
4087 case LPFC_PCI_DEV_OC:
4088 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
4089 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
acd6859b 4090 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
da0436e9 4091 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
19ca7609 4092 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
da0436e9 4093 break;
3772a991
JS
4094 default:
4095 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4096 "1418 Invalid HBA PCI-device group: 0x%x\n",
4097 dev_grp);
4098 return -ENODEV;
4099 break;
4100 }
3772a991 4101 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
84d1b006 4102 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
3772a991
JS
4103 return 0;
4104}
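/*
 * Editor's usage sketch (grounded in this file): once the table is
 * populated, SLI-3 vs SLI-4 differences stay hidden behind the
 * function pointers, e.g.:
 *
 *	if (lpfc_scsi_api_table_setup(phba, dev_grp))
 *		return -ENODEV;
 *	...
 *	err = phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
 *
 * which is the indirection the lpfc_scsi_prep_dma_buf() wrapper above
 * resolves at run time.
 */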
4105
9bad7671 4106/**
3621a710 4107 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
9bad7671
JS
4108 * @phba: The Hba for which this call is being executed.
4109 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4110 * @rspiocbq: Pointer to lpfc_iocbq data structure.
4111 *
4112 * This routine is the IOCB completion routine for the device reset and target
4113 * reset routines. It releases the scsi buffer associated with lpfc_cmd.
4114 **/
7054a606
JS
4115static void
4116lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4117 struct lpfc_iocbq *cmdiocbq,
4118 struct lpfc_iocbq *rspiocbq)
4119{
4120 struct lpfc_scsi_buf *lpfc_cmd =
4121 (struct lpfc_scsi_buf *) cmdiocbq->context1;
4122 if (lpfc_cmd)
4123 lpfc_release_scsi_buf(phba, lpfc_cmd);
4124 return;
4125}
4126
9bad7671 4127/**
3621a710 4128 * lpfc_info - Info entry point of scsi_host_template data structure
9bad7671
JS
4129 * @host: The scsi host for which this call is being executed.
4130 *
4131 * This routine provides module information about the hba.
4132 *
4133 * Return code:
4134 * Pointer to char - Success.
4135 **/
dea3101e 4136const char *
4137lpfc_info(struct Scsi_Host *host)
4138{
2e0fef85
JS
4139 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4140 struct lpfc_hba *phba = vport->phba;
dea3101e 4141 int len;
4142 static char lpfcinfobuf[384];
4143
4144 memset(lpfcinfobuf,0,384);
4145 if (phba && phba->pcidev){
4146 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
4147 len = strlen(lpfcinfobuf);
4148 snprintf(lpfcinfobuf + len,
4149 384-len,
4150 " on PCI bus %02x device %02x irq %d",
4151 phba->pcidev->bus->number,
4152 phba->pcidev->devfn,
4153 phba->pcidev->irq);
4154 len = strlen(lpfcinfobuf);
4155 if (phba->Port[0]) {
4156 snprintf(lpfcinfobuf + len,
4157 384-len,
4158 " port %s",
4159 phba->Port);
4160 }
65467b6b
JS
4161 len = strlen(lpfcinfobuf);
4162 if (phba->sli4_hba.link_state.logical_speed) {
4163 snprintf(lpfcinfobuf + len,
4164 384-len,
4165 " Logical Link Speed: %d Mbps",
4166 phba->sli4_hba.link_state.logical_speed * 10);
4167 }
dea3101e 4168 }
4169 return lpfcinfobuf;
4170}
4171
9bad7671 4172/**
3621a710 4173 * lpfc_poll_rearm_time - Routine to modify fcp_poll timer of hba
9bad7671
JS
4174 * @phba: The Hba for which this call is being executed.
4175 *
4176 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
4177 * The default value of cfg_poll_tmo is 10 milliseconds.
4178 **/
875fbdfe
JSEC
4179static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4180{
4181 unsigned long poll_tmo_expires =
4182 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4183
4184 if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
4185 mod_timer(&phba->fcp_poll_timer,
4186 poll_tmo_expires);
4187}
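/*
 * Editor's example (grounded in the kernel-doc above): with the default
 * cfg_poll_tmo of 10 ms, a rearm at jiffies = J schedules the
 * fcp_poll_timer for J + msecs_to_jiffies(10); the timer is only
 * rearmed while the FCP ring's txcmplq still holds commands.
 */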
4188
9bad7671 4189/**
3621a710 4190 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
9bad7671
JS
4191 * @phba: The Hba for which this call is being executed.
4192 *
4193 * This routine starts the fcp_poll_timer of @phba.
4194 **/
875fbdfe
JSEC
4195void lpfc_poll_start_timer(struct lpfc_hba * phba)
4196{
4197 lpfc_poll_rearm_timer(phba);
4198}
4199
9bad7671 4200/**
3621a710 4201 * lpfc_poll_timeout - Restart polling timer
9bad7671
JS
4202 * @ptr: Pointer to the lpfc_hba data structure, passed as an unsigned long.
4203 *
4204 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
4205 * and the FCP ring interrupt is disabled.
4206 **/
4207
875fbdfe
JSEC
4208void lpfc_poll_timeout(unsigned long ptr)
4209{
2e0fef85 4210 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
875fbdfe
JSEC
4211
4212 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
45ed1190
JS
4213 lpfc_sli_handle_fast_ring_event(phba,
4214 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4215
875fbdfe
JSEC
4216 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4217 lpfc_poll_rearm_timer(phba);
4218 }
875fbdfe
JSEC
4219}
4220
9bad7671 4221/**
3621a710 4222 * lpfc_queuecommand - scsi_host_template queuecommand entry point
9bad7671
JS
4223 * @cmnd: Pointer to scsi_cmnd data structure.
4224 * @done: Pointer to done routine.
4225 *
4226 * The driver registers this routine with the scsi midlayer to submit a @cmnd
4227 * for processing. It prepares an IOCB from the scsi command and hands it to
4228 * the firmware; @done is invoked once the driver finishes the command.
4229 *
4230 * Return value :
4231 * 0 - Success
4232 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
4233 **/
dea3101e 4234static int
f281233d 4235lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
dea3101e 4236{
2e0fef85
JS
4237 struct Scsi_Host *shost = cmnd->device->host;
4238 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4239 struct lpfc_hba *phba = vport->phba;
dea3101e 4240 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1c6f4ef5 4241 struct lpfc_nodelist *ndlp;
0bd4ca25 4242 struct lpfc_scsi_buf *lpfc_cmd;
19a7b4ae 4243 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
19a7b4ae 4244 int err;
dea3101e 4245
19a7b4ae
JSEC
4246 err = fc_remote_port_chkready(rport);
4247 if (err) {
4248 cmnd->result = err;
dea3101e 4249 goto out_fail_command;
4250 }
1c6f4ef5 4251 ndlp = rdata->pnode;
dea3101e 4252
bf08611b 4253 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
acd6859b 4254 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
e2a0a9d6 4255
6a9c52cf
JS
4256 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4257 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
4258 " op:%02x str=%s without registering for"
4259 " BlockGuard - Rejecting command\n",
e2a0a9d6
JS
4260 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
4261 dif_op_str[scsi_get_prot_op(cmnd)]);
4262 goto out_fail_command;
4263 }
4264
dea3101e 4265 /*
19a7b4ae
JSEC
4266 * Catch race where our node has transitioned, but the
4267 * transport is still transitioning.
dea3101e 4268 */
b522d7d4 4269 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
f55ca84d 4270 cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
b522d7d4
JS
4271 goto out_fail_command;
4272 }
7dc517df 4273 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
3496343d 4274 goto out_tgt_busy;
a93ce024 4275
19ca7609 4276 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
dea3101e 4277 if (lpfc_cmd == NULL) {
eaf15d5b 4278 lpfc_rampdown_queue_depth(phba);
92d7f7b0 4279
e8b62011
JS
4280 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4281 "0707 driver's buffer pool is empty, "
4282 "IO busied\n");
dea3101e 4283 goto out_host_busy;
4284 }
4285
4286 /*
4287 * Store the midlayer's command structure for the completion phase
4288 * and complete the command initialization.
4289 */
4290 lpfc_cmd->pCmd = cmnd;
4291 lpfc_cmd->rdata = rdata;
4292 lpfc_cmd->timeout = 0;
977b5a0a 4293 lpfc_cmd->start_time = jiffies;
dea3101e 4294 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
4295 cmnd->scsi_done = done;
4296
e2a0a9d6 4297 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
6a9c52cf
JS
4298 if (vport->phba->cfg_enable_bg) {
4299 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
9a6b09c0
JS
4300 "9033 BLKGRD: rcvd protected cmd:%02x op=%s "
4301 "guard=%s\n", cmnd->cmnd[0],
4302 dif_op_str[scsi_get_prot_op(cmnd)],
4303 dif_grd_str[scsi_host_get_guard(shost)]);
6a9c52cf
JS
4304 if (cmnd->cmnd[0] == READ_10)
4305 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
e2a0a9d6 4306 "9035 BLKGRD: READ @ sector %llu, "
9a6b09c0 4307 "cnt %u, rpt %d\n",
83096ebf 4308 (unsigned long long)scsi_get_lba(cmnd),
9a6b09c0
JS
4309 blk_rq_sectors(cmnd->request),
4310 (cmnd->cmnd[1]>>5));
6a9c52cf
JS
4311 else if (cmnd->cmnd[0] == WRITE_10)
4312 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
e2a0a9d6 4313 "9036 BLKGRD: WRITE @ sector %llu, "
9a6b09c0 4314 "cnt %u, wpt %d\n",
87b5c328 4315 (unsigned long long)scsi_get_lba(cmnd),
83096ebf 4316 blk_rq_sectors(cmnd->request),
9a6b09c0 4317 (cmnd->cmnd[1]>>5));
6a9c52cf 4318 }
e2a0a9d6
JS
4319
4320 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4321 } else {
6a9c52cf 4322 if (vport->phba->cfg_enable_bg) {
e2a0a9d6 4323 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
9a6b09c0
JS
4324 "9038 BLKGRD: rcvd unprotected cmd:"
4325 "%02x op=%s guard=%s\n", cmnd->cmnd[0],
4326 dif_op_str[scsi_get_prot_op(cmnd)],
4327 dif_grd_str[scsi_host_get_guard(shost)]);
6a9c52cf
JS
4328 if (cmnd->cmnd[0] == READ_10)
4329 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4330 "9040 dbg: READ @ sector %llu, "
9a6b09c0 4331 "cnt %u, rpt %d\n",
6a9c52cf 4332 (unsigned long long)scsi_get_lba(cmnd),
9a6b09c0
JS
4333 blk_rq_sectors(cmnd->request),
4334 (cmnd->cmnd[1]>>5));
6a9c52cf
JS
4335 else if (cmnd->cmnd[0] == WRITE_10)
4336 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
9a6b09c0
JS
4337 "9041 dbg: WRITE @ sector %llu, "
4338 "cnt %u, wpt %d\n",
4339 (unsigned long long)scsi_get_lba(cmnd),
4340 blk_rq_sectors(cmnd->request),
4341 (cmnd->cmnd[1]>>5));
6a9c52cf 4342 }
e2a0a9d6
JS
4343 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4344 }
4345
dea3101e 4346 if (err)
4347 goto out_host_busy_free_buf;
4348
2e0fef85 4349 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
dea3101e 4350
977b5a0a 4351 atomic_inc(&ndlp->cmd_pending);
3772a991 4352 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
92d7f7b0 4353 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
eaf15d5b
JS
4354 if (err) {
4355 atomic_dec(&ndlp->cmd_pending);
dea3101e 4356 goto out_host_busy_free_buf;
eaf15d5b 4357 }
875fbdfe 4358 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
45ed1190
JS
4359 spin_unlock(shost->host_lock);
4360 lpfc_sli_handle_fast_ring_event(phba,
4361 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4362
4363 spin_lock(shost->host_lock);
875fbdfe
JSEC
4364 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4365 lpfc_poll_rearm_timer(phba);
4366 }
4367
dea3101e 4368 return 0;
4369
4370 out_host_busy_free_buf:
bcf4dbfa 4371 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
0bd4ca25 4372 lpfc_release_scsi_buf(phba, lpfc_cmd);
dea3101e 4373 out_host_busy:
4374 return SCSI_MLQUEUE_HOST_BUSY;
4375
3496343d
MC
4376 out_tgt_busy:
4377 return SCSI_MLQUEUE_TARGET_BUSY;
4378
dea3101e 4379 out_fail_command:
4380 done(cmnd);
4381 return 0;
4382}
4383
f281233d
JG
4384static DEF_SCSI_QCMD(lpfc_queuecommand)
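/*
 * Editor's note (hedged): DEF_SCSI_QCMD() from <scsi/scsi_host.h>
 * generates the unlocked lpfc_queuecommand() entry point: it takes
 * shost->host_lock with interrupts disabled and then calls
 * lpfc_queuecommand_lck() above, preserving the legacy locked
 * queuecommand semantics.
 */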
4385
9bad7671 4386/**
3621a710 4387 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
9bad7671
JS
4388 * @cmnd: Pointer to scsi_cmnd data structure.
4389 *
4390 * This routine aborts @cmnd pending in the base driver.
4391 *
4392 * Return code :
4393 * 0x2003 - Error
4394 * 0x2002 - Success
4395 **/
dea3101e 4396static int
63c59c3b 4397lpfc_abort_handler(struct scsi_cmnd *cmnd)
dea3101e 4398{
2e0fef85
JS
4399 struct Scsi_Host *shost = cmnd->device->host;
4400 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4401 struct lpfc_hba *phba = vport->phba;
0bd4ca25
JSEC
4402 struct lpfc_iocbq *iocb;
4403 struct lpfc_iocbq *abtsiocb;
dea3101e 4404 struct lpfc_scsi_buf *lpfc_cmd;
dea3101e 4405 IOCB_t *cmd, *icmd;
0bd4ca25 4406 int ret = SUCCESS;
fa61a54e 4407 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
dea3101e 4408
589a52d6
JS
4409 ret = fc_block_scsi_eh(cmnd);
4410 if (ret)
4411 return ret;
4f2e66c6
JS
4412
4413 spin_lock_irq(&phba->hbalock);
4414 /* driver-queued commands are in the process of being flushed */
4415 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
4416 spin_unlock_irq(&phba->hbalock);
4417 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4418 "3168 SCSI Layer abort requested I/O has been "
4419 "flushed by LLD.\n");
4420 return FAILED;
4421 }
4422
0bd4ca25 4423 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
eee8877e 4424 if (!lpfc_cmd) {
4f2e66c6 4425 spin_unlock_irq(&phba->hbalock);
eee8877e
JS
4426 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4427 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
5cd049a5
CH
4428 "x%x ID %d LUN %d\n",
4429 ret, cmnd->device->id, cmnd->device->lun);
eee8877e
JS
4430 return SUCCESS;
4431 }
dea3101e 4432
4f2e66c6
JS
4433 iocb = &lpfc_cmd->cur_iocbq;
4434 /* the command is in the process of being cancelled */
4435 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4436 spin_unlock_irq(&phba->hbalock);
4437 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4438 "3169 SCSI Layer abort requested I/O has been "
4439 "cancelled by LLD.\n");
4440 return FAILED;
4441 }
0bd4ca25
JSEC
4442 /*
4443 * If pCmd field of the corresponding lpfc_scsi_buf structure
4444 * points to a different SCSI command, then the driver has
4445 * already completed this command, but the midlayer did not
4f2e66c6 4446 * see the completion before the eh fired. Just return SUCCESS.
0bd4ca25 4447 */
4f2e66c6
JS
4448 if (lpfc_cmd->pCmd != cmnd) {
4449 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4450 "3170 SCSI Layer abort requested I/O has been "
4451 "completed by LLD.\n");
4452 goto out_unlock;
4453 }
dea3101e 4454
0bd4ca25 4455 BUG_ON(iocb->context1 != lpfc_cmd);
dea3101e 4456
4f2e66c6 4457 abtsiocb = __lpfc_sli_get_iocbq(phba);
0bd4ca25
JSEC
4458 if (abtsiocb == NULL) {
4459 ret = FAILED;
4f2e66c6 4460 goto out_unlock;
dea3101e 4461 }
4462
dea3101e 4463 /*
0bd4ca25
JSEC
4464 * The scsi command cannot be in the txq and it is in flight because
4465 * pCmd is still pointing at the SCSI command we have to abort. There
4466 * is no need to search the txcmplq. Just send an abort to the FW.
dea3101e 4467 */
dea3101e 4468
0bd4ca25
JSEC
4469 cmd = &iocb->iocb;
4470 icmd = &abtsiocb->iocb;
4471 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
4472 icmd->un.acxri.abortContextTag = cmd->ulpContext;
3772a991
JS
4473 if (phba->sli_rev == LPFC_SLI_REV4)
4474 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
4475 else
4476 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
dea3101e 4477
0bd4ca25
JSEC
4478 icmd->ulpLe = 1;
4479 icmd->ulpClass = cmd->ulpClass;
5ffc266e
JS
4480
4481 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
4482 abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
341af102 4483 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
5ffc266e 4484
2e0fef85 4485 if (lpfc_is_link_up(phba))
0bd4ca25
JSEC
4486 icmd->ulpCommand = CMD_ABORT_XRI_CN;
4487 else
4488 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
dea3101e 4489
0bd4ca25 4490 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2e0fef85 4491 abtsiocb->vport = vport;
4f2e66c6
JS
4492 /* no longer need the lock after this point */
4493 spin_unlock_irq(&phba->hbalock);
4494
3772a991
JS
4495 if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
4496 IOCB_ERROR) {
0bd4ca25
JSEC
4497 lpfc_sli_release_iocbq(phba, abtsiocb);
4498 ret = FAILED;
4499 goto out;
4500 }
dea3101e 4501
875fbdfe 4502 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
45ed1190
JS
4503 lpfc_sli_handle_fast_ring_event(phba,
4504 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
875fbdfe 4505
fa61a54e 4506 lpfc_cmd->waitq = &waitq;
0bd4ca25 4507 /* Wait for abort to complete */
fa61a54e
JS
4508 wait_event_timeout(waitq,
4509 (lpfc_cmd->pCmd != cmnd),
4510 (2*vport->cfg_devloss_tmo*HZ));
fa61a54e 4511 lpfc_cmd->waitq = NULL;
dea3101e 4512
0bd4ca25
JSEC
4513 if (lpfc_cmd->pCmd == cmnd) {
4514 ret = FAILED;
e8b62011
JS
4515 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4516 "0748 abort handler timed out waiting "
4517 "for abort to complete: ret %#x, ID %d, "
5cd049a5
CH
4518 "LUN %d\n",
4519 ret, cmnd->device->id, cmnd->device->lun);
dea3101e 4520 }
4f2e66c6 4521 goto out;
dea3101e 4522
4f2e66c6
JS
4523out_unlock:
4524 spin_unlock_irq(&phba->hbalock);
4525out:
e8b62011
JS
4526 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4527 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
5cd049a5
CH
4528 "LUN %d\n", ret, cmnd->device->id,
4529 cmnd->device->lun);
63c59c3b 4530 return ret;
8fa728a2
JG
4531}
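/*
 * Editor's worked example (hedged): the wait above is bounded by
 * 2 * vport->cfg_devloss_tmo seconds; with a devloss_tmo of 30 s
 * (a common default, assumed here) the abort handler waits up to
 * 60 s before logging "0748 abort handler timed out" and returning
 * FAILED.
 */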
4532
bbb9d180
JS
4533static char *
4534lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4535{
4536 switch (task_mgmt_cmd) {
4537 case FCP_ABORT_TASK_SET:
4538 return "ABORT_TASK_SET";
4539 case FCP_CLEAR_TASK_SET:
4540 return "FCP_CLEAR_TASK_SET";
4541 case FCP_BUS_RESET:
4542 return "FCP_BUS_RESET";
4543 case FCP_LUN_RESET:
4544 return "FCP_LUN_RESET";
4545 case FCP_TARGET_RESET:
4546 return "FCP_TARGET_RESET";
4547 case FCP_CLEAR_ACA:
4548 return "FCP_CLEAR_ACA";
4549 case FCP_TERMINATE_TASK:
4550 return "FCP_TERMINATE_TASK";
4551 default:
4552 return "unknown";
4553 }
4554}
4555
9bad7671 4556/**
bbb9d180
JS
4557 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
4558 * @vport: The virtual port for which this call is being executed.
4559 * @rdata: Pointer to remote port local data
4560 * @tgt_id: Target ID of remote device.
4561 * @lun_id: Lun number for the TMF
4562 * @task_mgmt_cmd: type of TMF to send
9bad7671 4563 *
bbb9d180
JS
4564 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
4565 * a remote port.
9bad7671 4566 *
bbb9d180
JS
4567 * Return Code:
4568 * 0x2003 - Error
4569 * 0x2002 - Success.
9bad7671 4570 **/
dea3101e 4571static int
bbb9d180
JS
4572lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
4573 unsigned tgt_id, unsigned int lun_id,
4574 uint8_t task_mgmt_cmd)
dea3101e 4575{
2e0fef85 4576 struct lpfc_hba *phba = vport->phba;
0bd4ca25 4577 struct lpfc_scsi_buf *lpfc_cmd;
bbb9d180
JS
4578 struct lpfc_iocbq *iocbq;
4579 struct lpfc_iocbq *iocbqrsp;
5989b8d4 4580 struct lpfc_nodelist *pnode = rdata->pnode;
bbb9d180 4581 int ret;
915caaaf 4582 int status;
dea3101e 4583
5989b8d4 4584 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
915caaaf 4585 return FAILED;
bbb9d180 4586
19ca7609 4587 lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
dea3101e 4588 if (lpfc_cmd == NULL)
915caaaf 4589 return FAILED;
dea3101e 4590 lpfc_cmd->timeout = 60;
0b18ac42 4591 lpfc_cmd->rdata = rdata;
dea3101e 4592
bbb9d180
JS
4593 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
4594 task_mgmt_cmd);
915caaaf
JS
4595 if (!status) {
4596 lpfc_release_scsi_buf(phba, lpfc_cmd);
4597 return FAILED;
4598 }
dea3101e 4599
bbb9d180 4600 iocbq = &lpfc_cmd->cur_iocbq;
0bd4ca25 4601 iocbqrsp = lpfc_sli_get_iocbq(phba);
915caaaf
JS
4602 if (iocbqrsp == NULL) {
4603 lpfc_release_scsi_buf(phba, lpfc_cmd);
4604 return FAILED;
4605 }
bbb9d180 4606
e8b62011 4607 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
bbb9d180 4608 "0702 Issue %s to TGT %d LUN %d "
6d368e53 4609 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
bbb9d180 4610 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
6d368e53
JS
4611 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
4612 iocbq->iocb_flag);
bbb9d180 4613
3772a991 4614 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
915caaaf 4615 iocbq, iocbqrsp, lpfc_cmd->timeout);
bbb9d180
JS
4616 if (status != IOCB_SUCCESS) {
4617 if (status == IOCB_TIMEDOUT) {
4618 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
4619 ret = TIMEOUT_ERROR;
4620 } else
915caaaf 4621 ret = FAILED;
bbb9d180
JS
4622 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4623 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
6d368e53
JS
4624 "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
4625 "iocb_flag x%x\n",
bbb9d180
JS
4626 lpfc_taskmgmt_name(task_mgmt_cmd),
4627 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
6d368e53
JS
4628 iocbqrsp->iocb.un.ulpWord[4],
4629 iocbq->iocb_flag);
2a9bf3d0
JS
4630 } else if (status == IOCB_BUSY)
4631 ret = FAILED;
4632 else
bbb9d180
JS
4633 ret = SUCCESS;
4634
6175c02a 4635 lpfc_sli_release_iocbq(phba, iocbqrsp);
bbb9d180
JS
4636
4637 if (ret != TIMEOUT_ERROR)
4638 lpfc_release_scsi_buf(phba, lpfc_cmd);
4639
4640 return ret;
4641}
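/*
 * Editor's usage sketch (grounded in the reset handlers later in this
 * file): a LUN reset to target 2, LUN 0 followed by the orphaned-I/O
 * sweep looks like:
 *
 *	status = lpfc_send_taskmgmt(vport, rdata, 2, 0, FCP_LUN_RESET);
 *	...
 *	status = lpfc_reset_flush_io_context(vport, 2, 0, LPFC_CTX_LUN);
 */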
4642
4643/**
4644 * lpfc_chk_tgt_mapped - Delay until the scsi target is usable
4645 * @vport: The virtual port to check on
4646 * @cmnd: Pointer to scsi_cmnd data structure.
4647 *
4648 * This routine delays until the scsi target (aka rport) for the
4649 * command exists (is present and logged in) or we declare it non-existent.
4650 *
4651 * Return code :
4652 * 0x2003 - Error
4653 * 0x2002 - Success
4654 **/
4655static int
4656lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
4657{
4658 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1c6f4ef5 4659 struct lpfc_nodelist *pnode;
bbb9d180
JS
4660 unsigned long later;
4661
1c6f4ef5
JS
4662 if (!rdata) {
4663 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4664 "0797 Tgt Map rport failure: rdata x%p\n", rdata);
4665 return FAILED;
4666 }
4667 pnode = rdata->pnode;
bbb9d180
JS
4668 /*
4669 * If target is not in a MAPPED state, delay until
4670 * target is rediscovered or devloss timeout expires.
4671 */
4672 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
4673 while (time_after(later, jiffies)) {
4674 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4675 return FAILED;
4676 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
4677 return SUCCESS;
4678 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
4679 rdata = cmnd->device->hostdata;
4680 if (!rdata)
4681 return FAILED;
4682 pnode = rdata->pnode;
4683 }
4684 if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
4685 (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4686 return FAILED;
4687 return SUCCESS;
4688}
4689
4690/**
4691 * lpfc_reset_flush_io_context - Flush orphaned I/O after a TMF reset
4692 * @vport: The virtual port (scsi_host) for the flush context
4693 * @tgt_id: If aborting by Target context - specifies the target id
4694 * @lun_id: If aborting by Lun context - specifies the lun id
4695 * @context: specifies the context level to flush at.
4696 *
4697 * After a reset condition via TMF, we need to flush orphaned i/o
4698 * contexts from the adapter. This routine aborts any contexts
4699 * outstanding, then waits for their completions. The wait is
4700 * bounded by devloss_tmo though.
4701 *
4702 * Return code :
4703 * 0x2003 - Error
4704 * 0x2002 - Success
4705 **/
4706static int
4707lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
4708 uint64_t lun_id, lpfc_ctx_cmd context)
4709{
4710 struct lpfc_hba *phba = vport->phba;
4711 unsigned long later;
4712 int cnt;
4713
4714 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
6175c02a 4715 if (cnt)
51ef4c26 4716 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
bbb9d180 4717 tgt_id, lun_id, context);
915caaaf
JS
4718 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
4719 while (time_after(later, jiffies) && cnt) {
4720 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
bbb9d180 4721 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
dea3101e 4722 }
dea3101e 4723 if (cnt) {
e8b62011 4724 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
bbb9d180
JS
4725 "0724 I/O flush failure for context %s : cnt x%x\n",
4726 ((context == LPFC_CTX_LUN) ? "LUN" :
4727 ((context == LPFC_CTX_TGT) ? "TGT" :
4728 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
4729 cnt);
4730 return FAILED;
dea3101e 4731 }
bbb9d180
JS
4732 return SUCCESS;
4733}
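/*
 * Editor's note (hedged): the loop above polls lpfc_sli_sum_iocb() in
 * 20 ms sleeps for at most 2 * cfg_devloss_tmo seconds; any context
 * still outstanding then triggers the "0724 I/O flush failure" message
 * and a FAILED return.
 */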
4734
4735/**
4736 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
4737 * @cmnd: Pointer to scsi_cmnd data structure.
4738 *
4739 * This routine does a device reset by sending a LUN_RESET task management
4740 * command.
4741 *
4742 * Return code :
4743 * 0x2003 - Error
4744 * 0x2002 - Success
4745 **/
4746static int
4747lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
4748{
4749 struct Scsi_Host *shost = cmnd->device->host;
4750 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4751 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1c6f4ef5 4752 struct lpfc_nodelist *pnode;
bbb9d180
JS
4753 unsigned tgt_id = cmnd->device->id;
4754 unsigned int lun_id = cmnd->device->lun;
4755 struct lpfc_scsi_event_header scsi_event;
4756 int status;
4757
1c6f4ef5
JS
4758 if (!rdata) {
4759 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4760 "0798 Device Reset rport failure: rdata x%p\n", rdata);
4761 return FAILED;
4762 }
4763 pnode = rdata->pnode;
589a52d6
JS
4764 status = fc_block_scsi_eh(cmnd);
4765 if (status)
4766 return status;
bbb9d180
JS
4767
4768 status = lpfc_chk_tgt_mapped(vport, cmnd);
4769 if (status == FAILED) {
4770 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4771 "0721 Device Reset rport failure: rdata x%p\n", rdata);
4772 return FAILED;
4773 }
4774
4775 scsi_event.event_type = FC_REG_SCSI_EVENT;
4776 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
4777 scsi_event.lun = lun_id;
4778 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
4779 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
4780
4781 fc_host_post_vendor_event(shost, fc_get_event_number(),
4782 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
4783
4784 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
4785 FCP_LUN_RESET);
4786
4787 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4788 "0713 SCSI layer issued Device Reset (%d, %d) "
4789 "return x%x\n", tgt_id, lun_id, status);
4790
4791 /*
4792 * We have to clean up i/o as : they may be orphaned by the TMF;
4793 * or if the TMF failed, they may be in an indeterminate state.
4794 * So, continue on.
4795 * We will report success if all the i/o aborts successfully.
4796 */
4797 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
4798 LPFC_CTX_LUN);
4799 return status;
4800}
4801
4802/**
4803 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
4804 * @cmnd: Pointer to scsi_cmnd data structure.
4805 *
4806 * This routine does a target reset by sending a TARGET_RESET task management
4807 * command.
4808 *
4809 * Return code :
4810 * 0x2003 - Error
4811 * 0x2002 - Success
4812 **/
4813static int
4814lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
4815{
4816 struct Scsi_Host *shost = cmnd->device->host;
4817 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4818 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1c6f4ef5 4819 struct lpfc_nodelist *pnode;
4820 unsigned tgt_id = cmnd->device->id;
4821 unsigned int lun_id = cmnd->device->lun;
4822 struct lpfc_scsi_event_header scsi_event;
4823 int status;
4824
4825 if (!rdata) {
4826 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4827 "0799 Target Reset rport failure: rdata x%p\n", rdata);
4828 return FAILED;
4829 }
4830 pnode = rdata->pnode;
4831 status = fc_block_scsi_eh(cmnd);
4832 if (status)
4833 return status;
4834
4835 status = lpfc_chk_tgt_mapped(vport, cmnd);
4836 if (status == FAILED) {
4837 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4838 "0722 Target Reset rport failure: rdata x%p\n", rdata);
4839 return FAILED;
4840 }
4841
4842 scsi_event.event_type = FC_REG_SCSI_EVENT;
4843 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
4844 scsi_event.lun = 0;
4845 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
4846 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
4847
4848 fc_host_post_vendor_event(shost, fc_get_event_number(),
4849 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
4850
4851 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
4852 FCP_TARGET_RESET);
4853
4854 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4855 "0723 SCSI layer issued Target Reset (%d, %d) "
4856 "return x%x\n", tgt_id, lun_id, status);
4857
 4858 	/*
 4859 	 * Clean up the I/O regardless of the TMF result: commands may have
 4860 	 * been orphaned by the TMF, or left in an indeterminate state if the
 4861 	 * TMF failed. So, continue on.
 4862 	 * We report success only if all outstanding I/O aborts cleanly.
 4863 	 */
4864 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
4865 LPFC_CTX_TGT);
4866 return status;
dea3101e 4867}
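
/*
 * Illustrative sketch (not part of the driver): the two handlers above
 * differ only in the TMF they send and the scope of the post-TMF flush.
 * A hypothetical table (example_reset_scope) making that explicit.
 */
static const struct {
	uint8_t tmf;		/* FCP task management flag sent */
	lpfc_ctx_cmd ctx;	/* scope of lpfc_reset_flush_io_context() */
} example_reset_scope[] = {
	{ FCP_LUN_RESET,    LPFC_CTX_LUN },	/* eh_device_reset */
	{ FCP_TARGET_RESET, LPFC_CTX_TGT },	/* eh_target_reset */
};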
4868
9bad7671 4869/**
3621a710 4870 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 4871 * @cmnd: Pointer to scsi_cmnd data structure.
 4872 *
 4873 * This routine issues a target reset to every target on @cmnd->device->host,
 4874 * emulating parallel SCSI bus reset semantics.
 4875 *
 4876 * Return code :
 4877 * 0x2003 - Error
 4878 * 0x2002 - Success
 4879 **/
94d0e7b8 4880static int
7054a606 4881lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
dea3101e 4882{
4883 struct Scsi_Host *shost = cmnd->device->host;
4884 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
dea3101e 4885 struct lpfc_nodelist *ndlp = NULL;
ea2151b4 4886 struct lpfc_scsi_event_header scsi_event;
4887 int match;
4888 int ret = SUCCESS, status, i;
4889
4890 scsi_event.event_type = FC_REG_SCSI_EVENT;
4891 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
4892 scsi_event.lun = 0;
4893 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
4894 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
4895
4896 fc_host_post_vendor_event(shost, fc_get_event_number(),
4897 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
dea3101e 4898
4899 status = fc_block_scsi_eh(cmnd);
4900 if (status)
4901 return status;
bbb9d180 4902
dea3101e 4903 /*
4904 * Since the driver manages a single bus device, reset all
4905 * targets known to the driver. Should any target reset
4906 * fail, this routine returns failure to the midlayer.
4907 */
e17da18e 4908 for (i = 0; i < LPFC_MAX_TARGET; i++) {
685f0bf7 4909 /* Search for mapped node by target ID */
dea3101e 4910 match = 0;
4911 spin_lock_irq(shost->host_lock);
4912 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4913 if (!NLP_CHK_NODE_ACT(ndlp))
4914 continue;
685f0bf7 4915 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
915caaaf 4916 ndlp->nlp_sid == i &&
685f0bf7 4917 ndlp->rport) {
dea3101e 4918 match = 1;
4919 break;
4920 }
4921 }
2e0fef85 4922 spin_unlock_irq(shost->host_lock);
dea3101e 4923 if (!match)
4924 continue;
4925
4926 status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
4927 i, 0, FCP_TARGET_RESET);
4928
4929 if (status != SUCCESS) {
4930 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4931 "0700 Bus Reset on target %d failed\n",
4932 i);
915caaaf 4933 ret = FAILED;
dea3101e 4934 }
4935 }
6175c02a 4936	/*
 4937	 * Clean up the I/O regardless of the TMF results: commands may have
 4938	 * been orphaned by the TMFs above, or left in an indeterminate state
 4939	 * if any of the TMFs failed. So, continue on.
 4940	 * We report success only if all outstanding I/O aborts cleanly.
 4941	 */
4942
4943 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
4944 if (status != SUCCESS)
0bd4ca25 4945 ret = FAILED;
bbb9d180 4946
4947 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4948 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
dea3101e 4949 return ret;
4950}
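
/*
 * Illustrative sketch (not part of the driver): the mapped-node lookup
 * that lpfc_bus_reset_handler() repeats for each target ID, pulled out
 * as a hypothetical helper. The host lock guards the fc_nodes list.
 */
static struct lpfc_nodelist *example_find_mapped_node(struct lpfc_vport *vport,
						      struct Scsi_Host *shost,
						      uint32_t sid)
{
	struct lpfc_nodelist *ndlp, *found = NULL;

	spin_lock_irq(shost->host_lock);
	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (NLP_CHK_NODE_ACT(ndlp) &&
		    ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
		    ndlp->nlp_sid == sid && ndlp->rport) {
			found = ndlp;
			break;
		}
	}
	spin_unlock_irq(shost->host_lock);
	return found;
}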
4951
9bad7671 4952/**
3621a710 4953 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 4954 * @sdev: Pointer to scsi_device.
 4955 *
 4956 * This routine adds lun_queue_depth + 2 scsi_bufs to this host's globally
 4957 * available list of scsi buffers, and makes sure no more buffers are
 4958 * allocated than the HBA limit conveyed to the midlayer. This list of
 4959 * scsi buffers exists for the lifetime of the driver.
4960 *
4961 * Return codes:
4962 * non-0 - Error
4963 * 0 - Success
4964 **/
dea3101e 4965static int
4966lpfc_slave_alloc(struct scsi_device *sdev)
4967{
4968 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
4969 struct lpfc_hba *phba = vport->phba;
19a7b4ae 4970 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
3772a991 4971 uint32_t total = 0;
dea3101e 4972 uint32_t num_to_alloc = 0;
3772a991 4973 int num_allocated = 0;
d7c47992 4974 uint32_t sdev_cnt;
dea3101e 4975
19a7b4ae 4976 if (!rport || fc_remote_port_chkready(rport))
dea3101e 4977 return -ENXIO;
4978
19a7b4ae 4979 sdev->hostdata = rport->dd_data;
d7c47992 4980 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
dea3101e 4981
4982 /*
4983 * Populate the cmds_per_lun count scsi_bufs into this host's globally
4984 * available list of scsi buffers. Don't allocate more than the
4985 * HBA limit conveyed to the midlayer via the host structure. The
4986 * formula accounts for the lun_queue_depth + error handlers + 1
4987 * extra. This list of scsi bufs exists for the lifetime of the driver.
dea3101e 4988 */
4989 total = phba->total_scsi_bufs;
3de2a653 4990 num_to_alloc = vport->cfg_lun_queue_depth + 2;
92d7f7b0 4991
 4992	/* If enough buffers are already allocated, do nothing */
4993 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
4994 return 0;
4995
4996 /* Allow some exchanges to be available always to complete discovery */
 4997	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
4998 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4999 "0704 At limitation of %d preallocated "
5000 "command buffers\n", total);
dea3101e 5001 return 0;
 5002	/* Allow some exchanges to be available always to complete discovery */
 5003	} else if (total + num_to_alloc >
 5004		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
5005 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5006 "0705 Allocation request of %d "
5007 "command buffers will exceed max of %d. "
5008 "Reducing allocation request to %d.\n",
5009 num_to_alloc, phba->cfg_hba_queue_depth,
5010 (phba->cfg_hba_queue_depth - total));
dea3101e 5011 num_to_alloc = phba->cfg_hba_queue_depth - total;
5012 }
5013 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
5014 if (num_to_alloc != num_allocated) {
5015 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5016 "0708 Allocation request of %d "
5017 "command buffers did not succeed. "
5018 "Allocated %d buffers.\n",
5019 num_to_alloc, num_allocated);
dea3101e 5020 }
5021 if (num_allocated > 0)
5022 phba->total_scsi_bufs += num_allocated;
dea3101e 5023 return 0;
5024}
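
/*
 * Illustrative sketch (not part of the driver): the sizing rule that
 * lpfc_slave_alloc() applies, as a hypothetical pure helper. It returns
 * how many scsi_bufs a new LUN should add without exhausting the HBA
 * queue depth or the exchanges reserved for discovery.
 */
static uint32_t example_bufs_to_alloc(uint32_t lun_qdepth, uint32_t total,
				      uint32_t hba_qdepth)
{
	uint32_t want = lun_qdepth + 2;	/* queue depth + EH + 1 extra */

	if (total >= hba_qdepth - LPFC_DISC_IOCB_BUFF_COUNT)
		return 0;			/* already at the cap */
	if (total + want > hba_qdepth - LPFC_DISC_IOCB_BUFF_COUNT)
		return hba_qdepth - total;	/* clamp, per the 0705 path */
	return want;
}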
5025
9bad7671 5026/**
3621a710 5027 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 5028 * @sdev: Pointer to scsi_device.
 5029 *
 5030 * This routine configures the following items:
 5031 * - Tag command queuing support for @sdev, if supported.
 5032 * - SLI polling for the FCP ring, if the ENABLE_FCP_RING_POLLING flag is set.
5033 *
5034 * Return codes:
5035 * 0 - Success
5036 **/
dea3101e 5037static int
5038lpfc_slave_configure(struct scsi_device *sdev)
5039{
5040 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5041 struct lpfc_hba *phba = vport->phba;
dea3101e 5042
5043 if (sdev->tagged_supported)
3de2a653 5044 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
dea3101e 5045 else
3de2a653 5046 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
dea3101e 5047
875fbdfe 5048 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5049 lpfc_sli_handle_fast_ring_event(phba,
5050 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
5051 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5052 lpfc_poll_rearm_timer(phba);
5053 }
5054
dea3101e 5055 return 0;
5056}
5057
9bad7671 5058/**
3621a710 5059 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 5060 * @sdev: Pointer to scsi_device.
 5061 *
 5062 * This routine sets the @sdev hostdata field to NULL.
 5063 **/
dea3101e 5064static void
5065lpfc_slave_destroy(struct scsi_device *sdev)
5066{
5067 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5068 struct lpfc_hba *phba = vport->phba;
5069 atomic_dec(&phba->sdev_cnt);
dea3101e 5070 sdev->hostdata = NULL;
5071 return;
5072}
5073
92d7f7b0 5074
dea3101e 5075struct scsi_host_template lpfc_template = {
5076 .module = THIS_MODULE,
5077 .name = LPFC_DRIVER_NAME,
5078 .info = lpfc_info,
5079 .queuecommand = lpfc_queuecommand,
5080 .eh_abort_handler = lpfc_abort_handler,
5081 .eh_device_reset_handler = lpfc_device_reset_handler,
5082 .eh_target_reset_handler = lpfc_target_reset_handler,
7054a606 5083 .eh_bus_reset_handler = lpfc_bus_reset_handler,
dea3101e 5084 .slave_alloc = lpfc_slave_alloc,
5085 .slave_configure = lpfc_slave_configure,
5086 .slave_destroy = lpfc_slave_destroy,
47a8617c 5087 .scan_finished = lpfc_scan_finished,
dea3101e 5088 .this_id = -1,
83108bd3 5089 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
dea3101e 5090 .cmd_per_lun = LPFC_CMD_PER_LUN,
5091 .use_clustering = ENABLE_CLUSTERING,
2e0fef85 5092 .shost_attrs = lpfc_hba_attrs,
564b2960 5093 .max_sectors = 0xFFFF,
f1c3b0fc 5094 .vendor_id = LPFC_NL_VENDOR_ID,
5ffc266e 5095 .change_queue_depth = lpfc_change_queue_depth,
dea3101e 5096};
5097
5098struct scsi_host_template lpfc_vport_template = {
5099 .module = THIS_MODULE,
5100 .name = LPFC_DRIVER_NAME,
5101 .info = lpfc_info,
5102 .queuecommand = lpfc_queuecommand,
5103 .eh_abort_handler = lpfc_abort_handler,
5104 .eh_device_reset_handler = lpfc_device_reset_handler,
5105 .eh_target_reset_handler = lpfc_target_reset_handler,
5106 .eh_bus_reset_handler = lpfc_bus_reset_handler,
5107 .slave_alloc = lpfc_slave_alloc,
5108 .slave_configure = lpfc_slave_configure,
5109 .slave_destroy = lpfc_slave_destroy,
5110 .scan_finished = lpfc_scan_finished,
5111 .this_id = -1,
83108bd3 5112 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
5113 .cmd_per_lun = LPFC_CMD_PER_LUN,
5114 .use_clustering = ENABLE_CLUSTERING,
5115 .shost_attrs = lpfc_vport_attrs,
5116 .max_sectors = 0xFFFF,
5ffc266e 5117 .change_queue_depth = lpfc_change_queue_depth,
3de2a653 5118};
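
/*
 * Illustrative sketch (not part of the driver): how a scsi_host_template
 * such as lpfc_template above is typically brought online. This mirrors
 * generic SCSI midlayer usage, not lpfc's actual attach path (which lives
 * in lpfc_init.c); error handling is abbreviated and example_* is
 * hypothetical.
 */
static int example_attach_host(struct device *dev)
{
	struct Scsi_Host *shost;
	int error;

	shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
	if (!shost)
		return -ENOMEM;

	error = scsi_add_host(shost, dev);  /* wires up the eh_* and slave_* hooks */
	if (error) {
		scsi_host_put(shost);
		return error;
	}
	scsi_scan_host(shost);	/* drives slave_alloc/slave_configure per LUN */
	return 0;
}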