scsi: lpfc: remove ScsiResult macro
[linux-2.6-block.git] / drivers / scsi / lpfc / lpfc_scsi.c
CommitLineData
d85296cf 1/*******************************************************************
dea3101e 2 * This file is part of the Emulex Linux Device Driver for *
c44ce173 3 * Fibre Channel Host Bus Adapters. *
128bddac 4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
3e21d1cb 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
51f4ca3c 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
c44ce173 7 * EMULEX and SLI are trademarks of Emulex. *
d080abe0 8 * www.broadcom.com *
c44ce173 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
dea3101e 10 * *
11 * This program is free software; you can redistribute it and/or *
c44ce173
JSEC
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
dea3101e 22 *******************************************************************/
dea3101e 23#include <linux/pci.h>
5a0e3ad6 24#include <linux/slab.h>
dea3101e 25#include <linux/interrupt.h>
09703660 26#include <linux/export.h>
a90f5684 27#include <linux/delay.h>
e2a0a9d6 28#include <asm/unaligned.h>
128b6f9f 29#include <linux/t10-pi.h>
737d4248
JS
30#include <linux/crc-t10dif.h>
31#include <net/checksum.h>
dea3101e 32
33#include <scsi/scsi.h>
34#include <scsi/scsi_device.h>
e2a0a9d6 35#include <scsi/scsi_eh.h>
dea3101e 36#include <scsi/scsi_host.h>
37#include <scsi/scsi_tcq.h>
38#include <scsi/scsi_transport_fc.h>
39
40#include "lpfc_version.h"
da0436e9 41#include "lpfc_hw4.h"
dea3101e 42#include "lpfc_hw.h"
43#include "lpfc_sli.h"
da0436e9 44#include "lpfc_sli4.h"
ea2151b4 45#include "lpfc_nl.h"
dea3101e 46#include "lpfc_disc.h"
dea3101e 47#include "lpfc.h"
9a6b09c0 48#include "lpfc_scsi.h"
dea3101e 49#include "lpfc_logmsg.h"
50#include "lpfc_crtn.h"
92d7f7b0 51#include "lpfc_vport.h"
dea3101e 52
/* Seconds between polls while waiting on an outstanding reset/abort. */
#define LPFC_RESET_WAIT 2
#define LPFC_ABORT_WAIT 2

/* Non-zero once the BlockGuard debug capture buffers have been dumped;
 * cleared elsewhere when a new capture is armed — see users of
 * _dump_buf_data/_dump_buf_dif.
 */
int _dump_buf_done = 1;

/* Human-readable names for the DIF protection operations, used only in
 * log messages. Presumably indexed by the command's protection op —
 * verify against the call sites before relying on the ordering.
 */
static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

/* In-memory layout of one 8-byte T10 DIF tuple; all fields big-endian. */
struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};
73
1ba981fd
JS
74static struct lpfc_rport_data *
75lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
76{
77 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
78
f38fa0bb 79 if (vport->phba->cfg_fof)
1ba981fd
JS
80 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
81 else
82 return (struct lpfc_rport_data *)sdev->hostdata;
83}
84
da0436e9
JS
/* Forward declarations for routines defined later in this file. */
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
e2a0a9d6
JS
91
/**
 * lpfc_debug_save_data - Capture a command's data buffer for BlockGuard debug
 * @phba: HBA used for error logging.
 * @cmnd: SCSI command whose data scatterlist is captured.
 *
 * Linearizes the command's data scatterlist into the global _dump_buf_data
 * capture buffer. No bounds check is performed on the destination here;
 * presumably _dump_buf_data is sized for the largest possible I/O at
 * allocation time — verify at the allocation site.
 **/
static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
			__func__);
		return;
	}


	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	/* Append each scatterlist segment to the dump buffer in order. */
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}
120
121static void
6a9c52cf 122lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
e2a0a9d6
JS
123{
124 void *src, *dst;
125 struct scatterlist *sgde = scsi_prot_sglist(cmnd);
126
127 if (!_dump_buf_dif) {
6a9c52cf
JS
128 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
129 "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
e2a0a9d6
JS
130 __func__);
131 return;
132 }
133
134 if (!sgde) {
6a9c52cf
JS
135 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
136 "9053 BLKGRD: ERROR: prot scatterlist is null\n");
e2a0a9d6
JS
137 return;
138 }
139
140 dst = _dump_buf_dif;
141 while (sgde) {
142 src = sg_virt(sgde);
143 memcpy(dst, src, sgde->length);
144 dst += sgde->length;
145 sgde = sg_next(sgde);
146 }
147}
148
9c6aa9d7
JS
/**
 * lpfc_cmd_blksize - Logical block size of the command's target device
 * @sc: SCSI command.
 *
 * Return: the device's sector size in bytes.
 **/
static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}
154
/* Selector flags for lpfc_cmd_protect(). */
#define LPFC_CHECK_PROTECT_GUARD 1
#define LPFC_CHECK_PROTECT_REF 2
/**
 * lpfc_cmd_protect - Whether guard/ref checking is enabled for a command
 * @sc: SCSI command (unused in this build).
 * @flag: LPFC_CHECK_PROTECT_GUARD or LPFC_CHECK_PROTECT_REF (unused).
 *
 * In this build both protection checks are unconditionally enabled: the
 * arguments are ignored and 1 is always returned.
 **/
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	return 1;
}
162
/**
 * lpfc_cmd_guard_csum - Whether the command's guard tag is an IP checksum
 * @sc: SCSI command.
 *
 * Return: 1 when the command carries protection data and the host is
 * configured for SHOST_DIX_GUARD_IP guard tags; 0 when the command has no
 * DIF grouping (LPFC_PG_TYPE_NO_DIF) or the host guard type is not IP.
 **/
static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
{
	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
		return 0;
	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
		return 1;
	return 0;
}
172
f1126688
JS
/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry. The buffer's SGL holds the
 * FCP command sge first and the response sge second, hence the +1.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
			   struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	if (sgl) {
		sgl += 1;	/* advance to the response sge */
		/* word2 lives in little-endian form; convert, flag, convert back */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}
193
/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion. Only
 * successful completions (cmd->result == 0) are counted; the completion
 * latency is bucketed either linearly or by power-of-two steps depending
 * on phba->bucket_type. The per-node counters are protected by the
 * SCSI host lock.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	/* Only successful completions contribute to the statistics. */
	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	/* Bail out when collection is off, blocked, or has nowhere to go. */
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		/* Exponential buckets: find the first 2^i step that covers
		 * the latency; overflow lands in the last bucket.
		 */
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
247
9bad7671 248/**
3621a710 249 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
9bad7671
JS
250 * @phba: The Hba for which this call is being executed.
251 *
252 * This routine is called when there is resource error in driver or firmware.
253 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
254 * posts at most 1 event each second. This routine wakes up worker thread of
255 * @phba to process WORKER_RAM_DOWN_EVENT event.
256 *
257 * This routine should be called with no lock held.
258 **/
92d7f7b0 259void
eaf15d5b 260lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
92d7f7b0
JS
261{
262 unsigned long flags;
5e9d9b82 263 uint32_t evt_posted;
0d4aec13 264 unsigned long expires;
92d7f7b0
JS
265
266 spin_lock_irqsave(&phba->hbalock, flags);
267 atomic_inc(&phba->num_rsrc_err);
268 phba->last_rsrc_error_time = jiffies;
269
0d4aec13
MS
270 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
271 if (time_after(expires, jiffies)) {
92d7f7b0
JS
272 spin_unlock_irqrestore(&phba->hbalock, flags);
273 return;
274 }
275
276 phba->last_ramp_down_time = jiffies;
277
278 spin_unlock_irqrestore(&phba->hbalock, flags);
279
280 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
5e9d9b82
JS
281 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
282 if (!evt_posted)
92d7f7b0 283 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
92d7f7b0
JS
284 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
285
5e9d9b82
JS
286 if (!evt_posted)
287 lpfc_worker_wake_up(phba);
92d7f7b0
JS
288 return;
289}
290
/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process WORKER_RAMP_DOWN_QUEUE event for worker
 * thread. This routine reduces queue depth for all scsi device on each vport
 * associated with @phba, scaling the reduction by the ratio of failed to
 * total commands seen since the last ramp-down.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance. If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				/* Shrink depth in proportion to the error
				 * rate; guarantee a decrement of at least 1.
				 */
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
						new_queue_depth;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Reset the global counters for the next interval. */
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
340
a8e497d5 341/**
3621a710 342 * lpfc_scsi_dev_block - set all scsi hosts to block state
a8e497d5
JS
343 * @phba: Pointer to HBA context object.
344 *
345 * This function walks vport list and set each SCSI host to block state
346 * by invoking fc_remote_port_delete() routine. This function is invoked
347 * with EEH when device's PCI slot has been permanently disabled.
348 **/
349void
350lpfc_scsi_dev_block(struct lpfc_hba *phba)
351{
352 struct lpfc_vport **vports;
353 struct Scsi_Host *shost;
354 struct scsi_device *sdev;
355 struct fc_rport *rport;
356 int i;
357
358 vports = lpfc_create_vport_work_array(phba);
359 if (vports != NULL)
21e9a0a5 360 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
a8e497d5
JS
361 shost = lpfc_shost_from_vport(vports[i]);
362 shost_for_each_device(sdev, shost) {
363 rport = starget_to_rport(scsi_target(sdev));
364 fc_remote_port_delete(rport);
365 }
366 }
367 lpfc_destroy_vport_work_array(phba, vports);
368}
369
/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt, bpl_size;

	/* Space left for the scatter/gather BPL after CMND and RSP. */
	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O. The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}


		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		/* DMA region layout: fcp_cmnd | fcp_rsp | BPL. */
		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes. Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			/* Point the IOCB at the external BPL instead. */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1 = psb;
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}
513
1151e3ec
JS
/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal. It clears the rdata pointer of every aborted SCSI
 * buffer whose node belongs to @vport so no stale node reference survives
 * the vport teardown.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;

	/* Nothing to do unless FCP is enabled on this port. */
	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;
	/* hbalock then abts list lock guard the aborted-buffer list. */
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		/* Drop the node reference if it belongs to this vport. */
		if (psb->rdata && psb->rdata->pnode
			&& psb->rdata->pnode->vport == vport)
			psb->rdata = NULL;
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
541
da0436e9
JS
/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri. The aborted-buffer list is searched first; if the xri
 * is found there the buffer is released back to the free pool and an RRQ
 * is set for its node. Otherwise the iotag lookup table is scanned so the
 * exch_busy flag can be cleared on the matching outstanding buffer.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;

	/* Nothing to do unless FCP is enabled on this port. */
	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			/* Found on the aborted list: the exchange is done. */
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				/* Hold the xri quarantined via RRQ before
				 * running the abort error handler.
				 */
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	/* Not on the aborted list: scan outstanding FCP iocbs by iotag. */
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* Kick the worker if the ELS txq has pending work. */
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
613
/**
 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_sblist: pointer to the scsi buffer list.
 * @sb_count: number of scsi buffers expected on @post_sblist.
 *
 * This routine walks a list of scsi buffers that was passed in. It attempts
 * to construct blocks of scsi buffer sgls which contains contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For single SCSI buffer sgl with non-contiguous xri, if any, it shall use
 * embedded SGL post mailbox command for posting. The @post_sblist passed in
 * must be local list, thus no lock is needed when manipulate the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
static int
lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
			     struct list_head *post_sblist, int sb_count)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_bpl1;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sblist);	/* buffers accumulating toward a block */
	LIST_HEAD(blck_sblist);	/* a complete block ready to post */
	LIST_HEAD(scsi_sblist);	/* buffers already handled, to release */

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
		list_del_init(&psb->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sblist, &blck_sblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = psb->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for SCSI buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_bpl1 = psb->dma_phys_bpl +
								SGL_PAGE_SIZE;
				else
					pdma_phys_bpl1 = 0;
				/* embedded (single-sgl) post for this one */
				status = lpfc_sli4_post_sgl(phba,
						psb->dma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
				if (status) {
					/* failure, put on abort scsi list */
					psb->exch_busy = 1;
				} else {
					/* success, put on SCSI buffer list */
					psb->exch_busy = 0;
					psb->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* either way, queue for final release */
				list_add_tail(&psb->list, &scsi_sblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of SCSI buffer list sgls */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
						       post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset SCSI buffer post count for next round of posting */
		post_cnt = 0;

		/* move the posted block onto the SCSI buffer sgl list */
		while (!list_empty(&blck_sblist)) {
			list_remove_head(&blck_sblist, psb,
					 struct lpfc_scsi_buf, list);
			if (status) {
				/* failure, put on abort scsi list */
				psb->exch_busy = 1;
			} else {
				/* success, put on SCSI buffer list */
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&psb->list, &scsi_sblist);
		}
	}
	/* Push SCSI buffers with sgl posted to the available list */
	while (!list_empty(&scsi_sblist)) {
		list_remove_head(&scsi_sblist, psb,
				 struct lpfc_scsi_buf, list);
		lpfc_release_scsi_buf_s4(phba, psb);
	}
	return num_posted;
}
742
/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * repost them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_sblist);
	int num_posted, rc = 0;

	/* get all SCSI buffers need to repost to a local list */
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist)) {
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
						phba->sli4_hba.scsi_xri_cnt);
		/* failed to post any scsi buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}
779
780/**
781 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
782 * @vport: The virtual port for which this call being executed.
783 * @num_to_allocate: The requested number of buffers to allocate.
784 *
8a9d2e80 785 * This routine allocates scsi buffers for device with SLI-4 interface spec,
da0436e9 786 * the scsi buffer contains all the necessary information needed to initiate
8a9d2e80
JS
787 * a SCSI I/O. After allocating up to @num_to_allocate SCSI buffers and put
788 * them on a list, it post them to the port by using SGL block post.
da0436e9
JS
789 *
790 * Return codes:
8a9d2e80 791 * int - number of scsi buffers that were allocated and posted.
da0436e9
JS
792 * 0 = failure, less than num_to_alloc is a partial failure.
793 **/
794static int
795lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
796{
797 struct lpfc_hba *phba = vport->phba;
798 struct lpfc_scsi_buf *psb;
799 struct sli4_sge *sgl;
800 IOCB_t *iocb;
801 dma_addr_t pdma_phys_fcp_cmd;
802 dma_addr_t pdma_phys_fcp_rsp;
96f7077f 803 dma_addr_t pdma_phys_bpl;
8a9d2e80 804 uint16_t iotag, lxri = 0;
96f7077f 805 int bcnt, num_posted, sgl_size;
8a9d2e80
JS
806 LIST_HEAD(prep_sblist);
807 LIST_HEAD(post_sblist);
808 LIST_HEAD(scsi_sblist);
da0436e9 809
96f7077f
JS
810 sgl_size = phba->cfg_sg_dma_buf_size -
811 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
812
813 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
814 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
815 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
816 (int)sizeof(struct fcp_cmnd),
817 (int)sizeof(struct fcp_rsp));
818
da0436e9
JS
819 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
820 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
821 if (!psb)
822 break;
da0436e9 823 /*
8a9d2e80
JS
824 * Get memory from the pci pool to map the virt space to
825 * pci bus space for an I/O. The DMA buffer includes space
826 * for the struct fcp_cmnd, struct fcp_rsp and the number
827 * of bde's necessary to support the sg_tablesize.
da0436e9 828 */
771db5c0 829 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
da0436e9
JS
830 GFP_KERNEL, &psb->dma_handle);
831 if (!psb->data) {
832 kfree(psb);
833 break;
834 }
da0436e9 835
092cb034
JS
836 /*
837 * 4K Page alignment is CRITICAL to BlockGuard, double check
838 * to be sure.
839 */
f44ac12f
JS
840 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
841 (((unsigned long)(psb->data) &
092cb034 842 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
f44ac12f
JS
843 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
844 "3369 Memory alignment error "
845 "addr=%lx\n",
846 (unsigned long)psb->data);
771db5c0 847 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
96f7077f
JS
848 psb->data, psb->dma_handle);
849 kfree(psb);
850 break;
851 }
852
f7bc6434
JS
853
854 lxri = lpfc_sli4_next_xritag(phba);
855 if (lxri == NO_XRI) {
771db5c0 856 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
895427bd 857 psb->data, psb->dma_handle);
da0436e9
JS
858 kfree(psb);
859 break;
860 }
861
f7bc6434
JS
862 /* Allocate iotag for psb->cur_iocbq. */
863 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
864 if (iotag == 0) {
771db5c0 865 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
895427bd 866 psb->data, psb->dma_handle);
da0436e9 867 kfree(psb);
f7bc6434 868 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
4b160ae8 869 "3368 Failed to allocate IOTAG for"
f7bc6434
JS
870 " XRI:0x%x\n", lxri);
871 lpfc_sli4_free_xri(phba, lxri);
da0436e9
JS
872 break;
873 }
6d368e53
JS
874 psb->cur_iocbq.sli4_lxritag = lxri;
875 psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
da0436e9 876 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
da0436e9 877 psb->fcp_bpl = psb->data;
96f7077f 878 psb->fcp_cmnd = (psb->data + sgl_size);
da0436e9
JS
879 psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
880 sizeof(struct fcp_cmnd));
881
882 /* Initialize local short-hand pointers. */
883 sgl = (struct sli4_sge *)psb->fcp_bpl;
884 pdma_phys_bpl = psb->dma_handle;
96f7077f 885 pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
da0436e9
JS
886 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
887
888 /*
8a9d2e80
JS
889 * The first two bdes are the FCP_CMD and FCP_RSP.
890 * The balance are sg list bdes. Initialize the
891 * first two and leave the rest for queuecommand.
da0436e9
JS
892 */
893 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
894 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
0558056c 895 sgl->word2 = le32_to_cpu(sgl->word2);
da0436e9
JS
896 bf_set(lpfc_sli4_sge_last, sgl, 0);
897 sgl->word2 = cpu_to_le32(sgl->word2);
28baac74 898 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
da0436e9
JS
899 sgl++;
900
901 /* Setup the physical region for the FCP RSP */
902 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
903 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
0558056c 904 sgl->word2 = le32_to_cpu(sgl->word2);
da0436e9
JS
905 bf_set(lpfc_sli4_sge_last, sgl, 1);
906 sgl->word2 = cpu_to_le32(sgl->word2);
28baac74 907 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
da0436e9
JS
908
909 /*
910 * Since the IOCB for the FCP I/O is built into this
911 * lpfc_scsi_buf, initialize it with all known data now.
912 */
913 iocb = &psb->cur_iocbq.iocb;
914 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
915 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
916 /* setting the BLP size to 2 * sizeof BDE may not be correct.
917 * We are setting the bpl to point to out sgl. An sgl's
918 * entries are 16 bytes, a bpl entries are 12 bytes.
919 */
920 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
921 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
922 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
923 iocb->ulpBdeCount = 1;
924 iocb->ulpLe = 1;
925 iocb->ulpClass = CLASS3;
8a9d2e80 926 psb->cur_iocbq.context1 = psb;
da0436e9 927 psb->dma_phys_bpl = pdma_phys_bpl;
da0436e9 928
8a9d2e80
JS
929 /* add the scsi buffer to a post list */
930 list_add_tail(&psb->list, &post_sblist);
a40fc5f0 931 spin_lock_irq(&phba->scsi_buf_list_get_lock);
8a9d2e80 932 phba->sli4_hba.scsi_xri_cnt++;
a40fc5f0 933 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
8a9d2e80 934 }
895427bd 935 lpfc_printf_log(phba, KERN_INFO, LOG_BG | LOG_FCP,
8a9d2e80
JS
936 "3021 Allocate %d out of %d requested new SCSI "
937 "buffers\n", bcnt, num_to_alloc);
938
939 /* post the list of scsi buffer sgls to port if available */
940 if (!list_empty(&post_sblist))
941 num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
942 &post_sblist, bcnt);
943 else
944 num_posted = 0;
945
946 return num_posted;
da0436e9
JS
947}
948
/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct (SLI-3 vs SLI-4 implementations differ).
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}
966
967/**
19ca7609 968 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
3772a991 969 * @phba: The HBA for which this call is being executed.
9bad7671
JS
970 *
971 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
972 * and returns to caller.
973 *
974 * Return codes:
975 * NULL - Error
976 * Pointer to lpfc_scsi_buf - Success
977 **/
455c53ec 978static struct lpfc_scsi_buf*
19ca7609 979lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
dea3101e 980{
0bd4ca25 981 struct lpfc_scsi_buf * lpfc_cmd = NULL;
a40fc5f0 982 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
164cecd1 983 unsigned long iflag = 0;
a40fc5f0 984
164cecd1 985 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
a40fc5f0
JS
986 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
987 list);
988 if (!lpfc_cmd) {
164cecd1 989 spin_lock(&phba->scsi_buf_list_put_lock);
a40fc5f0
JS
990 list_splice(&phba->lpfc_scsi_buf_list_put,
991 &phba->lpfc_scsi_buf_list_get);
992 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
993 list_remove_head(scsi_buf_list_get, lpfc_cmd,
994 struct lpfc_scsi_buf, list);
164cecd1 995 spin_unlock(&phba->scsi_buf_list_put_lock);
1dcb58e5 996 }
164cecd1 997 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
0bd4ca25
JSEC
998 return lpfc_cmd;
999}
19ca7609
JS
1000/**
1001 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
1002 * @phba: The HBA for which this call is being executed.
1003 *
1004 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
1005 * and returns to caller.
1006 *
1007 * Return codes:
1008 * NULL - Error
1009 * Pointer to lpfc_scsi_buf - Success
1010 **/
1011static struct lpfc_scsi_buf*
1012lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1013{
3be30e0e 1014 struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
164cecd1 1015 unsigned long iflag = 0;
19ca7609
JS
1016 int found = 0;
1017
164cecd1 1018 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
3be30e0e
JS
1019 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
1020 &phba->lpfc_scsi_buf_list_get, list) {
19ca7609 1021 if (lpfc_test_rrq_active(phba, ndlp,
ee0f4fe1 1022 lpfc_cmd->cur_iocbq.sli4_lxritag))
1151e3ec 1023 continue;
59c68eaa 1024 list_del_init(&lpfc_cmd->list);
19ca7609 1025 found = 1;
1151e3ec 1026 break;
19ca7609 1027 }
a40fc5f0 1028 if (!found) {
164cecd1 1029 spin_lock(&phba->scsi_buf_list_put_lock);
a40fc5f0
JS
1030 list_splice(&phba->lpfc_scsi_buf_list_put,
1031 &phba->lpfc_scsi_buf_list_get);
1032 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
164cecd1 1033 spin_unlock(&phba->scsi_buf_list_put_lock);
3be30e0e
JS
1034 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
1035 &phba->lpfc_scsi_buf_list_get, list) {
a40fc5f0
JS
1036 if (lpfc_test_rrq_active(
1037 phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
1038 continue;
59c68eaa 1039 list_del_init(&lpfc_cmd->list);
a40fc5f0
JS
1040 found = 1;
1041 break;
1042 }
1043 }
164cecd1 1044 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
1151e3ec
JS
1045 if (!found)
1046 return NULL;
a40fc5f0 1047 return lpfc_cmd;
19ca7609
JS
1048}
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: node-list entry for the remote port the I/O targets.
 *
 * This routine dispatches through the phba->lpfc_get_scsi_buf function
 * pointer to the SLI-rev specific allocator (the _s3 or _s4 variant above).
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	return phba->lpfc_get_scsi_buf(phba, ndlp);
}
dea3101e 1065
9bad7671 1066/**
3772a991 1067 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
9bad7671
JS
1068 * @phba: The Hba for which this call is being executed.
1069 * @psb: The scsi buffer which is being released.
1070 *
1071 * This routine releases @psb scsi buffer by adding it to tail of @phba
1072 * lpfc_scsi_buf_list list.
1073 **/
0bd4ca25 1074static void
3772a991 1075lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
0bd4ca25 1076{
875fbdfe 1077 unsigned long iflag = 0;
dea3101e 1078
a40fc5f0
JS
1079 psb->seg_cnt = 0;
1080 psb->nonsg_phys = 0;
1081 psb->prot_seg_cnt = 0;
1082
1083 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
0bd4ca25 1084 psb->pCmd = NULL;
6a485eb9 1085 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
a40fc5f0
JS
1086 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
1087 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
dea3101e 1088}
1089
da0436e9
JS
1090/**
1091 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
1092 * @phba: The Hba for which this call is being executed.
1093 * @psb: The scsi buffer which is being released.
1094 *
1095 * This routine releases @psb scsi buffer by adding it to tail of @phba
1096 * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
1097 * and cannot be reused for at least RA_TOV amount of time if it was
1098 * aborted.
1099 **/
1100static void
1101lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1102{
1103 unsigned long iflag = 0;
1104
a40fc5f0
JS
1105 psb->seg_cnt = 0;
1106 psb->nonsg_phys = 0;
1107 psb->prot_seg_cnt = 0;
1108
341af102 1109 if (psb->exch_busy) {
da0436e9
JS
1110 spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
1111 iflag);
1112 psb->pCmd = NULL;
1113 list_add_tail(&psb->list,
1114 &phba->sli4_hba.lpfc_abts_scsi_buf_list);
1115 spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
1116 iflag);
1117 } else {
da0436e9 1118 psb->pCmd = NULL;
6a485eb9 1119 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
a40fc5f0
JS
1120 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1121 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
1122 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
da0436e9
JS
1123 }
1124}
1125
/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine dispatches through the phba->lpfc_release_scsi_buf function
 * pointer to the SLI-rev specific release handler (_s3 or _s4 variant).
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{

	phba->lpfc_release_scsi_buf(phba, psb);
}
1140
/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through sg elements and formats the bde. This routine also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		/* Mapped segment count must fit the per-command limit
		 * established at probe time; otherwise unmap and fail.
		 */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				/* Inline BDE in the extended IOCB area */
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				/* BDE goes into the external BPL instead */
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			/* BPL follows fcp_cmnd, fcp_rsp and the two
			 * leading BDEs in the command's DMA buffer.
			 */
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}
1278
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT 0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT 0x2
/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP 0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
#define BG_ERR_CHECK 0x20
acd6859b
JS
1292
/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Consults the debugfs-driven injection counters/filters on @phba and, when
 * they match this command, corrupts the reference tag, application tag
 * and/or signals a guard swap so the resulting I/O fails BlockGuard checks.
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	sector_t lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	lba = scsi_get_lba(sc);

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if ((phba->lpfc_injerr_lba < lba) ||
			(phba->lpfc_injerr_lba >= (lba + numblks)))
			return 0;
		if (sgpe) {
			/* Offset (in DIF tuples) of the target LBA inside
			 * the first protection segment, clamped to it.
			 */
			blockoff = phba->lpfc_injerr_lba - lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid &&
			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
				sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				/* Drop thru (no prot data: treat as INSERT) */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */

					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				/* Drop thru (no prot data: treat as INSERT) */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}


	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				/* Drop thru (shares the INSERT handling) */

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
			}
		}
	}

	return rc;
}
1695#endif
1696
acd6859b
JS
1697/**
1698 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1699 * the specified SCSI command.
1700 * @phba: The Hba for which this call is being executed.
6c8eea54
JS
1701 * @sc: The SCSI command to examine
1702 * @txopt: (out) BlockGuard operation for transmitted data
1703 * @rxopt: (out) BlockGuard operation for received data
1704 *
1705 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1706 *
acd6859b 1707 **/
e2a0a9d6 1708static int
6c8eea54
JS
1709lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1710 uint8_t *txop, uint8_t *rxop)
e2a0a9d6 1711{
6c8eea54 1712 uint8_t ret = 0;
e2a0a9d6 1713
9c6aa9d7 1714 if (lpfc_cmd_guard_csum(sc)) {
e2a0a9d6
JS
1715 switch (scsi_get_prot_op(sc)) {
1716 case SCSI_PROT_READ_INSERT:
1717 case SCSI_PROT_WRITE_STRIP:
6c8eea54 1718 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
4ac9b226 1719 *txop = BG_OP_IN_CSUM_OUT_NODIF;
e2a0a9d6
JS
1720 break;
1721
1722 case SCSI_PROT_READ_STRIP:
1723 case SCSI_PROT_WRITE_INSERT:
6c8eea54 1724 *rxop = BG_OP_IN_CRC_OUT_NODIF;
4ac9b226 1725 *txop = BG_OP_IN_NODIF_OUT_CRC;
e2a0a9d6
JS
1726 break;
1727
c6af4042
MP
1728 case SCSI_PROT_READ_PASS:
1729 case SCSI_PROT_WRITE_PASS:
6c8eea54 1730 *rxop = BG_OP_IN_CRC_OUT_CSUM;
4ac9b226 1731 *txop = BG_OP_IN_CSUM_OUT_CRC;
e2a0a9d6
JS
1732 break;
1733
e2a0a9d6
JS
1734 case SCSI_PROT_NORMAL:
1735 default:
6a9c52cf 1736 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7c56b9fd
JS
1737 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1738 scsi_get_prot_op(sc));
6c8eea54 1739 ret = 1;
e2a0a9d6
JS
1740 break;
1741
1742 }
7c56b9fd 1743 } else {
e2a0a9d6
JS
1744 switch (scsi_get_prot_op(sc)) {
1745 case SCSI_PROT_READ_STRIP:
1746 case SCSI_PROT_WRITE_INSERT:
6c8eea54 1747 *rxop = BG_OP_IN_CRC_OUT_NODIF;
4ac9b226 1748 *txop = BG_OP_IN_NODIF_OUT_CRC;
e2a0a9d6
JS
1749 break;
1750
1751 case SCSI_PROT_READ_PASS:
1752 case SCSI_PROT_WRITE_PASS:
6c8eea54 1753 *rxop = BG_OP_IN_CRC_OUT_CRC;
4ac9b226 1754 *txop = BG_OP_IN_CRC_OUT_CRC;
e2a0a9d6
JS
1755 break;
1756
e2a0a9d6
JS
1757 case SCSI_PROT_READ_INSERT:
1758 case SCSI_PROT_WRITE_STRIP:
7c56b9fd 1759 *rxop = BG_OP_IN_NODIF_OUT_CRC;
4ac9b226 1760 *txop = BG_OP_IN_CRC_OUT_NODIF;
7c56b9fd
JS
1761 break;
1762
e2a0a9d6
JS
1763 case SCSI_PROT_NORMAL:
1764 default:
6a9c52cf 1765 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
7c56b9fd
JS
1766 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1767 scsi_get_prot_op(sc));
6c8eea54 1768 ret = 1;
e2a0a9d6
JS
1769 break;
1770 }
e2a0a9d6
JS
1771 }
1772
6c8eea54 1773 return ret;
e2a0a9d6
JS
1774}
1775
acd6859b
JS
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
 * lpfc_bg_err_opcodes - reDetermine the BlockGuard opcodes to be used with
 * the specified SCSI command in order to force a guard tag error.
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @txop: (out) BlockGuard operation for transmitted data
 * @rxop: (out) BlockGuard operation for received data
 *
 * Debugfs error-injection helper: swaps the CRC/checksum guard opcodes
 * relative to what lpfc_sc_to_bg_opcodes() would normally pick, so the
 * hardware flags a guard tag mismatch on an otherwise good I/O.
 *
 * Returns: always zero (the outputs are simply left untouched for
 * SCSI_PROT_NORMAL and unrecognized ops).
 **/
static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	unsigned char prot_op = scsi_get_prot_op(sc);

	switch (prot_op) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		if (lpfc_cmd_guard_csum(sc)) {
			*rxop = BG_OP_IN_NODIF_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_NODIF;
		} else {
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
		}
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		/* Both guard flavors inject the same swapped opcodes here */
		*rxop = BG_OP_IN_CSUM_OUT_NODIF;
		*txop = BG_OP_IN_NODIF_OUT_CSUM;
		break;

	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (lpfc_cmd_guard_csum(sc)) {
			*rxop = BG_OP_IN_CSUM_OUT_CRC;
			*txop = BG_OP_IN_CRC_OUT_CSUM;
		} else {
			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
			*txop = BG_OP_IN_CSUM_OUT_CSUM;
		}
		break;

	case SCSI_PROT_NORMAL:
	default:
		/* Unprotected I/O: nothing to swap */
		break;
	}

	return 0;
}
#endif
1848
/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |          PDE_5          |
 *                                +-------------------------+
 *                                |          PDE_6          |
 *                                +-------------------------+
 *                                |         Data BDE        |
 *                                +-------------------------+
 *                                |more Data BDE's ... (opt)|
 *                                +-------------------------+
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of BDEs added to the BPL; 0 if the BlockGuard
 * opcodes could not be determined (caller treats < 2 as failure).
 **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;

	/* Pick tx/rx BlockGuard opcodes from the command's prot op/guard */
	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Debugfs error injection may swap opcodes and/or disable checking */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
1980
acd6859b
JS
/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 *
 * This function sets up BPL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then by instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * The HBA could extract the DIFs and place it in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          PDE_6          |
 *                                    +-------------------------+
 *                                    |      PDE_7 (Prot BDE)   |
 *                                    +-------------------------+
 *                                    |        Data BDE         |
 *                                    +-------------------------+
 *                                    |more Data BDE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |          PDE_5          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of BDEs added to the BPL.  A return > the segment
 * budget (num_bde + 3 / num_bde + 1) signals the caller that space ran
 * out; 0 signals invalid s/g lists or a bad opcode combination.
 **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Debugfs error injection may swap opcodes and/or disable checking */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	/* One iteration per protection group (PDE5 + PDE6 + PDE7 + data) */
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);

		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));

		/* 8-byte DIF tuple per logical block */
		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if this pde is crossing the 4K boundary; if so split */
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			/* carry the leftover into the next iteration */
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_bde >= phba->cfg_total_seg_cnt)
				return num_bde + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);

		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			bpl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	return num_bde;
}
2235
/**
 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_NO_DIF
 *
 * This is usually used when the HBA is instructed to generate
 * DIFs and insert them into data stream (or strip DIF from
 * incoming data stream)
 *
 * The buffer list consists of just one protection group described
 * below:
 *                                +-------------------------+
 *   start of prot group  -->     |         DI_SEED         |
 *                                +-------------------------+
 *                                |         Data SGE        |
 *                                +-------------------------+
 *                                |more Data SGE's ... (opt)|
 *                                +-------------------------+
 *
 *
 * Note: Data s/g buffers have been dma mapped
 *
 * Returns the number of SGEs added to the SGL; 0 if the BlockGuard
 * opcodes could not be determined.
 **/
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t physaddr;
	int i = 0, num_sge = 0, status;
	uint32_t reftag;
	uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_len;
	uint32_t dma_offset = 0;

	/* Pick tx/rx BlockGuard opcodes from the command's prot op/guard */
	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Debugfs error injection may swap opcodes and/or disable checking */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup DISEED with what we have */
	diseed = (struct sli4_sge_diseed *) sgl;
	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

	/* Endianness conversion if necessary */
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	/*
	 * We only need to check the data on READs, for WRITEs
	 * protection data is automatically generated, not checked.
	 */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
	}

	/* setup DISEED with the rest of the info */
	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

	/* Endianness conversion if necessary for DISEED */
	diseed->word2 = cpu_to_le32(diseed->word2);
	diseed->word3 = cpu_to_le32(diseed->word3);

	/* advance bpl and increment sge count */
	num_sge++;
	sgl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		dma_len = sg_dma_len(sgde);
		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
		/* Mark only the final SGE as the last in the list */
		if ((i + 1) == datasegcnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		else
			bf_set(lpfc_sli4_sge_last, sgl, 0);
		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

		sgl->sge_len = cpu_to_le32(dma_len);
		dma_offset += dma_len;

		sgl++;
		num_sge++;
	}

out:
	return num_sge;
}
2362
/**
 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @sgl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segment of protection data that have been dma mapped
 *
 * This function sets up SGL buffer list for protection groups of
 * type LPFC_PG_TYPE_DIF
 *
 * This is usually used when DIFs are in their own buffers,
 * separate from the data. The HBA can then by instructed
 * to place the DIFs in the outgoing stream.  For read operations,
 * The HBA could extract the DIFs and place it in DIF buffers.
 *
 * The buffer list for this type consists of one or more of the
 * protection groups described below:
 *                                    +-------------------------+
 *   start of first prot group  -->   |         DISEED          |
 *                                    +-------------------------+
 *                                    |      DIF (Prot SGE)     |
 *                                    +-------------------------+
 *                                    |        Data SGE         |
 *                                    +-------------------------+
 *                                    |more Data SGE's ... (opt)|
 *                                    +-------------------------+
 *   start of new  prot group  -->    |         DISEED          |
 *                                    +-------------------------+
 *                                    |          ...            |
 *                                    +-------------------------+
 *
 * Note: It is assumed that both data and protection s/g buffers have been
 *       mapped for DMA
 *
 * Returns the number of SGEs added to the SGL.  A return > the segment
 * budget (num_sge + 3 / num_sge + 1) signals the caller that space ran
 * out; 0 signals invalid s/g lists or a bad opcode combination.
 **/
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint8_t txop, rxop;
	uint32_t dma_len;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_offset = 0;
	int num_sge = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Debugfs error injection may swap opcodes and/or disable checking */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	/* One iteration per protection group (DISEED + DIF SGE + data) */
	do {
		/* Check to see if we ran out of space */
		if (num_sge >= (phba->cfg_total_seg_cnt - 2))
			return num_sge + 3;

		/* setup DISEED with what we have */
		diseed = (struct sli4_sge_diseed *) sgl;
		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

		/* Endianness conversion if necessary */
		diseed->ref_tag = cpu_to_le32(reftag);
		diseed->ref_tag_tran = diseed->ref_tag;

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);

		} else {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
			/*
			 * When in this mode, the hardware will replace
			 * the guard tag from the host with a
			 * newly generated good CRC for the wire.
			 * Switch to raw mode here to avoid this
			 * behavior. What the host sends gets put on the wire.
			 */
			if (txop == BG_OP_IN_CRC_OUT_CRC) {
				txop = BG_OP_RAW_MODE;
				rxop = BG_OP_RAW_MODE;
			}
		}


		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);

		/* setup DISEED with the rest of the info */
		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

		/* Endianness conversion if necessary for DISEED */
		diseed->word2 = cpu_to_le32(diseed->word2);
		diseed->word3 = cpu_to_le32(diseed->word3);

		/* advance sgl and increment bde count */
		num_sge++;
		sgl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		/* Now setup DIF SGE */
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
		sgl->word2 = cpu_to_le32(sgl->word2);

		/* 8-byte DIF tuple per logical block */
		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if DIF SGE is crossing the 4K boundary; if so split */
		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
			/* carry the leftover into the next iteration */
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			protgroup_offset = 0;
			curr_prot++;
		}

		num_sge++;

		/* setup SGE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_sge >= phba->cfg_total_seg_cnt)
				return num_sge + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9086 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			sgl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				dma_len = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				dma_len = protgrp_bytes - subtotal;
				split_offset += dma_len;
			}

			subtotal += dma_len;

			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
			bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;

			num_sge++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			sgl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			/* mark the last SGE */
			bf_set(lpfc_sli4_sge_last, sgl, 1);
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			sgl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9085 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:

	return num_sge;
}
7f86059a 2621
acd6859b
JS
2622/**
2623 * lpfc_prot_group_type - Get prtotection group type of SCSI command
2624 * @phba: The Hba for which this call is being executed.
2625 * @sc: pointer to scsi command we're working on
2626 *
e2a0a9d6
JS
2627 * Given a SCSI command that supports DIF, determine composition of protection
2628 * groups involved in setting up buffer lists
2629 *
acd6859b
JS
2630 * Returns: Protection group type (with or without DIF)
2631 *
2632 **/
e2a0a9d6
JS
2633static int
2634lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2635{
2636 int ret = LPFC_PG_TYPE_INVALID;
2637 unsigned char op = scsi_get_prot_op(sc);
2638
2639 switch (op) {
2640 case SCSI_PROT_READ_STRIP:
2641 case SCSI_PROT_WRITE_INSERT:
2642 ret = LPFC_PG_TYPE_NO_DIF;
2643 break;
2644 case SCSI_PROT_READ_INSERT:
2645 case SCSI_PROT_WRITE_STRIP:
2646 case SCSI_PROT_READ_PASS:
2647 case SCSI_PROT_WRITE_PASS:
e2a0a9d6
JS
2648 ret = LPFC_PG_TYPE_DIF_BUF;
2649 break;
2650 default:
9c6aa9d7
JS
2651 if (phba)
2652 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2653 "9021 Unsupported protection op:%d\n",
2654 op);
e2a0a9d6
JS
2655 break;
2656 }
e2a0a9d6
JS
2657 return ret;
2658}
2659
a6887e28
JS
2660/**
2661 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2662 * @phba: The Hba for which this call is being executed.
2663 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2664 *
2665 * Adjust the data length to account for how much data
2666 * is actually on the wire.
2667 *
2668 * returns the adjusted data length
2669 **/
2670static int
2671lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2672 struct lpfc_scsi_buf *lpfc_cmd)
2673{
2674 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2675 int fcpdl;
2676
2677 fcpdl = scsi_bufflen(sc);
2678
2679 /* Check if there is protection data on the wire */
2680 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
9c6aa9d7 2681 /* Read check for protection data */
a6887e28
JS
2682 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2683 return fcpdl;
2684
2685 } else {
9c6aa9d7 2686 /* Write check for protection data */
a6887e28
JS
2687 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2688 return fcpdl;
2689 }
2690
2691 /*
2692 * If we are in DIF Type 1 mode every data block has a 8 byte
9c6aa9d7
JS
2693 * DIF (trailer) attached to it. Must ajust FCP data length
2694 * to account for the protection data.
a6887e28 2695 */
9c6aa9d7 2696 fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
a6887e28
JS
2697
2698 return fcpdl;
2699}
2700
acd6859b
JS
/**
 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here
 *
 * Maps the data scatter-gather list (and, for LPFC_PG_TYPE_DIF_BUF, the
 * protection scatter-gather list) for DMA and builds the BPL entries via
 * lpfc_bg_setup_bpl()/lpfc_bg_setup_bpl_prot().
 *
 * Return codes:
 *	1 - Error (mappings undone, seg counts zeroed)
 *	0 - Success
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
	 * fcp_rsp regions to the first data bde entry
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
			goto err;

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:

			/* Here we need to add a PDE5 and PDE6 to the count */
			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
				goto err;

			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);
			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2)
				goto err;
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				/* undo the data mapping done above */
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;

			/*
			 * There is a minimun of 4 BPLs used for every
			 * protection data segment.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 4) >
			    (phba->cfg_total_seg_cnt - 2))
				goto err;

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);
			/* we should have 3 or more entries in buffer list */
			if ((num_bde < 3) ||
			    (num_bde > phba->cfg_total_seg_cnt))
				goto err;
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	/*
	 * NOTE(review): cpu_to_be32() is the intended conversion here;
	 * be32_to_cpu() produces the same byte swap so the wire value is
	 * identical — confirm before "fixing".
	 */
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	/*
	 * NOTE(review): the adjacent string literals concatenate to
	 * "...for HBAIO segs..." (missing space) — kept byte-identical here.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9023 Cannot setup S/G List for HBA"
			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_bde);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return 1;
}
2847
737d4248
JS
2848/*
2849 * This function calcuates the T10 DIF guard tag
2850 * on the specified data using a CRC algorithmn
2851 * using crc_t10dif.
2852 */
7bfe781e 2853static uint16_t
737d4248
JS
2854lpfc_bg_crc(uint8_t *data, int count)
2855{
2856 uint16_t crc = 0;
2857 uint16_t x;
2858
2859 crc = crc_t10dif(data, count);
2860 x = cpu_to_be16(crc);
2861 return x;
2862}
2863
2864/*
2865 * This function calcuates the T10 DIF guard tag
2866 * on the specified data using a CSUM algorithmn
2867 * using ip_compute_csum.
2868 */
7bfe781e 2869static uint16_t
737d4248
JS
2870lpfc_bg_csum(uint8_t *data, int count)
2871{
2872 uint16_t ret;
2873
2874 ret = ip_compute_csum(data, count);
2875 return ret;
2876}
2877
/*
 * This function examines the protection data to try to determine
 * what type of T10-DIF error occurred.
 *
 * It walks the protection-tuple scatter-gather list in step with the
 * data scatter-gather list, recomputing the guard tag in software and
 * comparing guard/ref/app tags against the expected values.  On a
 * mismatch it fills in the command's sense buffer (ILLEGAL_REQUEST,
 * ASC 0x10) and sets cmd->result, and bumps the matching phba counter.
 * Currently only ref_tag and guard_tag checking are enabled.
 */
static void
lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct scsi_dif_tuple *src = NULL;
	uint8_t *data_src = NULL;
	uint16_t guard_tag;
	uint16_t start_app_tag, app_tag;
	uint32_t start_ref_tag, ref_tag;
	int prot, protsegcnt;
	int err_type, len, data_len;
	int chk_ref, chk_app, chk_guard;
	uint16_t sum;
	unsigned blksize;

	/* default classification if no specific mismatch is found */
	err_type = BGS_GUARD_ERR_MASK;
	sum = 0;
	guard_tag = 0;

	/* First check to see if there is protection data to examine */
	prot = scsi_get_prot_op(cmd);
	if ((prot == SCSI_PROT_READ_STRIP) ||
	    (prot == SCSI_PROT_WRITE_INSERT) ||
	    (prot == SCSI_PROT_NORMAL))
		goto out;

	/* Currently the driver just supports ref_tag and guard_tag checking */
	chk_ref = 1;
	chk_app = 0;
	chk_guard = 0;

	/* Setup a ptr to the protection data provided by the SCSI host */
	sgpe = scsi_prot_sglist(cmd);
	protsegcnt = lpfc_cmd->prot_seg_cnt;

	if (sgpe && protsegcnt) {

		/*
		 * We will only try to verify guard tag if the segment
		 * data length is a multiple of the blksize.
		 */
		sgde = scsi_sglist(cmd);
		blksize = lpfc_cmd_blksize(cmd);
		data_src = (uint8_t *)sg_virt(sgde);
		data_len = sgde->length;
		if ((data_len & (blksize - 1)) == 0)
			chk_guard = 1;

		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
		start_app_tag = src->app_tag;
		len = sgpe->length;
		/* outer loop: one protection s/g segment per iteration */
		while (src && protsegcnt) {
			/* inner loop: one DIF tuple per iteration */
			while (len) {

				/*
				 * First check to see if a protection data
				 * check is valid
				 */
				if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
				    (src->app_tag == T10_PI_APP_ESCAPE)) {
					start_ref_tag++;
					goto skipit;
				}

				/* First Guard Tag checking */
				if (chk_guard) {
					guard_tag = src->guard_tag;
					if (lpfc_cmd_guard_csum(cmd))
						sum = lpfc_bg_csum(data_src,
								   blksize);
					else
						sum = lpfc_bg_crc(data_src,
								  blksize);
					if ((guard_tag != sum)) {
						err_type = BGS_GUARD_ERR_MASK;
						goto out;
					}
				}

				/* Reference Tag checking */
				ref_tag = be32_to_cpu(src->ref_tag);
				if (chk_ref && (ref_tag != start_ref_tag)) {
					err_type = BGS_REFTAG_ERR_MASK;
					goto out;
				}
				start_ref_tag++;

				/* App Tag checking (disabled: chk_app == 0) */
				app_tag = src->app_tag;
				if (chk_app && (app_tag != start_app_tag)) {
					err_type = BGS_APPTAG_ERR_MASK;
					goto out;
				}
skipit:
				len -= sizeof(struct scsi_dif_tuple);
				if (len < 0)
					len = 0;
				src++;

				data_src += blksize;
				data_len -= blksize;

				/*
				 * Are we at the end of the Data segment?
				 * The data segment is only used for Guard
				 * tag checking.
				 */
				if (chk_guard && (data_len == 0)) {
					chk_guard = 0;
					sgde = sg_next(sgde);
					if (!sgde)
						goto out;

					data_src = (uint8_t *)sg_virt(sgde);
					data_len = sgde->length;
					if ((data_len & (blksize - 1)) == 0)
						chk_guard = 1;
				}
			}

			/* Goto the next Protection data segment */
			sgpe = sg_next(sgpe);
			if (sgpe) {
				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
				len = sgpe->length;
			} else {
				src = NULL;
			}
			protsegcnt--;
		}
	}
out:
	/* Report the first mismatch found (or the default guard error) */
	if (err_type == BGS_GUARD_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				sum, guard_tag);

	} else if (err_type == BGS_REFTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				ref_tag, start_ref_tag);

	} else if (err_type == BGS_APPTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				app_tag, start_app_tag);
	}
}
3053
3054
e2a0a9d6
JS
/*
 * This function checks for BlockGuard errors detected by
 * the HBA.  In case of errors, the ASC/ASCQ fields in the
 * sense buffer will be set accordingly, paired with
 * ILLEGAL_REQUEST to signal to the kernel that the HBA
 * detected corruption.
 *
 * On the first invocation it also snapshots the data (and DIF, if any)
 * buffers to debugfs under _dump_buf_lock for post-mortem analysis.
 *
 * Returns:
 *   0 - No error found
 *   1 - BlockGuard error found
 *  -1 - Internal error (bad profile, ...etc)
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
			struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	/* One-time debugfs dump of the failing command's buffers */
	spin_lock(&_dump_buf_lock);
	if (!_dump_buf_done) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
			" Data for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
		lpfc_debug_save_data(phba, cmd);

		/* If we have a prot sgl, save the DIF buffer */
		if (lpfc_prot_group_type(phba, cmd) ==
				LPFC_PG_TYPE_DIF_BUF) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
				"Saving DIF for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
			lpfc_debug_save_dif(phba, cmd);
		}

		_dump_buf_done = 1;
	}
	spin_unlock(&_dump_buf_lock);

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = DID_ERROR << 16;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9072 BLKGRD: Invalid BG Profile in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = DID_ERROR << 16;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9073 BLKGRD: Invalid BG PDIF Block in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9055 BLKGRD: Guard Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9056 BLKGRD: Ref Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9061 BLKGRD: App Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it.
		 * This code assumes there was also a guard/app/ref tag error
		 * indication.
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm is a "on the wire" FC frame based count */
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			/* wire frames carry data only, no DIF tuples */
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			/* wire frames carry data plus an 8-byte DIF tuple */
			bghm /= (cmd->device->sector_size +
				sizeof(struct scsi_dif_tuple));
			break;
		}

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9057 BLKGRD: Unknown error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);

		/* Calcuate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
out:
	return ret;
}
3221
da0436e9
JS
/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-4 interface spec.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	struct sli4_sge *first_data_sgl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg;
	struct ulp_bde64 *bde;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(nseg <= 0))
			return 1;
		/* sgl[0] is fcp_cmnd; sgl[1] is fcp_rsp */
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;
		first_data_sgl = sgl;
		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
				" %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the sge's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			dma_len = sg_dma_len(sgel);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			/* mark only the final data SGE as last */
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;
			sgl++;
		}
		/*
		 * Setup the first Payload BDE. For FCoE we just key off
		 * Performance Hints, for FC we use lpfc_enable_pbde.
		 * We populate words 13-15 of IOCB/WQE.
		 */
		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&(iocb_cmd->unsli3.sli3Words[5]);
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
					le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
		}
	} else {
		/* No data: only the fcp_rsp entry, flagged as last */
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);

		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&(iocb_cmd->unsli3.sli3Words[5]);
			memset(bde, 0, (sizeof(uint32_t) * 3));
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized.
	 * all iocb memory resources are reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled) {
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
		lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
			scsi_cmnd->device->hostdata)->priority;
	}
	return 0;
}
3369
acd6859b
JS
/**
 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This is the protection/DIF aware version of
 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
 * two functions eventually, but for now, it's here
 *
 * Return codes:
 *	1 - Error (mappings undone, seg counts zeroed)
 *	0 - Success
 **/
static int
lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_sge = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;

	/*
	 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
	 * fcp_rsp regions to the first data sge entry
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);

		sgl += 1;
		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
			goto err;

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			/* Here we need to add a DISEED to the count */
			if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
				goto err;

			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
					datasegcnt);

			/* we should have 2 or more entries in buffer list */
			if (num_sge < 2)
				goto err;
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				/* undo the data mapping done above */
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			/*
			 * There is a minimun of 3 SGEs used for every
			 * protection data segment.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 3) >
			    (phba->cfg_total_seg_cnt - 2))
				goto err;

			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
					datasegcnt, protsegcnt);

			/* we should have 3 or more entries in buffer list */
			if ((num_sge < 3) ||
			    (num_sge > phba->cfg_total_seg_cnt))
				goto err;
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9083 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/* Record the DIF operation in the iocb flags for the SLI layer */
	switch (scsi_get_prot_op(scsi_cmnd)) {
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_STRIP:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
		break;
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_INSERT:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_PASS:
	case SCSI_PROT_READ_PASS:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
		break;
	}

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	/*
	 * NOTE(review): cpu_to_be32() is the intended conversion here;
	 * be32_to_cpu() yields the same byte swap — confirm before changing.
	 */
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled)
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	/*
	 * NOTE(review): adjacent literals concatenate to "...for HBAIO
	 * segs..." (missing space) — kept byte-identical here.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9084 Cannot setup S/G List for HBA"
			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_sge);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return 1;
}
3534
3772a991
JS
3535/**
3536 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3537 * @phba: The Hba for which this call is being executed.
3538 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3539 *
3540 * This routine wraps the actual DMA mapping function pointer from the
3541 * lpfc_hba struct.
3542 *
3543 * Return codes:
6c8eea54
JS
3544 * 1 - Error
3545 * 0 - Success
3772a991
JS
3546 **/
3547static inline int
3548lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3549{
3550 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3551}
3552
acd6859b
JS
3553/**
3554 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3555 * using BlockGuard.
3556 * @phba: The Hba for which this call is being executed.
3557 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3558 *
3559 * This routine wraps the actual DMA mapping function pointer from the
3560 * lpfc_hba struct.
3561 *
3562 * Return codes:
3563 * 1 - Error
3564 * 0 - Success
3565 **/
3566static inline int
3567lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3568{
3569 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3570}
3571
/**
 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.  Three conditions are reported: queue-full
 * or device-busy status, a check condition on READ_10/WRITE_10 with valid
 * sense, and a read-check (residual mismatch) error.  The allocated event
 * is queued onto phba->work_list and the worker thread is woken.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	/* Nothing to report without an active node */
	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
	    (cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
			(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
			LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		/* Check condition with valid sense on a READ_10/WRITE_10 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		     fcpi_parm &&
		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
			((scsi_status == SAM_STAT_GOOD) &&
			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param and
		 * there is valid fcpi_parm, then there is a read_check error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	/* Hand the event to the worker thread for delivery */
	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}
9bad7671
JS
3666
3667/**
f1126688 3668 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3772a991 3669 * @phba: The HBA for which this call is being executed.
9bad7671
JS
3670 * @psb: The scsi buffer which is going to be un-mapped.
3671 *
3672 * This routine does DMA un-mapping of scatter gather list of scsi command
3772a991 3673 * field of @lpfc_cmd for device with SLI-3 interface spec.
9bad7671 3674 **/
bcf4dbfa 3675static void
f1126688 3676lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
bcf4dbfa
JS
3677{
3678 /*
3679 * There are only two special cases to consider. (1) the scsi command
3680 * requested scatter-gather usage or (2) the scsi command allocated
3681 * a request buffer, but did not request use_sg. There is a third
3682 * case, but it does not require resource deallocation.
3683 */
a0b4f78f
FT
3684 if (psb->seg_cnt > 0)
3685 scsi_dma_unmap(psb->pCmd);
e2a0a9d6
JS
3686 if (psb->prot_seg_cnt > 0)
3687 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3688 scsi_prot_sg_count(psb->pCmd),
3689 psb->pCmd->sc_data_direction);
bcf4dbfa
JS
3690}
3691
/**
 * lpfc_handle_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @rsp_iocb: The response IOCB which contains FCP error.
 *
 * This routine is called to process response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
 * based upon SCSI and FCP error.  It validates the FCP_RSP payload
 * (response length, rspInfo3), copies any sense data into the midlayer
 * sense buffer, and detects underrun/overrun/dropped-frame conditions.
 **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	/* fcpi_parm: transfer count reported by the HBA for this exchange
	 * (presumably bytes actually moved on the wire — see the dropped
	 * frame checks below; TODO confirm against SLI spec).
	 */
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t fcpDl;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;


	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	/* Validate the FC-FCP response: the RSP_INFO length may only be
	 * 0, 4 or 8 bytes; anything else means a malformed response.
	 */
	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "2719 Invalid response length: "
					 "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
					 cmnd->device->id,
					 cmnd->device->lun, cmnd->cmnd[0],
					 rsplen);
			host_status = DID_ERROR;
			goto out;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2757 Protocol failure detected during "
				 "processing of FCP I/O op: "
				 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 fcprsp->rspInfo3);
			host_status = DID_ERROR;
			goto out;
		}
	}

	/* Copy sense data (clamped to the midlayer buffer size) out of the
	 * FCP_RSP; the sense bytes follow the response-info field, hence
	 * the rsplen offset.
	 */
	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
		  rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	/* lp is only used to log the first sense words below. */
	lp = (uint32_t *)cmnd->sense_buffer;

	/* special handling for under run conditions */
	if (!scsi_status && (resp_info & RESID_UNDER)) {
		/* don't log under runs if fcp set... */
		if (vport->cfg_log_verbose & LOG_FCP)
			logit = LOG_FCP_ERROR;
		/* unless operator says so */
		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
			logit = LOG_FCP_UNDER;
	}

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	scsi_set_resid(cmnd, 0);
	fcpDl = be32_to_cpu(fcpcmd->fcpDl);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
				 "9025 FCP Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 fcpDl,
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run, check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 fcpDl,
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is at
		 * least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if (fcpi_parm) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP %s Check Error xri x%x Data: "
				 "x%x x%x x%x x%x x%x\n",
				 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
				 "Read" : "Write"),
				 ((phba->sli_rev == LPFC_SLI_REV4) ?
				 lpfc_cmd->cur_iocbq.sli4_xritag :
				 rsp_iocb->iocb.ulpContext),
				 fcpDl, be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0], scsi_status);

		/* There is some issue with the LPe12000 that causes it
		 * to miscalculate the fcpi_parm and falsely trip this
		 * recovery logic. Detect this case and don't error when true.
		 */
		if (fcpi_parm > fcpDl)
			goto out;

		switch (scsi_status) {
		case SAM_STAT_GOOD:
		case SAM_STAT_CHECK_CONDITION:
			/* Fabric dropped a data frame. Fail any successful
			 * command in which we detected dropped frames.
			 * A status of good or some check conditions could
			 * be considered a successful command.
			 */
			host_status = DID_ERROR;
			break;
		}
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	/* Combine host byte and SCSI status byte into the midlayer result. */
	cmnd->result = host_status << 16 | scsi_status;
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
3880
8b0dff14
JS
3881/**
3882 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
3883 * @phba: Pointer to HBA context object.
3884 *
3885 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
3886 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
3887 * held.
3888 * If scsi-mq is enabled, get the default block layer mapping of software queues
3889 * to hardware queues. This information is saved in request tag.
3890 *
3891 * Return: index into SLI4 fast-path FCP queue index.
3892 **/
3893int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
3894 struct lpfc_scsi_buf *lpfc_cmd)
3895{
3896 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3897 struct lpfc_vector_map_info *cpup;
3898 int chann, cpu;
3899 uint32_t tag;
3900 uint16_t hwq;
3901
05a05872 3902 if (cmnd && shost_use_blk_mq(cmnd->device->host)) {
8b0dff14
JS
3903 tag = blk_mq_unique_tag(cmnd->request);
3904 hwq = blk_mq_unique_tag_to_hwq(tag);
3905
3906 return hwq;
3907 }
3908
3909 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
3910 && phba->cfg_fcp_io_channel > 1) {
3911 cpu = smp_processor_id();
3912 if (cpu < phba->sli4_hba.num_present_cpu) {
3913 cpup = phba->sli4_hba.cpu_map;
3914 cpup += cpu;
3915 return cpup->channel_id;
3916 }
3917 }
3918 chann = atomic_add_return(1, &phba->fcp_qidx);
2ea259ee 3919 chann = chann % phba->cfg_fcp_io_channel;
8b0dff14
JS
3920 return chann;
3921}
3922
3923
/**
 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns scsi command result by looking into response IOCB
 * status field appropriately. This routine handles QUEUE FULL condition as
 * well by ramping down device queue depth.  It also posts fast-path events
 * for busy/error conditions, updates per-node queue-depth accounting,
 * un-maps the DMA buffers and hands the command back to the midlayer.
 **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	uint32_t logit = LOG_FCP;

	atomic_inc(&phba->fc4ScsiIoCmpls);

	/* Sanity check on return of outstanding command */
	cmd = lpfc_cmd->pCmd;
	if (!cmd)
		return;
	shost = cmd->device->host;

	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	/* pick up SLI4 exchange busy status from HBA */
	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Error-injection bookkeeping: restore the protection data words
	 * that were deliberately corrupted when the command was issued.
	 */
	if (lpfc_cmd->prot_data_type) {
		struct scsi_dif_tuple *src = NULL;

		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
		/*
		 * Used to restore any changes to protection
		 * data for error injection.
		 */
		switch (lpfc_cmd->prot_data_type) {
		case LPFC_INJERR_REFTAG:
			src->ref_tag =
				lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_APPTAG:
			src->app_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_GUARD:
			src->guard_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		default:
			break;
		}

		lpfc_cmd->prot_data = 0;
		lpfc_cmd->prot_data_type = 0;
		lpfc_cmd->prot_data_segment = NULL;
	}
#endif

	if (lpfc_cmd->status) {
		/* Normalize the IOCB status before dispatching on it. */
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;
		/* Suppress logging for plain underruns unless the operator
		 * asked for LOG_FCP_UNDER verbosity.
		 */
		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
			logit = 0;
		else
			logit = LOG_FCP | LOG_FCP_UNDER;
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
				 "9030 FCP cmd x%x failed <%d/%lld> "
				 "status: x%x result: x%x "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 vport->fc_myDID,
				 (pnode) ? pnode->nlp_DID : 0,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			/* Queue a fast-path event so the worker thread can
			 * report the port/fabric busy condition.
			 */
			cmd->result = DID_TRANSPORT_DISRUPTED << 16;
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
					&pnode->nlp_portname,
					sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
					&pnode->nlp_nodename,
					sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				&phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
		case IOSTAT_REMOTE_STOP:
			/* Security/crypto errors: no point in retrying. */
			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
				cmd->result = DID_NO_CONNECT << 16;
				break;
			}
			/* Transient conditions: let the midlayer requeue. */
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
				cmd->result = DID_REQUEUE << 16;
				break;
			}
			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
					/*
					 * This is a response for a BG enabled
					 * cmd. Parse BG error
					 */
					lpfc_parse_bg_err(phba, lpfc_cmd,
							pIocbOut);
					break;
				} else {
					lpfc_printf_vlog(vport, KERN_WARNING,
							LOG_BG,
							"9031 non-zero BGSTAT "
							"on unprotected cmd\n");
				}
			}
			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
				&& (phba->sli_rev == LPFC_SLI_REV4)
				&& (pnode && NLP_CHK_NODE_ACT(pnode))) {
				/* This IO was aborted by the target, we don't
				 * know the rxid and because we did not send the
				 * ABTS we cannot generate an RRQ.
				 */
				lpfc_set_rrq_active(phba, pnode,
					lpfc_cmd->cur_iocbq.sli4_lxritag,
					0, 0);
			}
			/* else: fall through */
		default:
			cmd->result = DID_ERROR << 16;
			break;
		}

		/* If the remote node is gone or not mapped, report the
		 * command as disrupted regardless of the status above.
		 */
		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
				      SAM_STAT_BUSY;
	} else
		cmd->result = DID_OK << 16;

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%llu> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(phba, lpfc_cmd);
	/* Queue-depth ramp-down: if this command took longer than the
	 * configured completion-time threshold, shrink the node's queue
	 * depth toward the number of commands currently pending.
	 */
	if (vport->cfg_max_scsicmpl_time &&
	    time_after(jiffies, lpfc_cmd->start_time +
		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
			atomic_dec(&pnode->cmd_pending);
			if (pnode->cmd_qdepth >
				atomic_read(&pnode->cmd_pending) &&
				(atomic_read(&pnode->cmd_pending) >
				LPFC_MIN_TGT_QDEPTH) &&
				((cmd->cmnd[0] == READ_10) ||
				(cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
		atomic_dec(&pnode->cmd_pending);
	}
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

	/* Clear pCmd under the hbalock so abort paths see a consistent
	 * view before the command is returned to the midlayer.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	lpfc_cmd->pCmd = NULL;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
	cmd->scsi_done(cmd);

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(shost->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}
4169
8b2564ec
AK
4170/**
4171 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
4172 * @data: A pointer to the immediate command data portion of the IOCB.
4173 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
4174 *
4175 * The routine copies the entire FCP command from @fcp_cmnd to @data while
4176 * byte swapping the data to big endian format for transmission on the wire.
4177 **/
4178static void
4179lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
4180{
4181 int i, j;
4182 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
4183 i += sizeof(uint32_t), j++) {
4184 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
4185 }
4186}
4187
/**
 * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to send.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes fcp_cmnd and iocb data structure from scsi command
 * to transfer for device with SLI3 interface spec: it builds the FCP_CMND
 * payload (LUN, CDB, task attributes, direction bits) and fills in the
 * IOCB command/context fields for the target node.
 **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	uint8_t *ptr;
	bool sli4;
	uint32_t fcpdl;

	/* Nothing to build if the target node is gone/inactive. */
	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	/* Copy the CDB and zero-pad the remainder of the fixed CDB area. */
	ptr = &fcp_cmnd->fcpCdb[0];
	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
		ptr += scsi_cmnd->cmd_len;
		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
	}

	fcp_cmnd->fcpCntl1 = SIMPLE_Q;

	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			/* First-burst: let the HBA send up to the configured
			 * amount of write data before XFER_RDY arrives.
			 */
			if (vport->cfg_first_burst_size &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				fcpdl = scsi_bufflen(scsi_cmnd);
				if (fcpdl < vport->cfg_first_burst_size)
					piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
				else
					piocbq->iocb.un.fcpi.fcpi_XRdy =
						vport->cfg_first_burst_size;
			}
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			atomic_inc(&phba->fc4ScsiOutputRequests);
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			atomic_inc(&phba->fc4ScsiInputRequests);
		}
	} else {
		/* No data transfer: plain FCP command. */
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		atomic_inc(&phba->fc4ScsiControlRequests);
	}
	/* SLI-3 without BlockGuard carries the FCP_CMND inline in the IOCB. */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (sli4)
		piocbq->iocb.ulpContext =
			phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1  = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}
4289
/**
 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
 * for device with SLI-3 interface spec.
 *
 * Return codes:
 *  0 - Error (target node missing, inactive or not mapped)
 *  1 - Success
 **/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     uint64_t lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	/* TM commands can only be sent to a live, mapped node. */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	/* fcpCntl2 carries the task management flags (no CDB for TM). */
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	/* SLI-3 without BlockGuard carries the FCP_CMND inline in the IOCB. */
	if (vport->phba->sli_rev == 3 &&
	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		piocb->ulpContext =
		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	}
	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
	piocb->ulpPU = 0;
	piocb->un.fcpi.fcpi_parm = 0;

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);

	return 1;
}
4359
4360/**
25985edc 4361 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
3772a991
JS
4362 * @phba: The hba struct for which this call is being executed.
4363 * @dev_grp: The HBA PCI-Device group number.
4364 *
4365 * This routine sets up the SCSI interface API function jump table in @phba
4366 * struct.
4367 * Returns: 0 - success, -ENODEV - failure.
4368 **/
4369int
4370lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4371{
4372
f1126688
JS
4373 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4374 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
f1126688 4375
3772a991
JS
4376 switch (dev_grp) {
4377 case LPFC_PCI_DEV_LP:
4378 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
4379 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
acd6859b 4380 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
3772a991 4381 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
19ca7609 4382 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
3772a991 4383 break;
da0436e9
JS
4384 case LPFC_PCI_DEV_OC:
4385 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
4386 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
acd6859b 4387 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
da0436e9 4388 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
19ca7609 4389 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
da0436e9 4390 break;
3772a991
JS
4391 default:
4392 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4393 "1418 Invalid HBA PCI-device group: 0x%x\n",
4394 dev_grp);
4395 return -ENODEV;
4396 break;
4397 }
3772a991 4398 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
84d1b006 4399 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
3772a991
JS
4400 return 0;
4401}
4402
9bad7671 4403/**
3621a710 4404 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
9bad7671
JS
4405 * @phba: The Hba for which this call is being executed.
4406 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4407 * @rspiocbq: Pointer to lpfc_iocbq data structure.
4408 *
4409 * This routine is IOCB completion routine for device reset and target reset
4410 * routine. This routine release scsi buffer associated with lpfc_cmd.
4411 **/
7054a606
JS
4412static void
4413lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4414 struct lpfc_iocbq *cmdiocbq,
4415 struct lpfc_iocbq *rspiocbq)
4416{
4417 struct lpfc_scsi_buf *lpfc_cmd =
4418 (struct lpfc_scsi_buf *) cmdiocbq->context1;
4419 if (lpfc_cmd)
4420 lpfc_release_scsi_buf(phba, lpfc_cmd);
4421 return;
4422}
4423
9bad7671 4424/**
3621a710 4425 * lpfc_info - Info entry point of scsi_host_template data structure
9bad7671
JS
4426 * @host: The scsi host for which this call is being executed.
4427 *
4428 * This routine provides module information about hba.
4429 *
4430 * Reutrn code:
4431 * Pointer to char - Success.
4432 **/
dea3101e 4433const char *
4434lpfc_info(struct Scsi_Host *host)
4435{
2e0fef85
JS
4436 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4437 struct lpfc_hba *phba = vport->phba;
8b68cd52 4438 int len, link_speed = 0;
dea3101e 4439 static char lpfcinfobuf[384];
4440
4441 memset(lpfcinfobuf,0,384);
4442 if (phba && phba->pcidev){
4443 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
4444 len = strlen(lpfcinfobuf);
4445 snprintf(lpfcinfobuf + len,
4446 384-len,
4447 " on PCI bus %02x device %02x irq %d",
4448 phba->pcidev->bus->number,
4449 phba->pcidev->devfn,
4450 phba->pcidev->irq);
4451 len = strlen(lpfcinfobuf);
4452 if (phba->Port[0]) {
4453 snprintf(lpfcinfobuf + len,
4454 384-len,
4455 " port %s",
4456 phba->Port);
4457 }
65467b6b 4458 len = strlen(lpfcinfobuf);
a085e87c 4459 link_speed = lpfc_sli_port_speed_get(phba);
8b68cd52
JS
4460 if (link_speed != 0)
4461 snprintf(lpfcinfobuf + len, 384-len,
4462 " Logical Link Speed: %d Mbps", link_speed);
dea3101e 4463 }
4464 return lpfcinfobuf;
4465}
4466
9bad7671 4467/**
3621a710 4468 * lpfc_poll_rearm_time - Routine to modify fcp_poll timer of hba
9bad7671
JS
4469 * @phba: The Hba for which this call is being executed.
4470 *
4471 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
4472 * The default value of cfg_poll_tmo is 10 milliseconds.
4473 **/
875fbdfe
JSEC
4474static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4475{
4476 unsigned long poll_tmo_expires =
4477 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4478
895427bd 4479 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
875fbdfe
JSEC
4480 mod_timer(&phba->fcp_poll_timer,
4481 poll_tmo_expires);
4482}
4483
/**
 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba by delegating to
 * lpfc_poll_rearm_timer(), which only arms the timer while the FCP ring
 * has commands outstanding.
 **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}
4494
9bad7671 4495/**
3621a710 4496 * lpfc_poll_timeout - Restart polling timer
9bad7671
JS
4497 * @ptr: Map to lpfc_hba data structure pointer.
4498 *
4499 * This routine restarts fcp_poll timer, when FCP ring polling is enable
4500 * and FCP Ring interrupt is disable.
4501 **/
4502
f22eb4d3 4503void lpfc_poll_timeout(struct timer_list *t)
875fbdfe 4504{
f22eb4d3 4505 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
875fbdfe
JSEC
4506
4507 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
45ed1190 4508 lpfc_sli_handle_fast_ring_event(phba,
895427bd 4509 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
45ed1190 4510
875fbdfe
JSEC
4511 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4512 lpfc_poll_rearm_timer(phba);
4513 }
875fbdfe
JSEC
4514}
4515
/**
 * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @shost: Pointer to the Scsi_Host the command was received on.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Driver registers this routine to scsi midlayer to submit a @cmnd to process.
 * This routine prepares an IOCB from scsi command and provides to firmware.
 * The midlayer completion (cmnd->scsi_done) is invoked either here on
 * early failure, or later from the IOCB completion path.
 *
 * Return value :
 *   0 - Success (command accepted or failed with scsi_done called)
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
 *   SCSI_MLQUEUE_TARGET_BUSY - Block this target temporarily.
 **/
static int
lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);

	/* sanity check on references */
	if (unlikely(!rdata) || unlikely(!rport))
		goto out_fail_command;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	/* Reject protected I/O when BlockGuard is not enabled on the HBA */
	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
	    (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {

		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto out_tgt_busy;
	/* Enforce the per-node queue depth before taking a buffer */
	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
				 "3377 Target Queue Full, scsi Id:%d Qdepth:%d"
				 " Pending command:%d"
				 " WWNN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
				 " WWPN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
				 ndlp->nlp_sid, ndlp->cmd_qdepth,
				 atomic_read(&ndlp->cmd_pending),
				 ndlp->nlp_nodename.u.wwn[0],
				 ndlp->nlp_nodename.u.wwn[1],
				 ndlp->nlp_nodename.u.wwn[2],
				 ndlp->nlp_nodename.u.wwn[3],
				 ndlp->nlp_nodename.u.wwn[4],
				 ndlp->nlp_nodename.u.wwn[5],
				 ndlp->nlp_nodename.u.wwn[6],
				 ndlp->nlp_nodename.u.wwn[7],
				 ndlp->nlp_portname.u.wwn[0],
				 ndlp->nlp_portname.u.wwn[1],
				 ndlp->nlp_portname.u.wwn[2],
				 ndlp->nlp_portname.u.wwn[3],
				 ndlp->nlp_portname.u.wwn[4],
				 ndlp->nlp_portname.u.wwn[5],
				 ndlp->nlp_portname.u.wwn[6],
				 ndlp->nlp_portname.u.wwn[7]);
		goto out_tgt_busy;
	}
	/* From here on, error exits must undo this via out_host_busy */
	atomic_inc(&ndlp->cmd_pending);

	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;

	/* Build the DMA mapping; the BlockGuard variant also sets up the
	 * protection-data scatter list.
	 */
	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9033 BLKGRD: rcvd %s cmd:x%x "
					 "sector x%llx cnt %u pt %x\n",
					 dif_op_str[scsi_get_prot_op(cmnd)],
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
					 "x%x sector x%llx cnt %u pt %x\n",
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "3376 FCP could not issue IOCB err %x"
				 "FCP cmd x%x <%d/%llu> "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x x%x\n",
				 err, cmnd->cmnd[0],
				 cmnd->device ? cmnd->device->id : 0xffff,
				 cmnd->device ? cmnd->device->lun : (u64) -1,
				 vport->fc_myDID, ndlp->nlp_DID,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
				 (uint32_t)
				 (cmnd->request->timeout / 1000));

		/* Undo the per-direction FC4 request counter that was
		 * incremented when the command was prepped.
		 */
		switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
		case WRITE_DATA:
			atomic_dec(&phba->fc4ScsiOutputRequests);
			break;
		case READ_DATA:
			atomic_dec(&phba->fc4ScsiInputRequests);
			break;
		default:
			atomic_dec(&phba->fc4ScsiControlRequests);
		}
		goto out_host_busy_free_buf;
	}
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	atomic_dec(&ndlp->cmd_pending);
	return SCSI_MLQUEUE_HOST_BUSY;

 out_tgt_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

 out_fail_command:
	cmnd->scsi_done(cmnd);
	return 0;
}
4707
f281233d 4708
/**
 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts @cmnd pending in base driver.  It issues an ABTS
 * (or a close, if the link is down) for the outstanding IOCB and then
 * waits, bounded by twice devloss_tmo, for the abort to complete.
 *
 * Return code :
 *   0x2003 - Error (FAILED)
 *   0x2002 - Success (SUCCESS)
 **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS, status = 0;
	struct lpfc_sli_ring *pring_s4;
	int ret_val;
	unsigned long flags;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/* hbalock serializes against the completion and flush paths */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3168 SCSI Layer abort requested I/O has been "
			"flushed by LLD.\n");
		return FAILED;
	}

	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
		/* No driver context left: the I/O already completed */
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
			 "x%x ID %d LUN %llu\n",
			 SUCCESS, cmnd->device->id, cmnd->device->lun);
		return SUCCESS;
	}

	iocb = &lpfc_cmd->cur_iocbq;
	/* the command is in process of being cancelled */
	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3169 SCSI Layer abort requested I/O has been "
			"cancelled by LLD.\n");
		return FAILED;
	}
	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired. Just return SUCCESS.
	 */
	if (lpfc_cmd->pCmd != cmnd) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3170 SCSI Layer abort requested I/O has been "
			"completed by LLD.\n");
		goto out_unlock;
	}

	BUG_ON(iocb->context1 != lpfc_cmd);

	/* abort issued in recovery is still in progress */
	if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "3389 SCSI Layer I/O Abort Request is pending\n");
		spin_unlock_irqrestore(&phba->hbalock, flags);
		goto wait_for_cmpl;
	}

	abtsiocb = __lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out_unlock;
	}

	/* Indicate the IO is being aborted by the driver. */
	iocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointig at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	/* SLI4 identifies the I/O by XRI; SLI3 by IOCB tag */
	if (phba->sli_rev == LPFC_SLI_REV4)
		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
	else
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->hba_wqidx = iocb->hba_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
	if (iocb->iocb_flag & LPFC_IO_FOF)
		abtsiocb->iocb_flag |= LPFC_IO_FOF;

	/* With the link down an ABTS cannot reach the target; close the
	 * exchange locally instead.
	 */
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocb);
		if (pring_s4 == NULL) {
			ret = FAILED;
			goto out_unlock;
		}
		/* Note: both hbalock and ring_lock must be set here */
		spin_lock(&pring_s4->ring_lock);
		ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
						abtsiocb, 0);
		spin_unlock(&pring_s4->ring_lock);
	} else {
		ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
						abtsiocb, 0);
	}
	/* no longer need the lock after this point */
	spin_unlock_irqrestore(&phba->hbalock, flags);


	if (ret_val == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

wait_for_cmpl:
	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			  (lpfc_cmd->pCmd != cmnd),
			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));

	spin_lock_irqsave(shost->host_lock, flags);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* pCmd unchanged means the completion never came back in time */
	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for aborting I/O (xri:x%x) to complete: "
				 "ret %#x, ID %d, LUN %llu\n",
				 iocb->sli4_xritag, ret,
				 cmnd->device->id, cmnd->device->lun);
	}
	goto out;

out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, flags);
out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %llu\n", ret, cmnd->device->id,
			 cmnd->device->lun);
	return ret;
}
4891
bbb9d180
JS
4892static char *
4893lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4894{
4895 switch (task_mgmt_cmd) {
4896 case FCP_ABORT_TASK_SET:
4897 return "ABORT_TASK_SET";
4898 case FCP_CLEAR_TASK_SET:
4899 return "FCP_CLEAR_TASK_SET";
4900 case FCP_BUS_RESET:
4901 return "FCP_BUS_RESET";
4902 case FCP_LUN_RESET:
4903 return "FCP_LUN_RESET";
4904 case FCP_TARGET_RESET:
4905 return "FCP_TARGET_RESET";
4906 case FCP_CLEAR_ACA:
4907 return "FCP_CLEAR_ACA";
4908 case FCP_TERMINATE_TASK:
4909 return "FCP_TERMINATE_TASK";
4910 default:
4911 return "unknown";
4912 }
4913}
4914
53151bbb
JS
4915
4916/**
4917 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
4918 * @vport: The virtual port for which this call is being executed.
4919 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
4920 *
4921 * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded
4922 *
4923 * Return code :
4924 * 0x2003 - Error
4925 * 0x2002 - Success
4926 **/
4927static int
4928lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
4929{
4930 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
4931 uint32_t rsp_info;
4932 uint32_t rsp_len;
4933 uint8_t rsp_info_code;
4934 int ret = FAILED;
4935
4936
4937 if (fcprsp == NULL)
4938 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4939 "0703 fcp_rsp is missing\n");
4940 else {
4941 rsp_info = fcprsp->rspStatus2;
4942 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
4943 rsp_info_code = fcprsp->rspInfo3;
4944
4945
4946 lpfc_printf_vlog(vport, KERN_INFO,
4947 LOG_FCP,
4948 "0706 fcp_rsp valid 0x%x,"
4949 " rsp len=%d code 0x%x\n",
4950 rsp_info,
4951 rsp_len, rsp_info_code);
4952
4953 if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) {
4954 switch (rsp_info_code) {
4955 case RSP_NO_FAILURE:
4956 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4957 "0715 Task Mgmt No Failure\n");
4958 ret = SUCCESS;
4959 break;
4960 case RSP_TM_NOT_SUPPORTED: /* TM rejected */
4961 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4962 "0716 Task Mgmt Target "
4963 "reject\n");
4964 break;
4965 case RSP_TM_NOT_COMPLETED: /* TM failed */
4966 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4967 "0717 Task Mgmt Target "
4968 "failed TM\n");
4969 break;
4970 case RSP_TM_INVALID_LU: /* TM to invalid LU! */
4971 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4972 "0718 Task Mgmt to invalid "
4973 "LUN\n");
4974 break;
4975 }
4976 }
4977 }
4978 return ret;
4979}
4980
4981
/**
 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
 * @vport: The virtual port for which this call is being executed.
 * @cmnd: Pointer to the scsi_cmnd that triggered the TMF; used to look up
 *        the remote port data and attached to the TMF's scsi buffer.
 * @tgt_id: Target ID of remote device.
 * @lun_id: Lun number for the TMF
 * @task_mgmt_cmd: type of TMF to send
 *
 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
 * a remote port.
 *
 * Return Code:
 *   0x2003 - Error (FAILED)
 *   0x2002 - Success (SUCCESS)
 *   TIMEOUT_ERROR - the TMF IOCB timed out
 **/
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
		   unsigned int tgt_id, uint64_t lun_id,
		   uint8_t task_mgmt_cmd)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	int ret;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
		return FAILED;
	pnode = rdata->pnode;

	lpfc_cmd = lpfc_get_scsi_buf(phba, pnode);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->pCmd = cmnd;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
					   task_mgmt_cmd);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue %s to TGT %d LUN %llu "
			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
			 iocbq->iocb_flag);

	/* Issue the TMF synchronously and wait up to cfg_task_mgmt_tmo */
	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if ((status != IOCB_SUCCESS) ||
	    (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
		if (status != IOCB_SUCCESS ||
		    iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0727 TMF %s to TGT %d LUN %llu "
					 "failed (%d, %d) iocb_flag x%x\n",
					 lpfc_taskmgmt_name(task_mgmt_cmd),
					 tgt_id, lun_id,
					 iocbqrsp->iocb.ulpStatus,
					 iocbqrsp->iocb.un.ulpWord[4],
					 iocbq->iocb_flag);
		/* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
		if (status == IOCB_SUCCESS) {
			if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
				/* Something in the FCP_RSP was invalid.
				 * Check conditions */
				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
			else
				ret = FAILED;
		} else if (status == IOCB_TIMEDOUT) {
			ret = TIMEOUT_ERROR;
		} else {
			ret = FAILED;
		}
	} else
		ret = SUCCESS;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	/* On timeout the IOCB still owns the scsi buffer; it will be
	 * released by the deferred completion, not here.
	 */
	if (ret != TIMEOUT_ERROR)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	return ret;
}
5082
5083/**
5084 * lpfc_chk_tgt_mapped -
5085 * @vport: The virtual port to check on
5086 * @cmnd: Pointer to scsi_cmnd data structure.
5087 *
5088 * This routine delays until the scsi target (aka rport) for the
5089 * command exists (is present and logged in) or we declare it non-existent.
5090 *
5091 * Return code :
5092 * 0x2003 - Error
5093 * 0x2002 - Success
5094 **/
5095static int
5096lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5097{
1ba981fd 5098 struct lpfc_rport_data *rdata;
1c6f4ef5 5099 struct lpfc_nodelist *pnode;
bbb9d180
JS
5100 unsigned long later;
5101
1ba981fd 5102 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
1c6f4ef5
JS
5103 if (!rdata) {
5104 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5105 "0797 Tgt Map rport failure: rdata x%p\n", rdata);
5106 return FAILED;
5107 }
5108 pnode = rdata->pnode;
bbb9d180
JS
5109 /*
5110 * If target is not in a MAPPED state, delay until
5111 * target is rediscovered or devloss timeout expires.
5112 */
5113 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5114 while (time_after(later, jiffies)) {
5115 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
5116 return FAILED;
5117 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5118 return SUCCESS;
5119 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1ba981fd 5120 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
bbb9d180
JS
5121 if (!rdata)
5122 return FAILED;
5123 pnode = rdata->pnode;
5124 }
5125 if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
5126 (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5127 return FAILED;
5128 return SUCCESS;
5129}
5130
5131/**
5132 * lpfc_reset_flush_io_context -
5133 * @vport: The virtual port (scsi_host) for the flush context
5134 * @tgt_id: If aborting by Target contect - specifies the target id
5135 * @lun_id: If aborting by Lun context - specifies the lun id
5136 * @context: specifies the context level to flush at.
5137 *
5138 * After a reset condition via TMF, we need to flush orphaned i/o
5139 * contexts from the adapter. This routine aborts any contexts
5140 * outstanding, then waits for their completions. The wait is
5141 * bounded by devloss_tmo though.
5142 *
5143 * Return code :
5144 * 0x2003 - Error
5145 * 0x2002 - Success
5146 **/
5147static int
5148lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5149 uint64_t lun_id, lpfc_ctx_cmd context)
5150{
5151 struct lpfc_hba *phba = vport->phba;
5152 unsigned long later;
5153 int cnt;
5154
5155 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
6175c02a 5156 if (cnt)
98912dda 5157 lpfc_sli_abort_taskmgmt(vport,
895427bd 5158 &phba->sli.sli3_ring[LPFC_FCP_RING],
98912dda 5159 tgt_id, lun_id, context);
915caaaf
JS
5160 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5161 while (time_after(later, jiffies) && cnt) {
5162 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
bbb9d180 5163 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
dea3101e 5164 }
dea3101e 5165 if (cnt) {
e8b62011 5166 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
bbb9d180
JS
5167 "0724 I/O flush failure for context %s : cnt x%x\n",
5168 ((context == LPFC_CTX_LUN) ? "LUN" :
5169 ((context == LPFC_CTX_TGT) ? "TGT" :
5170 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5171 cnt);
5172 return FAILED;
dea3101e 5173 }
bbb9d180
JS
5174 return SUCCESS;
5175}
5176
/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a LUN_RESET task management
 * command, then flushes any I/O left outstanding against the LUN.
 *
 * Return code :
 *  0x2003 - Error (FAILED)
 *  0x2002 - Success (SUCCESS)
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0798 Device Reset rport failure: rdata x%p\n",
				 rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/* Wait for the target to be MAPPED (or declared gone) first */
	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0721 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

	/* Post a LUN-reset vendor event for management applications */
	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
	scsi_event.lun = lun_id;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
						FCP_LUN_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued Device Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as : they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						LPFC_CTX_LUN);

	return status;
}
5247
/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset by sending a TARGET_RESET task management
 * command, then flushes any I/O left outstanding against the target.
 *
 * Return code :
 *  0x2003 - Error (FAILED)
 *  0x2002 - Success (SUCCESS)
 *  FAST_IO_FAIL - target never became mapped; I/O was flushed instead
 **/
static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0799 Target Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0722 Target Reset rport failure: rdata x%p\n", rdata);
		/* Target never mapped: drop ADISC/FCP2 state and flush the
		 * outstanding I/O rather than sending a TMF nowhere.
		 */
		if (pnode) {
			spin_lock_irq(shost->host_lock);
			pnode->nlp_flag &= ~NLP_NPR_ADISC;
			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
			spin_unlock_irq(shost->host_lock);
		}
		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_TGT);
		return FAST_IO_FAIL;
	}

	/* Post a target-reset vendor event for management applications */
	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
					FCP_TARGET_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0723 SCSI layer issued Target Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as : they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_TGT);
	return status;
}
5324
/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does target reset to all targets on @cmnd->device->host.
 * This emulates Parallel SCSI Bus Reset Semantics.
 *
 * Return code :
 *  0x2003 - Error (FAILED)
 *  0x2002 - Success (SUCCESS)
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	/* Post a bus-reset vendor event for management applications */
	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver. Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		/* host_lock protects the vport node list during the walk */
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			/* Optionally skip FCP-2 (e.g. tape) devices */
			if (vport->phba->cfg_fcp2_no_tgt_reset &&
			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport &&
			    ndlp->nlp_type & NLP_FCP_TARGET) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, cmnd,
					i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * We have to clean up i/o as : they may be orphaned by the TMFs
	 * above; or if any of the TMFs failed, they may be in an
	 * indeterminate state.
	 * We will report success if all the i/o aborts successfully.
	 */

	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}
5411
27b01b82
JS
5412/**
5413 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
5414 * @cmnd: Pointer to scsi_cmnd data structure.
5415 *
5416 * This routine does host reset to the adaptor port. It brings the HBA
5417 * offline, performs a board restart, and then brings the board back online.
5418 * The lpfc_offline calls lpfc_sli_hba_down which will abort and local
5419 * reject all outstanding SCSI commands to the host and error returned
5420 * back to SCSI mid-level. As this will be SCSI mid-level's last resort
5421 * of error handling, it will only return error if resetting of the adapter
5422 * is not successful; in all other cases, will return success.
5423 *
5424 * Return code :
5425 * 0x2003 - Error
5426 * 0x2002 - Success
5427 **/
5428static int
5429lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5430{
5431 struct Scsi_Host *shost = cmnd->device->host;
5432 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5433 struct lpfc_hba *phba = vport->phba;
5434 int rc, ret = SUCCESS;
5435
a88dbb6a
JS
5436 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5437 "3172 SCSI layer issued Host Reset Data:\n");
5438
618a5230 5439 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
27b01b82
JS
5440 lpfc_offline(phba);
5441 rc = lpfc_sli_brdrestart(phba);
5442 if (rc)
5443 ret = FAILED;
a88dbb6a
JS
5444 rc = lpfc_online(phba);
5445 if (rc)
5446 ret = FAILED;
27b01b82
JS
5447 lpfc_unblock_mgmt_io(phba);
5448
a88dbb6a
JS
5449 if (ret == FAILED) {
5450 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5451 "3323 Failed host reset, bring it offline\n");
5452 lpfc_sli4_offline_eratt(phba);
5453 }
27b01b82
JS
5454 return ret;
5455}
5456
/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this host's
 * globally available list of scsi buffers. This routine also makes sure scsi
 * buffer is not allocated more than HBA limit conveyed to midlayer. This list
 * of scsi buffer exists for the lifetime of the driver.
 *
 * When OAS (cfg_fof) is enabled, an lpfc_device_data entry for the
 * (vport wwpn, target wwpn, lun) triple is looked up (or created) and
 * attached as sdev->hostdata; otherwise the rport's dd_data is used.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
        struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
        struct lpfc_hba *phba = vport->phba;
        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
        uint32_t total = 0;
        uint32_t num_to_alloc = 0;
        int num_allocated = 0;
        uint32_t sdev_cnt;
        struct lpfc_device_data *device_data;
        unsigned long flags;
        struct lpfc_name target_wwpn;

        /* No usable remote port means the device cannot be addressed. */
        if (!rport || fc_remote_port_chkready(rport))
                return -ENXIO;

        if (phba->cfg_fof) {

                /*
                 * Check to see if the device data structure for the lun
                 * exists. If not, create one.
                 */

                u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
                spin_lock_irqsave(&phba->devicelock, flags);
                device_data = __lpfc_get_device_data(phba,
                                                     &phba->luns,
                                                     &vport->fc_portname,
                                                     &target_wwpn,
                                                     sdev->lun);
                if (!device_data) {
                        /*
                         * Drop the lock so the creation may use GFP_KERNEL.
                         * NOTE(review): after re-acquiring the lock there is
                         * no re-check for a concurrently added entry for the
                         * same lun — duplicates appear possible; confirm
                         * callers serialize slave_alloc per lun.
                         */
                        spin_unlock_irqrestore(&phba->devicelock, flags);
                        device_data = lpfc_create_device_data(phba,
                                                &vport->fc_portname,
                                                &target_wwpn,
                                                sdev->lun,
                                                phba->cfg_XLanePriority,
                                                true);
                        if (!device_data)
                                return -ENOMEM;
                        spin_lock_irqsave(&phba->devicelock, flags);
                        list_add_tail(&device_data->listentry, &phba->luns);
                }
                device_data->rport_data = rport->dd_data;
                device_data->available = true;
                spin_unlock_irqrestore(&phba->devicelock, flags);
                sdev->hostdata = device_data;
        } else {
                sdev->hostdata = rport->dd_data;
        }
        sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

        /*
         * Populate the cmds_per_lun count scsi_bufs into this host's globally
         * available list of scsi buffers. Don't allocate more than the
         * HBA limit conveyed to the midlayer via the host structure. The
         * formula accounts for the lun_queue_depth + error handlers + 1
         * extra. This list of scsi bufs exists for the lifetime of the driver.
         */
        total = phba->total_scsi_bufs;
        num_to_alloc = vport->cfg_lun_queue_depth + 2;

        /* If allocated buffers are enough do nothing */
        if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
                return 0;

        /* Allow some exchanges to be available always to complete discovery */
        if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                                 "0704 At limitation of %d preallocated "
                                 "command buffers\n", total);
                return 0;
        /* Allow some exchanges to be available always to complete discovery */
        } else if (total + num_to_alloc >
                phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                                 "0705 Allocation request of %d "
                                 "command buffers will exceed max of %d. "
                                 "Reducing allocation request to %d.\n",
                                 num_to_alloc, phba->cfg_hba_queue_depth,
                                 (phba->cfg_hba_queue_depth - total));
                num_to_alloc = phba->cfg_hba_queue_depth - total;
        }
        /* A partial allocation is tolerated; only what succeeded is counted. */
        num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
        if (num_to_alloc != num_allocated) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                         "0708 Allocation request of %d "
                                         "command buffers did not succeed. "
                                         "Allocated %d buffers.\n",
                                         num_to_alloc, num_allocated);
        }
        if (num_allocated > 0)
                phba->total_scsi_bufs += num_allocated;
        return 0;
}
5566
9bad7671 5567/**
3621a710 5568 * lpfc_slave_configure - scsi_host_template slave_configure entry point
9bad7671
JS
5569 * @sdev: Pointer to scsi_device.
5570 *
5571 * This routine configures following items
5572 * - Tag command queuing support for @sdev if supported.
9bad7671
JS
5573 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
5574 *
5575 * Return codes:
5576 * 0 - Success
5577 **/
dea3101e 5578static int
5579lpfc_slave_configure(struct scsi_device *sdev)
5580{
2e0fef85
JS
5581 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5582 struct lpfc_hba *phba = vport->phba;
dea3101e 5583
db5ed4df 5584 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
dea3101e 5585
875fbdfe 5586 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
45ed1190 5587 lpfc_sli_handle_fast_ring_event(phba,
895427bd 5588 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
875fbdfe
JSEC
5589 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5590 lpfc_poll_rearm_timer(phba);
5591 }
5592
dea3101e 5593 return 0;
5594}
5595
9bad7671 5596/**
3621a710 5597 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
9bad7671
JS
5598 * @sdev: Pointer to scsi_device.
5599 *
5600 * This routine sets @sdev hostatdata filed to null.
5601 **/
dea3101e 5602static void
5603lpfc_slave_destroy(struct scsi_device *sdev)
5604{
d7c47992
JS
5605 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5606 struct lpfc_hba *phba = vport->phba;
1ba981fd
JS
5607 unsigned long flags;
5608 struct lpfc_device_data *device_data = sdev->hostdata;
5609
d7c47992 5610 atomic_dec(&phba->sdev_cnt);
f38fa0bb 5611 if ((phba->cfg_fof) && (device_data)) {
1ba981fd
JS
5612 spin_lock_irqsave(&phba->devicelock, flags);
5613 device_data->available = false;
5614 if (!device_data->oas_enabled)
5615 lpfc_delete_device_data(phba, device_data);
5616 spin_unlock_irqrestore(&phba->devicelock, flags);
5617 }
dea3101e 5618 sdev->hostdata = NULL;
5619 return;
5620}
5621
1ba981fd
JS
/**
 * lpfc_create_device_data - creates and initializes device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 * @pri: Priority to record for the lun.
 * @atomic_create: Flag to indicate if memory should be allocated using the
 *                 GFP_ATOMIC flag or not.
 *
 * This routine creates a device data structure which will contain identifying
 * information for the device (host wwpn, target wwpn, lun), state of OAS,
 * whether or not the corresponding lun is available by the system,
 * and pointer to the rport data.
 *
 * The returned structure is not linked into any list; the caller owns it
 * and is responsible for list insertion and eventual freeing.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data*
lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
                        struct lpfc_name *target_wwpn, uint64_t lun,
                        uint32_t pri, bool atomic_create)
{

        struct lpfc_device_data *lun_info;
        int memory_flags;

        if (unlikely(!phba) || !vport_wwpn || !target_wwpn  ||
            !(phba->cfg_fof))
                return NULL;

        /* Attempt to create the device data to contain lun info */

        if (atomic_create)
                memory_flags = GFP_ATOMIC;
        else
                memory_flags = GFP_KERNEL;
        lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
        if (!lun_info)
                return NULL;
        INIT_LIST_HEAD(&lun_info->listentry);
        lun_info->rport_data  = NULL;
        memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
               sizeof(struct lpfc_name));
        memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
               sizeof(struct lpfc_name));
        lun_info->device_id.lun = lun;
        lun_info->oas_enabled = false;
        lun_info->priority = pri;
        lun_info->available = false;
        return lun_info;
}
5674
/**
 * lpfc_delete_device_data - frees a device data structure for OAS
 * @phba: Pointer to host bus adapter structure.
 * @lun_info: Pointer to device data structure to free.
 *
 * This routine frees the previously allocated device data structure passed.
 *
 **/
void
lpfc_delete_device_data(struct lpfc_hba *phba,
                        struct lpfc_device_data *lun_info)
{

        if (unlikely(!phba) || !lun_info  ||
            !(phba->cfg_fof))
                return;

        /* Unlink from the luns list (if linked) before returning to pool. */
        if (!list_empty(&lun_info->listentry))
                list_del(&lun_info->listentry);
        mempool_free(lun_info, phba->device_data_mem_pool);
        return;
}
5697
/**
 * __lpfc_get_device_data - returns the device data for the specified lun
 * @phba: Pointer to host bus adapter structure.
 * @list: Pointer to the list to search.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun on target
 *
 * This routine searches the list passed for the specified lun's device data.
 * This function does not hold locks, it is the responsibility of the caller
 * to ensure the proper lock is held before calling the function.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_device_data - Success
 **/
struct lpfc_device_data*
__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
                       struct lpfc_name *vport_wwpn,
                       struct lpfc_name *target_wwpn, uint64_t lun)
{

        struct lpfc_device_data *lun_info;

        if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
            !phba->cfg_fof)
                return NULL;

        /* Check to see if the lun is already enabled for OAS. */

        list_for_each_entry(lun_info, list, listentry) {
                /* Match requires the full (vport wwpn, target wwpn, lun)
                 * triple to be identical.
                 */
                if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
                            sizeof(struct lpfc_name)) == 0) &&
                    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
                            sizeof(struct lpfc_name)) == 0) &&
                    (lun_info->device_id.lun == lun))
                        return lun_info;
        }

        return NULL;
}
5739
/**
 * lpfc_find_next_oas_lun - searches for the next oas lun
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @starting_lun: Pointer to the lun to start searching for
 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
 * @found_target_wwpn: Pointer to the found lun's target wwpn information
 * @found_lun: Pointer to the found lun.
 * @found_lun_status: Pointer to status of the found lun.
 * @found_lun_pri: Pointer to the priority of the found lun.
 *
 * This routine searches the luns list for the specified lun
 * or the first lun for the vport/target. If the vport wwpn contains
 * a zero value then a specific vport is not specified. In this case
 * any vport which contains the lun will be considered a match. If the
 * target wwpn contains a zero value then a specific target is not specified.
 * In this case any target which contains the lun will be considered a
 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status
 * are returned. The function will also return the next lun if available.
 * If the next lun is not found, starting_lun parameter will be set to
 * NO_MORE_OAS_LUN.
 *
 * NOTE(review): @found_lun_pri is dereferenced on a match but is not
 * NULL-checked alongside the other output pointers — confirm all callers
 * pass a valid pointer.
 *
 * Return codes:
 *   true - A matching OAS-enabled lun was found; output parameters valid.
 *   false - No match found or invalid arguments.
 **/
bool
lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
                       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
                       struct lpfc_name *found_vport_wwpn,
                       struct lpfc_name *found_target_wwpn,
                       uint64_t *found_lun,
                       uint32_t *found_lun_status,
                       uint32_t *found_lun_pri)
{

        unsigned long flags;
        struct lpfc_device_data *lun_info;
        struct lpfc_device_id *device_id;
        uint64_t lun;
        bool found = false;

        if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
            !starting_lun || !found_vport_wwpn ||
            !found_target_wwpn || !found_lun || !found_lun_status ||
            (*starting_lun == NO_MORE_OAS_LUN) ||
            !phba->cfg_fof)
                return false;

        lun = *starting_lun;
        *found_lun = NO_MORE_OAS_LUN;
        *starting_lun = NO_MORE_OAS_LUN;

        /* Search for lun or the lun closest in value */

        spin_lock_irqsave(&phba->devicelock, flags);
        list_for_each_entry(lun_info, &phba->luns, listentry) {
                /* Entry matches if vport/target wwpn are wildcards (zero)
                 * or equal, and OAS is enabled on the lun.
                 */
                if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
                     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
                            sizeof(struct lpfc_name)) == 0)) &&
                    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
                     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
                            sizeof(struct lpfc_name)) == 0)) &&
                    (lun_info->oas_enabled)) {
                        device_id = &lun_info->device_id;
                        if ((!found) &&
                            ((lun == FIND_FIRST_OAS_LUN) ||
                             (device_id->lun == lun))) {
                                /* First phase: report the requested lun. */
                                *found_lun = device_id->lun;
                                memcpy(found_vport_wwpn,
                                       &device_id->vport_wwpn,
                                       sizeof(struct lpfc_name));
                                memcpy(found_target_wwpn,
                                       &device_id->target_wwpn,
                                       sizeof(struct lpfc_name));
                                if (lun_info->available)
                                        *found_lun_status =
                                                OAS_LUN_STATUS_EXISTS;
                                else
                                        *found_lun_status = 0;
                                *found_lun_pri = lun_info->priority;
                                if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
                                        memset(vport_wwpn, 0x0,
                                               sizeof(struct lpfc_name));
                                if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
                                        memset(target_wwpn, 0x0,
                                               sizeof(struct lpfc_name));
                                found = true;
                        } else if (found) {
                                /* Second phase: report the next matching lun
                                 * so the caller can resume iteration.
                                 */
                                *starting_lun = device_id->lun;
                                memcpy(vport_wwpn, &device_id->vport_wwpn,
                                       sizeof(struct lpfc_name));
                                memcpy(target_wwpn, &device_id->target_wwpn,
                                       sizeof(struct lpfc_name));
                                break;
                        }
                }
        }
        spin_unlock_irqrestore(&phba->devicelock, flags);
        return found;
}
5841
5842/**
5843 * lpfc_enable_oas_lun - enables a lun for OAS operations
5844 * @pha: Pointer to host bus adapter structure.
5845 * @vport_wwpn: Pointer to vport's wwpn information
5846 * @target_wwpn: Pointer to target's wwpn information
5847 * @lun: Lun
5848 *
5849 * This routine enables a lun for oas operations. The routines does so by
5850 * doing the following :
5851 *
5852 * 1) Checks to see if the device data for the lun has been created.
5853 * 2) If found, sets the OAS enabled flag if not set and returns.
5854 * 3) Otherwise, creates a device data structure.
5855 * 4) If successfully created, indicates the device data is for an OAS lun,
5856 * indicates the lun is not available and add to the list of luns.
5857 *
5858 * Return codes:
5859 * false - Error
5860 * true - Success
5861 **/
5862bool
5863lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
c92c841c 5864 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
1ba981fd
JS
5865{
5866
5867 struct lpfc_device_data *lun_info;
5868 unsigned long flags;
5869
5870 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
f38fa0bb 5871 !phba->cfg_fof)
1ba981fd
JS
5872 return false;
5873
5874 spin_lock_irqsave(&phba->devicelock, flags);
5875
5876 /* Check to see if the device data for the lun has been created */
5877 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
5878 target_wwpn, lun);
5879 if (lun_info) {
5880 if (!lun_info->oas_enabled)
5881 lun_info->oas_enabled = true;
b5749fe1 5882 lun_info->priority = pri;
1ba981fd
JS
5883 spin_unlock_irqrestore(&phba->devicelock, flags);
5884 return true;
5885 }
5886
5887 /* Create an lun info structure and add to list of luns */
5888 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
b5749fe1 5889 pri, false);
1ba981fd
JS
5890 if (lun_info) {
5891 lun_info->oas_enabled = true;
c92c841c 5892 lun_info->priority = pri;
1ba981fd
JS
5893 lun_info->available = false;
5894 list_add_tail(&lun_info->listentry, &phba->luns);
5895 spin_unlock_irqrestore(&phba->devicelock, flags);
5896 return true;
5897 }
5898 spin_unlock_irqrestore(&phba->devicelock, flags);
5899 return false;
5900}
5901
/**
 * lpfc_disable_oas_lun - disables a lun for OAS operations
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: Pointer to vport's wwpn information
 * @target_wwpn: Pointer to target's wwpn information
 * @lun: Lun
 * @pri: Priority to record for the lun.
 *
 * This routine disables a lun for oas operations. The routines does so by
 * doing the following :
 *
 *   1) Checks to see if the device data for the lun is created.
 *   2) If present, clears the flag indicating this lun is for OAS.
 *   3) If the lun is not available by the system, the device data is
 *   freed.
 *
 * Return codes:
 *   false - Error
 *   true - Success
 **/
bool
lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
                     struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
{

        struct lpfc_device_data *lun_info;
        unsigned long flags;

        if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
            !phba->cfg_fof)
                return false;

        spin_lock_irqsave(&phba->devicelock, flags);

        /* Check to see if the lun is available. */
        lun_info = __lpfc_get_device_data(phba,
                                          &phba->luns, vport_wwpn,
                                          target_wwpn, lun);
        if (lun_info) {
                lun_info->oas_enabled = false;
                lun_info->priority = pri;
                /* Only free the entry if the system no longer has the lun. */
                if (!lun_info->available)
                        lpfc_delete_device_data(phba, lun_info);
                spin_unlock_irqrestore(&phba->devicelock, flags);
                return true;
        }

        spin_unlock_irqrestore(&phba->devicelock, flags);
        return false;
}
92d7f7b0 5951
895427bd
JS
/* Stub queuecommand entry point used by the NVME-only host template:
 * never accept SCSI commands, ask the midlayer to retry later.
 */
static int
lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
        return SCSI_MLQUEUE_HOST_BUSY;
}
5957
/* Stub SCSI error-handler entry point used by the NVME-only host template:
 * every error-handling request is reported as failed.
 */
static int
lpfc_no_handler(struct scsi_cmnd *cmnd)
{
        return FAILED;
}
5963
/* Stub slave_alloc/slave_configure entry point used by the NVME-only host
 * template: reject device instantiation.
 */
static int
lpfc_no_slave(struct scsi_device *sdev)
{
        return -ENODEV;
}
5969
/* Host template for NVME-only operation: all SCSI entry points are wired
 * to the lpfc_no_* stubs so no SCSI I/O or device setup is accepted.
 */
struct scsi_host_template lpfc_template_nvme = {
        .module                 = THIS_MODULE,
        .name                   = LPFC_DRIVER_NAME,
        .proc_name              = LPFC_DRIVER_NAME,
        .info                   = lpfc_info,
        .queuecommand           = lpfc_no_command,
        .eh_abort_handler       = lpfc_no_handler,
        .eh_device_reset_handler = lpfc_no_handler,
        .eh_target_reset_handler = lpfc_no_handler,
        .eh_bus_reset_handler   = lpfc_no_handler,
        .eh_host_reset_handler  = lpfc_no_handler,
        .slave_alloc            = lpfc_no_slave,
        .slave_configure        = lpfc_no_slave,
        .scan_finished          = lpfc_scan_finished,
        .this_id                = -1,
        .sg_tablesize           = 1,
        .cmd_per_lun            = 1,
        .use_clustering         = ENABLE_CLUSTERING,
        .shost_attrs            = lpfc_hba_attrs,
        .max_sectors            = 0xFFFF,
        .vendor_id              = LPFC_NL_VENDOR_ID,
        .track_queue_depth      = 0,
};
5993
/* Host template without an eh_host_reset_handler ("no_hr"); otherwise
 * identical to lpfc_template.
 */
struct scsi_host_template lpfc_template_no_hr = {
        .module                 = THIS_MODULE,
        .name                   = LPFC_DRIVER_NAME,
        .proc_name              = LPFC_DRIVER_NAME,
        .info                   = lpfc_info,
        .queuecommand           = lpfc_queuecommand,
        .eh_timed_out           = fc_eh_timed_out,
        .eh_abort_handler       = lpfc_abort_handler,
        .eh_device_reset_handler = lpfc_device_reset_handler,
        .eh_target_reset_handler = lpfc_target_reset_handler,
        .eh_bus_reset_handler   = lpfc_bus_reset_handler,
        .slave_alloc            = lpfc_slave_alloc,
        .slave_configure        = lpfc_slave_configure,
        .slave_destroy          = lpfc_slave_destroy,
        .scan_finished          = lpfc_scan_finished,
        .this_id                = -1,
        .sg_tablesize           = LPFC_DEFAULT_SG_SEG_CNT,
        .cmd_per_lun            = LPFC_CMD_PER_LUN,
        .use_clustering         = ENABLE_CLUSTERING,
        .shost_attrs            = lpfc_hba_attrs,
        .max_sectors            = 0xFFFF,
        .vendor_id              = LPFC_NL_VENDOR_ID,
        .change_queue_depth     = scsi_change_queue_depth,
        .track_queue_depth      = 1,
};
6019
/* Default host template for physical ports: full SCSI entry points
 * including host reset.
 */
struct scsi_host_template lpfc_template = {
        .module                 = THIS_MODULE,
        .name                   = LPFC_DRIVER_NAME,
        .proc_name              = LPFC_DRIVER_NAME,
        .info                   = lpfc_info,
        .queuecommand           = lpfc_queuecommand,
        .eh_timed_out           = fc_eh_timed_out,
        .eh_abort_handler       = lpfc_abort_handler,
        .eh_device_reset_handler = lpfc_device_reset_handler,
        .eh_target_reset_handler = lpfc_target_reset_handler,
        .eh_bus_reset_handler   = lpfc_bus_reset_handler,
        .eh_host_reset_handler  = lpfc_host_reset_handler,
        .slave_alloc            = lpfc_slave_alloc,
        .slave_configure        = lpfc_slave_configure,
        .slave_destroy          = lpfc_slave_destroy,
        .scan_finished          = lpfc_scan_finished,
        .this_id                = -1,
        .sg_tablesize           = LPFC_DEFAULT_SG_SEG_CNT,
        .cmd_per_lun            = LPFC_CMD_PER_LUN,
        .use_clustering         = ENABLE_CLUSTERING,
        .shost_attrs            = lpfc_hba_attrs,
        .max_sectors            = 0xFFFF,
        .vendor_id              = LPFC_NL_VENDOR_ID,
        .change_queue_depth     = scsi_change_queue_depth,
        .track_queue_depth      = 1,
};
3de2a653
JS
6046
/* Host template for NPIV vports: no bus/host reset handlers and vport
 * attributes instead of hba attributes.
 */
struct scsi_host_template lpfc_vport_template = {
        .module                 = THIS_MODULE,
        .name                   = LPFC_DRIVER_NAME,
        .proc_name              = LPFC_DRIVER_NAME,
        .info                   = lpfc_info,
        .queuecommand           = lpfc_queuecommand,
        .eh_timed_out           = fc_eh_timed_out,
        .eh_abort_handler       = lpfc_abort_handler,
        .eh_device_reset_handler = lpfc_device_reset_handler,
        .eh_target_reset_handler = lpfc_target_reset_handler,
        .slave_alloc            = lpfc_slave_alloc,
        .slave_configure        = lpfc_slave_configure,
        .slave_destroy          = lpfc_slave_destroy,
        .scan_finished          = lpfc_scan_finished,
        .this_id                = -1,
        .sg_tablesize           = LPFC_DEFAULT_SG_SEG_CNT,
        .cmd_per_lun            = LPFC_CMD_PER_LUN,
        .use_clustering         = ENABLE_CLUSTERING,
        .shost_attrs            = lpfc_vport_attrs,
        .max_sectors            = 0xFFFF,
        .change_queue_depth     = scsi_change_queue_depth,
        .track_queue_depth      = 1,
};