drivers/scsi/qla2xxx/qla_bsg.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2012 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12
13 /* BSG support for ELS/CT pass through */
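/*
 * qla2x00_bsg_job_done - generic completion callback for BSG pass-through
 * requests.  Installed as sp->done by the submission paths below; it stores
 * the result in the bsg reply, signals completion of the bsg job and then
 * releases the srb through sp->free.
 */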
14 void
15 qla2x00_bsg_job_done(void *data, void *ptr, int res)
16 {
17         srb_t *sp = (srb_t *)ptr;
18         struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
19         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
20
21         bsg_job->reply->result = res;
22         bsg_job->job_done(bsg_job);
23         sp->free(vha, sp);
24 }
25
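/*
 * qla2x00_bsg_sp_free - release the resources held by a BSG srb: unmap the
 * request and reply scatter/gather lists, free the dummy fcport that was
 * allocated for host-based CT/ELS commands, and return the srb to the
 * driver's srb mempool.
 */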
26 void
27 qla2x00_bsg_sp_free(void *data, void *ptr)
28 {
29         srb_t *sp = (srb_t *)ptr;
30         struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
31         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
32         struct qla_hw_data *ha = vha->hw;
33
34         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
35             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
36
37         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
38             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
39
40         if (sp->type == SRB_CT_CMD ||
41             sp->type == SRB_ELS_CMD_HST)
42                 kfree(sp->fcport);
43         mempool_free(sp, vha->hw->srb_mempool);
44 }
45
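/*
 * qla24xx_fcp_prio_cfg_valid - sanity check FCP priority configuration data.
 * Returns 0 when the flash holds no data (first word all ones) or the "HQOS"
 * signature is missing.  When flag is 1 the entries are also scanned and at
 * least one must carry FCP_PRIO_ENTRY_TAG_VALID; otherwise 1 is returned
 * after the header check alone.
 */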
46 int
47 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
48         struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
49 {
50         int i, ret, num_valid;
51         uint8_t *bcode;
52         struct qla_fcp_prio_entry *pri_entry;
53         uint32_t *bcode_val_ptr, bcode_val;
54
55         ret = 1;
56         num_valid = 0;
57         bcode = (uint8_t *)pri_cfg;
58         bcode_val_ptr = (uint32_t *)pri_cfg;
59         bcode_val = (uint32_t)(*bcode_val_ptr);
60
61         if (bcode_val == 0xFFFFFFFF) {
62                 /* No FCP Priority config data in flash */
63                 ql_dbg(ql_dbg_user, vha, 0x7051,
64                     "No FCP Priority config data.\n");
65                 return 0;
66         }
67
68         if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
69                         bcode[3] != 'S') {
70                 /* Invalid FCP priority data header*/
71                 ql_dbg(ql_dbg_user, vha, 0x7052,
72                     "Invalid FCP Priority data header. bcode=0x%x.\n",
73                     bcode_val);
74                 return 0;
75         }
76         if (flag != 1)
77                 return ret;
78
79         pri_entry = &pri_cfg->entry[0];
80         for (i = 0; i < pri_cfg->num_entries; i++) {
81                 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
82                         num_valid++;
83                 pri_entry++;
84         }
85
86         if (num_valid == 0) {
87                 /* No valid FCP priority data entries */
88                 ql_dbg(ql_dbg_user, vha, 0x7053,
89                     "No valid FCP Priority data entries.\n");
90                 ret = 0;
91         } else {
92                 /* FCP priority data is valid */
93                 ql_dbg(ql_dbg_user, vha, 0x7054,
94                     "Valid FCP priority data. num entries = %d.\n",
95                     num_valid);
96         }
97
98         return ret;
99 }
100
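/*
 * qla24xx_proc_fcp_prio_cfg_cmd - vendor command handler for FCP priority
 * configuration on ISP24xx/25xx/82xx adapters.  Sub-commands: disable and
 * enable toggle the feature, get-config copies ha->fcp_prio_cfg into the
 * reply payload, and set-config copies the request payload into a
 * vmalloc'ed buffer, validates it and updates all ports via
 * qla24xx_update_all_fcp_prio().
 */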
101 static int
102 qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
103 {
104         struct Scsi_Host *host = bsg_job->shost;
105         scsi_qla_host_t *vha = shost_priv(host);
106         struct qla_hw_data *ha = vha->hw;
107         int ret = 0;
108         uint32_t len;
109         uint32_t oper;
110
111         if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
112                 ret = -EINVAL;
113                 goto exit_fcp_prio_cfg;
114         }
115
116         /* Get the sub command */
117         oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
118
119         /* Only set-config is allowed when config memory is not yet allocated */
120         if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
121                 ret = -EINVAL;
122                 goto exit_fcp_prio_cfg;
123         }
124         switch (oper) {
125         case QLFC_FCP_PRIO_DISABLE:
126                 if (ha->flags.fcp_prio_enabled) {
127                         ha->flags.fcp_prio_enabled = 0;
128                         ha->fcp_prio_cfg->attributes &=
129                                 ~FCP_PRIO_ATTR_ENABLE;
130                         qla24xx_update_all_fcp_prio(vha);
131                         bsg_job->reply->result = DID_OK;
132                 } else {
133                         ret = -EINVAL;
134                         bsg_job->reply->result = (DID_ERROR << 16);
135                         goto exit_fcp_prio_cfg;
136                 }
137                 break;
138
139         case QLFC_FCP_PRIO_ENABLE:
140                 if (!ha->flags.fcp_prio_enabled) {
141                         if (ha->fcp_prio_cfg) {
142                                 ha->flags.fcp_prio_enabled = 1;
143                                 ha->fcp_prio_cfg->attributes |=
144                                     FCP_PRIO_ATTR_ENABLE;
145                                 qla24xx_update_all_fcp_prio(vha);
146                                 bsg_job->reply->result = DID_OK;
147                         } else {
148                                 ret = -EINVAL;
149                                 bsg_job->reply->result = (DID_ERROR << 16);
150                                 goto exit_fcp_prio_cfg;
151                         }
152                 }
153                 break;
154
155         case QLFC_FCP_PRIO_GET_CONFIG:
156                 len = bsg_job->reply_payload.payload_len;
157                 if (!len || len > FCP_PRIO_CFG_SIZE) {
158                         ret = -EINVAL;
159                         bsg_job->reply->result = (DID_ERROR << 16);
160                         goto exit_fcp_prio_cfg;
161                 }
162
163                 bsg_job->reply->result = DID_OK;
164                 bsg_job->reply->reply_payload_rcv_len =
165                         sg_copy_from_buffer(
166                         bsg_job->reply_payload.sg_list,
167                         bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
168                         len);
169
170                 break;
171
172         case QLFC_FCP_PRIO_SET_CONFIG:
173                 len = bsg_job->request_payload.payload_len;
174                 if (!len || len > FCP_PRIO_CFG_SIZE) {
175                         bsg_job->reply->result = (DID_ERROR << 16);
176                         ret = -EINVAL;
177                         goto exit_fcp_prio_cfg;
178                 }
179
180                 if (!ha->fcp_prio_cfg) {
181                         ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
182                         if (!ha->fcp_prio_cfg) {
183                                 ql_log(ql_log_warn, vha, 0x7050,
184                                     "Unable to allocate memory for fcp prio "
185                                     "config data (%x).\n", FCP_PRIO_CFG_SIZE);
186                                 bsg_job->reply->result = (DID_ERROR << 16);
187                                 ret = -ENOMEM;
188                                 goto exit_fcp_prio_cfg;
189                         }
190                 }
191
192                 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
193                 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
194                 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
195                         FCP_PRIO_CFG_SIZE);
196
197                 /* validate fcp priority data */
198
199                 if (!qla24xx_fcp_prio_cfg_valid(vha,
200                     (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
201                         bsg_job->reply->result = (DID_ERROR << 16);
202                         ret = -EINVAL;
203                         /* If the buffer is invalid, the
204                          * fcp_prio_cfg data is of no use
205                          */
206                         vfree(ha->fcp_prio_cfg);
207                         ha->fcp_prio_cfg = NULL;
208                         goto exit_fcp_prio_cfg;
209                 }
210
211                 ha->flags.fcp_prio_enabled = 0;
212                 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
213                         ha->flags.fcp_prio_enabled = 1;
214                 qla24xx_update_all_fcp_prio(vha);
215                 bsg_job->reply->result = DID_OK;
216                 break;
217         default:
218                 ret = -EINVAL;
219                 break;
220         }
221 exit_fcp_prio_cfg:
222         bsg_job->job_done(bsg_job);
223         return ret;
224 }
225
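/*
 * qla2x00_process_els - ELS pass-through.  For FC_BSG_RPT_ELS the target
 * rport is (re)logged in if necessary; for host-based ELS a dummy fcport is
 * allocated and filled from the request's port_id.  The payload s/g lists
 * are DMA-mapped, an SRB_ELS_CMD_* srb is built and handed to
 * qla2x00_start_sp(); completion runs through qla2x00_bsg_job_done().
 */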
226 static int
227 qla2x00_process_els(struct fc_bsg_job *bsg_job)
228 {
229         struct fc_rport *rport;
230         fc_port_t *fcport = NULL;
231         struct Scsi_Host *host;
232         scsi_qla_host_t *vha;
233         struct qla_hw_data *ha;
234         srb_t *sp;
235         const char *type;
236         int req_sg_cnt, rsp_sg_cnt;
237         int rval =  (DRIVER_ERROR << 16);
238         uint16_t nextlid = 0;
239
240         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
241                 rport = bsg_job->rport;
242                 fcport = *(fc_port_t **) rport->dd_data;
243                 host = rport_to_shost(rport);
244                 vha = shost_priv(host);
245                 ha = vha->hw;
246                 type = "FC_BSG_RPT_ELS";
247         } else {
248                 host = bsg_job->shost;
249                 vha = shost_priv(host);
250                 ha = vha->hw;
251                 type = "FC_BSG_HST_ELS_NOLOGIN";
252         }
253
254         /* pass through is supported only for ISP 4Gb or higher */
255         if (!IS_FWI2_CAPABLE(ha)) {
256                 ql_dbg(ql_dbg_user, vha, 0x7001,
257                     "ELS passthru not supported for ISP23xx based adapters.\n");
258                 rval = -EPERM;
259                 goto done;
260         }
261
262         /*  Multiple SG's are not supported for ELS requests */
263         if (bsg_job->request_payload.sg_cnt > 1 ||
264                 bsg_job->reply_payload.sg_cnt > 1) {
265                 ql_dbg(ql_dbg_user, vha, 0x7002,
266                     "Multiple SG's are not supported for ELS requests, "
267                     "request_sg_cnt=%x reply_sg_cnt=%x.\n",
268                     bsg_job->request_payload.sg_cnt,
269                     bsg_job->reply_payload.sg_cnt);
270                 rval = -EPERM;
271                 goto done;
272         }
273
274         /* ELS request for rport */
275         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
276                 /* make sure the rport is logged in;
277                  * if not, perform a fabric login
278                  */
279                 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
280                         ql_dbg(ql_dbg_user, vha, 0x7003,
281                             "Failed to login port %06X for ELS passthru.\n",
282                             fcport->d_id.b24);
283                         rval = -EIO;
284                         goto done;
285                 }
286         } else {
287                 /* Allocate a dummy fcport structure, since the functions
288                  * preparing the IOCB and mailbox command retrieve port-
289                  * specific information from the fcport structure. For
290                  * host-based ELS commands there is no pre-existing fcport.
291                  */
292                 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
293                 if (!fcport) {
294                         rval = -ENOMEM;
295                         goto done;
296                 }
297
298                 /* Initialize all required  fields of fcport */
299                 fcport->vha = vha;
300                 fcport->d_id.b.al_pa =
301                         bsg_job->request->rqst_data.h_els.port_id[0];
302                 fcport->d_id.b.area =
303                         bsg_job->request->rqst_data.h_els.port_id[1];
304                 fcport->d_id.b.domain =
305                         bsg_job->request->rqst_data.h_els.port_id[2];
306                 fcport->loop_id =
307                         (fcport->d_id.b.al_pa == 0xFD) ?
308                         NPH_FABRIC_CONTROLLER : NPH_F_PORT;
309         }
310
311         if (!vha->flags.online) {
312                 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
313                 rval = -EIO;
314                 goto done;
315         }
316
317         req_sg_cnt =
318                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
319                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
320         if (!req_sg_cnt) {
321                 rval = -ENOMEM;
322                 goto done_free_fcport;
323         }
324
325         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
326                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
327         if (!rsp_sg_cnt) {
328                 rval = -ENOMEM;
329                 goto done_free_fcport;
330         }
331
332         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
333                 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
334                 ql_log(ql_log_warn, vha, 0x7008,
335                     "dma mapping resulted in different sg counts, "
336                     "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
337                     "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
338                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
339                 rval = -EAGAIN;
340                 goto done_unmap_sg;
341         }
342
343         /* Alloc SRB structure */
344         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
345         if (!sp) {
346                 rval = -ENOMEM;
347                 goto done_unmap_sg;
348         }
349
350         sp->type =
351                 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
352                 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
353         sp->name =
354                 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
355                 "bsg_els_rpt" : "bsg_els_hst");
356         sp->u.bsg_job = bsg_job;
357         sp->free = qla2x00_bsg_sp_free;
358         sp->done = qla2x00_bsg_job_done;
359
360         ql_dbg(ql_dbg_user, vha, 0x700a,
361             "bsg rqst type: %s els type: %x - loop-id=%x "
362             "portid=%02x%02x%02x.\n", type,
363             bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
364             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
365
366         rval = qla2x00_start_sp(sp);
367         if (rval != QLA_SUCCESS) {
368                 ql_log(ql_log_warn, vha, 0x700e,
369                     "qla2x00_start_sp failed = %d\n", rval);
370                 mempool_free(sp, ha->srb_mempool);
371                 rval = -EIO;
372                 goto done_unmap_sg;
373         }
374         return rval;
375
376 done_unmap_sg:
377         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
378                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
379         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
380                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
381         goto done_free_fcport;
382
383 done_free_fcport:
384         if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
385                 kfree(fcport);
386 done:
387         return rval;
388 }
389
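/*
 * qla24xx_calc_ct_iocbs - number of IOCBs needed for a CT pass-through with
 * the given count of data segment descriptors: the command IOCB carries the
 * first two DSDs and each continuation IOCB carries up to five more, i.e.
 * 1 + roundup((dsds - 2) / 5).  For example, dsds = 8 needs 3 IOCBs
 * (2 + 5 + 1 segments).
 */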
390 inline uint16_t
391 qla24xx_calc_ct_iocbs(uint16_t dsds)
392 {
393         uint16_t iocbs;
394
395         iocbs = 1;
396         if (dsds > 2) {
397                 iocbs += (dsds - 2) / 5;
398                 if ((dsds - 2) % 5)
399                         iocbs++;
400         }
401         return iocbs;
402 }
403
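/*
 * qla2x00_process_ct - CT pass-through.  The payload s/g lists are
 * DMA-mapped, the destination loop id is derived from preamble_word1
 * (0xFC -> SNS, 0xFA -> management server), a dummy fcport is allocated and
 * filled from the request's port_id, and an SRB_CT_CMD srb is started via
 * qla2x00_start_sp().
 */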
404 static int
405 qla2x00_process_ct(struct fc_bsg_job *bsg_job)
406 {
407         srb_t *sp;
408         struct Scsi_Host *host = bsg_job->shost;
409         scsi_qla_host_t *vha = shost_priv(host);
410         struct qla_hw_data *ha = vha->hw;
411         int rval = (DRIVER_ERROR << 16);
412         int req_sg_cnt, rsp_sg_cnt;
413         uint16_t loop_id;
414         struct fc_port *fcport;
415         char  *type = "FC_BSG_HST_CT";
416
417         req_sg_cnt =
418                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
419                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
420         if (!req_sg_cnt) {
421                 ql_log(ql_log_warn, vha, 0x700f,
422                     "dma_map_sg returned %d for request.\n", req_sg_cnt);
423                 rval = -ENOMEM;
424                 goto done;
425         }
426
427         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
428                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
429         if (!rsp_sg_cnt) {
430                 ql_log(ql_log_warn, vha, 0x7010,
431                     "dma_map_sg returned %d for reply.\n", rsp_sg_cnt);
432                 rval = -ENOMEM;
433                 goto done;
434         }
435
436         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
437             (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
438                 ql_log(ql_log_warn, vha, 0x7011,
439                     "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
440                     "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
441                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
442                 rval = -EAGAIN;
443                 goto done_unmap_sg;
444         }
445
446         if (!vha->flags.online) {
447                 ql_log(ql_log_warn, vha, 0x7012,
448                     "Host is not online.\n");
449                 rval = -EIO;
450                 goto done_unmap_sg;
451         }
452
453         loop_id =
454                 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
455                         >> 24;
456         switch (loop_id) {
457         case 0xFC:
458                 loop_id = NPH_SNS;      /* loop_id stays in CPU byte order */
459                 break;
460         case 0xFA:
461                 loop_id = vha->mgmt_svr_loop_id;
462                 break;
463         default:
464                 ql_dbg(ql_dbg_user, vha, 0x7013,
465                     "Unknown loop id: %x.\n", loop_id);
466                 rval = -EINVAL;
467                 goto done_unmap_sg;
468         }
469
470         /* Allocate a dummy fcport structure, since the functions preparing
471          * the IOCB and mailbox command retrieve port-specific information
472          * from the fcport structure. For host-based commands there is no
473          * pre-existing fcport structure.
474          */
475         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
476         if (!fcport) {
477                 ql_log(ql_log_warn, vha, 0x7014,
478                     "Failed to allocate fcport.\n");
479                 rval = -ENOMEM;
480                 goto done_unmap_sg;
481         }
482
483         /* Initialize all required  fields of fcport */
484         fcport->vha = vha;
485         fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
486         fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
487         fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
488         fcport->loop_id = loop_id;
489
490         /* Alloc SRB structure */
491         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
492         if (!sp) {
493                 ql_log(ql_log_warn, vha, 0x7015,
494                     "qla2x00_get_sp failed.\n");
495                 rval = -ENOMEM;
496                 goto done_free_fcport;
497         }
498
499         sp->type = SRB_CT_CMD;
500         sp->name = "bsg_ct";
501         sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
502         sp->u.bsg_job = bsg_job;
503         sp->free = qla2x00_bsg_sp_free;
504         sp->done = qla2x00_bsg_job_done;
505
506         ql_dbg(ql_dbg_user, vha, 0x7016,
507             "bsg rqst type: %s ct type: %x - "
508             "loop-id=%x portid=%02x%02x%02x.\n", type,
509             (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
510             fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
511             fcport->d_id.b.al_pa);
512
513         rval = qla2x00_start_sp(sp);
514         if (rval != QLA_SUCCESS) {
515                 ql_log(ql_log_warn, vha, 0x7017,
516                     "qla2x00_start_sp failed=%d.\n", rval);
517                 mempool_free(sp, ha->srb_mempool);
518                 rval = -EIO;
519                 goto done_free_fcport;
520         }
521         return rval;
522
523 done_free_fcport:
524         kfree(fcport);
525 done_unmap_sg:
526         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
527                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
528         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
529                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
530 done:
531         return rval;
532 }
533 /*
534  * Set the port configuration to enable the internal or external loopback
535  * depending on the loopback mode.
536  */
537 static inline int
538 qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
539         uint16_t *new_config, uint16_t mode)
540 {
541         int ret = 0;
542         int rval = 0;
543         struct qla_hw_data *ha = vha->hw;
544
545         if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
546                 goto done_set_internal;
547
548         if (mode == INTERNAL_LOOPBACK)
549                 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
550         else if (mode == EXTERNAL_LOOPBACK)
551                 new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
552         ql_dbg(ql_dbg_user, vha, 0x70be,
553              "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));
554
555         memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
556
557         ha->notify_dcbx_comp = 1;
558         ret = qla81xx_set_port_config(vha, new_config);
559         if (ret != QLA_SUCCESS) {
560                 ql_log(ql_log_warn, vha, 0x7021,
561                     "set port config failed.\n");
562                 ha->notify_dcbx_comp = 0;
563                 rval = -EINVAL;
564                 goto done_set_internal;
565         }
566
567         /* Wait for DCBX complete event */
568         if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
569                 ql_dbg(ql_dbg_user, vha, 0x7022,
570                     "State change notification not received.\n");
571                 rval = -EINVAL;
572         } else {
573                 if (ha->flags.idc_compl_status) {
574                         ql_dbg(ql_dbg_user, vha, 0x70c3,
575                             "Bad status in IDC Completion AEN\n");
576                         rval = -EINVAL;
577                         ha->flags.idc_compl_status = 0;
578                 } else
579                         ql_dbg(ql_dbg_user, vha, 0x7023,
580                             "State change received.\n");
581         }
582
583         ha->notify_dcbx_comp = 0;
584
585 done_set_internal:
586         return rval;
587 }
588
589 /* Disable loopback mode */
590 static inline int
591 qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
592     int wait)
593 {
594         int ret = 0;
595         int rval = 0;
596         uint16_t new_config[4];
597         struct qla_hw_data *ha = vha->hw;
598
599         if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
600                 goto done_reset_internal;
601
602         memset(new_config, 0, sizeof(new_config));
603         if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
604             ENABLE_INTERNAL_LOOPBACK ||
605             (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
606             ENABLE_EXTERNAL_LOOPBACK) {
607                 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
608                 ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
609                     (new_config[0] & INTERNAL_LOOPBACK_MASK));
610                 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
611
612                 ha->notify_dcbx_comp = wait;
613                 ret = qla81xx_set_port_config(vha, new_config);
614                 if (ret != QLA_SUCCESS) {
615                         ql_log(ql_log_warn, vha, 0x7025,
616                             "Set port config failed.\n");
617                         ha->notify_dcbx_comp = 0;
618                         rval = -EINVAL;
619                         goto done_reset_internal;
620                 }
621
622                 /* Wait for DCBX complete event */
623                 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
624                         (20 * HZ))) {
625                         ql_dbg(ql_dbg_user, vha, 0x7026,
626                             "State change notification not received.\n");
627                         ha->notify_dcbx_comp = 0;
628                         rval = -EINVAL;
629                         goto done_reset_internal;
630                 } else
631                         ql_dbg(ql_dbg_user, vha, 0x7027,
632                             "State change received.\n");
633
634                 ha->notify_dcbx_comp = 0;
635         }
636 done_reset_internal:
637         return rval;
638 }
639
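/*
 * qla2x00_process_loopback - diagnostic echo/loopback vendor command.  The
 * request payload is copied into a coherent DMA buffer; depending on
 * topology, adapter type and the requested options either an ECHO test or a
 * loopback test is run, with the port configuration switched into (and back
 * out of) loopback mode on ISP81xx/8031.  The mailbox status registers are
 * copied after the fc_bsg_reply in the job's sense area for the caller.
 */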
640 static int
641 qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
642 {
643         struct Scsi_Host *host = bsg_job->shost;
644         scsi_qla_host_t *vha = shost_priv(host);
645         struct qla_hw_data *ha = vha->hw;
646         int rval;
647         uint8_t command_sent;
648         char *type;
649         struct msg_echo_lb elreq;
650         uint16_t response[MAILBOX_REGISTER_COUNT];
651         uint16_t config[4], new_config[4];
652         uint8_t *fw_sts_ptr;
653         uint8_t *req_data = NULL;
654         dma_addr_t req_data_dma;
655         uint32_t req_data_len;
656         uint8_t *rsp_data = NULL;
657         dma_addr_t rsp_data_dma;
658         uint32_t rsp_data_len;
659
660         if (!vha->flags.online) {
661                 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
662                 return -EIO;
663         }
664
665         elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
666                 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
667                 DMA_TO_DEVICE);
668
669         if (!elreq.req_sg_cnt) {
670                 ql_log(ql_log_warn, vha, 0x701a,
671                     "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
672                 return -ENOMEM;
673         }
674
675         elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
676                 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
677                 DMA_FROM_DEVICE);
678
679         if (!elreq.rsp_sg_cnt) {
680                 ql_log(ql_log_warn, vha, 0x701b,
681                     "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
682                 rval = -ENOMEM;
683                 goto done_unmap_req_sg;
684         }
685
686         if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
687                 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
688                 ql_log(ql_log_warn, vha, 0x701c,
689                     "dma mapping resulted in different sg counts, "
690                     "request_sg_cnt: %x dma_request_sg_cnt: %x "
691                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
692                     bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
693                     bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
694                 rval = -EAGAIN;
695                 goto done_unmap_sg;
696         }
697         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
698         req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
699                 &req_data_dma, GFP_KERNEL);
700         if (!req_data) {
701                 ql_log(ql_log_warn, vha, 0x701d,
702                     "dma alloc failed for req_data.\n");
703                 rval = -ENOMEM;
704                 goto done_unmap_sg;
705         }
706
707         rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
708                 &rsp_data_dma, GFP_KERNEL);
709         if (!rsp_data) {
710                 ql_log(ql_log_warn, vha, 0x7004,
711                     "dma alloc failed for rsp_data.\n");
712                 rval = -ENOMEM;
713                 goto done_free_dma_req;
714         }
715
716         /* Copy the request buffer in req_data now */
717         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
718                 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
719
720         elreq.send_dma = req_data_dma;
721         elreq.rcv_dma = rsp_data_dma;
722         elreq.transfer_size = req_data_len;
723
724         elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
725
726         if (atomic_read(&vha->loop_state) == LOOP_READY &&
727             (ha->current_topology == ISP_CFG_F ||
728             ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
729             le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
730             && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
731                 elreq.options == EXTERNAL_LOOPBACK) {
732                 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
733                 ql_dbg(ql_dbg_user, vha, 0x701e,
734                     "BSG request type: %s.\n", type);
735                 command_sent = INT_DEF_LB_ECHO_CMD;
736                 rval = qla2x00_echo_test(vha, &elreq, response);
737         } else {
738                 if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
739                         memset(config, 0, sizeof(config));
740                         memset(new_config, 0, sizeof(new_config));
741                         if (qla81xx_get_port_config(vha, config)) {
742                                 ql_log(ql_log_warn, vha, 0x701f,
743                                     "Get port config failed.\n");
744                                 bsg_job->reply->result = (DID_ERROR << 16);
745                                 rval = -EPERM;
746                                 goto done_free_dma_req;
747                         }
748
749                         ql_dbg(ql_dbg_user, vha, 0x70c0,
750                             "elreq.options=%04x\n", elreq.options);
751
752                         if (elreq.options == EXTERNAL_LOOPBACK) {
753                                 if (IS_QLA8031(ha))
754                                         rval = qla81xx_set_loopback_mode(vha,
755                                             config, new_config, elreq.options);
756                                 else
757                                         rval = qla81xx_reset_loopback_mode(vha,
758                                             config, 1);
759                         else
760                                 rval = qla81xx_set_loopback_mode(vha, config,
761                                     new_config, elreq.options);
762
763                         if (rval) {
764                                 bsg_job->reply->result = (DID_ERROR << 16);
765                                 rval = -EPERM;
766                                 goto done_free_dma_req;
767                         }
768
769                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
770                         ql_dbg(ql_dbg_user, vha, 0x7028,
771                             "BSG request type: %s.\n", type);
772
773                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
774                         rval = qla2x00_loopback_test(vha, &elreq, response);
775
776                         if (new_config[0]) {
777                                 /* Revert back to original port config
778                                  * Also clear internal loopback
779                                  */
780                                 qla81xx_reset_loopback_mode(vha,
781                                     new_config, 0);
782                         }
783
784                         if (response[0] == MBS_COMMAND_ERROR &&
785                                         response[1] == MBS_LB_RESET) {
786                                 ql_log(ql_log_warn, vha, 0x7029,
787                                     "MBX command error, Aborting ISP.\n");
788                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
789                                 qla2xxx_wake_dpc(vha);
790                                 qla2x00_wait_for_chip_reset(vha);
791                                 /* Also reset the MPI */
792                                 if (qla81xx_restart_mpi_firmware(vha) !=
793                                     QLA_SUCCESS) {
794                                         ql_log(ql_log_warn, vha, 0x702a,
795                                             "MPI reset failed.\n");
796                                 }
797
798                                 bsg_job->reply->result = (DID_ERROR << 16);
799                                 rval = -EIO;
800                                 goto done_free_dma_req;
801                         }
802                 } else {
803                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
804                         ql_dbg(ql_dbg_user, vha, 0x702b,
805                             "BSG request type: %s.\n", type);
806                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
807                         rval = qla2x00_loopback_test(vha, &elreq, response);
808                 }
809         }
810
811         if (rval) {
812                 ql_log(ql_log_warn, vha, 0x702c,
813                     "Vendor request %s failed.\n", type);
814
815                 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
816                     sizeof(struct fc_bsg_reply);
817
818                 memcpy(fw_sts_ptr, response, sizeof(response));
819                 fw_sts_ptr += sizeof(response);
820                 *fw_sts_ptr = command_sent;
821                 rval = 0;
822                 bsg_job->reply->result = (DID_ERROR << 16);
823         } else {
824                 ql_dbg(ql_dbg_user, vha, 0x702d,
825                     "Vendor request %s completed.\n", type);
826
827                 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
828                         sizeof(response) + sizeof(uint8_t);
829                 bsg_job->reply->reply_payload_rcv_len =
830                         bsg_job->reply_payload.payload_len;
831                 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
832                         sizeof(struct fc_bsg_reply);
833                 memcpy(fw_sts_ptr, response, sizeof(response));
834                 fw_sts_ptr += sizeof(response);
835                 *fw_sts_ptr = command_sent;
836                 bsg_job->reply->result = DID_OK;
837                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
838                         bsg_job->reply_payload.sg_cnt, rsp_data,
839                         rsp_data_len);
840         }
841         bsg_job->job_done(bsg_job);
842
843         dma_free_coherent(&ha->pdev->dev, rsp_data_len,
844                 rsp_data, rsp_data_dma);
845 done_free_dma_req:
846         dma_free_coherent(&ha->pdev->dev, req_data_len,
847                 req_data, req_data_dma);
848 done_unmap_sg:
849         dma_unmap_sg(&ha->pdev->dev,
850             bsg_job->reply_payload.sg_list,
851             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
852 done_unmap_req_sg:
853         dma_unmap_sg(&ha->pdev->dev,
854             bsg_job->request_payload.sg_list,
855             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
856         return rval;
857 }
858
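/*
 * qla84xx_reset - reset the ISP84xx chip on behalf of a vendor command;
 * vendor_cmd[1] == A84_ISSUE_RESET_DIAG_FW selects a reset into the
 * diagnostic firmware.
 */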
859 static int
860 qla84xx_reset(struct fc_bsg_job *bsg_job)
861 {
862         struct Scsi_Host *host = bsg_job->shost;
863         scsi_qla_host_t *vha = shost_priv(host);
864         struct qla_hw_data *ha = vha->hw;
865         int rval = 0;
866         uint32_t flag;
867
868         if (!IS_QLA84XX(ha)) {
869                 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
870                 return -EINVAL;
871         }
872
873         flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
874
875         rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
876
877         if (rval) {
878                 ql_log(ql_log_warn, vha, 0x7030,
879                     "Vendor request 84xx reset failed.\n");
880                 rval = 0;
881                 bsg_job->reply->result = (DID_ERROR << 16);
882
883         } else {
884                 ql_dbg(ql_dbg_user, vha, 0x7031,
885                     "Vendor request 84xx reset completed.\n");
886                 bsg_job->reply->result = DID_OK;
887         }
888
889         bsg_job->job_done(bsg_job);
890         return rval;
891 }
892
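/*
 * qla84xx_updatefw - update the ISP84xx firmware.  The image from the
 * request payload is copied into a coherent DMA buffer and a VERIFY_CHIP
 * IOCB (VCO_FORCE_UPDATE | VCO_END_OF_DATA, plus VCO_DIAG_FW for a
 * diagnostic image) is issued with a 120 second timeout.
 */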
893 static int
894 qla84xx_updatefw(struct fc_bsg_job *bsg_job)
895 {
896         struct Scsi_Host *host = bsg_job->shost;
897         scsi_qla_host_t *vha = shost_priv(host);
898         struct qla_hw_data *ha = vha->hw;
899         struct verify_chip_entry_84xx *mn = NULL;
900         dma_addr_t mn_dma, fw_dma;
901         void *fw_buf = NULL;
902         int rval = 0;
903         uint32_t sg_cnt;
904         uint32_t data_len;
905         uint16_t options;
906         uint32_t flag;
907         uint32_t fw_ver;
908
909         if (!IS_QLA84XX(ha)) {
910                 ql_dbg(ql_dbg_user, vha, 0x7032,
911                     "Not 84xx, exiting.\n");
912                 return -EINVAL;
913         }
914
915         sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
916                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
917         if (!sg_cnt) {
918                 ql_log(ql_log_warn, vha, 0x7033,
919                     "dma_map_sg returned %d for request.\n", sg_cnt);
920                 return -ENOMEM;
921         }
922
923         if (sg_cnt != bsg_job->request_payload.sg_cnt) {
924                 ql_log(ql_log_warn, vha, 0x7034,
925                     "DMA mapping resulted in different sg counts, "
926                     "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
927                     bsg_job->request_payload.sg_cnt, sg_cnt);
928                 rval = -EAGAIN;
929                 goto done_unmap_sg;
930         }
931
932         data_len = bsg_job->request_payload.payload_len;
933         fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
934                 &fw_dma, GFP_KERNEL);
935         if (!fw_buf) {
936                 ql_log(ql_log_warn, vha, 0x7035,
937                     "DMA alloc failed for fw_buf.\n");
938                 rval = -ENOMEM;
939                 goto done_unmap_sg;
940         }
941
942         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
943                 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
944
945         mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
946         if (!mn) {
947                 ql_log(ql_log_warn, vha, 0x7036,
948                     "DMA alloc failed for fw buffer.\n");
949                 rval = -ENOMEM;
950                 goto done_free_fw_buf;
951         }
952
953         flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
954         fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
955
956         memset(mn, 0, sizeof(*mn));
957         mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
958         mn->entry_count = 1;
959
960         options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
961         if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
962                 options |= VCO_DIAG_FW;
963
964         mn->options = cpu_to_le16(options);
965         mn->fw_ver =  cpu_to_le32(fw_ver);
966         mn->fw_size =  cpu_to_le32(data_len);
967         mn->fw_seq_size =  cpu_to_le32(data_len);
968         mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
969         mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
970         mn->dseg_length = cpu_to_le32(data_len);
971         mn->data_seg_cnt = cpu_to_le16(1);
972
973         rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
974
975         if (rval) {
976                 ql_log(ql_log_warn, vha, 0x7037,
977                     "Vendor request 84xx updatefw failed.\n");
978
979                 rval = 0;
980                 bsg_job->reply->result = (DID_ERROR << 16);
981         } else {
982                 ql_dbg(ql_dbg_user, vha, 0x7038,
983                     "Vendor request 84xx updatefw completed.\n");
984
985                 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
986                 bsg_job->reply->result = DID_OK;
987         }
988
989         bsg_job->job_done(bsg_job);
990         dma_pool_free(ha->s_dma_pool, mn, mn_dma);
991
992 done_free_fw_buf:
993         dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
994
995 done_unmap_sg:
996         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
997                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
998
999         return rval;
1000 }
1001
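/*
 * qla84xx_mgmt_cmd - ISP84xx management pass-through built on an
 * ACCESS_CHIP IOCB.  READ_MEM and GET_INFO return data through the reply
 * payload, WRITE_MEM sends the request payload to the chip, and
 * CHNG_CONFIG carries only parameters.
 */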
1002 static int
1003 qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1004 {
1005         struct Scsi_Host *host = bsg_job->shost;
1006         scsi_qla_host_t *vha = shost_priv(host);
1007         struct qla_hw_data *ha = vha->hw;
1008         struct access_chip_84xx *mn = NULL;
1009         dma_addr_t mn_dma, mgmt_dma;
1010         void *mgmt_b = NULL;
1011         int rval = 0;
1012         struct qla_bsg_a84_mgmt *ql84_mgmt;
1013         uint32_t sg_cnt;
1014         uint32_t data_len = 0;
1015         uint32_t dma_direction = DMA_NONE;
1016
1017         if (!IS_QLA84XX(ha)) {
1018                 ql_log(ql_log_warn, vha, 0x703a,
1019                     "Not 84xx, exiting.\n");
1020                 return -EINVAL;
1021         }
1022
1023         ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
1024                 sizeof(struct fc_bsg_request));
1025         if (!ql84_mgmt) {
1026                 ql_log(ql_log_warn, vha, 0x703b,
1027                     "MGMT header not provided, exiting.\n");
1028                 return -EINVAL;
1029         }
1030
1031         mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1032         if (!mn) {
1033                 ql_log(ql_log_warn, vha, 0x703c,
1034                     "DMA alloc failed for fw buffer.\n");
1035                 return -ENOMEM;
1036         }
1037
1038         memset(mn, 0, sizeof(struct access_chip_84xx));
1039         mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1040         mn->entry_count = 1;
1041
1042         switch (ql84_mgmt->mgmt.cmd) {
1043         case QLA84_MGMT_READ_MEM:
1044         case QLA84_MGMT_GET_INFO:
1045                 sg_cnt = dma_map_sg(&ha->pdev->dev,
1046                         bsg_job->reply_payload.sg_list,
1047                         bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1048                 if (!sg_cnt) {
1049                         ql_log(ql_log_warn, vha, 0x703d,
1050                             "dma_map_sg returned %d for reply.\n", sg_cnt);
1051                         rval = -ENOMEM;
1052                         goto exit_mgmt;
1053                 }
1054
1055                 dma_direction = DMA_FROM_DEVICE;
1056
1057                 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1058                         ql_log(ql_log_warn, vha, 0x703e,
1059                             "DMA mapping resulted in different sg counts, "
1060                             "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1061                             bsg_job->reply_payload.sg_cnt, sg_cnt);
1062                         rval = -EAGAIN;
1063                         goto done_unmap_sg;
1064                 }
1065
1066                 data_len = bsg_job->reply_payload.payload_len;
1067
1068                 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1069                     &mgmt_dma, GFP_KERNEL);
1070                 if (!mgmt_b) {
1071                         ql_log(ql_log_warn, vha, 0x703f,
1072                             "DMA alloc failed for mgmt_b.\n");
1073                         rval = -ENOMEM;
1074                         goto done_unmap_sg;
1075                 }
1076
1077                 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1078                         mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1079                         mn->parameter1 =
1080                                 cpu_to_le32(
1081                                 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1082
1083                 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1084                         mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1085                         mn->parameter1 =
1086                                 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1087
1088                         mn->parameter2 =
1089                                 cpu_to_le32(
1090                                 ql84_mgmt->mgmt.mgmtp.u.info.context);
1091                 }
1092                 break;
1093
1094         case QLA84_MGMT_WRITE_MEM:
1095                 sg_cnt = dma_map_sg(&ha->pdev->dev,
1096                         bsg_job->request_payload.sg_list,
1097                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1098
1099                 if (!sg_cnt) {
1100                         ql_log(ql_log_warn, vha, 0x7040,
1101                             "dma_map_sg returned %d.\n", sg_cnt);
1102                         rval = -ENOMEM;
1103                         goto exit_mgmt;
1104                 }
1105
1106                 dma_direction = DMA_TO_DEVICE;
1107
1108                 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1109                         ql_log(ql_log_warn, vha, 0x7041,
1110                             "DMA mapping resulted in different sg counts, "
1111                             "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1112                             bsg_job->request_payload.sg_cnt, sg_cnt);
1113                         rval = -EAGAIN;
1114                         goto done_unmap_sg;
1115                 }
1116
1117                 data_len = bsg_job->request_payload.payload_len;
1118                 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1119                         &mgmt_dma, GFP_KERNEL);
1120                 if (!mgmt_b) {
1121                         ql_log(ql_log_warn, vha, 0x7042,
1122                             "DMA alloc failed for mgmt_b.\n");
1123                         rval = -ENOMEM;
1124                         goto done_unmap_sg;
1125                 }
1126
1127                 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1128                         bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1129
1130                 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1131                 mn->parameter1 =
1132                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1133                 break;
1134
1135         case QLA84_MGMT_CHNG_CONFIG:
1136                 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1137                 mn->parameter1 =
1138                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1139
1140                 mn->parameter2 =
1141                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1142
1143                 mn->parameter3 =
1144                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
1145                 break;
1146
1147         default:
1148                 rval = -EIO;
1149                 goto exit_mgmt;
1150         }
1151
1152         if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1153                 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1154                 mn->dseg_count = cpu_to_le16(1);
1155                 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
1156                 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
1157                 mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
1158         }
1159
1160         rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1161
1162         if (rval) {
1163                 ql_log(ql_log_warn, vha, 0x7043,
1164                     "Vendor request 84xx mgmt failed.\n");
1165
1166                 rval = 0;
1167                 bsg_job->reply->result = (DID_ERROR << 16);
1168
1169         } else {
1170                 ql_dbg(ql_dbg_user, vha, 0x7044,
1171                     "Vendor request 84xx mgmt completed.\n");
1172
1173                 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1174                 bsg_job->reply->result = DID_OK;
1175
1176                 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1177                         (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1178                         bsg_job->reply->reply_payload_rcv_len =
1179                                 bsg_job->reply_payload.payload_len;
1180
1181                         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1182                                 bsg_job->reply_payload.sg_cnt, mgmt_b,
1183                                 data_len);
1184                 }
1185         }
1186
1187         bsg_job->job_done(bsg_job);
1188
1189 done_unmap_sg:
1190         if (mgmt_b)
1191                 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1192
1193         if (dma_direction == DMA_TO_DEVICE)
1194                 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1195                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1196         else if (dma_direction == DMA_FROM_DEVICE)
1197                 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1198                         bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1199
1200 exit_mgmt:
1201         dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1202
1203         return rval;
1204 }
1205
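/*
 * qla24xx_iidma - get or set the iIDMA speed of a target port.  The port is
 * looked up by WWPN among vha->vp_fcports and must be online and logged in;
 * for a get request the updated qla_port_param is copied into the reply
 * right after the fc_bsg_reply structure.
 */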
1206 static int
1207 qla24xx_iidma(struct fc_bsg_job *bsg_job)
1208 {
1209         struct Scsi_Host *host = bsg_job->shost;
1210         scsi_qla_host_t *vha = shost_priv(host);
1211         int rval = 0;
1212         struct qla_port_param *port_param = NULL;
1213         fc_port_t *fcport = NULL;
1214         uint16_t mb[MAILBOX_REGISTER_COUNT];
1215         uint8_t *rsp_ptr = NULL;
1216
1217         if (!IS_IIDMA_CAPABLE(vha->hw)) {
1218                 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1219                 return -EINVAL;
1220         }
1221
1222         port_param = (struct qla_port_param *)((char *)bsg_job->request +
1223                 sizeof(struct fc_bsg_request));
1224         if (!port_param) {
1225                 ql_log(ql_log_warn, vha, 0x7047,
1226                     "port_param header not provided.\n");
1227                 return -EINVAL;
1228         }
1229
1230         if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1231                 ql_log(ql_log_warn, vha, 0x7048,
1232                     "Invalid destination type.\n");
1233                 return -EINVAL;
1234         }
1235
1236         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1237                 if (fcport->port_type != FCT_TARGET)
1238                         continue;
1239
1240                 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1241                         fcport->port_name, sizeof(fcport->port_name)))
1242                         continue;
1243                 break;
1244         }
1245
1246         if (&fcport->list == &vha->vp_fcports) {
1247                 ql_log(ql_log_warn, vha, 0x7049,
1248                     "Failed to find port.\n");
1249                 return -EINVAL;
1250         }
1251
1252         if (atomic_read(&fcport->state) != FCS_ONLINE) {
1253                 ql_log(ql_log_warn, vha, 0x704a,
1254                     "Port is not online.\n");
1255                 return -EINVAL;
1256         }
1257
1258         if (fcport->flags & FCF_LOGIN_NEEDED) {
1259                 ql_log(ql_log_warn, vha, 0x704b,
1260                     "Remote port not logged in, flags = 0x%x.\n", fcport->flags);
1261                 return -EINVAL;
1262         }
1263
1264         if (port_param->mode)
1265                 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1266                         port_param->speed, mb);
1267         else
1268                 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1269                         &port_param->speed, mb);
1270
1271         if (rval) {
1272                 ql_log(ql_log_warn, vha, 0x704c,
1273                     "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
1274                     "%04x %x %04x %04x.\n", fcport->port_name[0],
1275                     fcport->port_name[1], fcport->port_name[2],
1276                     fcport->port_name[3], fcport->port_name[4],
1277                     fcport->port_name[5], fcport->port_name[6],
1278                     fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
1279                 rval = 0;
1280                 bsg_job->reply->result = (DID_ERROR << 16);
1281
1282         } else {
1283                 if (!port_param->mode) {
1284                         bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1285                                 sizeof(struct qla_port_param);
1286
1287                         rsp_ptr = ((uint8_t *)bsg_job->reply) +
1288                                 sizeof(struct fc_bsg_reply);
1289
1290                         memcpy(rsp_ptr, port_param,
1291                                 sizeof(struct qla_port_param));
1292                 }
1293
1294                 bsg_job->reply->result = DID_OK;
1295         }
1296
1297         bsg_job->job_done(bsg_job);
1298         return rval;
1299 }
1300
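/*
 * qla2x00_optrom_setup - common setup for option-ROM read/update requests:
 * validate the requested flash offset and the current optrom_state, clamp
 * the region size to the option-ROM size, allocate a staging buffer and
 * move optrom_state to QLA_SREADING or QLA_SWRITING.
 */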
1301 static int
1302 qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1303         uint8_t is_update)
1304 {
1305         uint32_t start = 0;
1306         int valid = 0;
1307         struct qla_hw_data *ha = vha->hw;
1308
1309         if (unlikely(pci_channel_offline(ha->pdev)))
1310                 return -EINVAL;
1311
1312         start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1313         if (start > ha->optrom_size) {
1314                 ql_log(ql_log_warn, vha, 0x7055,
1315                     "start %d > optrom_size %d.\n", start, ha->optrom_size);
1316                 return -EINVAL;
1317         }
1318
1319         if (ha->optrom_state != QLA_SWAITING) {
1320                 ql_log(ql_log_info, vha, 0x7056,
1321                     "optrom_state %d.\n", ha->optrom_state);
1322                 return -EBUSY;
1323         }
1324
1325         ha->optrom_region_start = start;
1326         ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1327         if (is_update) {
1328                 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1329                         valid = 1;
1330                 else if (start == (ha->flt_region_boot * 4) ||
1331                     start == (ha->flt_region_fw * 4))
1332                         valid = 1;
1333                 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1334                     IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
1335                         valid = 1;
1336                 if (!valid) {
1337                         ql_log(ql_log_warn, vha, 0x7058,
1338                             "Invalid start region 0x%x/0x%x.\n", start,
1339                             bsg_job->request_payload.payload_len);
1340                         return -EINVAL;
1341                 }
1342
1343                 ha->optrom_region_size = start +
1344                     bsg_job->request_payload.payload_len > ha->optrom_size ?
1345                     ha->optrom_size - start :
1346                     bsg_job->request_payload.payload_len;
1347                 ha->optrom_state = QLA_SWRITING;
1348         } else {
1349                 ha->optrom_region_size = start +
1350                     bsg_job->reply_payload.payload_len > ha->optrom_size ?
1351                     ha->optrom_size - start :
1352                     bsg_job->reply_payload.payload_len;
1353                 ha->optrom_state = QLA_SREADING;
1354         }
1355
1356         ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1357         if (!ha->optrom_buffer) {
1358                 ql_log(ql_log_warn, vha, 0x7059,
1359                     "Unable to allocate memory for optrom operation "
1360                     "(%x).\n", ha->optrom_region_size);
1361
1362                 ha->optrom_state = QLA_SWAITING;
1363                 return -ENOMEM;
1364         }
1365
1366         memset(ha->optrom_buffer, 0, ha->optrom_region_size);
1367         return 0;
1368 }
1369
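/*
 * BSG vendor command: read a region of the option ROM (flash) into a
 * temporary buffer and return it through the reply payload.
 */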
1370 static int
1371 qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1372 {
1373         struct Scsi_Host *host = bsg_job->shost;
1374         scsi_qla_host_t *vha = shost_priv(host);
1375         struct qla_hw_data *ha = vha->hw;
1376         int rval = 0;
1377
1378         if (ha->flags.nic_core_reset_hdlr_active)
1379                 return -EBUSY;
1380
1381         rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1382         if (rval)
1383                 return rval;
1384
1385         ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1386             ha->optrom_region_start, ha->optrom_region_size);
1387
1388         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1389             bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1390             ha->optrom_region_size);
1391
1392         bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
1393         bsg_job->reply->result = DID_OK;
1394         vfree(ha->optrom_buffer);
1395         ha->optrom_buffer = NULL;
1396         ha->optrom_state = QLA_SWAITING;
1397         bsg_job->job_done(bsg_job);
1398         return rval;
1399 }
1400
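/*
 * BSG vendor command: copy the request payload into a temporary buffer and
 * write it to the selected option ROM (flash) region.
 */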
1401 static int
1402 qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1403 {
1404         struct Scsi_Host *host = bsg_job->shost;
1405         scsi_qla_host_t *vha = shost_priv(host);
1406         struct qla_hw_data *ha = vha->hw;
1407         int rval = 0;
1408
1409         rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1410         if (rval)
1411                 return rval;
1412
1414         /* Set isp82xx_no_md_cap to skip minidump capture during the flash update */
1414         ha->flags.isp82xx_no_md_cap = 1;
1415
1416         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1417             bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1418             ha->optrom_region_size);
1419
1420         ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1421             ha->optrom_region_start, ha->optrom_region_size);
1422
1423         bsg_job->reply->result = DID_OK;
1424         vfree(ha->optrom_buffer);
1425         ha->optrom_buffer = NULL;
1426         ha->optrom_state = QLA_SWAITING;
1427         bsg_job->job_done(bsg_job);
1428         return rval;
1429 }
1430
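/*
 * BSG vendor command: walk the list of image version entries supplied by the
 * caller and write each one to its FRU field with qla2x00_write_sfp().
 */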
1431 static int
1432 qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
1433 {
1434         struct Scsi_Host *host = bsg_job->shost;
1435         scsi_qla_host_t *vha = shost_priv(host);
1436         struct qla_hw_data *ha = vha->hw;
1437         int rval = 0;
1438         uint8_t bsg[DMA_POOL_SIZE];
1439         struct qla_image_version_list *list = (void *)bsg;
1440         struct qla_image_version *image;
1441         uint32_t count;
1442         dma_addr_t sfp_dma;
1443         void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1444         if (!sfp) {
1445                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1446                     EXT_STATUS_NO_MEMORY;
1447                 goto done;
1448         }
1449
1450         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1451             bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1452
1453         image = list->version;
1454         count = list->count;
1455         while (count--) {
1456                 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1457                 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1458                     image->field_address.device, image->field_address.offset,
1459                     sizeof(image->field_info), image->field_address.option);
1460                 if (rval) {
1461                         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1462                             EXT_STATUS_MAILBOX;
1463                         goto dealloc;
1464                 }
1465                 image++;
1466         }
1467
1468         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1469
1470 dealloc:
1471         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1472
1473 done:
1474         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1475         bsg_job->reply->result = DID_OK << 16;
1476         bsg_job->job_done(bsg_job);
1477
1478         return 0;
1479 }
1480
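/*
 * BSG vendor command: read a single FRU status register with
 * qla2x00_read_sfp() and return it in the reply payload.
 */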
1481 static int
1482 qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
1483 {
1484         struct Scsi_Host *host = bsg_job->shost;
1485         scsi_qla_host_t *vha = shost_priv(host);
1486         struct qla_hw_data *ha = vha->hw;
1487         int rval = 0;
1488         uint8_t bsg[DMA_POOL_SIZE];
1489         struct qla_status_reg *sr = (void *)bsg;
1490         dma_addr_t sfp_dma;
1491         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1492         if (!sfp) {
1493                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1494                     EXT_STATUS_NO_MEMORY;
1495                 goto done;
1496         }
1497
1498         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1499             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1500
1501         rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1502             sr->field_address.device, sr->field_address.offset,
1503             sizeof(sr->status_reg), sr->field_address.option);
1504         sr->status_reg = *sfp;
1505
1506         if (rval) {
1507                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1508                     EXT_STATUS_MAILBOX;
1509                 goto dealloc;
1510         }
1511
1512         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1513             bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1514
1515         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1516
1517 dealloc:
1518         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1519
1520 done:
1521         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1522         bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
1523         bsg_job->reply->result = DID_OK << 16;
1524         bsg_job->job_done(bsg_job);
1525
1526         return 0;
1527 }
1528
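/*
 * BSG vendor command: write a single FRU status register with
 * qla2x00_write_sfp().
 */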
1529 static int
1530 qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
1531 {
1532         struct Scsi_Host *host = bsg_job->shost;
1533         scsi_qla_host_t *vha = shost_priv(host);
1534         struct qla_hw_data *ha = vha->hw;
1535         int rval = 0;
1536         uint8_t bsg[DMA_POOL_SIZE];
1537         struct qla_status_reg *sr = (void *)bsg;
1538         dma_addr_t sfp_dma;
1539         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1540         if (!sfp) {
1541                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1542                     EXT_STATUS_NO_MEMORY;
1543                 goto done;
1544         }
1545
1546         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1547             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1548
1549         *sfp = sr->status_reg;
1550         rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1551             sr->field_address.device, sr->field_address.offset,
1552             sizeof(sr->status_reg), sr->field_address.option);
1553
1554         if (rval) {
1555                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1556                     EXT_STATUS_MAILBOX;
1557                 goto dealloc;
1558         }
1559
1560         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1561
1562 dealloc:
1563         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1564
1565 done:
1566         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1567         bsg_job->reply->result = DID_OK << 16;
1568         bsg_job->job_done(bsg_job);
1569
1570         return 0;
1571 }
1572
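/*
 * BSG vendor command: write a caller-supplied buffer to an I2C device behind
 * the adapter using qla2x00_write_sfp().
 */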
1573 static int
1574 qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
1575 {
1576         struct Scsi_Host *host = bsg_job->shost;
1577         scsi_qla_host_t *vha = shost_priv(host);
1578         struct qla_hw_data *ha = vha->hw;
1579         int rval = 0;
1580         uint8_t bsg[DMA_POOL_SIZE];
1581         struct qla_i2c_access *i2c = (void *)bsg;
1582         dma_addr_t sfp_dma;
1583         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1584         if (!sfp) {
1585                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1586                     EXT_STATUS_NO_MEMORY;
1587                 goto done;
1588         }
1589
1590         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1591             bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1592
1593         memcpy(sfp, i2c->buffer, i2c->length);
1594         rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1595             i2c->device, i2c->offset, i2c->length, i2c->option);
1596
1597         if (rval) {
1598                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1599                     EXT_STATUS_MAILBOX;
1600                 goto dealloc;
1601         }
1602
1603         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1604
1605 dealloc:
1606         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1607
1608 done:
1609         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1610         bsg_job->reply->result = DID_OK << 16;
1611         bsg_job->job_done(bsg_job);
1612
1613         return 0;
1614 }
1615
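/*
 * BSG vendor command: read from an I2C device behind the adapter using
 * qla2x00_read_sfp() and return the data in the reply payload.
 */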
1616 static int
1617 qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
1618 {
1619         struct Scsi_Host *host = bsg_job->shost;
1620         scsi_qla_host_t *vha = shost_priv(host);
1621         struct qla_hw_data *ha = vha->hw;
1622         int rval = 0;
1623         uint8_t bsg[DMA_POOL_SIZE];
1624         struct qla_i2c_access *i2c = (void *)bsg;
1625         dma_addr_t sfp_dma;
1626         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1627         if (!sfp) {
1628                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1629                     EXT_STATUS_NO_MEMORY;
1630                 goto done;
1631         }
1632
1633         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1634             bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1635
1636         rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1637                 i2c->device, i2c->offset, i2c->length, i2c->option);
1638
1639         if (rval) {
1640                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1641                     EXT_STATUS_MAILBOX;
1642                 goto dealloc;
1643         }
1644
1645         memcpy(i2c->buffer, sfp, i2c->length);
1646         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1647             bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1648
1649         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1650
1651 dealloc:
1652         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1653
1654 done:
1655         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1656         bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
1657         bsg_job->reply->result = DID_OK << 16;
1658         bsg_job->job_done(bsg_job);
1659
1660         return 0;
1661 }
1662
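/*
 * BSG vendor command: issue a bidirectional pass-through IOCB
 * (QL_VND_DIAG_IO_CMD).  The adapter must be BIDI capable, online and
 * attached to a switch in P2P mode; a self-login is performed the first
 * time through, and the request completes from the interrupt handler.
 */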
1663 static int
1664 qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
1665 {
1666         struct Scsi_Host *host = bsg_job->shost;
1667         scsi_qla_host_t *vha = shost_priv(host);
1668         struct qla_hw_data *ha = vha->hw;
1669         uint16_t thread_id;
1670         uint32_t rval = EXT_STATUS_OK;
1671         uint16_t req_sg_cnt = 0;
1672         uint16_t rsp_sg_cnt = 0;
1673         uint16_t nextlid = 0;
1674         uint32_t tot_dsds;
1675         srb_t *sp = NULL;
1676         uint32_t req_data_len = 0;
1677         uint32_t rsp_data_len = 0;
1678
1679         /* Check the type of the adapter */
1680         if (!IS_BIDI_CAPABLE(ha)) {
1681                 ql_log(ql_log_warn, vha, 0x70a0,
1682                         "This adapter is not supported\n");
1683                 rval = EXT_STATUS_NOT_SUPPORTED;
1684                 goto done;
1685         }
1686
1687         if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1688                 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1689                 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1690                 rval =  EXT_STATUS_BUSY;
1691                 goto done;
1692         }
1693
1694         /* Check if host is online */
1695         if (!vha->flags.online) {
1696                 ql_log(ql_log_warn, vha, 0x70a1,
1697                         "Host is not online\n");
1698                 rval = EXT_STATUS_DEVICE_OFFLINE;
1699                 goto done;
1700         }
1701
1702         /* Check if cable is plugged in or not */
1703         if (vha->device_flags & DFLG_NO_CABLE) {
1704                 ql_log(ql_log_warn, vha, 0x70a2,
1705                         "Cable is unplugged...\n");
1706                 rval = EXT_STATUS_INVALID_CFG;
1707                 goto done;
1708         }
1709
1710         /* Check if the switch is connected or not */
1711         if (ha->current_topology != ISP_CFG_F) {
1712                 ql_log(ql_log_warn, vha, 0x70a3,
1713                         "Host is not connected to the switch\n");
1714                 rval = EXT_STATUS_INVALID_CFG;
1715                 goto done;
1716         }
1717
1718         /* Check if operating mode is P2P */
1719         if (ha->operating_mode != P2P) {
1720                 ql_log(ql_log_warn, vha, 0x70a4,
1721                     "Host operating mode is not P2P\n");
1722                 rval = EXT_STATUS_INVALID_CFG;
1723                 goto done;
1724         }
1725
1726         thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1727
1728         mutex_lock(&ha->selflogin_lock);
1729         if (vha->self_login_loop_id == 0) {
1730                 /* Initialize all required fields of fcport */
1731                 vha->bidir_fcport.vha = vha;
1732                 vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1733                 vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1734                 vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1735                 vha->bidir_fcport.loop_id = vha->loop_id;
1736
1737                 if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1738                         ql_log(ql_log_warn, vha, 0x70a7,
1739                             "Failed to login port %06X for bidirectional IOCB\n",
1740                             vha->bidir_fcport.d_id.b24);
1741                         mutex_unlock(&ha->selflogin_lock);
1742                         rval = EXT_STATUS_MAILBOX;
1743                         goto done;
1744                 }
1745                 vha->self_login_loop_id = nextlid - 1;
1746
1747         }
1748         mutex_unlock(&ha->selflogin_lock);
1749
1750         /* Assign the self login loop id to fcport */
1751         vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1752
1753         req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1754                 bsg_job->request_payload.sg_list,
1755                 bsg_job->request_payload.sg_cnt,
1756                 DMA_TO_DEVICE);
1757
1758         if (!req_sg_cnt) {
1759                 rval = EXT_STATUS_NO_MEMORY;
1760                 goto done;
1761         }
1762
1763         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1764                 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1765                 DMA_FROM_DEVICE);
1766
1767         if (!rsp_sg_cnt) {
1768                 rval = EXT_STATUS_NO_MEMORY;
1769                 goto done_unmap_req_sg;
1770         }
1771
1772         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
1773                 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1774                 ql_dbg(ql_dbg_user, vha, 0x70a9,
1775                     "Dma mapping resulted in different sg counts "
1776                     "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1777                     "%x dma_reply_sg_cnt: %x]\n",
1778                     bsg_job->request_payload.sg_cnt, req_sg_cnt,
1779                     bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1780                 rval = EXT_STATUS_NO_MEMORY;
1781                 goto done_unmap_sg;
1782         }
1783
1784         req_data_len = bsg_job->request_payload.payload_len;
1785         rsp_data_len = bsg_job->reply_payload.payload_len;
1786
1787         if (req_data_len != rsp_data_len) {
1788                 rval = EXT_STATUS_BUSY;
1789                 ql_log(ql_log_warn, vha, 0x70aa,
1790                     "req_data_len != rsp_data_len\n");
1791                 goto done_unmap_sg;
1792         }
1793
1794
1795         /* Alloc SRB structure */
1796         sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1797         if (!sp) {
1798                 ql_dbg(ql_dbg_user, vha, 0x70ac,
1799                     "Alloc SRB structure failed\n");
1800                 rval = EXT_STATUS_NO_MEMORY;
1801                 goto done_unmap_sg;
1802         }
1803
1804         /*Populate srb->ctx with bidir ctx*/
1805         sp->u.bsg_job = bsg_job;
1806         sp->free = qla2x00_bsg_sp_free;
1807         sp->type = SRB_BIDI_CMD;
1808         sp->done = qla2x00_bsg_job_done;
1809
1810         /* Add the read and write sg count */
1811         tot_dsds = rsp_sg_cnt + req_sg_cnt;
1812
1813         rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1814         if (rval != EXT_STATUS_OK)
1815                 goto done_free_srb;
1816         /* The bsg request will be completed in the interrupt handler */
1817         return rval;
1818
1819 done_free_srb:
1820         mempool_free(sp, ha->srb_mempool);
1821 done_unmap_sg:
1822         dma_unmap_sg(&ha->pdev->dev,
1823             bsg_job->reply_payload.sg_list,
1824             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1825 done_unmap_req_sg:
1826         dma_unmap_sg(&ha->pdev->dev,
1827             bsg_job->request_payload.sg_list,
1828             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1829 done:
1830
1831         /* Return an error vendor specific response
1832          * and complete the bsg request
1833          */
1834         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1835         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1836         bsg_job->reply->reply_payload_rcv_len = 0;
1837         bsg_job->reply->result = (DID_OK) << 16;
1838         bsg_job->job_done(bsg_job);
1839         /* Always return success, vendor rsp carries correct status */
1840         return 0;
1841 }
1842
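/* Dispatch FC_BSG_HST_VENDOR requests to the per-command handlers above. */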
1843 static int
1844 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1845 {
1846         switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
1847         case QL_VND_LOOPBACK:
1848                 return qla2x00_process_loopback(bsg_job);
1849
1850         case QL_VND_A84_RESET:
1851                 return qla84xx_reset(bsg_job);
1852
1853         case QL_VND_A84_UPDATE_FW:
1854                 return qla84xx_updatefw(bsg_job);
1855
1856         case QL_VND_A84_MGMT_CMD:
1857                 return qla84xx_mgmt_cmd(bsg_job);
1858
1859         case QL_VND_IIDMA:
1860                 return qla24xx_iidma(bsg_job);
1861
1862         case QL_VND_FCP_PRIO_CFG_CMD:
1863                 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
1864
1865         case QL_VND_READ_FLASH:
1866                 return qla2x00_read_optrom(bsg_job);
1867
1868         case QL_VND_UPDATE_FLASH:
1869                 return qla2x00_update_optrom(bsg_job);
1870
1871         case QL_VND_SET_FRU_VERSION:
1872                 return qla2x00_update_fru_versions(bsg_job);
1873
1874         case QL_VND_READ_FRU_STATUS:
1875                 return qla2x00_read_fru_status(bsg_job);
1876
1877         case QL_VND_WRITE_FRU_STATUS:
1878                 return qla2x00_write_fru_status(bsg_job);
1879
1880         case QL_VND_WRITE_I2C:
1881                 return qla2x00_write_i2c(bsg_job);
1882
1883         case QL_VND_READ_I2C:
1884                 return qla2x00_read_i2c(bsg_job);
1885
1886         case QL_VND_DIAG_IO_CMD:
1887                 return qla24xx_process_bidir_cmd(bsg_job);
1888
1889         default:
1890                 bsg_job->reply->result = (DID_ERROR << 16);
1891                 bsg_job->job_done(bsg_job);
1892                 return -ENOSYS;
1893         }
1894 }
1895
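/*
 * Entry point for all BSG requests from the FC transport: rejects work while
 * an ISP reset is active, then dispatches by message code.
 */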
1896 int
1897 qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1898 {
1899         int ret = -EINVAL;
1900         struct fc_rport *rport;
1901         fc_port_t *fcport = NULL;
1902         struct Scsi_Host *host;
1903         scsi_qla_host_t *vha;
1904
1905         /* In case no data is transferred. */
1906         bsg_job->reply->reply_payload_rcv_len = 0;
1907
1908         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1909                 rport = bsg_job->rport;
1910                 fcport = *(fc_port_t **) rport->dd_data;
1911                 host = rport_to_shost(rport);
1912                 vha = shost_priv(host);
1913         } else {
1914                 host = bsg_job->shost;
1915                 vha = shost_priv(host);
1916         }
1917
1918         if (qla2x00_reset_active(vha)) {
1919                 ql_dbg(ql_dbg_user, vha, 0x709f,
1920                     "BSG: ISP abort active/needed -- cmd=%d.\n",
1921                     bsg_job->request->msgcode);
1922                 bsg_job->reply->result = (DID_ERROR << 16);
1923                 bsg_job->job_done(bsg_job);
1924                 return -EBUSY;
1925         }
1926
1927         ql_dbg(ql_dbg_user, vha, 0x7000,
1928             "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
1929
1930         switch (bsg_job->request->msgcode) {
1931         case FC_BSG_RPT_ELS:
1932         case FC_BSG_HST_ELS_NOLOGIN:
1933                 ret = qla2x00_process_els(bsg_job);
1934                 break;
1935         case FC_BSG_HST_CT:
1936                 ret = qla2x00_process_ct(bsg_job);
1937                 break;
1938         case FC_BSG_HST_VENDOR:
1939                 ret = qla2x00_process_vendor_specific(bsg_job);
1940                 break;
1941         case FC_BSG_HST_ADD_RPORT:
1942         case FC_BSG_HST_DEL_RPORT:
1943         case FC_BSG_RPT_CT:
1944         default:
1945                 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
1946                 bsg_job->reply->result = ret;
1947                 break;
1948         }
1949         return ret;
1950 }
1951
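/*
 * BSG timeout handler: locate the outstanding srb that owns this job, ask
 * the firmware to abort it, and free the srb (plus the driver-allocated
 * fcport for host CT commands).
 */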
1952 int
1953 qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1954 {
1955         scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
1956         struct qla_hw_data *ha = vha->hw;
1957         srb_t *sp;
1958         int cnt, que;
1959         unsigned long flags;
1960         struct req_que *req;
1961
1962         /* find the bsg job from the active list of commands */
1963         spin_lock_irqsave(&ha->hardware_lock, flags);
1964         for (que = 0; que < ha->max_req_queues; que++) {
1965                 req = ha->req_q_map[que];
1966                 if (!req)
1967                         continue;
1968
1969                 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1970                         sp = req->outstanding_cmds[cnt];
1971                         if (sp) {
1972                                 if (((sp->type == SRB_CT_CMD) ||
1973                                         (sp->type == SRB_ELS_CMD_HST))
1974                                         && (sp->u.bsg_job == bsg_job)) {
1975                                         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1976                                         if (ha->isp_ops->abort_command(sp)) {
1977                                                 ql_log(ql_log_warn, vha, 0x7089,
1978                                                     "mbx abort_command "
1979                                                     "failed.\n");
1980                                                 bsg_job->req->errors =
1981                                                 bsg_job->reply->result = -EIO;
1982                                         } else {
1983                                                 ql_dbg(ql_dbg_user, vha, 0x708a,
1984                                                     "mbx abort_command "
1985                                                     "success.\n");
1986                                                 bsg_job->req->errors =
1987                                                 bsg_job->reply->result = 0;
1988                                         }
1989                                         spin_lock_irqsave(&ha->hardware_lock, flags);
1990                                         goto done;
1991                                 }
1992                         }
1993                 }
1994         }
1995         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1996         ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
1997         bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
1998         return 0;
1999
2000 done:
2001         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2002         if (bsg_job->request->msgcode == FC_BSG_HST_CT)
2003                 kfree(sp->fcport);
2004         mempool_free(sp, ha->srb_mempool);
2005         return 0;
2006 }