240b07b0098a47829c2b782861cefe528c72996a
[linux-2.6-block.git] / drivers / scsi / qla2xxx / qla_bsg.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <linux/bsg-lib.h>
13
14 /* BSG support for ELS/CT pass through */
15 void
16 qla2x00_bsg_job_done(void *ptr, int res)
17 {
18         srb_t *sp = ptr;
19         struct bsg_job *bsg_job = sp->u.bsg_job;
20         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
21
22         bsg_reply->result = res;
23         bsg_job_done(bsg_job, bsg_reply->result,
24                        bsg_reply->reply_payload_rcv_len);
25         sp->free(sp);
26 }
27
28 void
29 qla2x00_bsg_sp_free(void *ptr)
30 {
31         srb_t *sp = ptr;
32         struct qla_hw_data *ha = sp->vha->hw;
33         struct bsg_job *bsg_job = sp->u.bsg_job;
34         struct fc_bsg_request *bsg_request = bsg_job->request;
35         struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
36
37         if (sp->type == SRB_FXIOCB_BCMD) {
38                 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
39                     &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
40
41                 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
42                         dma_unmap_sg(&ha->pdev->dev,
43                             bsg_job->request_payload.sg_list,
44                             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
45
46                 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
47                         dma_unmap_sg(&ha->pdev->dev,
48                             bsg_job->reply_payload.sg_list,
49                             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
50         } else {
51                 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
52                     bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
53
54                 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
55                     bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
56         }
57
58         if (sp->type == SRB_CT_CMD ||
59             sp->type == SRB_FXIOCB_BCMD ||
60             sp->type == SRB_ELS_CMD_HST)
61                 kfree(sp->fcport);
62         qla2x00_rel_sp(sp);
63 }
64
65 int
66 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
67         struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
68 {
69         int i, ret, num_valid;
70         uint8_t *bcode;
71         struct qla_fcp_prio_entry *pri_entry;
72         uint32_t *bcode_val_ptr, bcode_val;
73
74         ret = 1;
75         num_valid = 0;
76         bcode = (uint8_t *)pri_cfg;
77         bcode_val_ptr = (uint32_t *)pri_cfg;
78         bcode_val = (uint32_t)(*bcode_val_ptr);
79
80         if (bcode_val == 0xFFFFFFFF) {
81                 /* No FCP Priority config data in flash */
82                 ql_dbg(ql_dbg_user, vha, 0x7051,
83                     "No FCP Priority config data.\n");
84                 return 0;
85         }
86
87         if (memcmp(bcode, "HQOS", 4)) {
88                 /* Invalid FCP priority data header*/
89                 ql_dbg(ql_dbg_user, vha, 0x7052,
90                     "Invalid FCP Priority data header. bcode=0x%x.\n",
91                     bcode_val);
92                 return 0;
93         }
94         if (flag != 1)
95                 return ret;
96
97         pri_entry = &pri_cfg->entry[0];
98         for (i = 0; i < pri_cfg->num_entries; i++) {
99                 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
100                         num_valid++;
101                 pri_entry++;
102         }
103
104         if (num_valid == 0) {
105                 /* No valid FCP priority data entries */
106                 ql_dbg(ql_dbg_user, vha, 0x7053,
107                     "No valid FCP Priority data entries.\n");
108                 ret = 0;
109         } else {
110                 /* FCP priority data is valid */
111                 ql_dbg(ql_dbg_user, vha, 0x7054,
112                     "Valid FCP priority data. num entries = %d.\n",
113                     num_valid);
114         }
115
116         return ret;
117 }
118
/*
 * Handle the FCP priority BSG vendor command.
 *
 * The sub-command is taken from vendor_cmd[1]: disable/enable the
 * feature, copy out the cached configuration, or accept and validate a
 * new configuration (allocating ha->fcp_prio_cfg on first use).
 *
 * Return: 0 on success (the bsg job is completed here), negative errno
 * on failure (the caller is expected to complete the job).
 */
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	/* FCP priority is only implemented for 24xx/25xx/P3P adapters. */
	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
				~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			/* Already disabled. */
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				/* No configuration loaded yet. */
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		/* Copy at most FCP_PRIO_CFG_SIZE bytes to the reply payload. */
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		/* Lazily allocate the cached configuration buffer. */
		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
			FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */

		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid,
			 * fcp_prio_cfg is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return ret;
}
247
248 static int
249 qla2x00_process_els(struct bsg_job *bsg_job)
250 {
251         struct fc_bsg_request *bsg_request = bsg_job->request;
252         struct fc_rport *rport;
253         fc_port_t *fcport = NULL;
254         struct Scsi_Host *host;
255         scsi_qla_host_t *vha;
256         struct qla_hw_data *ha;
257         srb_t *sp;
258         const char *type;
259         int req_sg_cnt, rsp_sg_cnt;
260         int rval =  (DRIVER_ERROR << 16);
261         uint16_t nextlid = 0;
262
263         if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
264                 rport = fc_bsg_to_rport(bsg_job);
265                 fcport = *(fc_port_t **) rport->dd_data;
266                 host = rport_to_shost(rport);
267                 vha = shost_priv(host);
268                 ha = vha->hw;
269                 type = "FC_BSG_RPT_ELS";
270         } else {
271                 host = fc_bsg_to_shost(bsg_job);
272                 vha = shost_priv(host);
273                 ha = vha->hw;
274                 type = "FC_BSG_HST_ELS_NOLOGIN";
275         }
276
277         if (!vha->flags.online) {
278                 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
279                 rval = -EIO;
280                 goto done;
281         }
282
283         /* pass through is supported only for ISP 4Gb or higher */
284         if (!IS_FWI2_CAPABLE(ha)) {
285                 ql_dbg(ql_dbg_user, vha, 0x7001,
286                     "ELS passthru not supported for ISP23xx based adapters.\n");
287                 rval = -EPERM;
288                 goto done;
289         }
290
291         /*  Multiple SG's are not supported for ELS requests */
292         if (bsg_job->request_payload.sg_cnt > 1 ||
293                 bsg_job->reply_payload.sg_cnt > 1) {
294                 ql_dbg(ql_dbg_user, vha, 0x7002,
295                     "Multiple SG's are not supported for ELS requests, "
296                     "request_sg_cnt=%x reply_sg_cnt=%x.\n",
297                     bsg_job->request_payload.sg_cnt,
298                     bsg_job->reply_payload.sg_cnt);
299                 rval = -EPERM;
300                 goto done;
301         }
302
303         /* ELS request for rport */
304         if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
305                 /* make sure the rport is logged in,
306                  * if not perform fabric login
307                  */
308                 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
309                         ql_dbg(ql_dbg_user, vha, 0x7003,
310                             "Failed to login port %06X for ELS passthru.\n",
311                             fcport->d_id.b24);
312                         rval = -EIO;
313                         goto done;
314                 }
315         } else {
316                 /* Allocate a dummy fcport structure, since functions
317                  * preparing the IOCB and mailbox command retrieves port
318                  * specific information from fcport structure. For Host based
319                  * ELS commands there will be no fcport structure allocated
320                  */
321                 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
322                 if (!fcport) {
323                         rval = -ENOMEM;
324                         goto done;
325                 }
326
327                 /* Initialize all required  fields of fcport */
328                 fcport->vha = vha;
329                 fcport->d_id.b.al_pa =
330                         bsg_request->rqst_data.h_els.port_id[0];
331                 fcport->d_id.b.area =
332                         bsg_request->rqst_data.h_els.port_id[1];
333                 fcport->d_id.b.domain =
334                         bsg_request->rqst_data.h_els.port_id[2];
335                 fcport->loop_id =
336                         (fcport->d_id.b.al_pa == 0xFD) ?
337                         NPH_FABRIC_CONTROLLER : NPH_F_PORT;
338         }
339
340         req_sg_cnt =
341                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
342                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
343         if (!req_sg_cnt) {
344                 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
345                     bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
346                 rval = -ENOMEM;
347                 goto done_free_fcport;
348         }
349
350         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
351                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
352         if (!rsp_sg_cnt) {
353                 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
354                     bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
355                 rval = -ENOMEM;
356                 goto done_free_fcport;
357         }
358
359         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
360                 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
361                 ql_log(ql_log_warn, vha, 0x7008,
362                     "dma mapping resulted in different sg counts, "
363                     "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
364                     "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
365                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
366                 rval = -EAGAIN;
367                 goto done_unmap_sg;
368         }
369
370         /* Alloc SRB structure */
371         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
372         if (!sp) {
373                 rval = -ENOMEM;
374                 goto done_unmap_sg;
375         }
376
377         sp->type =
378                 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
379                  SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
380         sp->name =
381                 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
382                  "bsg_els_rpt" : "bsg_els_hst");
383         sp->u.bsg_job = bsg_job;
384         sp->free = qla2x00_bsg_sp_free;
385         sp->done = qla2x00_bsg_job_done;
386
387         ql_dbg(ql_dbg_user, vha, 0x700a,
388             "bsg rqst type: %s els type: %x - loop-id=%x "
389             "portid=%-2x%02x%02x.\n", type,
390             bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
391             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
392
393         rval = qla2x00_start_sp(sp);
394         if (rval != QLA_SUCCESS) {
395                 ql_log(ql_log_warn, vha, 0x700e,
396                     "qla2x00_start_sp failed = %d\n", rval);
397                 qla2x00_rel_sp(sp);
398                 rval = -EIO;
399                 goto done_unmap_sg;
400         }
401         return rval;
402
403 done_unmap_sg:
404         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
405                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
406         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
407                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
408         goto done_free_fcport;
409
410 done_free_fcport:
411         if (bsg_request->msgcode == FC_BSG_RPT_ELS)
412                 kfree(fcport);
413 done:
414         return rval;
415 }
416
/*
 * Number of IOCBs needed to carry @dsds data segment descriptors:
 * the command-type IOCB holds 2 DSDs, each continuation IOCB 5 more.
 */
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t extra;

	if (dsds <= 2)
		return 1;

	/* ceil((dsds - 2) / 5) continuation IOCBs after the command IOCB. */
	extra = (uint16_t)((dsds - 2 + 4) / 5);
	return 1 + extra;
}
430
431 static int
432 qla2x00_process_ct(struct bsg_job *bsg_job)
433 {
434         srb_t *sp;
435         struct fc_bsg_request *bsg_request = bsg_job->request;
436         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
437         scsi_qla_host_t *vha = shost_priv(host);
438         struct qla_hw_data *ha = vha->hw;
439         int rval = (DRIVER_ERROR << 16);
440         int req_sg_cnt, rsp_sg_cnt;
441         uint16_t loop_id;
442         struct fc_port *fcport;
443         char  *type = "FC_BSG_HST_CT";
444
445         req_sg_cnt =
446                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
447                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
448         if (!req_sg_cnt) {
449                 ql_log(ql_log_warn, vha, 0x700f,
450                     "dma_map_sg return %d for request\n", req_sg_cnt);
451                 rval = -ENOMEM;
452                 goto done;
453         }
454
455         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
456                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
457         if (!rsp_sg_cnt) {
458                 ql_log(ql_log_warn, vha, 0x7010,
459                     "dma_map_sg return %d for reply\n", rsp_sg_cnt);
460                 rval = -ENOMEM;
461                 goto done;
462         }
463
464         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
465             (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
466                 ql_log(ql_log_warn, vha, 0x7011,
467                     "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
468                     "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
469                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
470                 rval = -EAGAIN;
471                 goto done_unmap_sg;
472         }
473
474         if (!vha->flags.online) {
475                 ql_log(ql_log_warn, vha, 0x7012,
476                     "Host is not online.\n");
477                 rval = -EIO;
478                 goto done_unmap_sg;
479         }
480
481         loop_id =
482                 (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
483                         >> 24;
484         switch (loop_id) {
485         case 0xFC:
486                 loop_id = cpu_to_le16(NPH_SNS);
487                 break;
488         case 0xFA:
489                 loop_id = vha->mgmt_svr_loop_id;
490                 break;
491         default:
492                 ql_dbg(ql_dbg_user, vha, 0x7013,
493                     "Unknown loop id: %x.\n", loop_id);
494                 rval = -EINVAL;
495                 goto done_unmap_sg;
496         }
497
498         /* Allocate a dummy fcport structure, since functions preparing the
499          * IOCB and mailbox command retrieves port specific information
500          * from fcport structure. For Host based ELS commands there will be
501          * no fcport structure allocated
502          */
503         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
504         if (!fcport) {
505                 ql_log(ql_log_warn, vha, 0x7014,
506                     "Failed to allocate fcport.\n");
507                 rval = -ENOMEM;
508                 goto done_unmap_sg;
509         }
510
511         /* Initialize all required  fields of fcport */
512         fcport->vha = vha;
513         fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
514         fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
515         fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
516         fcport->loop_id = loop_id;
517
518         /* Alloc SRB structure */
519         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
520         if (!sp) {
521                 ql_log(ql_log_warn, vha, 0x7015,
522                     "qla2x00_get_sp failed.\n");
523                 rval = -ENOMEM;
524                 goto done_free_fcport;
525         }
526
527         sp->type = SRB_CT_CMD;
528         sp->name = "bsg_ct";
529         sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
530         sp->u.bsg_job = bsg_job;
531         sp->free = qla2x00_bsg_sp_free;
532         sp->done = qla2x00_bsg_job_done;
533
534         ql_dbg(ql_dbg_user, vha, 0x7016,
535             "bsg rqst type: %s else type: %x - "
536             "loop-id=%x portid=%02x%02x%02x.\n", type,
537             (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
538             fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
539             fcport->d_id.b.al_pa);
540
541         rval = qla2x00_start_sp(sp);
542         if (rval != QLA_SUCCESS) {
543                 ql_log(ql_log_warn, vha, 0x7017,
544                     "qla2x00_start_sp failed=%d.\n", rval);
545                 qla2x00_rel_sp(sp);
546                 rval = -EIO;
547                 goto done_free_fcport;
548         }
549         return rval;
550
551 done_free_fcport:
552         kfree(fcport);
553 done_unmap_sg:
554         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
555                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
556         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
557                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
558 done:
559         return rval;
560 }
561
/*
 * Disable loopback mode on 81xx/8031/8044 adapters.
 *
 * Clears the loopback bits from the current port configuration and
 * pushes the new config to the firmware.  @wait / @wait2 request
 * blocking for the DCBX-complete and link-port-up async events
 * respectively.  On unsupported adapter types this is a no-op
 * returning 0.
 *
 * Return: 0 on success, -EINVAL on mailbox failure or event timeout.
 */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
			    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0 , sizeof(new_config));
	/* Only act if internal or external loopback is currently enabled. */
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;

		/* Arm the async-event notifications before the mailbox cmd. */
		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
			(DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		/* Optionally also wait for the link to come back up. */
		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}
629
/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 *
 * Writes the requested loopback bits into @new_config (derived from the
 * current @config) and sends it to the firmware, then waits for the
 * DCBX-complete async event, honoring firmware-requested timeout
 * extensions (ha->idc_extend_tmo).  If the event never arrives, the
 * loopback change is rolled back; if even the rollback fails, a
 * firmware dump is taken and a chip reset is scheduled.
 *
 * Return: 0 on success, -EINVAL on failure.  On unsupported adapter
 * types this is a no-op returning 0.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	     "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	/* Arm the DCBX-complete notification before sending the config. */
	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		/* Firmware may ask for more time via idc_extend_tmo. */
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			ha->isp_ops->fw_dump(vha, 0);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}
708
709 static int
710 qla2x00_process_loopback(struct bsg_job *bsg_job)
711 {
712         struct fc_bsg_request *bsg_request = bsg_job->request;
713         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
714         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
715         scsi_qla_host_t *vha = shost_priv(host);
716         struct qla_hw_data *ha = vha->hw;
717         int rval;
718         uint8_t command_sent;
719         char *type;
720         struct msg_echo_lb elreq;
721         uint16_t response[MAILBOX_REGISTER_COUNT];
722         uint16_t config[4], new_config[4];
723         uint8_t *fw_sts_ptr;
724         uint8_t *req_data = NULL;
725         dma_addr_t req_data_dma;
726         uint32_t req_data_len;
727         uint8_t *rsp_data = NULL;
728         dma_addr_t rsp_data_dma;
729         uint32_t rsp_data_len;
730
731         if (!vha->flags.online) {
732                 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
733                 return -EIO;
734         }
735
736         memset(&elreq, 0, sizeof(elreq));
737
738         elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
739                 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
740                 DMA_TO_DEVICE);
741
742         if (!elreq.req_sg_cnt) {
743                 ql_log(ql_log_warn, vha, 0x701a,
744                     "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
745                 return -ENOMEM;
746         }
747
748         elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
749                 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
750                 DMA_FROM_DEVICE);
751
752         if (!elreq.rsp_sg_cnt) {
753                 ql_log(ql_log_warn, vha, 0x701b,
754                     "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
755                 rval = -ENOMEM;
756                 goto done_unmap_req_sg;
757         }
758
759         if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
760                 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
761                 ql_log(ql_log_warn, vha, 0x701c,
762                     "dma mapping resulted in different sg counts, "
763                     "request_sg_cnt: %x dma_request_sg_cnt: %x "
764                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
765                     bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
766                     bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
767                 rval = -EAGAIN;
768                 goto done_unmap_sg;
769         }
770         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
771         req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
772                 &req_data_dma, GFP_KERNEL);
773         if (!req_data) {
774                 ql_log(ql_log_warn, vha, 0x701d,
775                     "dma alloc failed for req_data.\n");
776                 rval = -ENOMEM;
777                 goto done_unmap_sg;
778         }
779
780         rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
781                 &rsp_data_dma, GFP_KERNEL);
782         if (!rsp_data) {
783                 ql_log(ql_log_warn, vha, 0x7004,
784                     "dma alloc failed for rsp_data.\n");
785                 rval = -ENOMEM;
786                 goto done_free_dma_req;
787         }
788
789         /* Copy the request buffer in req_data now */
790         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
791                 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
792
793         elreq.send_dma = req_data_dma;
794         elreq.rcv_dma = rsp_data_dma;
795         elreq.transfer_size = req_data_len;
796
797         elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
798         elreq.iteration_count =
799             bsg_request->rqst_data.h_vendor.vendor_cmd[2];
800
801         if (atomic_read(&vha->loop_state) == LOOP_READY &&
802             (ha->current_topology == ISP_CFG_F ||
803             (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
804              req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
805             elreq.options == EXTERNAL_LOOPBACK) {
806                 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
807                 ql_dbg(ql_dbg_user, vha, 0x701e,
808                     "BSG request type: %s.\n", type);
809                 command_sent = INT_DEF_LB_ECHO_CMD;
810                 rval = qla2x00_echo_test(vha, &elreq, response);
811         } else {
812                 if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
813                         memset(config, 0, sizeof(config));
814                         memset(new_config, 0, sizeof(new_config));
815
816                         if (qla81xx_get_port_config(vha, config)) {
817                                 ql_log(ql_log_warn, vha, 0x701f,
818                                     "Get port config failed.\n");
819                                 rval = -EPERM;
820                                 goto done_free_dma_rsp;
821                         }
822
823                         if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
824                                 ql_dbg(ql_dbg_user, vha, 0x70c4,
825                                     "Loopback operation already in "
826                                     "progress.\n");
827                                 rval = -EAGAIN;
828                                 goto done_free_dma_rsp;
829                         }
830
831                         ql_dbg(ql_dbg_user, vha, 0x70c0,
832                             "elreq.options=%04x\n", elreq.options);
833
834                         if (elreq.options == EXTERNAL_LOOPBACK)
835                                 if (IS_QLA8031(ha) || IS_QLA8044(ha))
836                                         rval = qla81xx_set_loopback_mode(vha,
837                                             config, new_config, elreq.options);
838                                 else
839                                         rval = qla81xx_reset_loopback_mode(vha,
840                                             config, 1, 0);
841                         else
842                                 rval = qla81xx_set_loopback_mode(vha, config,
843                                     new_config, elreq.options);
844
845                         if (rval) {
846                                 rval = -EPERM;
847                                 goto done_free_dma_rsp;
848                         }
849
850                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
851                         ql_dbg(ql_dbg_user, vha, 0x7028,
852                             "BSG request type: %s.\n", type);
853
854                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
855                         rval = qla2x00_loopback_test(vha, &elreq, response);
856
857                         if (response[0] == MBS_COMMAND_ERROR &&
858                                         response[1] == MBS_LB_RESET) {
859                                 ql_log(ql_log_warn, vha, 0x7029,
860                                     "MBX command error, Aborting ISP.\n");
861                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
862                                 qla2xxx_wake_dpc(vha);
863                                 qla2x00_wait_for_chip_reset(vha);
864                                 /* Also reset the MPI */
865                                 if (IS_QLA81XX(ha)) {
866                                         if (qla81xx_restart_mpi_firmware(vha) !=
867                                             QLA_SUCCESS) {
868                                                 ql_log(ql_log_warn, vha, 0x702a,
869                                                     "MPI reset failed.\n");
870                                         }
871                                 }
872
873                                 rval = -EIO;
874                                 goto done_free_dma_rsp;
875                         }
876
877                         if (new_config[0]) {
878                                 int ret;
879
880                                 /* Revert back to original port config
881                                  * Also clear internal loopback
882                                  */
883                                 ret = qla81xx_reset_loopback_mode(vha,
884                                     new_config, 0, 1);
885                                 if (ret) {
886                                         /*
887                                          * If the reset of the loopback mode
888                                          * doesn't work take FCoE dump and then
889                                          * reset the chip.
890                                          */
891                                         ha->isp_ops->fw_dump(vha, 0);
892                                         set_bit(ISP_ABORT_NEEDED,
893                                             &vha->dpc_flags);
894                                 }
895
896                         }
897
898                 } else {
899                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
900                         ql_dbg(ql_dbg_user, vha, 0x702b,
901                             "BSG request type: %s.\n", type);
902                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
903                         rval = qla2x00_loopback_test(vha, &elreq, response);
904                 }
905         }
906
907         if (rval) {
908                 ql_log(ql_log_warn, vha, 0x702c,
909                     "Vendor request %s failed.\n", type);
910
911                 rval = 0;
912                 bsg_reply->result = (DID_ERROR << 16);
913                 bsg_reply->reply_payload_rcv_len = 0;
914         } else {
915                 ql_dbg(ql_dbg_user, vha, 0x702d,
916                     "Vendor request %s completed.\n", type);
917                 bsg_reply->result = (DID_OK << 16);
918                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
919                         bsg_job->reply_payload.sg_cnt, rsp_data,
920                         rsp_data_len);
921         }
922
923         bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
924             sizeof(response) + sizeof(uint8_t);
925         fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
926         memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
927                         sizeof(response));
928         fw_sts_ptr += sizeof(response);
929         *fw_sts_ptr = command_sent;
930
931 done_free_dma_rsp:
932         dma_free_coherent(&ha->pdev->dev, rsp_data_len,
933                 rsp_data, rsp_data_dma);
934 done_free_dma_req:
935         dma_free_coherent(&ha->pdev->dev, req_data_len,
936                 req_data, req_data_dma);
937 done_unmap_sg:
938         dma_unmap_sg(&ha->pdev->dev,
939             bsg_job->reply_payload.sg_list,
940             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
941 done_unmap_req_sg:
942         dma_unmap_sg(&ha->pdev->dev,
943             bsg_job->request_payload.sg_list,
944             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
945         if (!rval)
946                 bsg_job_done(bsg_job, bsg_reply->result,
947                                bsg_reply->reply_payload_rcv_len);
948         return rval;
949 }
950
951 static int
952 qla84xx_reset(struct bsg_job *bsg_job)
953 {
954         struct fc_bsg_request *bsg_request = bsg_job->request;
955         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
956         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
957         scsi_qla_host_t *vha = shost_priv(host);
958         struct qla_hw_data *ha = vha->hw;
959         int rval = 0;
960         uint32_t flag;
961
962         if (!IS_QLA84XX(ha)) {
963                 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
964                 return -EINVAL;
965         }
966
967         flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
968
969         rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
970
971         if (rval) {
972                 ql_log(ql_log_warn, vha, 0x7030,
973                     "Vendor request 84xx reset failed.\n");
974                 rval = (DID_ERROR << 16);
975
976         } else {
977                 ql_dbg(ql_dbg_user, vha, 0x7031,
978                     "Vendor request 84xx reset completed.\n");
979                 bsg_reply->result = DID_OK;
980                 bsg_job_done(bsg_job, bsg_reply->result,
981                                bsg_reply->reply_payload_rcv_len);
982         }
983
984         return rval;
985 }
986
/*
 * qla84xx_updatefw() - BSG vendor command: download ISP84xx firmware.
 * @bsg_job: fc bsg job carrying the firmware image in its request payload
 *
 * Stages the caller's scattered firmware image into one coherent DMA
 * buffer and hands it to the chip through a VERIFY_CHIP IOCB with a
 * 120 second timeout.  vendor_cmd[1] selects regular vs. diagnostic
 * firmware (A84_ISSUE_UPDATE_DIAGFW_CMD).
 *
 * Return: 0 on success (reply completed via bsg_job_done()), negative
 * errno on setup failure, or (DID_ERROR << 16) if the IOCB fails.
 */
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	/* This vendor command only applies to ISP84xx hardware. */
	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	/* Partial/coalesced mappings are not supported by this path. */
	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
		&fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Gather the scattered image into the single coherent buffer. */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	/* Firmware version word lives at dword offset 2 of the image. */
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver =  cpu_to_le32(fw_ver);
	mn->fw_size =  cpu_to_le32(data_len);
	mn->fw_seq_size =  cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	/* Flashing can take a while: allow the IOCB up to 120 seconds. */
	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	/* Complete the job only on success; any non-zero rval is returned. */
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}
1096
/*
 * qla84xx_mgmt_cmd() - BSG vendor command: ISP84xx management access.
 * @bsg_job: fc bsg job; a struct qla_bsg_a84_mgmt follows the generic
 *           fc_bsg_request header in the request buffer
 *
 * Builds an ACCESS_CHIP IOCB for one of four sub-commands:
 * READ_MEM / GET_INFO (data flows chip -> reply payload),
 * WRITE_MEM (data flows request payload -> chip), or CHNG_CONFIG
 * (no data buffer at all).
 *
 * Return: 0 on success (reply completed via bsg_job_done()), negative
 * errno on setup failure, or (DID_ERROR << 16) if the IOCB fails.
 */
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	/* Records which payload was mapped so cleanup unmaps the right one. */
	uint32_t dma_direction = DMA_NONE;

	/* This vendor command only applies to ISP84xx hardware. */
	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	/* Vendor-specific management request follows the bsg header. */
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		/* Chip-to-host transfer: map the reply payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		/* Partial/coalesced mappings are not supported. */
		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		/* Host-to-chip transfer: map the request payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		/* Partial/coalesced mappings are not supported. */
		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
			&mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		/* Copy the data to be written into the staging buffer. */
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		/* Pure parameter change: no data buffer or DMA mapping. */
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	/* All data-carrying sub-commands use a single descriptor segment. */
	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		/* Chip-to-host commands: copy results back to the caller. */
		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
			(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
				bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
				bsg_job->reply_payload.sg_cnt, mgmt_b,
				data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	/* Unmap whichever payload was mapped above (if any). */
	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	/* Complete the job only on success; any non-zero rval is returned. */
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}
1292
/*
 * qla24xx_iidma() - BSG vendor command: get/set per-port iiDMA speed.
 * @bsg_job: fc bsg job; a struct qla_port_param follows the generic
 *           fc_bsg_request header, identifying the target by WWPN
 *
 * Looks up the remote target port by WWPN and, depending on
 * port_param->mode, either sets the iiDMA speed or reads it back
 * (the result is appended to the bsg reply).
 *
 * Return: 0 on success (reply completed via bsg_job_done()), -EINVAL on
 * validation failure, or (DID_ERROR << 16) if the mailbox command fails.
 */
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	/* Vendor-specific port parameters follow the bsg header. */
	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	/* Find the target port whose WWPN matches the request. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
			fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	/* mode != 0 -> set the speed; mode == 0 -> query current speed. */
	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
			port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
			&port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iiDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			/* On a query, return the port_param (with the speed
			 * filled in) right after the fc_bsg_reply header.
			 */
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
				sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_reply) +
				sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
				sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}
1381
/*
 * qla2x00_optrom_setup() - Validate an option-ROM access and stage a buffer.
 * @bsg_job:   fc bsg job; vendor_cmd[1] holds the flash start offset
 * @vha:       host context
 * @is_update: non-zero for a flash write, zero for a read
 *
 * Checks the requested region against the adapter's flash layout, clamps
 * the region size to the end of the option ROM, transitions optrom_state
 * to QLA_SWRITING/QLA_SREADING and allocates ha->optrom_buffer for the
 * transfer.  Callers in this file invoke it with ha->optrom_mutex held.
 *
 * NOTE(review): start == ha->optrom_size passes the range check and
 * yields a zero-length region -- confirm callers never request this.
 *
 * Return: 0 on success; -EINVAL, -EBUSY or -ENOMEM on failure (state is
 * restored to QLA_SWAITING if the allocation fails).
 */
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
	uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	/* Only one option-ROM operation may be in flight at a time. */
	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		/* Writes are restricted to known-safe regions unless the
		 * adapter family allows arbitrary updates.
		 */
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		/* Clamp the transfer so it never runs past the flash end. */
		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		/* Clamp the transfer so it never runs past the flash end. */
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		/* Roll back the state transition on allocation failure. */
		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}
1451
/*
 * qla2x00_read_optrom() - BSG vendor command: read the option ROM.
 * @bsg_job: fc bsg job; the region read is returned in the reply payload
 *
 * Serialized by ha->optrom_mutex.  Uses qla2x00_optrom_setup() to
 * validate the region and allocate ha->optrom_buffer, reads the flash
 * through the isp_ops hook, then copies the data into the reply payload
 * and restores the optrom state.
 *
 * Return: 0 on success (reply completed via bsg_job_done()), -EBUSY if
 * a NIC core reset is in progress, or the setup error code.
 */
static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	/* Flash access is unsafe while the NIC core reset handler runs. */
	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	/* Release the staging buffer and allow the next optrom operation. */
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return rval;
}
1488
/*
 * qla2x00_update_optrom() - BSG vendor command: write the option ROM.
 * @bsg_job: fc bsg job; the request payload carries the flash image
 *
 * Serialized by ha->optrom_mutex.  Uses qla2x00_optrom_setup() to
 * validate the region and allocate ha->optrom_buffer, copies the
 * caller's image into it and writes the flash through the isp_ops
 * hook, then restores the optrom state.
 *
 * Return: 0 on success (reply completed via bsg_job_done()), or the
 * setup error code.
 */
static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	bsg_reply->result = DID_OK;
	/* Release the staging buffer and allow the next optrom operation. */
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return rval;
}
1524
1525 static int
1526 qla2x00_update_fru_versions(struct bsg_job *bsg_job)
1527 {
1528         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1529         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1530         scsi_qla_host_t *vha = shost_priv(host);
1531         struct qla_hw_data *ha = vha->hw;
1532         int rval = 0;
1533         uint8_t bsg[DMA_POOL_SIZE];
1534         struct qla_image_version_list *list = (void *)bsg;
1535         struct qla_image_version *image;
1536         uint32_t count;
1537         dma_addr_t sfp_dma;
1538         void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1539
1540         if (!sfp) {
1541                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1542                     EXT_STATUS_NO_MEMORY;
1543                 goto done;
1544         }
1545
1546         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1547             bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1548
1549         image = list->version;
1550         count = list->count;
1551         while (count--) {
1552                 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1553                 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1554                     image->field_address.device, image->field_address.offset,
1555                     sizeof(image->field_info), image->field_address.option);
1556                 if (rval) {
1557                         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1558                             EXT_STATUS_MAILBOX;
1559                         goto dealloc;
1560                 }
1561                 image++;
1562         }
1563
1564         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1565
1566 dealloc:
1567         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1568
1569 done:
1570         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1571         bsg_reply->result = DID_OK << 16;
1572         bsg_job_done(bsg_job, bsg_reply->result,
1573                        bsg_reply->reply_payload_rcv_len);
1574
1575         return 0;
1576 }
1577
1578 static int
1579 qla2x00_read_fru_status(struct bsg_job *bsg_job)
1580 {
1581         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1582         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1583         scsi_qla_host_t *vha = shost_priv(host);
1584         struct qla_hw_data *ha = vha->hw;
1585         int rval = 0;
1586         uint8_t bsg[DMA_POOL_SIZE];
1587         struct qla_status_reg *sr = (void *)bsg;
1588         dma_addr_t sfp_dma;
1589         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1590
1591         if (!sfp) {
1592                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1593                     EXT_STATUS_NO_MEMORY;
1594                 goto done;
1595         }
1596
1597         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1598             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1599
1600         rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1601             sr->field_address.device, sr->field_address.offset,
1602             sizeof(sr->status_reg), sr->field_address.option);
1603         sr->status_reg = *sfp;
1604
1605         if (rval) {
1606                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1607                     EXT_STATUS_MAILBOX;
1608                 goto dealloc;
1609         }
1610
1611         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1612             bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1613
1614         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1615
1616 dealloc:
1617         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1618
1619 done:
1620         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1621         bsg_reply->reply_payload_rcv_len = sizeof(*sr);
1622         bsg_reply->result = DID_OK << 16;
1623         bsg_job_done(bsg_job, bsg_reply->result,
1624                        bsg_reply->reply_payload_rcv_len);
1625
1626         return 0;
1627 }
1628
1629 static int
1630 qla2x00_write_fru_status(struct bsg_job *bsg_job)
1631 {
1632         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1633         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1634         scsi_qla_host_t *vha = shost_priv(host);
1635         struct qla_hw_data *ha = vha->hw;
1636         int rval = 0;
1637         uint8_t bsg[DMA_POOL_SIZE];
1638         struct qla_status_reg *sr = (void *)bsg;
1639         dma_addr_t sfp_dma;
1640         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1641
1642         if (!sfp) {
1643                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1644                     EXT_STATUS_NO_MEMORY;
1645                 goto done;
1646         }
1647
1648         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1649             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1650
1651         *sfp = sr->status_reg;
1652         rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1653             sr->field_address.device, sr->field_address.offset,
1654             sizeof(sr->status_reg), sr->field_address.option);
1655
1656         if (rval) {
1657                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1658                     EXT_STATUS_MAILBOX;
1659                 goto dealloc;
1660         }
1661
1662         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1663
1664 dealloc:
1665         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1666
1667 done:
1668         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1669         bsg_reply->result = DID_OK << 16;
1670         bsg_job_done(bsg_job, bsg_reply->result,
1671                        bsg_reply->reply_payload_rcv_len);
1672
1673         return 0;
1674 }
1675
1676 static int
1677 qla2x00_write_i2c(struct bsg_job *bsg_job)
1678 {
1679         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1680         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1681         scsi_qla_host_t *vha = shost_priv(host);
1682         struct qla_hw_data *ha = vha->hw;
1683         int rval = 0;
1684         uint8_t bsg[DMA_POOL_SIZE];
1685         struct qla_i2c_access *i2c = (void *)bsg;
1686         dma_addr_t sfp_dma;
1687         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1688
1689         if (!sfp) {
1690                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1691                     EXT_STATUS_NO_MEMORY;
1692                 goto done;
1693         }
1694
1695         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1696             bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1697
1698         memcpy(sfp, i2c->buffer, i2c->length);
1699         rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1700             i2c->device, i2c->offset, i2c->length, i2c->option);
1701
1702         if (rval) {
1703                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1704                     EXT_STATUS_MAILBOX;
1705                 goto dealloc;
1706         }
1707
1708         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1709
1710 dealloc:
1711         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1712
1713 done:
1714         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1715         bsg_reply->result = DID_OK << 16;
1716         bsg_job_done(bsg_job, bsg_reply->result,
1717                        bsg_reply->reply_payload_rcv_len);
1718
1719         return 0;
1720 }
1721
1722 static int
1723 qla2x00_read_i2c(struct bsg_job *bsg_job)
1724 {
1725         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1726         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1727         scsi_qla_host_t *vha = shost_priv(host);
1728         struct qla_hw_data *ha = vha->hw;
1729         int rval = 0;
1730         uint8_t bsg[DMA_POOL_SIZE];
1731         struct qla_i2c_access *i2c = (void *)bsg;
1732         dma_addr_t sfp_dma;
1733         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1734
1735         if (!sfp) {
1736                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1737                     EXT_STATUS_NO_MEMORY;
1738                 goto done;
1739         }
1740
1741         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1742             bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1743
1744         rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1745                 i2c->device, i2c->offset, i2c->length, i2c->option);
1746
1747         if (rval) {
1748                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1749                     EXT_STATUS_MAILBOX;
1750                 goto dealloc;
1751         }
1752
1753         memcpy(i2c->buffer, sfp, i2c->length);
1754         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1755             bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1756
1757         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1758
1759 dealloc:
1760         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1761
1762 done:
1763         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1764         bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
1765         bsg_reply->result = DID_OK << 16;
1766         bsg_job_done(bsg_job, bsg_reply->result,
1767                        bsg_reply->reply_payload_rcv_len);
1768
1769         return 0;
1770 }
1771
/*
 * qla24xx_process_bidir_cmd() - BSG vendor command: run a bidirectional
 * (combined read+write) IOCB through a self-login of the local port.
 *
 * After a series of preconditions (bidi-capable adapter, no ISP reset in
 * progress, host online, cable present, switch topology, P2P mode), the
 * port fabric-logs-in to itself once (cached in vha->self_login_loop_id),
 * both payloads are DMA-mapped, and an SRB is dispatched via
 * qla2x00_start_bidir(); on success the request completes later from the
 * interrupt handler through qla2x00_bsg_job_done().  On any failure the
 * goto-unwind below releases resources in reverse acquisition order and
 * the function completes the job itself with the error in vendor_rsp[0]
 * (bsg result is DID_OK in that case; callers check the vendor status).
 */
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len;
	uint32_t rsp_data_len;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
			"This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	/* Refuse while any ISP abort/reset sequence is pending or active. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval =  EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
			"Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
			"Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
			"Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2p\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* One-time self-login; the loop id is cached across requests under
	 * the selflogin lock. */
	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required  fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;

	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	/* Map the request payload first; unwound at done_unmap_req_sg. */
	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt,
		DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	/* Then the reply payload; unwound at done_unmap_sg. */
	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
		DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	/* The IOCB builder assumes a 1:1 sg mapping; bail if the IOMMU
	 * coalesced entries. */
	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	/* Bidirectional transfers must be symmetric in length. */
	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/*Populate srb->ctx with bidir ctx*/
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request  will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = (DID_OK) << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}
1949
/*
 * qlafx00_mgmt_cmd() - BSG vendor command: pass an ISPFX00 management IOCB
 * (struct qla_mt_iocb_rqst_fx00, carried in vendor_cmd[1..]) to firmware.
 *
 * The request/reply payloads are DMA-mapped only when the corresponding
 * SRB_FXDISC_{REQ,RESP}_DMA_VALID flags are set; the goto-unwind below
 * mirrors that conditional mapping in reverse order.  A throwaway fcport
 * is allocated because the IOCB build path reads port data from one.  On
 * successful qla2x00_start_sp() the job completes asynchronously via
 * qla2x00_bsg_job_done() (which also unmaps through qla2x00_bsg_sp_free);
 * on failure a negative errno is returned after unwinding.
 */
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char  *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
	    piocb_rqst, sizeof(*piocb_rqst));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	/* Map the request payload only if the IOCB says it carries data. */
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	/* Likewise for the reply payload. */
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required  fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = piocb_rqst->dataword;

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	/* NOTE(review): fcport came from qla2x00_alloc_fcport(); freeing it
	 * with bare kfree() presumably matches that allocator here — confirm
	 * against qla2x00_alloc_fcport's teardown counterpart. */
	kfree(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}
2072
2073 static int
2074 qla26xx_serdes_op(struct bsg_job *bsg_job)
2075 {
2076         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2077         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2078         scsi_qla_host_t *vha = shost_priv(host);
2079         int rval = 0;
2080         struct qla_serdes_reg sr;
2081
2082         memset(&sr, 0, sizeof(sr));
2083
2084         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2085             bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2086
2087         switch (sr.cmd) {
2088         case INT_SC_SERDES_WRITE_REG:
2089                 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2090                 bsg_reply->reply_payload_rcv_len = 0;
2091                 break;
2092         case INT_SC_SERDES_READ_REG:
2093                 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2094                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2095                     bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2096                 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2097                 break;
2098         default:
2099                 ql_dbg(ql_dbg_user, vha, 0x708c,
2100                     "Unknown serdes cmd %x.\n", sr.cmd);
2101                 rval = -EINVAL;
2102                 break;
2103         }
2104
2105         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2106             rval ? EXT_STATUS_MAILBOX : 0;
2107
2108         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2109         bsg_reply->result = DID_OK << 16;
2110         bsg_job_done(bsg_job, bsg_reply->result,
2111                        bsg_reply->reply_payload_rcv_len);
2112         return 0;
2113 }
2114
2115 static int
2116 qla8044_serdes_op(struct bsg_job *bsg_job)
2117 {
2118         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2119         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2120         scsi_qla_host_t *vha = shost_priv(host);
2121         int rval = 0;
2122         struct qla_serdes_reg_ex sr;
2123
2124         memset(&sr, 0, sizeof(sr));
2125
2126         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2127             bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2128
2129         switch (sr.cmd) {
2130         case INT_SC_SERDES_WRITE_REG:
2131                 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2132                 bsg_reply->reply_payload_rcv_len = 0;
2133                 break;
2134         case INT_SC_SERDES_READ_REG:
2135                 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2136                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2137                     bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2138                 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2139                 break;
2140         default:
2141                 ql_dbg(ql_dbg_user, vha, 0x7020,
2142                     "Unknown serdes cmd %x.\n", sr.cmd);
2143                 rval = -EINVAL;
2144                 break;
2145         }
2146
2147         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2148             rval ? EXT_STATUS_MAILBOX : 0;
2149
2150         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2151         bsg_reply->result = DID_OK << 16;
2152         bsg_job_done(bsg_job, bsg_reply->result,
2153                        bsg_reply->reply_payload_rcv_len);
2154         return 0;
2155 }
2156
2157 static int
2158 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2159 {
2160         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2161         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2162         scsi_qla_host_t *vha = shost_priv(host);
2163         struct qla_hw_data *ha = vha->hw;
2164         struct qla_flash_update_caps cap;
2165
2166         if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha))
2167                 return -EPERM;
2168
2169         memset(&cap, 0, sizeof(cap));
2170         cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2171                            (uint64_t)ha->fw_attributes_ext[0] << 32 |
2172                            (uint64_t)ha->fw_attributes_h << 16 |
2173                            (uint64_t)ha->fw_attributes;
2174
2175         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2176             bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2177         bsg_reply->reply_payload_rcv_len = sizeof(cap);
2178
2179         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2180             EXT_STATUS_OK;
2181
2182         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2183         bsg_reply->result = DID_OK << 16;
2184         bsg_job_done(bsg_job, bsg_reply->result,
2185                        bsg_reply->reply_payload_rcv_len);
2186         return 0;
2187 }
2188
2189 static int
2190 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2191 {
2192         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2193         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2194         scsi_qla_host_t *vha = shost_priv(host);
2195         struct qla_hw_data *ha = vha->hw;
2196         uint64_t online_fw_attr = 0;
2197         struct qla_flash_update_caps cap;
2198
2199         if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2200                 return -EPERM;
2201
2202         memset(&cap, 0, sizeof(cap));
2203         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2204             bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2205
2206         online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2207                          (uint64_t)ha->fw_attributes_ext[0] << 32 |
2208                          (uint64_t)ha->fw_attributes_h << 16 |
2209                          (uint64_t)ha->fw_attributes;
2210
2211         if (online_fw_attr != cap.capabilities) {
2212                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2213                     EXT_STATUS_INVALID_PARAM;
2214                 return -EINVAL;
2215         }
2216
2217         if (cap.outage_duration < MAX_LOOP_TIMEOUT)  {
2218                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2219                     EXT_STATUS_INVALID_PARAM;
2220                 return -EINVAL;
2221         }
2222
2223         bsg_reply->reply_payload_rcv_len = 0;
2224
2225         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2226             EXT_STATUS_OK;
2227
2228         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2229         bsg_reply->result = DID_OK << 16;
2230         bsg_job_done(bsg_job, bsg_reply->result,
2231                        bsg_reply->reply_payload_rcv_len);
2232         return 0;
2233 }
2234
2235 static int
2236 qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
2237 {
2238         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2239         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2240         scsi_qla_host_t *vha = shost_priv(host);
2241         struct qla_hw_data *ha = vha->hw;
2242         struct qla_bbcr_data bbcr;
2243         uint16_t loop_id, topo, sw_cap;
2244         uint8_t domain, area, al_pa, state;
2245         int rval;
2246
2247         if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2248                 return -EPERM;
2249
2250         memset(&bbcr, 0, sizeof(bbcr));
2251
2252         if (vha->flags.bbcr_enable)
2253                 bbcr.status = QLA_BBCR_STATUS_ENABLED;
2254         else
2255                 bbcr.status = QLA_BBCR_STATUS_DISABLED;
2256
2257         if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
2258                 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2259                         &area, &domain, &topo, &sw_cap);
2260                 if (rval != QLA_SUCCESS) {
2261                         bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
2262                         bbcr.state = QLA_BBCR_STATE_OFFLINE;
2263                         bbcr.mbx1 = loop_id;
2264                         goto done;
2265                 }
2266
2267                 state = (vha->bbcr >> 12) & 0x1;
2268
2269                 if (state) {
2270                         bbcr.state = QLA_BBCR_STATE_OFFLINE;
2271                         bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
2272                 } else {
2273                         bbcr.state = QLA_BBCR_STATE_ONLINE;
2274                         bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
2275                 }
2276
2277                 bbcr.configured_bbscn = vha->bbcr & 0xf;
2278         }
2279
2280 done:
2281         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2282                 bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
2283         bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
2284
2285         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2286
2287         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2288         bsg_reply->result = DID_OK << 16;
2289         bsg_job_done(bsg_job, bsg_reply->result,
2290                        bsg_reply->reply_payload_rcv_len);
2291         return 0;
2292 }
2293
2294 static int
2295 qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2296 {
2297         struct fc_bsg_request *bsg_request = bsg_job->request;
2298         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2299         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2300         scsi_qla_host_t *vha = shost_priv(host);
2301         struct qla_hw_data *ha = vha->hw;
2302         struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2303         struct link_statistics *stats = NULL;
2304         dma_addr_t stats_dma;
2305         int rval;
2306         uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
2307         uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
2308
2309         if (test_bit(UNLOADING, &vha->dpc_flags))
2310                 return -ENODEV;
2311
2312         if (unlikely(pci_channel_offline(ha->pdev)))
2313                 return -ENODEV;
2314
2315         if (qla2x00_reset_active(vha))
2316                 return -EBUSY;
2317
2318         if (!IS_FWI2_CAPABLE(ha))
2319                 return -EPERM;
2320
2321         stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2322                                    GFP_KERNEL);
2323         if (!stats) {
2324                 ql_log(ql_log_warn, vha, 0x70e2,
2325                     "Failed to allocate memory for stats.\n");
2326                 return -ENOMEM;
2327         }
2328
2329         rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
2330
2331         if (rval == QLA_SUCCESS) {
2332                 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
2333                         stats, sizeof(*stats));
2334                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2335                         bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2336         }
2337
2338         bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2339         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2340             rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2341
2342         bsg_job->reply_len = sizeof(*bsg_reply);
2343         bsg_reply->result = DID_OK << 16;
2344         bsg_job_done(bsg_job, bsg_reply->result,
2345                        bsg_reply->reply_payload_rcv_len);
2346
2347         dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2348                 stats, stats_dma);
2349
2350         return 0;
2351 }
2352
2353 static int
2354 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2355 {
2356         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2357         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2358         scsi_qla_host_t *vha = shost_priv(host);
2359         int rval;
2360         struct qla_dport_diag *dd;
2361
2362         if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
2363             !IS_QLA28XX(vha->hw))
2364                 return -EPERM;
2365
2366         dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2367         if (!dd) {
2368                 ql_log(ql_log_warn, vha, 0x70db,
2369                     "Failed to allocate memory for dport.\n");
2370                 return -ENOMEM;
2371         }
2372
2373         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2374             bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2375
2376         rval = qla26xx_dport_diagnostics(
2377             vha, dd->buf, sizeof(dd->buf), dd->options);
2378         if (rval == QLA_SUCCESS) {
2379                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2380                     bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2381         }
2382
2383         bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2384         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2385             rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2386
2387         bsg_job->reply_len = sizeof(*bsg_reply);
2388         bsg_reply->result = DID_OK << 16;
2389         bsg_job_done(bsg_job, bsg_reply->result,
2390                        bsg_reply->reply_payload_rcv_len);
2391
2392         kfree(dd);
2393
2394         return 0;
2395 }
2396
2397 static int
2398 qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
2399 {
2400         scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2401         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2402         struct qla_hw_data *ha = vha->hw;
2403         struct qla_active_regions regions = { };
2404         struct active_regions active_regions = { };
2405
2406         qla28xx_get_aux_images(vha, &active_regions);
2407         regions.global_image = active_regions.global;
2408
2409         if (IS_QLA28XX(ha)) {
2410                 qla27xx_get_active_image(vha, &active_regions);
2411                 regions.board_config = active_regions.aux.board_config;
2412                 regions.vpd_nvram = active_regions.aux.vpd_nvram;
2413                 regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
2414                 regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
2415         }
2416
2417         ql_dbg(ql_dbg_user, vha, 0x70e1,
2418             "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
2419             __func__, vha->host_no, regions.global_image,
2420             regions.board_config, regions.vpd_nvram,
2421             regions.npiv_config_0_1, regions.npiv_config_2_3);
2422
2423         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2424             bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
2425
2426         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2427         bsg_reply->reply_payload_rcv_len = sizeof(regions);
2428         bsg_reply->result = DID_OK << 16;
2429         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2430         bsg_job_done(bsg_job, bsg_reply->result,
2431             bsg_reply->reply_payload_rcv_len);
2432
2433         return 0;
2434 }
2435
2436 static int
2437 qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
2438 {
2439         struct fc_bsg_request *bsg_request = bsg_job->request;
2440
2441         switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
2442         case QL_VND_LOOPBACK:
2443                 return qla2x00_process_loopback(bsg_job);
2444
2445         case QL_VND_A84_RESET:
2446                 return qla84xx_reset(bsg_job);
2447
2448         case QL_VND_A84_UPDATE_FW:
2449                 return qla84xx_updatefw(bsg_job);
2450
2451         case QL_VND_A84_MGMT_CMD:
2452                 return qla84xx_mgmt_cmd(bsg_job);
2453
2454         case QL_VND_IIDMA:
2455                 return qla24xx_iidma(bsg_job);
2456
2457         case QL_VND_FCP_PRIO_CFG_CMD:
2458                 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2459
2460         case QL_VND_READ_FLASH:
2461                 return qla2x00_read_optrom(bsg_job);
2462
2463         case QL_VND_UPDATE_FLASH:
2464                 return qla2x00_update_optrom(bsg_job);
2465
2466         case QL_VND_SET_FRU_VERSION:
2467                 return qla2x00_update_fru_versions(bsg_job);
2468
2469         case QL_VND_READ_FRU_STATUS:
2470                 return qla2x00_read_fru_status(bsg_job);
2471
2472         case QL_VND_WRITE_FRU_STATUS:
2473                 return qla2x00_write_fru_status(bsg_job);
2474
2475         case QL_VND_WRITE_I2C:
2476                 return qla2x00_write_i2c(bsg_job);
2477
2478         case QL_VND_READ_I2C:
2479                 return qla2x00_read_i2c(bsg_job);
2480
2481         case QL_VND_DIAG_IO_CMD:
2482                 return qla24xx_process_bidir_cmd(bsg_job);
2483
2484         case QL_VND_FX00_MGMT_CMD:
2485                 return qlafx00_mgmt_cmd(bsg_job);
2486
2487         case QL_VND_SERDES_OP:
2488                 return qla26xx_serdes_op(bsg_job);
2489
2490         case QL_VND_SERDES_OP_EX:
2491                 return qla8044_serdes_op(bsg_job);
2492
2493         case QL_VND_GET_FLASH_UPDATE_CAPS:
2494                 return qla27xx_get_flash_upd_cap(bsg_job);
2495
2496         case QL_VND_SET_FLASH_UPDATE_CAPS:
2497                 return qla27xx_set_flash_upd_cap(bsg_job);
2498
2499         case QL_VND_GET_BBCR_DATA:
2500                 return qla27xx_get_bbcr_data(bsg_job);
2501
2502         case QL_VND_GET_PRIV_STATS:
2503         case QL_VND_GET_PRIV_STATS_EX:
2504                 return qla2x00_get_priv_stats(bsg_job);
2505
2506         case QL_VND_DPORT_DIAGNOSTICS:
2507                 return qla2x00_do_dport_diagnostics(bsg_job);
2508
2509         case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
2510                 return qla2x00_get_flash_image_status(bsg_job);
2511
2512         default:
2513                 return -ENOSYS;
2514         }
2515 }
2516
2517 int
2518 qla24xx_bsg_request(struct bsg_job *bsg_job)
2519 {
2520         struct fc_bsg_request *bsg_request = bsg_job->request;
2521         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2522         int ret = -EINVAL;
2523         struct fc_rport *rport;
2524         struct Scsi_Host *host;
2525         scsi_qla_host_t *vha;
2526
2527         /* In case no data transferred. */
2528         bsg_reply->reply_payload_rcv_len = 0;
2529
2530         if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
2531                 rport = fc_bsg_to_rport(bsg_job);
2532                 host = rport_to_shost(rport);
2533                 vha = shost_priv(host);
2534         } else {
2535                 host = fc_bsg_to_shost(bsg_job);
2536                 vha = shost_priv(host);
2537         }
2538
2539         if (qla2x00_chip_is_down(vha)) {
2540                 ql_dbg(ql_dbg_user, vha, 0x709f,
2541                     "BSG: ISP abort active/needed -- cmd=%d.\n",
2542                     bsg_request->msgcode);
2543                 return -EBUSY;
2544         }
2545
2546         ql_dbg(ql_dbg_user, vha, 0x7000,
2547             "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
2548
2549         switch (bsg_request->msgcode) {
2550         case FC_BSG_RPT_ELS:
2551         case FC_BSG_HST_ELS_NOLOGIN:
2552                 ret = qla2x00_process_els(bsg_job);
2553                 break;
2554         case FC_BSG_HST_CT:
2555                 ret = qla2x00_process_ct(bsg_job);
2556                 break;
2557         case FC_BSG_HST_VENDOR:
2558                 ret = qla2x00_process_vendor_specific(bsg_job);
2559                 break;
2560         case FC_BSG_HST_ADD_RPORT:
2561         case FC_BSG_HST_DEL_RPORT:
2562         case FC_BSG_RPT_CT:
2563         default:
2564                 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
2565                 break;
2566         }
2567         return ret;
2568 }
2569
/*
 * BSG timeout handler: locate the srb backing a timed-out bsg_job in
 * the outstanding-command arrays and abort it via the ISP.
 *
 * Always returns 0; the outcome is reported through bsg_reply->result
 * (0 on successful abort, -EIO on abort failure, -ENXIO if no
 * matching srb was found).
 */
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		/* Slot 0 is never used for commands; start at 1. */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				/* Only BSG-originated srb types can match. */
				if (((sp->type == SRB_CT_CMD) ||
					(sp->type == SRB_ELS_CMD_HST) ||
					(sp->type == SRB_FXIOCB_BCMD))
					&& (sp->u.bsg_job == bsg_job)) {
					/*
					 * Claim the slot before dropping the
					 * lock so the normal completion path
					 * no longer sees this srb.
					 */
					req->outstanding_cmds[cnt] = NULL;
					/*
					 * abort_command issues a mailbox
					 * command and may sleep, so it cannot
					 * run under the hardware spinlock.
					 * NOTE(review): once the lock is
					 * dropped, an in-flight completion
					 * interrupt racing with this abort is
					 * presumably handled elsewhere --
					 * confirm against the isr path.
					 */
					spin_unlock_irqrestore(&ha->hardware_lock, flags);
					if (ha->isp_ops->abort_command(sp)) {
						ql_log(ql_log_warn, vha, 0x7089,
						    "mbx abort_command "
						    "failed.\n");
						bsg_reply->result = -EIO;
					} else {
						ql_dbg(ql_dbg_user, vha, 0x708a,
						    "mbx abort_command "
						    "success.\n");
						bsg_reply->result = 0;
					}
					/* Reacquire for the common exit path. */
					spin_lock_irqsave(&ha->hardware_lock, flags);
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_reply->result = -ENXIO;
	return 0;

done:
	/* Release the srb resources outside the hardware lock. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	sp->free(sp);
	return 0;
}