2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <linux/bsg-lib.h>
14 /* BSG support for ELS/CT pass through */
/*
 * qla2x00_bsg_job_done() - SRB completion callback for BSG pass-through
 * commands: copy the completion status into the bsg reply and signal the
 * block layer that the job has finished.
 * NOTE(review): this chunk is a lossy extraction (original line numbers are
 * embedded and some lines are missing); code left byte-identical.
 */
16 qla2x00_bsg_job_done(void *data, void *ptr, int res)
18 srb_t *sp = (srb_t *)ptr;
19 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
20 struct bsg_job *bsg_job = sp->u.bsg_job;
21 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
/* Propagate the SRB result and complete the bsg job. */
23 bsg_reply->result = res;
24 bsg_job_done(bsg_job, bsg_reply->result,
25 bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_bsg_sp_free() - release resources held by a BSG SRB: unmap the
 * request/reply scatter-gather lists and free the SRB itself.
 * For SRB_FXIOCB_BCMD the unmaps are conditional on the DMA-valid flags
 * carried in the vendor command's IOCB request block.
 */
30 qla2x00_bsg_sp_free(void *data, void *ptr)
32 srb_t *sp = (srb_t *)ptr;
33 struct scsi_qla_host *vha = sp->fcport->vha;
34 struct bsg_job *bsg_job = sp->u.bsg_job;
35 struct fc_bsg_request *bsg_request = bsg_job->request;
37 struct qla_hw_data *ha = vha->hw;
38 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
40 if (sp->type == SRB_FXIOCB_BCMD) {
/* IOCB request block lives just past vendor_cmd[0] in the bsg request. */
41 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
42 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
44 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
45 dma_unmap_sg(&ha->pdev->dev,
46 bsg_job->request_payload.sg_list,
47 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
49 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
50 dma_unmap_sg(&ha->pdev->dev,
51 bsg_job->reply_payload.sg_list,
52 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
/* Non-FXIOCB path (else arm not visible in this extraction): unconditional unmap. */
54 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
55 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
57 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
58 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
/* These SRB types used a temporary/dummy fcport — presumably freed here
 * (freeing line not visible in this extraction — TODO confirm). */
61 if (sp->type == SRB_CT_CMD ||
62 sp->type == SRB_FXIOCB_BCMD ||
63 sp->type == SRB_ELS_CMD_HST)
65 qla2x00_rel_sp(vha, sp);
/*
 * qla24xx_fcp_prio_cfg_valid() - validate an FCP priority configuration
 * buffer: check the signature bytes, then count entries flagged
 * FCP_PRIO_ENTRY_TAG_VALID. Data is valid only if at least one entry is.
 */
69 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
70 struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
72 int i, ret, num_valid;
74 struct qla_fcp_prio_entry *pri_entry;
75 uint32_t *bcode_val_ptr, bcode_val;
/* Read the first 32 bits of the buffer as the signature word. */
79 bcode = (uint8_t *)pri_cfg;
80 bcode_val_ptr = (uint32_t *)pri_cfg;
81 bcode_val = (uint32_t)(*bcode_val_ptr);
/* All-ones signature means erased flash — no config present. */
83 if (bcode_val == 0xFFFFFFFF) {
84 /* No FCP Priority config data in flash */
85 ql_dbg(ql_dbg_user, vha, 0x7051,
86 "No FCP Priority config data.\n");
/* Signature must begin "HQO" (fourth byte check not visible here). */
90 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
92 /* Invalid FCP priority data header */
93 ql_dbg(ql_dbg_user, vha, 0x7052,
94 "Invalid FCP Priority data header. bcode=0x%x.\n",
/* Count entries carrying the TAG_VALID flag. */
101 pri_entry = &pri_cfg->entry[0];
102 for (i = 0; i < pri_cfg->num_entries; i++) {
103 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
108 if (num_valid == 0) {
109 /* No valid FCP priority data entries */
110 ql_dbg(ql_dbg_user, vha, 0x7053,
111 "No valid FCP Priority data entries.\n");
114 /* FCP priority data is valid */
115 ql_dbg(ql_dbg_user, vha, 0x7054,
116 "Valid FCP priority data. num entries = %d.\n",
/*
 * qla24xx_proc_fcp_prio_cfg_cmd() - vendor BSG handler for FCP priority
 * configuration: disable/enable the feature, return the current config to
 * userspace, or accept and validate a new config buffer.
 * Only supported on ISP24xx/25xx/P3P-type hardware.
 */
124 qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
126 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
127 struct fc_bsg_request *bsg_request = bsg_job->request;
128 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
129 scsi_qla_host_t *vha = shost_priv(host);
130 struct qla_hw_data *ha = vha->hw;
/* Reject unsupported hardware up front. */
135 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
137 goto exit_fcp_prio_cfg;
140 /* Get the sub command */
141 oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
143 /* Only set config is allowed if config memory is not allocated */
144 if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
146 goto exit_fcp_prio_cfg;
/* Sub-command dispatch (switch statement line not visible here). */
149 case QLFC_FCP_PRIO_DISABLE:
150 if (ha->flags.fcp_prio_enabled) {
151 ha->flags.fcp_prio_enabled = 0;
152 ha->fcp_prio_cfg->attributes &=
153 ~FCP_PRIO_ATTR_ENABLE;
/* Push the updated priority state to all logged-in ports. */
154 qla24xx_update_all_fcp_prio(vha);
155 bsg_reply->result = DID_OK;
158 bsg_reply->result = (DID_ERROR << 16);
159 goto exit_fcp_prio_cfg;
163 case QLFC_FCP_PRIO_ENABLE:
164 if (!ha->flags.fcp_prio_enabled) {
165 if (ha->fcp_prio_cfg) {
166 ha->flags.fcp_prio_enabled = 1;
167 ha->fcp_prio_cfg->attributes |=
168 FCP_PRIO_ATTR_ENABLE;
169 qla24xx_update_all_fcp_prio(vha);
170 bsg_reply->result = DID_OK;
173 bsg_reply->result = (DID_ERROR << 16);
174 goto exit_fcp_prio_cfg;
/* Copy the in-memory config back to the caller's reply payload. */
179 case QLFC_FCP_PRIO_GET_CONFIG:
180 len = bsg_job->reply_payload.payload_len;
181 if (!len || len > FCP_PRIO_CFG_SIZE) {
183 bsg_reply->result = (DID_ERROR << 16);
184 goto exit_fcp_prio_cfg;
187 bsg_reply->result = DID_OK;
188 bsg_reply->reply_payload_rcv_len =
190 bsg_job->reply_payload.sg_list,
191 bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
/* Accept a new config buffer from userspace, then validate it. */
196 case QLFC_FCP_PRIO_SET_CONFIG:
197 len = bsg_job->request_payload.payload_len;
198 if (!len || len > FCP_PRIO_CFG_SIZE) {
199 bsg_reply->result = (DID_ERROR << 16);
201 goto exit_fcp_prio_cfg;
204 if (!ha->fcp_prio_cfg) {
205 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
206 if (!ha->fcp_prio_cfg) {
207 ql_log(ql_log_warn, vha, 0x7050,
208 "Unable to allocate memory for fcp prio "
209 "config data (%x).\n", FCP_PRIO_CFG_SIZE);
210 bsg_reply->result = (DID_ERROR << 16);
212 goto exit_fcp_prio_cfg;
216 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
217 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
218 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
221 /* validate fcp priority data */
223 if (!qla24xx_fcp_prio_cfg_valid(vha,
224 (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
225 bsg_reply->result = (DID_ERROR << 16);
227 /* If the buffer was invalid then the
228 * fcp_prio_cfg data is of no use
230 vfree(ha->fcp_prio_cfg);
231 ha->fcp_prio_cfg = NULL;
232 goto exit_fcp_prio_cfg;
/* Enable the feature only if the new config itself requests it. */
235 ha->flags.fcp_prio_enabled = 0;
236 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
237 ha->flags.fcp_prio_enabled = 1;
238 qla24xx_update_all_fcp_prio(vha);
239 bsg_reply->result = DID_OK;
/* Common exit: complete the bsg job with whatever result was set. */
247 bsg_job_done(bsg_job, bsg_reply->result,
248 bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_process_els() - BSG handler for ELS pass-through, either via an
 * rport (FC_BSG_RPT_ELS, requires fabric login) or host-based with no
 * login (a dummy fcport is allocated). Maps the request/reply SG lists,
 * builds an SRB and fires it via qla2x00_start_sp().
 */
253 qla2x00_process_els(struct bsg_job *bsg_job)
255 struct fc_bsg_request *bsg_request = bsg_job->request;
256 struct fc_rport *rport;
257 fc_port_t *fcport = NULL;
258 struct Scsi_Host *host;
259 scsi_qla_host_t *vha;
260 struct qla_hw_data *ha;
263 int req_sg_cnt, rsp_sg_cnt;
264 int rval = (DRIVER_ERROR << 16);
265 uint16_t nextlid = 0;
/* Resolve host/vha either from the rport or directly from the bsg job. */
267 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
268 rport = fc_bsg_to_rport(bsg_job);
269 fcport = *(fc_port_t **) rport->dd_data;
270 host = rport_to_shost(rport);
271 vha = shost_priv(host);
273 type = "FC_BSG_RPT_ELS";
275 host = fc_bsg_to_shost(bsg_job);
276 vha = shost_priv(host);
278 type = "FC_BSG_HST_ELS_NOLOGIN";
281 if (!vha->flags.online) {
282 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
287 /* pass through is supported only for ISP 4Gb or higher */
288 if (!IS_FWI2_CAPABLE(ha)) {
289 ql_dbg(ql_dbg_user, vha, 0x7001,
290 "ELS passthru not supported for ISP23xx based adapters.\n");
295 /* Multiple SG's are not supported for ELS requests */
296 if (bsg_job->request_payload.sg_cnt > 1 ||
297 bsg_job->reply_payload.sg_cnt > 1) {
298 ql_dbg(ql_dbg_user, vha, 0x7002,
/* NOTE(review): "suppored" typo below is in a runtime log string,
 * deliberately left untouched here. */
299 "Multiple SG's are not suppored for ELS requests, "
300 "request_sg_cnt=%x reply_sg_cnt=%x.\n",
301 bsg_job->request_payload.sg_cnt,
302 bsg_job->reply_payload.sg_cnt);
307 /* ELS request for rport */
308 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
309 /* make sure the rport is logged in,
310 * if not perform fabric login
312 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
313 ql_dbg(ql_dbg_user, vha, 0x7003,
314 "Failed to login port %06X for ELS passthru.\n",
320 /* Allocate a dummy fcport structure, since functions
321 * preparing the IOCB and mailbox command retrieves port
322 * specific information from fcport structure. For Host based
323 * ELS commands there will be no fcport structure allocated
325 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
331 /* Initialize all required fields of fcport */
/* Destination port ID comes from the bsg request's h_els data. */
333 fcport->d_id.b.al_pa =
334 bsg_request->rqst_data.h_els.port_id[0];
335 fcport->d_id.b.area =
336 bsg_request->rqst_data.h_els.port_id[1];
337 fcport->d_id.b.domain =
338 bsg_request->rqst_data.h_els.port_id[2];
/* al_pa 0xFD is the fabric controller well-known address. */
340 (fcport->d_id.b.al_pa == 0xFD) ?
341 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
/* DMA-map request and reply scatter-gather lists. */
345 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
346 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
349 goto done_free_fcport;
352 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
353 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
356 goto done_free_fcport;
/* Driver does not support partial mappings — bail if counts changed. */
359 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
360 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
361 ql_log(ql_log_warn, vha, 0x7008,
362 "dma mapping resulted in different sg counts, "
363 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
364 "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
365 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
370 /* Alloc SRB structure */
371 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
/* Type and name differ for rport vs host-based ELS. */
378 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
379 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
381 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
382 "bsg_els_rpt" : "bsg_els_hst");
383 sp->u.bsg_job = bsg_job;
384 sp->free = qla2x00_bsg_sp_free;
385 sp->done = qla2x00_bsg_job_done;
387 ql_dbg(ql_dbg_user, vha, 0x700a,
388 "bsg rqst type: %s els type: %x - loop-id=%x "
389 "portid=%-2x%02x%02x.\n", type,
390 bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
391 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
/* Submit the SRB; on failure unwind DMA mappings and the fcport. */
393 rval = qla2x00_start_sp(sp);
394 if (rval != QLA_SUCCESS) {
395 ql_log(ql_log_warn, vha, 0x700e,
396 "qla2x00_start_sp failed = %d\n", rval);
397 qla2x00_rel_sp(vha, sp);
404 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
405 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
406 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
407 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
408 goto done_free_fcport;
/* Dummy fcport is only freed for the host-based (non-rport) path. */
411 if (bsg_request->msgcode == FC_BSG_RPT_ELS)
/*
 * qla24xx_calc_ct_iocbs() - compute how many IOCBs a CT command needs for
 * the given number of data segment descriptors; each continuation IOCB
 * holds five additional descriptors beyond the first two.
 * (Base-case lines are missing from this extraction.)
 */
417 static inline uint16_t
418 qla24xx_calc_ct_iocbs(uint16_t dsds)
424 iocbs += (dsds - 2) / 5;
/*
 * qla2x00_process_ct() - BSG handler for CT (Common Transport) pass-through.
 * Maps the payload SG lists, picks the target loop id from the CT preamble
 * (SNS vs management server), allocates a dummy fcport plus an SRB, and
 * submits the command via qla2x00_start_sp().
 */
432 qla2x00_process_ct(struct bsg_job *bsg_job)
435 struct fc_bsg_request *bsg_request = bsg_job->request;
436 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
437 scsi_qla_host_t *vha = shost_priv(host);
438 struct qla_hw_data *ha = vha->hw;
439 int rval = (DRIVER_ERROR << 16);
440 int req_sg_cnt, rsp_sg_cnt;
442 struct fc_port *fcport;
443 char *type = "FC_BSG_HST_CT";
/* Map request and reply scatter-gather lists for DMA. */
446 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
447 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
449 ql_log(ql_log_warn, vha, 0x700f,
450 "dma_map_sg return %d for request\n", req_sg_cnt);
455 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
456 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
458 ql_log(ql_log_warn, vha, 0x7010,
459 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
/* Partial mappings are unsupported — counts must match exactly. */
464 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
465 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
466 ql_log(ql_log_warn, vha, 0x7011,
467 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
468 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
469 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
474 if (!vha->flags.online) {
475 ql_log(ql_log_warn, vha, 0x7012,
476 "Host is not online.\n");
/* Select destination loop id from the CT preamble's routing byte. */
482 (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
486 loop_id = cpu_to_le16(NPH_SNS);
489 loop_id = vha->mgmt_svr_loop_id;
492 ql_dbg(ql_dbg_user, vha, 0x7013,
493 "Unknown loop id: %x.\n", loop_id);
498 /* Allocate a dummy fcport structure, since functions preparing the
499 * IOCB and mailbox command retrieves port specific information
500 * from fcport structure. For Host based ELS commands there will be
501 * no fcport structure allocated
503 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
505 ql_log(ql_log_warn, vha, 0x7014,
506 "Failed to allocate fcport.\n");
511 /* Initialize all required fields of fcport */
513 fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
514 fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
515 fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
516 fcport->loop_id = loop_id;
518 /* Alloc SRB structure */
519 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
521 ql_log(ql_log_warn, vha, 0x7015,
522 "qla2x00_get_sp failed.\n");
524 goto done_free_fcport;
527 sp->type = SRB_CT_CMD;
/* Size the IOCB chain for the combined SG descriptor count. */
529 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
530 sp->u.bsg_job = bsg_job;
531 sp->free = qla2x00_bsg_sp_free;
532 sp->done = qla2x00_bsg_job_done;
534 ql_dbg(ql_dbg_user, vha, 0x7016,
535 "bsg rqst type: %s else type: %x - "
536 "loop-id=%x portid=%02x%02x%02x.\n", type,
537 (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
538 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
539 fcport->d_id.b.al_pa);
541 rval = qla2x00_start_sp(sp);
542 if (rval != QLA_SUCCESS) {
543 ql_log(ql_log_warn, vha, 0x7017,
544 "qla2x00_start_sp failed=%d.\n", rval);
545 qla2x00_rel_sp(vha, sp);
547 goto done_free_fcport;
/* Error path: unmap both SG lists before returning. */
554 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
555 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
556 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
557 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
562 /* Disable loopback mode */
/*
 * qla81xx_reset_loopback_mode() - clear any internal/external loopback
 * bits from the current port config and reprogram it, then (optionally,
 * per the wait flags) block for the DCBX-complete and link-port-up events.
 * Only meaningful on ISP81xx/8031/8044 hardware.
 */
564 qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
569 uint16_t new_config[4];
570 struct qla_hw_data *ha = vha->hw;
572 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
573 goto done_reset_internal;
575 memset(new_config, 0 , sizeof(new_config));
/* Only act if a loopback mode is currently enabled in config[0]. */
576 if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
577 ENABLE_INTERNAL_LOOPBACK ||
578 (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
579 ENABLE_EXTERNAL_LOOPBACK) {
580 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
581 ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
582 (new_config[0] & INTERNAL_LOOPBACK_MASK));
583 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
/* Arm the interrupt path to signal the completions we may wait on. */
585 ha->notify_dcbx_comp = wait;
586 ha->notify_lb_portup_comp = wait2;
588 ret = qla81xx_set_port_config(vha, new_config);
589 if (ret != QLA_SUCCESS) {
590 ql_log(ql_log_warn, vha, 0x7025,
591 "Set port config failed.\n");
592 ha->notify_dcbx_comp = 0;
593 ha->notify_lb_portup_comp = 0;
595 goto done_reset_internal;
598 /* Wait for DCBX complete event */
599 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
600 (DCBX_COMP_TIMEOUT * HZ))) {
601 ql_dbg(ql_dbg_user, vha, 0x7026,
602 "DCBX completion not received.\n");
603 ha->notify_dcbx_comp = 0;
604 ha->notify_lb_portup_comp = 0;
606 goto done_reset_internal;
608 ql_dbg(ql_dbg_user, vha, 0x7027,
609 "DCBX completion received.\n");
/* Optionally wait for the loopback port-up event as well. */
612 !wait_for_completion_timeout(&ha->lb_portup_comp,
613 (LB_PORTUP_COMP_TIMEOUT * HZ))) {
614 ql_dbg(ql_dbg_user, vha, 0x70c5,
615 "Port up completion not received.\n");
616 ha->notify_lb_portup_comp = 0;
618 goto done_reset_internal;
620 ql_dbg(ql_dbg_user, vha, 0x70c6,
621 "Port up completion received.\n");
/* Disarm notifications before returning. */
623 ha->notify_dcbx_comp = 0;
624 ha->notify_lb_portup_comp = 0;
631 * Set the port configuration to enable the internal or external loopback
632 * depending on the loopback mode.
/*
 * qla81xx_set_loopback_mode() - program the requested loopback mode into
 * new_config and apply it, then wait for the DCBX-complete event (the
 * firmware may extend the timeout via idc_extend_tmo). On timeout the
 * loopback change is reverted; if that also fails, an FCoE firmware dump
 * is taken and an ISP abort is scheduled.
 */
635 qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
636 uint16_t *new_config, uint16_t mode)
640 unsigned long rem_tmo = 0, current_tmo = 0;
641 struct qla_hw_data *ha = vha->hw;
643 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
644 goto done_set_internal;
/* Encode the requested loopback mode into the first config word. */
646 if (mode == INTERNAL_LOOPBACK)
647 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
648 else if (mode == EXTERNAL_LOOPBACK)
649 new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
650 ql_dbg(ql_dbg_user, vha, 0x70be,
651 "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));
653 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
/* Arm DCBX-complete notification before applying the config. */
655 ha->notify_dcbx_comp = 1;
656 ret = qla81xx_set_port_config(vha, new_config);
657 if (ret != QLA_SUCCESS) {
658 ql_log(ql_log_warn, vha, 0x7021,
659 "set port config failed.\n");
660 ha->notify_dcbx_comp = 0;
662 goto done_set_internal;
665 /* Wait for DCBX complete event */
/* Loop re-waits with an extended timeout when firmware requests it
 * via idc_extend_tmo (loop framing lines not visible here). */
666 current_tmo = DCBX_COMP_TIMEOUT * HZ;
668 rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
670 if (!ha->idc_extend_tmo || rem_tmo) {
671 ha->idc_extend_tmo = 0;
674 current_tmo = ha->idc_extend_tmo * HZ;
675 ha->idc_extend_tmo = 0;
/* Timeout path: revert the loopback change. */
679 ql_dbg(ql_dbg_user, vha, 0x7022,
680 "DCBX completion not received.\n");
681 ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
683 * If the reset of the loopback mode doesn't work take a FCoE
684 * dump and reset the chip.
687 ha->isp_ops->fw_dump(vha, 0);
688 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
692 if (ha->flags.idc_compl_status) {
693 ql_dbg(ql_dbg_user, vha, 0x70c3,
694 "Bad status in IDC Completion AEN\n");
696 ha->flags.idc_compl_status = 0;
698 ql_dbg(ql_dbg_user, vha, 0x7023,
699 "DCBX completion received.\n");
702 ha->notify_dcbx_comp = 0;
703 ha->idc_extend_tmo = 0;
/*
 * qla2x00_process_loopback() - vendor BSG handler for loopback/echo
 * diagnostics. Copies the request payload into a coherent DMA buffer,
 * chooses between the ECHO test (fabric/FCoE external case) and the
 * loopback test (with port-config set/reset around it on 81xx-class
 * hardware), then returns the received data plus the mailbox status
 * words appended after the fc_bsg_reply.
 */
710 qla2x00_process_loopback(struct bsg_job *bsg_job)
712 struct fc_bsg_request *bsg_request = bsg_job->request;
713 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
714 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
715 scsi_qla_host_t *vha = shost_priv(host);
716 struct qla_hw_data *ha = vha->hw;
718 uint8_t command_sent;
720 struct msg_echo_lb elreq;
721 uint16_t response[MAILBOX_REGISTER_COUNT];
722 uint16_t config[4], new_config[4];
724 uint8_t *req_data = NULL;
725 dma_addr_t req_data_dma;
726 uint32_t req_data_len;
727 uint8_t *rsp_data = NULL;
728 dma_addr_t rsp_data_dma;
729 uint32_t rsp_data_len;
731 if (!vha->flags.online) {
732 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
/* Map the request and reply SG lists for DMA. */
736 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
737 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
740 if (!elreq.req_sg_cnt) {
741 ql_log(ql_log_warn, vha, 0x701a,
742 "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
746 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
747 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
750 if (!elreq.rsp_sg_cnt) {
751 ql_log(ql_log_warn, vha, 0x701b,
752 "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
754 goto done_unmap_req_sg;
757 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
758 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
759 ql_log(ql_log_warn, vha, 0x701c,
760 "dma mapping resulted in different sg counts, "
761 "request_sg_cnt: %x dma_request_sg_cnt: %x "
762 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
763 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
764 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
/* Allocate coherent bounce buffers sized by the request payload. */
768 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
769 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
770 &req_data_dma, GFP_KERNEL);
772 ql_log(ql_log_warn, vha, 0x701d,
773 "dma alloc failed for req_data.\n");
778 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
779 &rsp_data_dma, GFP_KERNEL);
781 ql_log(ql_log_warn, vha, 0x7004,
782 "dma alloc failed for rsp_data.\n");
784 goto done_free_dma_req;
787 /* Copy the request buffer in req_data now */
788 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
789 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
791 elreq.send_dma = req_data_dma;
792 elreq.rcv_dma = rsp_data_dma;
793 elreq.transfer_size = req_data_len;
795 elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
796 elreq.iteration_count =
797 bsg_request->rqst_data.h_vendor.vendor_cmd[2];
/* ECHO path: fabric topology, or FCoE hardware carrying an ELS frame
 * of exactly MAX_ELS_FRAME_PAYLOAD, with external loopback requested. */
799 if (atomic_read(&vha->loop_state) == LOOP_READY &&
800 (ha->current_topology == ISP_CFG_F ||
801 ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
802 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
803 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
804 elreq.options == EXTERNAL_LOOPBACK) {
805 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
806 ql_dbg(ql_dbg_user, vha, 0x701e,
807 "BSG request type: %s.\n", type);
808 command_sent = INT_DEF_LB_ECHO_CMD;
809 rval = qla2x00_echo_test(vha, &elreq, response);
/* 81xx-class loopback: adjust port config around the test. */
811 if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
812 memset(config, 0, sizeof(config));
813 memset(new_config, 0, sizeof(new_config));
815 if (qla81xx_get_port_config(vha, config)) {
816 ql_log(ql_log_warn, vha, 0x701f,
817 "Get port config failed.\n");
819 goto done_free_dma_rsp;
/* Refuse to start if a loopback mode is already active. */
822 if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
823 ql_dbg(ql_dbg_user, vha, 0x70c4,
824 "Loopback operation already in "
827 goto done_free_dma_rsp;
830 ql_dbg(ql_dbg_user, vha, 0x70c0,
831 "elreq.options=%04x\n", elreq.options);
833 if (elreq.options == EXTERNAL_LOOPBACK)
834 if (IS_QLA8031(ha) || IS_QLA8044(ha))
835 rval = qla81xx_set_loopback_mode(vha,
836 config, new_config, elreq.options);
838 rval = qla81xx_reset_loopback_mode(vha,
841 rval = qla81xx_set_loopback_mode(vha, config,
842 new_config, elreq.options);
846 goto done_free_dma_rsp;
849 type = "FC_BSG_HST_VENDOR_LOOPBACK";
850 ql_dbg(ql_dbg_user, vha, 0x7028,
851 "BSG request type: %s.\n", type);
853 command_sent = INT_DEF_LB_LOOPBACK_CMD;
854 rval = qla2x00_loopback_test(vha, &elreq, response);
/* Loopback-induced link reset: abort the ISP and, on 81xx, also
 * restart the MPI firmware. */
856 if (response[0] == MBS_COMMAND_ERROR &&
857 response[1] == MBS_LB_RESET) {
858 ql_log(ql_log_warn, vha, 0x7029,
859 "MBX command error, Aborting ISP.\n");
860 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
861 qla2xxx_wake_dpc(vha);
862 qla2x00_wait_for_chip_reset(vha);
863 /* Also reset the MPI */
864 if (IS_QLA81XX(ha)) {
865 if (qla81xx_restart_mpi_firmware(vha) !=
867 ql_log(ql_log_warn, vha, 0x702a,
868 "MPI reset failed.\n");
873 goto done_free_dma_rsp;
879 /* Revert back to original port config
880 * Also clear internal loopback
882 ret = qla81xx_reset_loopback_mode(vha,
886 * If the reset of the loopback mode
887 * doesn't work take FCoE dump and then
890 ha->isp_ops->fw_dump(vha, 0);
891 set_bit(ISP_ABORT_NEEDED,
/* Non-81xx hardware: plain loopback test, no port-config dance. */
898 type = "FC_BSG_HST_VENDOR_LOOPBACK";
899 ql_dbg(ql_dbg_user, vha, 0x702b,
900 "BSG request type: %s.\n", type);
901 command_sent = INT_DEF_LB_LOOPBACK_CMD;
902 rval = qla2x00_loopback_test(vha, &elreq, response);
907 ql_log(ql_log_warn, vha, 0x702c,
908 "Vendor request %s failed.\n", type);
911 bsg_reply->result = (DID_ERROR << 16);
912 bsg_reply->reply_payload_rcv_len = 0;
914 ql_dbg(ql_dbg_user, vha, 0x702d,
915 "Vendor request %s completed.\n", type);
916 bsg_reply->result = (DID_OK << 16);
/* Copy received loopback data back into the reply SG list. */
917 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
918 bsg_job->reply_payload.sg_cnt, rsp_data,
/* Append mailbox status words plus the command id after the reply. */
922 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
923 sizeof(response) + sizeof(uint8_t);
924 fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
925 sizeof(struct fc_bsg_reply);
926 memcpy(fw_sts_ptr, response, sizeof(response));
927 fw_sts_ptr += sizeof(response);
928 *fw_sts_ptr = command_sent;
/* Unwind: free coherent buffers, then unmap both SG lists. */
931 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
932 rsp_data, rsp_data_dma);
934 dma_free_coherent(&ha->pdev->dev, req_data_len,
935 req_data, req_data_dma);
937 dma_unmap_sg(&ha->pdev->dev,
938 bsg_job->reply_payload.sg_list,
939 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
941 dma_unmap_sg(&ha->pdev->dev,
942 bsg_job->request_payload.sg_list,
943 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
945 bsg_job_done(bsg_job, bsg_reply->result,
946 bsg_reply->reply_payload_rcv_len);
/*
 * qla84xx_reset() - vendor BSG handler that resets the ISP84xx chip;
 * vendor_cmd[1] selects a diagnostic-firmware reset (A84_ISSUE_RESET_DIAG_FW).
 * Only valid on ISP84xx hardware.
 */
951 qla84xx_reset(struct bsg_job *bsg_job)
953 struct fc_bsg_request *bsg_request = bsg_job->request;
954 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
955 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
956 scsi_qla_host_t *vha = shost_priv(host);
957 struct qla_hw_data *ha = vha->hw;
961 if (!IS_QLA84XX(ha)) {
962 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
966 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
968 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
971 ql_log(ql_log_warn, vha, 0x7030,
972 "Vendor request 84xx reset failed.\n");
973 rval = (DID_ERROR << 16);
976 ql_dbg(ql_dbg_user, vha, 0x7031,
977 "Vendor request 84xx reset completed.\n");
978 bsg_reply->result = DID_OK;
979 bsg_job_done(bsg_job, bsg_reply->result,
980 bsg_reply->reply_payload_rcv_len);
/*
 * qla84xx_updatefw() - vendor BSG handler that downloads an ISP84xx
 * firmware image: copies the payload into a coherent DMA buffer, builds a
 * VERIFY_CHIP IOCB describing the image, and issues it with a 120 s
 * timeout. vendor_cmd[1] may request a diagnostic-firmware update.
 */
987 qla84xx_updatefw(struct bsg_job *bsg_job)
989 struct fc_bsg_request *bsg_request = bsg_job->request;
990 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
991 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
992 scsi_qla_host_t *vha = shost_priv(host);
993 struct qla_hw_data *ha = vha->hw;
994 struct verify_chip_entry_84xx *mn = NULL;
995 dma_addr_t mn_dma, fw_dma;
1004 if (!IS_QLA84XX(ha)) {
1005 ql_dbg(ql_dbg_user, vha, 0x7032,
1006 "Not 84xx, exiting.\n");
/* Map the request payload carrying the firmware image. */
1010 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1011 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1013 ql_log(ql_log_warn, vha, 0x7033,
1014 "dma_map_sg returned %d for request.\n", sg_cnt);
1018 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1019 ql_log(ql_log_warn, vha, 0x7034,
1020 "DMA mapping resulted in different sg counts, "
1021 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1022 bsg_job->request_payload.sg_cnt, sg_cnt);
/* Stage the whole image in a single coherent buffer. */
1027 data_len = bsg_job->request_payload.payload_len;
1028 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
1029 &fw_dma, GFP_KERNEL);
1031 ql_log(ql_log_warn, vha, 0x7035,
1032 "DMA alloc failed for fw_buf.\n");
1037 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1038 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
/* IOCB comes from the small coherent pool. */
1040 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1042 ql_log(ql_log_warn, vha, 0x7036,
1043 "DMA alloc failed for fw buffer.\n");
1045 goto done_free_fw_buf;
1048 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
/* Firmware version is the third 32-bit word of the image. */
1049 fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
1051 memset(mn, 0, sizeof(struct access_chip_84xx));
1052 mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
1053 mn->entry_count = 1;
1055 options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
1056 if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
1057 options |= VCO_DIAG_FW;
/* Fill the IOCB with image size and the single DMA segment. */
1059 mn->options = cpu_to_le16(options);
1060 mn->fw_ver = cpu_to_le32(fw_ver);
1061 mn->fw_size = cpu_to_le32(data_len);
1062 mn->fw_seq_size = cpu_to_le32(data_len);
1063 mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
1064 mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
1065 mn->dseg_length = cpu_to_le32(data_len);
1066 mn->data_seg_cnt = cpu_to_le16(1);
/* 120-second timeout for the firmware download. */
1068 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
1071 ql_log(ql_log_warn, vha, 0x7037,
1072 "Vendor request 84xx updatefw failed.\n");
1074 rval = (DID_ERROR << 16);
1076 ql_dbg(ql_dbg_user, vha, 0x7038,
1077 "Vendor request 84xx updatefw completed.\n");
1079 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1080 bsg_reply->result = DID_OK;
/* Unwind in reverse order of acquisition. */
1083 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1086 dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
1089 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1090 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1093 bsg_job_done(bsg_job, bsg_reply->result,
1094 bsg_reply->reply_payload_rcv_len);
/*
 * qla84xx_mgmt_cmd() - vendor BSG handler for ISP84xx management commands
 * (read/write memory, get info, change config). Builds an ACCESS_CHIP
 * IOCB, staging payload data through a coherent bounce buffer in the
 * direction the sub-command requires, and copies results back for the
 * read-type commands.
 */
1099 qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
1101 struct fc_bsg_request *bsg_request = bsg_job->request;
1102 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1103 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1104 scsi_qla_host_t *vha = shost_priv(host);
1105 struct qla_hw_data *ha = vha->hw;
1106 struct access_chip_84xx *mn = NULL;
1107 dma_addr_t mn_dma, mgmt_dma;
1108 void *mgmt_b = NULL;
1110 struct qla_bsg_a84_mgmt *ql84_mgmt;
1112 uint32_t data_len = 0;
/* DMA_NONE until a direction is chosen — drives cleanup at the end. */
1113 uint32_t dma_direction = DMA_NONE;
1115 if (!IS_QLA84XX(ha)) {
1116 ql_log(ql_log_warn, vha, 0x703a,
1117 "Not 84xx, exiting.\n");
1121 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1123 ql_log(ql_log_warn, vha, 0x703c,
1124 "DMA alloc failed for fw buffer.\n");
1128 memset(mn, 0, sizeof(struct access_chip_84xx));
1129 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1130 mn->entry_count = 1;
/* Management request immediately follows the fc_bsg_request header. */
1131 ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
1132 switch (ql84_mgmt->mgmt.cmd) {
/* Device-to-host sub-commands: stage via the reply payload. */
1133 case QLA84_MGMT_READ_MEM:
1134 case QLA84_MGMT_GET_INFO:
1135 sg_cnt = dma_map_sg(&ha->pdev->dev,
1136 bsg_job->reply_payload.sg_list,
1137 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1139 ql_log(ql_log_warn, vha, 0x703d,
1140 "dma_map_sg returned %d for reply.\n", sg_cnt);
1145 dma_direction = DMA_FROM_DEVICE;
1147 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1148 ql_log(ql_log_warn, vha, 0x703e,
1149 "DMA mapping resulted in different sg counts, "
1150 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1151 bsg_job->reply_payload.sg_cnt, sg_cnt);
1156 data_len = bsg_job->reply_payload.payload_len;
1158 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1159 &mgmt_dma, GFP_KERNEL);
1161 ql_log(ql_log_warn, vha, 0x703f,
1162 "DMA alloc failed for mgmt_b.\n");
1167 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1168 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1171 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1173 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1174 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1176 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1180 ql84_mgmt->mgmt.mgmtp.u.info.context);
/* Host-to-device sub-command: stage via the request payload. */
1184 case QLA84_MGMT_WRITE_MEM:
1185 sg_cnt = dma_map_sg(&ha->pdev->dev,
1186 bsg_job->request_payload.sg_list,
1187 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1190 ql_log(ql_log_warn, vha, 0x7040,
1191 "dma_map_sg returned %d.\n", sg_cnt);
1196 dma_direction = DMA_TO_DEVICE;
1198 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1199 ql_log(ql_log_warn, vha, 0x7041,
1200 "DMA mapping resulted in different sg counts, "
1201 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1202 bsg_job->request_payload.sg_cnt, sg_cnt);
1207 data_len = bsg_job->request_payload.payload_len;
1208 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1209 &mgmt_dma, GFP_KERNEL);
1211 ql_log(ql_log_warn, vha, 0x7042,
1212 "DMA alloc failed for mgmt_b.\n");
1217 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1218 bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1220 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1222 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
/* Config change carries no data payload — parameters only. */
1225 case QLA84_MGMT_CHNG_CONFIG:
1226 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1228 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1231 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1234 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
/* All data-bearing sub-commands share the single-segment descriptor. */
1242 if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1243 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1244 mn->dseg_count = cpu_to_le16(1);
1245 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
1246 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
1247 mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
1250 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1253 ql_log(ql_log_warn, vha, 0x7043,
1254 "Vendor request 84xx mgmt failed.\n");
1256 rval = (DID_ERROR << 16);
1259 ql_dbg(ql_dbg_user, vha, 0x7044,
1260 "Vendor request 84xx mgmt completed.\n");
1262 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1263 bsg_reply->result = DID_OK;
/* Read-type commands: copy the bounce buffer back to the caller. */
1265 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1266 (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1267 bsg_reply->reply_payload_rcv_len =
1268 bsg_job->reply_payload.payload_len;
1270 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1271 bsg_job->reply_payload.sg_cnt, mgmt_b,
/* Cleanup: free bounce buffer, unmap per recorded direction, free IOCB. */
1278 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1280 if (dma_direction == DMA_TO_DEVICE)
1281 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1282 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1283 else if (dma_direction == DMA_FROM_DEVICE)
1284 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1285 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1288 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1291 bsg_job_done(bsg_job, bsg_reply->result,
1292 bsg_reply->reply_payload_rcv_len);
/*
 * qla24xx_iidma() - vendor BSG handler to get or set a target port's iiDMA
 * speed. Looks the port up by WWPN among the vha's fcports, requires it to
 * be online and logged in, then issues the get/set mailbox command per
 * port_param->mode. On a get, the speed is returned after the reply.
 */
1297 qla24xx_iidma(struct bsg_job *bsg_job)
1299 struct fc_bsg_request *bsg_request = bsg_job->request;
1300 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1301 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1302 scsi_qla_host_t *vha = shost_priv(host);
1304 struct qla_port_param *port_param = NULL;
1305 fc_port_t *fcport = NULL;
1307 uint16_t mb[MAILBOX_REGISTER_COUNT];
1308 uint8_t *rsp_ptr = NULL;
1310 if (!IS_IIDMA_CAPABLE(vha->hw)) {
1311 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
/* Port parameters immediately follow the fc_bsg_request header. */
1315 port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
1316 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1317 ql_log(ql_log_warn, vha, 0x7048,
1318 "Invalid destination type.\n");
/* Find the target fcport matching the requested WWPN. */
1322 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1323 if (fcport->port_type != FCT_TARGET)
1326 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1327 fcport->port_name, sizeof(fcport->port_name)))
1335 ql_log(ql_log_warn, vha, 0x7049,
1336 "Failed to find port.\n");
1340 if (atomic_read(&fcport->state) != FCS_ONLINE) {
1341 ql_log(ql_log_warn, vha, 0x704a,
1342 "Port is not online.\n");
1346 if (fcport->flags & FCF_LOGIN_NEEDED) {
1347 ql_log(ql_log_warn, vha, 0x704b,
1348 "Remote port not logged in flags = 0x%x.\n", fcport->flags);
/* mode != 0 sets the speed; mode == 0 queries it. */
1352 if (port_param->mode)
1353 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1354 port_param->speed, mb);
1356 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1357 &port_param->speed, mb);
1360 ql_log(ql_log_warn, vha, 0x704c,
1361 "iIDMA cmd failed for %8phN -- "
1362 "%04x %x %04x %04x.\n", fcport->port_name,
1363 rval, fcport->fp_speed, mb[0], mb[1]);
1364 rval = (DID_ERROR << 16);
/* Get path: append the (updated) port_param after the bsg reply. */
1366 if (!port_param->mode) {
1367 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1368 sizeof(struct qla_port_param);
1370 rsp_ptr = ((uint8_t *)bsg_reply) +
1371 sizeof(struct fc_bsg_reply);
1373 memcpy(rsp_ptr, port_param,
1374 sizeof(struct qla_port_param));
1377 bsg_reply->result = DID_OK;
1378 bsg_job_done(bsg_job, bsg_reply->result,
1379 bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_optrom_setup() - validate and stage an option-ROM (flash)
 * read or update request issued through BSG.
 *
 * Verifies the PCI channel is usable, the requested start offset lies
 * within the flash, and no other optrom access is in flight
 * (ha->optrom_state must be QLA_SWAITING).  On success it records the
 * region start/size, moves optrom_state to QLA_SWRITING (is_update) or
 * QLA_SREADING, and vmalloc()s a zeroed staging buffer into
 * ha->optrom_buffer.
 *
 * NOTE(review): the callers visible in this file take ha->optrom_mutex
 * before calling, so this helper appears to rely on that lock for the
 * optrom_state/optrom_buffer updates — confirm against full source.
 */
1386 qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
1389 struct fc_bsg_request *bsg_request = bsg_job->request;
1392 struct qla_hw_data *ha = vha->hw;
1394 if (unlikely(pci_channel_offline(ha->pdev)))
/* Flash offset to operate on comes from the vendor command words. */
1397 start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1398 if (start > ha->optrom_size) {
1399 ql_log(ql_log_warn, vha, 0x7055,
1400 "start %d > optrom_size %d.\n", start, ha->optrom_size);
/* Only one flash read/write may be staged at a time. */
1404 if (ha->optrom_state != QLA_SWAITING) {
1405 ql_log(ql_log_info, vha, 0x7056,
1406 "optrom_state %d.\n", ha->optrom_state);
1410 ha->optrom_region_start = start;
1411 ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
/*
 * Region validity checks: whole-flash on 2300-class parts, the boot or
 * firmware FLT regions (FLT stores addresses in 4-byte words), or any
 * offset on the newer ISP families listed below.
 */
1413 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1415 else if (start == (ha->flt_region_boot * 4) ||
1416 start == (ha->flt_region_fw * 4))
1418 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1419 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
1422 ql_log(ql_log_warn, vha, 0x7058,
1423 "Invalid start region 0x%x/0x%x.\n", start,
1424 bsg_job->request_payload.payload_len);
/* Clamp the region so start + size never runs past the flash end. */
1428 ha->optrom_region_size = start +
1429 bsg_job->request_payload.payload_len > ha->optrom_size ?
1430 ha->optrom_size - start :
1431 bsg_job->request_payload.payload_len;
1432 ha->optrom_state = QLA_SWRITING;
1434 ha->optrom_region_size = start +
1435 bsg_job->reply_payload.payload_len > ha->optrom_size ?
1436 ha->optrom_size - start :
1437 bsg_job->reply_payload.payload_len;
1438 ha->optrom_state = QLA_SREADING;
1441 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1442 if (!ha->optrom_buffer) {
1443 ql_log(ql_log_warn, vha, 0x7059,
1444 "Read: Unable to allocate memory for optrom retrieval "
1445 "(%x)\n", ha->optrom_region_size);
/* Allocation failed: release the staged state before bailing out. */
1447 ha->optrom_state = QLA_SWAITING;
1451 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
/*
 * qla2x00_read_optrom() - BSG vendor command (QL_VND_READ_FLASH): read
 * a region of the option ROM into the reply payload.
 *
 * Serializes against other flash accesses with ha->optrom_mutex, uses
 * qla2x00_optrom_setup() to validate the request and allocate the
 * staging buffer, reads the flash through ha->isp_ops->read_optrom(),
 * copies the data into the reply scatter/gather list, then frees the
 * buffer, resets optrom_state and completes the job inline.
 */
1456 qla2x00_read_optrom(struct bsg_job *bsg_job)
1458 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1459 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1460 scsi_qla_host_t *vha = shost_priv(host);
1461 struct qla_hw_data *ha = vha->hw;
/* Flash access is not allowed while the NIC core reset handler runs. */
1464 if (ha->flags.nic_core_reset_hdlr_active)
1467 mutex_lock(&ha->optrom_mutex);
1468 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1470 mutex_unlock(&ha->optrom_mutex);
1474 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1475 ha->optrom_region_start, ha->optrom_region_size);
1477 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1478 bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1479 ha->optrom_region_size);
1481 bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
1482 bsg_reply->result = DID_OK;
1483 vfree(ha->optrom_buffer);
1484 ha->optrom_buffer = NULL;
/* Release the single-access staging state for the next caller. */
1485 ha->optrom_state = QLA_SWAITING;
1486 mutex_unlock(&ha->optrom_mutex);
1487 bsg_job_done(bsg_job, bsg_reply->result,
1488 bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_update_optrom() - BSG vendor command (QL_VND_UPDATE_FLASH):
 * write a region of the option ROM from the request payload.
 *
 * Mirrors qla2x00_read_optrom(): stages the request under
 * ha->optrom_mutex via qla2x00_optrom_setup(is_update=1), copies the
 * request scatter/gather data into the staging buffer, writes it with
 * ha->isp_ops->write_optrom(), then tears down the staging state and
 * completes the job inline.
 */
1493 qla2x00_update_optrom(struct bsg_job *bsg_job)
1495 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1496 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1497 scsi_qla_host_t *vha = shost_priv(host);
1498 struct qla_hw_data *ha = vha->hw;
1501 mutex_lock(&ha->optrom_mutex);
1502 rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1504 mutex_unlock(&ha->optrom_mutex);
1508 /* Set the isp82xx_no_md_cap not to capture minidump */
1509 ha->flags.isp82xx_no_md_cap = 1;
1511 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1512 bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1513 ha->optrom_region_size);
1515 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1516 ha->optrom_region_start, ha->optrom_region_size);
1518 bsg_reply->result = DID_OK;
1519 vfree(ha->optrom_buffer);
1520 ha->optrom_buffer = NULL;
/* Release the single-access staging state for the next caller. */
1521 ha->optrom_state = QLA_SWAITING;
1522 mutex_unlock(&ha->optrom_mutex);
1523 bsg_job_done(bsg_job, bsg_reply->result,
1524 bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_update_fru_versions() - BSG vendor command
 * (QL_VND_SET_FRU_VERSION): write FRU image version fields through the
 * SFP write mailbox path.
 *
 * The request payload is a struct qla_image_version_list; each entry's
 * field_info is staged in an s_dma_pool buffer and written with
 * qla2x00_write_sfp() to the device/offset named in field_address.
 * vendor_rsp[0] reports EXT_STATUS_NO_MEMORY on allocation failure, a
 * mailbox error on write failure, or 0 on success.  The job is
 * completed inline with result DID_OK << 16; the vendor status carries
 * the real outcome.
 */
1529 qla2x00_update_fru_versions(struct bsg_job *bsg_job)
1531 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1532 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1533 scsi_qla_host_t *vha = shost_priv(host);
1534 struct qla_hw_data *ha = vha->hw;
/* Stack staging area for the request payload, bounded by DMA_POOL_SIZE. */
1536 uint8_t bsg[DMA_POOL_SIZE];
1537 struct qla_image_version_list *list = (void *)bsg;
1538 struct qla_image_version *image;
1541 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1543 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1544 EXT_STATUS_NO_MEMORY;
1548 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1549 bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1551 image = list->version;
1552 count = list->count;
/* Write one image's version field; presumably iterated per entry
 * (loop body elided in this view) — confirm against full source. */
1554 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1555 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1556 image->field_address.device, image->field_address.offset,
1557 sizeof(image->field_info), image->field_address.option);
1559 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1566 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1569 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1572 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1573 bsg_reply->result = DID_OK << 16;
1574 bsg_job_done(bsg_job, bsg_reply->result,
1575 bsg_reply->reply_payload_rcv_len);
1581 qla2x00_read_fru_status(struct bsg_job *bsg_job)
1583 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1584 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1585 scsi_qla_host_t *vha = shost_priv(host);
1586 struct qla_hw_data *ha = vha->hw;
1588 uint8_t bsg[DMA_POOL_SIZE];
1589 struct qla_status_reg *sr = (void *)bsg;
1591 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1593 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1594 EXT_STATUS_NO_MEMORY;
1598 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1599 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1601 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1602 sr->field_address.device, sr->field_address.offset,
1603 sizeof(sr->status_reg), sr->field_address.option);
1604 sr->status_reg = *sfp;
1607 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1612 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1613 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1615 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1618 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1621 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1622 bsg_reply->reply_payload_rcv_len = sizeof(*sr);
1623 bsg_reply->result = DID_OK << 16;
1624 bsg_job_done(bsg_job, bsg_reply->result,
1625 bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_write_fru_status() - BSG vendor command
 * (QL_VND_WRITE_FRU_STATUS): write a single FRU status register.
 *
 * Counterpart of qla2x00_read_fru_status(): copies a struct
 * qla_status_reg from the request payload, stages sr->status_reg in a
 * dma-pool bounce buffer and writes it via qla2x00_write_sfp().
 * vendor_rsp[0] carries the mailbox status (0 on success); the job
 * completes inline.
 */
1631 qla2x00_write_fru_status(struct bsg_job *bsg_job)
1633 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1634 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1635 scsi_qla_host_t *vha = shost_priv(host);
1636 struct qla_hw_data *ha = vha->hw;
/* Stack staging area for the request payload, bounded by DMA_POOL_SIZE. */
1638 uint8_t bsg[DMA_POOL_SIZE];
1639 struct qla_status_reg *sr = (void *)bsg;
1641 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1643 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1644 EXT_STATUS_NO_MEMORY;
1648 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1649 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
/* Single byte staged into the bounce buffer for the write. */
1651 *sfp = sr->status_reg;
1652 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1653 sr->field_address.device, sr->field_address.offset,
1654 sizeof(sr->status_reg), sr->field_address.option);
1657 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1662 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1665 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1668 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1669 bsg_reply->result = DID_OK << 16;
1670 bsg_job_done(bsg_job, bsg_reply->result,
1671 bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_write_i2c() - BSG vendor command (QL_VND_WRITE_I2C): write a
 * caller-supplied buffer to an I2C device behind the adapter.
 *
 * The request payload is a struct qla_i2c_access (device, offset,
 * length, option, data).  The data is staged in a dma-pool bounce
 * buffer and written via qla2x00_write_sfp().  vendor_rsp[0] carries
 * the mailbox status (0 on success); the job completes inline.
 *
 * NOTE(review): i2c->length comes from userspace and is used directly
 * as the memcpy length into the dma-pool buffer — no bounds check is
 * visible in this view; confirm validation exists upstream or in the
 * elided lines.
 */
1677 qla2x00_write_i2c(struct bsg_job *bsg_job)
1679 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1680 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1681 scsi_qla_host_t *vha = shost_priv(host);
1682 struct qla_hw_data *ha = vha->hw;
/* Stack staging area for the request payload, bounded by DMA_POOL_SIZE. */
1684 uint8_t bsg[DMA_POOL_SIZE];
1685 struct qla_i2c_access *i2c = (void *)bsg;
1687 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1689 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1690 EXT_STATUS_NO_MEMORY;
1694 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1695 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1697 memcpy(sfp, i2c->buffer, i2c->length);
1698 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1699 i2c->device, i2c->offset, i2c->length, i2c->option);
1702 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1707 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1710 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1713 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1714 bsg_reply->result = DID_OK << 16;
1715 bsg_job_done(bsg_job, bsg_reply->result,
1716 bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_read_i2c() - BSG vendor command (QL_VND_READ_I2C): read from
 * an I2C device behind the adapter into the reply payload.
 *
 * Counterpart of qla2x00_write_i2c(): the request payload is a struct
 * qla_i2c_access naming device/offset/length/option; the data is read
 * into a dma-pool bounce buffer via qla2x00_read_sfp(), copied into
 * i2c->buffer and the whole structure returned in the reply payload.
 * vendor_rsp[0] carries the mailbox status; the job completes inline.
 *
 * NOTE(review): i2c->length comes from userspace and is used directly
 * as the read size and memcpy length — no bounds check is visible in
 * this view; confirm validation exists in the elided lines.
 */
1722 qla2x00_read_i2c(struct bsg_job *bsg_job)
1724 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1725 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1726 scsi_qla_host_t *vha = shost_priv(host);
1727 struct qla_hw_data *ha = vha->hw;
/* Stack staging area for the request payload, bounded by DMA_POOL_SIZE. */
1729 uint8_t bsg[DMA_POOL_SIZE];
1730 struct qla_i2c_access *i2c = (void *)bsg;
1732 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1734 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1735 EXT_STATUS_NO_MEMORY;
1739 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1740 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1742 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1743 i2c->device, i2c->offset, i2c->length, i2c->option);
1746 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
/* Success: hand the data back inside the qla_i2c_access structure. */
1751 memcpy(i2c->buffer, sfp, i2c->length);
1752 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1753 bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1755 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1758 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1761 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1762 bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
1763 bsg_reply->result = DID_OK << 16;
1764 bsg_job_done(bsg_job, bsg_reply->result,
1765 bsg_reply->reply_payload_rcv_len);
/*
 * qla24xx_process_bidir_cmd() - BSG vendor command (QL_VND_DIAG_IO_CMD):
 * issue a bidirectional diagnostic (loop-back style) IOCB to the port
 * itself.
 *
 * Validates the adapter (bidi-capable, no reset in progress, online,
 * cable present, switch-attached, P2P mode), performs a one-time
 * self-login to obtain vha->self_login_loop_id, DMA-maps the request
 * and reply scatter/gather lists, allocates an SRB and fires it with
 * qla2x00_start_bidir().  On success the job is completed later from
 * the interrupt handler; on any failure the resources are unwound and
 * the job is completed here with the error carried in vendor_rsp[0]
 * (bsg result itself is always DID_OK << 16).
 */
1771 qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
1773 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1774 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1775 scsi_qla_host_t *vha = shost_priv(host);
1776 struct qla_hw_data *ha = vha->hw;
1777 uint32_t rval = EXT_STATUS_OK;
1778 uint16_t req_sg_cnt = 0;
1779 uint16_t rsp_sg_cnt = 0;
1780 uint16_t nextlid = 0;
1783 uint32_t req_data_len = 0;
1784 uint32_t rsp_data_len = 0;
1786 /* Check the type of the adapter */
1787 if (!IS_BIDI_CAPABLE(ha)) {
1788 ql_log(ql_log_warn, vha, 0x70a0,
1789 "This adapter is not supported\n");
1790 rval = EXT_STATUS_NOT_SUPPORTED;
1794 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1795 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1796 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1797 rval = EXT_STATUS_BUSY;
1801 /* Check if host is online */
1802 if (!vha->flags.online) {
1803 ql_log(ql_log_warn, vha, 0x70a1,
1804 "Host is not online\n");
1805 rval = EXT_STATUS_DEVICE_OFFLINE;
1809 /* Check if cable is plugged in or not */
1810 if (vha->device_flags & DFLG_NO_CABLE) {
1811 ql_log(ql_log_warn, vha, 0x70a2,
1812 "Cable is unplugged...\n");
1813 rval = EXT_STATUS_INVALID_CFG;
1817 /* Check if the switch is connected or not */
1818 if (ha->current_topology != ISP_CFG_F) {
1819 ql_log(ql_log_warn, vha, 0x70a3,
1820 "Host is not connected to the switch\n");
1821 rval = EXT_STATUS_INVALID_CFG;
1825 /* Check if operating mode is P2P */
1826 if (ha->operating_mode != P2P) {
1827 ql_log(ql_log_warn, vha, 0x70a4,
1828 "Host is operating mode is not P2p\n");
1829 rval = EXT_STATUS_INVALID_CFG;
/* One-time fabric self-login; selflogin_lock guards self_login_loop_id. */
1833 mutex_lock(&ha->selflogin_lock);
1834 if (vha->self_login_loop_id == 0) {
1835 /* Initialize all required fields of fcport */
1836 vha->bidir_fcport.vha = vha;
1837 vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1838 vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1839 vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1840 vha->bidir_fcport.loop_id = vha->loop_id;
1842 if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1843 ql_log(ql_log_warn, vha, 0x70a7,
1844 "Failed to login port %06X for bidirectional IOCB\n",
1845 vha->bidir_fcport.d_id.b24);
1846 mutex_unlock(&ha->selflogin_lock);
1847 rval = EXT_STATUS_MAILBOX;
/* fabric_login reports the next free loop id; ours is one less. */
1850 vha->self_login_loop_id = nextlid - 1;
1853 /* Assign the self login loop id to fcport */
1854 mutex_unlock(&ha->selflogin_lock);
1856 vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1858 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1859 bsg_job->request_payload.sg_list,
1860 bsg_job->request_payload.sg_cnt,
1864 rval = EXT_STATUS_NO_MEMORY;
1868 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1869 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1873 rval = EXT_STATUS_NO_MEMORY;
1874 goto done_unmap_req_sg;
/* dma_map_sg may coalesce entries; the IOCB path needs exact counts. */
1877 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1878 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1879 ql_dbg(ql_dbg_user, vha, 0x70a9,
1880 "Dma mapping resulted in different sg counts "
1881 "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1882 "%x dma_reply_sg_cnt: %x]\n",
1883 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1884 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1885 rval = EXT_STATUS_NO_MEMORY;
/*
 * NOTE(review): this comparison executes BEFORE req_data_len and
 * rsp_data_len are assigned below, so both are still 0 and the check
 * can never fire.  Upstream fixed this by moving the assignments above
 * the comparison — confirm and apply the same fix.
 */
1889 if (req_data_len != rsp_data_len) {
1890 rval = EXT_STATUS_BUSY;
1891 ql_log(ql_log_warn, vha, 0x70aa,
1892 "req_data_len != rsp_data_len\n");
1896 req_data_len = bsg_job->request_payload.payload_len;
1897 rsp_data_len = bsg_job->reply_payload.payload_len;
1900 /* Alloc SRB structure */
1901 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1903 ql_dbg(ql_dbg_user, vha, 0x70ac,
1904 "Alloc SRB structure failed\n");
1905 rval = EXT_STATUS_NO_MEMORY;
1909 /*Populate srb->ctx with bidir ctx*/
1910 sp->u.bsg_job = bsg_job;
1911 sp->free = qla2x00_bsg_sp_free;
1912 sp->type = SRB_BIDI_CMD;
1913 sp->done = qla2x00_bsg_job_done;
1915 /* Add the read and write sg count */
1916 tot_dsds = rsp_sg_cnt + req_sg_cnt;
1918 rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1919 if (rval != EXT_STATUS_OK)
1921 /* the bsg request will be completed in the interrupt handler */
/* Error unwind: SRB back to the pool, then both sg lists unmapped. */
1925 mempool_free(sp, ha->srb_mempool);
1927 dma_unmap_sg(&ha->pdev->dev,
1928 bsg_job->reply_payload.sg_list,
1929 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1931 dma_unmap_sg(&ha->pdev->dev,
1932 bsg_job->request_payload.sg_list,
1933 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1936 /* Return an error vendor specific response
1937 * and complete the bsg request
1939 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1940 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1941 bsg_reply->reply_payload_rcv_len = 0;
1942 bsg_reply->result = (DID_OK) << 16;
1943 bsg_job_done(bsg_job, bsg_reply->result,
1944 bsg_reply->reply_payload_rcv_len);
1945 /* Always return success, vendor rsp carries correct status */
/*
 * qlafx00_mgmt_cmd() - BSG vendor command (QL_VND_FX00_MGMT_CMD):
 * asynchronous management pass-through for ISPFx00 adapters.
 *
 * A struct qla_mt_iocb_rqst_fx00 embedded in the vendor command words
 * describes the operation; its flags say whether the request and/or
 * reply payloads must be DMA-mapped.  A throw-away fcport and an SRB
 * (SRB_FXIOCB_BCMD) are allocated and the command is started with
 * qla2x00_start_sp(); on success the job completes later via
 * qla2x00_bsg_job_done() from interrupt context, with sp->free
 * (qla2x00_bsg_sp_free) unmapping the sg lists.  Failure paths unwind
 * the fcport/sg mappings here (labels elided in this view).
 */
1950 qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
1952 struct fc_bsg_request *bsg_request = bsg_job->request;
1953 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1954 scsi_qla_host_t *vha = shost_priv(host);
1955 struct qla_hw_data *ha = vha->hw;
1956 int rval = (DRIVER_ERROR << 16);
1957 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
1959 int req_sg_cnt = 0, rsp_sg_cnt = 0;
1960 struct fc_port *fcport;
1961 char *type = "FC_BSG_HST_FX_MGMT";
1963 /* Copy the IOCB specific information */
1964 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
1965 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1967 /* Dump the vendor information */
1968 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
1969 (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
1971 if (!vha->flags.online) {
1972 ql_log(ql_log_warn, vha, 0x70d0,
1973 "Host is not online.\n");
/* Map payload sg lists only when the IOCB flags say they are used. */
1978 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
1979 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1980 bsg_job->request_payload.sg_list,
1981 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1983 ql_log(ql_log_warn, vha, 0x70c7,
1984 "dma_map_sg return %d for request\n", req_sg_cnt);
1990 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
1991 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1992 bsg_job->reply_payload.sg_list,
1993 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1995 ql_log(ql_log_warn, vha, 0x70c8,
1996 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
1998 goto done_unmap_req_sg;
2002 ql_dbg(ql_dbg_user, vha, 0x70c9,
2003 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
2004 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
2005 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
2007 /* Allocate a dummy fcport structure, since functions preparing the
2008 * IOCB and mailbox command retrieves port specific information
2009 * from fcport structure. For Host based ELS commands there will be
2010 * no fcport structure allocated
2012 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2014 ql_log(ql_log_warn, vha, 0x70ca,
2015 "Failed to allocate fcport.\n");
2017 goto done_unmap_rsp_sg;
2020 /* Alloc SRB structure */
2021 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2023 ql_log(ql_log_warn, vha, 0x70cb,
2024 "qla2x00_get_sp failed.\n");
2026 goto done_free_fcport;
2029 /* Initialize all required fields of fcport */
/* Loop id for the dummy fcport is supplied inside the IOCB request. */
2031 fcport->loop_id = piocb_rqst->dataword;
2033 sp->type = SRB_FXIOCB_BCMD;
2034 sp->name = "bsg_fx_mgmt";
2035 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
2036 sp->u.bsg_job = bsg_job;
2037 sp->free = qla2x00_bsg_sp_free;
2038 sp->done = qla2x00_bsg_job_done;
2040 ql_dbg(ql_dbg_user, vha, 0x70cc,
2041 "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
2042 type, piocb_rqst->func_type, fcport->loop_id);
2044 rval = qla2x00_start_sp(sp);
2045 if (rval != QLA_SUCCESS) {
2046 ql_log(ql_log_warn, vha, 0x70cd,
2047 "qla2x00_start_sp failed=%d.\n", rval);
2048 mempool_free(sp, ha->srb_mempool);
2050 goto done_free_fcport;
/* Unwind labels: unmap whichever sg lists were mapped above. */
2058 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
2059 dma_unmap_sg(&ha->pdev->dev,
2060 bsg_job->reply_payload.sg_list,
2061 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2063 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
2064 dma_unmap_sg(&ha->pdev->dev,
2065 bsg_job->request_payload.sg_list,
2066 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
/*
 * qla26xx_serdes_op() - BSG vendor command (QL_VND_SERDES_OP): read or
 * write a single SerDes register word on 2xxx-series adapters.
 *
 * The request payload is a struct qla_serdes_reg whose cmd field picks
 * the operation.  A read copies the updated structure back into the
 * reply payload; vendor_rsp[0] reports EXT_STATUS_MAILBOX on mailbox
 * failure or 0 on success.  The job completes inline.
 */
2073 qla26xx_serdes_op(struct bsg_job *bsg_job)
2075 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2076 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2077 scsi_qla_host_t *vha = shost_priv(host);
2079 struct qla_serdes_reg sr;
2081 memset(&sr, 0, sizeof(sr));
2083 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2084 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2087 case INT_SC_SERDES_WRITE_REG:
2088 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2089 bsg_reply->reply_payload_rcv_len = 0;
2091 case INT_SC_SERDES_READ_REG:
2092 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
/* Return the structure with sr.val filled in by the read. */
2093 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2094 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2095 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2098 ql_dbg(ql_dbg_user, vha, 0x708c,
2099 "Unknown serdes cmd %x.\n", sr.cmd);
2104 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2105 rval ? EXT_STATUS_MAILBOX : 0;
2107 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2108 bsg_reply->result = DID_OK << 16;
2109 bsg_job_done(bsg_job, bsg_reply->result,
2110 bsg_reply->reply_payload_rcv_len);
/*
 * qla8044_serdes_op() - BSG vendor command (QL_VND_SERDES_OP_EX):
 * read or write a SerDes register on ISP8044 adapters.
 *
 * Identical structure to qla26xx_serdes_op() but uses the extended
 * struct qla_serdes_reg_ex and the qla8044_* serdes accessors.
 * vendor_rsp[0] reports EXT_STATUS_MAILBOX on mailbox failure or 0 on
 * success; the job completes inline.
 */
2115 qla8044_serdes_op(struct bsg_job *bsg_job)
2117 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2118 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2119 scsi_qla_host_t *vha = shost_priv(host);
2121 struct qla_serdes_reg_ex sr;
2123 memset(&sr, 0, sizeof(sr));
2125 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2126 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2129 case INT_SC_SERDES_WRITE_REG:
2130 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2131 bsg_reply->reply_payload_rcv_len = 0;
2133 case INT_SC_SERDES_READ_REG:
2134 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
/* Return the structure with sr.val filled in by the read. */
2135 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2136 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2137 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2140 ql_dbg(ql_dbg_user, vha, 0x70cf,
2141 "Unknown serdes cmd %x.\n", sr.cmd);
2146 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2147 rval ? EXT_STATUS_MAILBOX : 0;
2149 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2150 bsg_reply->result = DID_OK << 16;
2151 bsg_job_done(bsg_job, bsg_reply->result,
2152 bsg_reply->reply_payload_rcv_len);
/*
 * qla27xx_get_flash_upd_cap() - BSG vendor command
 * (QL_VND_GET_FLASH_UPDATE_CAPS): report the firmware attribute words
 * as a 64-bit flash-update capability mask (ISP27xx only).
 *
 * The mask packs fw_attributes_ext[1]:fw_attributes_ext[0]:
 * fw_attributes_h:fw_attributes from high to low 16-bit fields and is
 * returned in the reply payload as a struct qla_flash_update_caps.
 */
2157 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2159 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2160 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2161 scsi_qla_host_t *vha = shost_priv(host);
2162 struct qla_hw_data *ha = vha->hw;
2163 struct qla_flash_update_caps cap;
2165 if (!(IS_QLA27XX(ha)))
2168 memset(&cap, 0, sizeof(cap));
/* Pack the four 16-bit firmware attribute words into one 64-bit mask. */
2169 cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2170 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2171 (uint64_t)ha->fw_attributes_h << 16 |
2172 (uint64_t)ha->fw_attributes;
2174 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2175 bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2176 bsg_reply->reply_payload_rcv_len = sizeof(cap);
2178 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2181 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2182 bsg_reply->result = DID_OK << 16;
2183 bsg_job_done(bsg_job, bsg_reply->result,
2184 bsg_reply->reply_payload_rcv_len);
/*
 * qla27xx_set_flash_upd_cap() - BSG vendor command
 * (QL_VND_SET_FLASH_UPDATE_CAPS): validate a caller-proposed flash
 * update capability setting against the running firmware (ISP27xx).
 *
 * The request's capabilities mask must exactly match the mask the
 * online firmware advertises, and the requested outage_duration must
 * be at least MAX_LOOP_TIMEOUT; otherwise vendor_rsp[0] is set to
 * EXT_STATUS_INVALID_PARAM.  The job completes inline either way.
 */
2189 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2191 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2192 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2193 scsi_qla_host_t *vha = shost_priv(host);
2194 struct qla_hw_data *ha = vha->hw;
2195 uint64_t online_fw_attr = 0;
2196 struct qla_flash_update_caps cap;
2198 if (!(IS_QLA27XX(ha)))
2201 memset(&cap, 0, sizeof(cap));
2202 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2203 bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
/* Same packing as qla27xx_get_flash_upd_cap() for comparison. */
2205 online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2206 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2207 (uint64_t)ha->fw_attributes_h << 16 |
2208 (uint64_t)ha->fw_attributes;
2210 if (online_fw_attr != cap.capabilities) {
2211 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2212 EXT_STATUS_INVALID_PARAM;
/* Outage window shorter than the loop timeout is rejected. */
2216 if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
2217 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2218 EXT_STATUS_INVALID_PARAM;
2222 bsg_reply->reply_payload_rcv_len = 0;
2224 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2227 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2228 bsg_reply->result = DID_OK << 16;
2229 bsg_job_done(bsg_job, bsg_reply->result,
2230 bsg_reply->reply_payload_rcv_len);
/*
 * qla27xx_get_bbcr_data() - BSG vendor command (QL_VND_GET_BBCR_DATA):
 * report Buffer-to-Buffer Credit Recovery (BBCR) state (ISP27xx only).
 *
 * Fills a struct qla_bbcr_data from vha->flags.bbcr_enable and the
 * packed vha->bbcr word (bit 12 = state, bits 11:8 = negotiated BBSCN,
 * bits 3:0 = configured BBSCN — per the extraction below).  When BBCR
 * is enabled, qla2x00_get_adapter_id() is consulted; a mailbox failure
 * downgrades status to UNKNOWN/OFFLINE with mbx1 = loop_id.  The
 * structure is returned in the reply payload and the job completes
 * inline.
 */
2235 qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
2237 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2238 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2239 scsi_qla_host_t *vha = shost_priv(host);
2240 struct qla_hw_data *ha = vha->hw;
2241 struct qla_bbcr_data bbcr;
2242 uint16_t loop_id, topo, sw_cap;
2243 uint8_t domain, area, al_pa, state;
2246 if (!(IS_QLA27XX(ha)))
2249 memset(&bbcr, 0, sizeof(bbcr));
2251 if (vha->flags.bbcr_enable)
2252 bbcr.status = QLA_BBCR_STATUS_ENABLED;
2254 bbcr.status = QLA_BBCR_STATUS_DISABLED;
2256 if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
2257 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2258 &area, &domain, &topo, &sw_cap);
2259 if (rval != QLA_SUCCESS) {
2260 bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
2261 bbcr.state = QLA_BBCR_STATE_OFFLINE;
/* On mailbox failure, mb1 (returned via loop_id) is exposed as-is. */
2262 bbcr.mbx1 = loop_id;
/* bit 12 of vha->bbcr holds the online/offline state flag. */
2266 state = (vha->bbcr >> 12) & 0x1;
2269 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2270 bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
2272 bbcr.state = QLA_BBCR_STATE_ONLINE;
2273 bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
2276 bbcr.configured_bbscn = vha->bbcr & 0xf;
2280 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2281 bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
2282 bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
2284 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2286 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2287 bsg_reply->result = DID_OK << 16;
2288 bsg_job_done(bsg_job, bsg_reply->result,
2289 bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_get_priv_stats() - BSG vendor commands QL_VND_GET_PRIV_STATS
 * and QL_VND_GET_PRIV_STATS_EX: fetch ISP link statistics.
 *
 * Rejected while unloading, with the PCI channel offline, during reset,
 * or on pre-FWI2 hardware.  Allocates a coherent DMA buffer, issues
 * qla24xx_get_isp_stats() on the base vha (the _EX variant passes
 * vendor_cmd[1] as extra options), and on success copies the raw
 * struct link_statistics into the reply payload.  vendor_rsp[0] is
 * EXT_STATUS_MAILBOX on failure, EXT_STATUS_OK otherwise; the job
 * completes inline and the buffer is freed on exit.
 */
2294 qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2296 struct fc_bsg_request *bsg_request = bsg_job->request;
2297 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2298 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2299 scsi_qla_host_t *vha = shost_priv(host);
2300 struct qla_hw_data *ha = vha->hw;
2301 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2302 struct link_statistics *stats = NULL;
2303 dma_addr_t stats_dma;
2305 uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
/* Only the _EX variant carries an options word in vendor_cmd[1]. */
2306 uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
2308 if (test_bit(UNLOADING, &vha->dpc_flags))
2311 if (unlikely(pci_channel_offline(ha->pdev)))
2314 if (qla2x00_reset_active(vha))
2317 if (!IS_FWI2_CAPABLE(ha))
2320 stats = dma_alloc_coherent(&ha->pdev->dev,
2321 sizeof(*stats), &stats_dma, GFP_KERNEL);
2323 ql_log(ql_log_warn, vha, 0x70e2,
2324 "Failed to allocate memory for stats.\n");
2328 memset(stats, 0, sizeof(*stats));
/* Statistics are gathered against the physical (base) host. */
2330 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
2332 if (rval == QLA_SUCCESS) {
2333 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
2334 (uint8_t *)stats, sizeof(*stats));
2335 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2336 bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2339 bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2340 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2341 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2343 bsg_job->reply_len = sizeof(*bsg_reply);
2344 bsg_reply->result = DID_OK << 16;
2345 bsg_job_done(bsg_job, bsg_reply->result,
2346 bsg_reply->reply_payload_rcv_len);
2348 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
/*
 * qla2x00_do_dport_diagnostics() - BSG vendor command
 * (QL_VND_DPORT_DIAGNOSTICS): run D-Port diagnostics on ISP83xx/27xx.
 *
 * Copies a struct qla_dport_diag from the request payload, runs
 * qla26xx_dport_diagnostics() with the caller's options, and on
 * success returns the filled structure in the reply payload.
 * vendor_rsp[0] is EXT_STATUS_MAILBOX on failure, EXT_STATUS_OK
 * otherwise; the job completes inline.  The kmalloc'd dd buffer is
 * presumably kfree'd on the (elided) exit path — confirm against the
 * full source.
 */
2355 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2357 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2358 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2359 scsi_qla_host_t *vha = shost_priv(host);
2361 struct qla_dport_diag *dd;
2363 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
2366 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2368 ql_log(ql_log_warn, vha, 0x70db,
2369 "Failed to allocate memory for dport.\n");
2373 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2374 bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2376 rval = qla26xx_dport_diagnostics(
2377 vha, dd->buf, sizeof(dd->buf), dd->options);
2378 if (rval == QLA_SUCCESS) {
2379 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2380 bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2383 bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2384 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2385 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2387 bsg_job->reply_len = sizeof(*bsg_reply);
2388 bsg_reply->result = DID_OK << 16;
2389 bsg_job_done(bsg_job, bsg_reply->result,
2390 bsg_reply->reply_payload_rcv_len);
/*
 * qla2x00_process_vendor_specific() - dispatch an FC_BSG_HST_VENDOR
 * request to its handler based on vendor_cmd[0].
 *
 * Each handler is responsible for completing the bsg job itself; the
 * handler's return value is propagated to the bsg layer.
 */
2398 qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
2400 struct fc_bsg_request *bsg_request = bsg_job->request;
2402 switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
2403 case QL_VND_LOOPBACK:
2404 return qla2x00_process_loopback(bsg_job);
2406 case QL_VND_A84_RESET:
2407 return qla84xx_reset(bsg_job);
2409 case QL_VND_A84_UPDATE_FW:
2410 return qla84xx_updatefw(bsg_job);
2412 case QL_VND_A84_MGMT_CMD:
2413 return qla84xx_mgmt_cmd(bsg_job);
2416 return qla24xx_iidma(bsg_job);
2418 case QL_VND_FCP_PRIO_CFG_CMD:
2419 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2421 case QL_VND_READ_FLASH:
2422 return qla2x00_read_optrom(bsg_job);
2424 case QL_VND_UPDATE_FLASH:
2425 return qla2x00_update_optrom(bsg_job);
2427 case QL_VND_SET_FRU_VERSION:
2428 return qla2x00_update_fru_versions(bsg_job);
2430 case QL_VND_READ_FRU_STATUS:
2431 return qla2x00_read_fru_status(bsg_job);
2433 case QL_VND_WRITE_FRU_STATUS:
2434 return qla2x00_write_fru_status(bsg_job);
2436 case QL_VND_WRITE_I2C:
2437 return qla2x00_write_i2c(bsg_job);
2439 case QL_VND_READ_I2C:
2440 return qla2x00_read_i2c(bsg_job);
2442 case QL_VND_DIAG_IO_CMD:
2443 return qla24xx_process_bidir_cmd(bsg_job);
2445 case QL_VND_FX00_MGMT_CMD:
2446 return qlafx00_mgmt_cmd(bsg_job);
2448 case QL_VND_SERDES_OP:
2449 return qla26xx_serdes_op(bsg_job);
2451 case QL_VND_SERDES_OP_EX:
2452 return qla8044_serdes_op(bsg_job);
2454 case QL_VND_GET_FLASH_UPDATE_CAPS:
2455 return qla27xx_get_flash_upd_cap(bsg_job);
2457 case QL_VND_SET_FLASH_UPDATE_CAPS:
2458 return qla27xx_set_flash_upd_cap(bsg_job);
2460 case QL_VND_GET_BBCR_DATA:
2461 return qla27xx_get_bbcr_data(bsg_job);
/* Both stats variants share one handler; it inspects vendor_cmd[0]. */
2463 case QL_VND_GET_PRIV_STATS:
2464 case QL_VND_GET_PRIV_STATS_EX:
2465 return qla2x00_get_priv_stats(bsg_job);
2467 case QL_VND_DPORT_DIAGNOSTICS:
2468 return qla2x00_do_dport_diagnostics(bsg_job);
/*
 * qla24xx_bsg_request() - top-level entry point for all BSG requests
 * delivered to this driver (fc_function_template .bsg_request).
 *
 * Resolves the owning scsi_qla_host: rport-directed ELS requests go
 * through the rport's shost; host-directed requests use the job's
 * shost directly.  Requests are refused while an ISP reset is active,
 * then dispatched by msgcode: ELS, CT, or vendor-specific.  Rport
 * add/delete msgcodes fall through as unsupported.
 */
2476 qla24xx_bsg_request(struct bsg_job *bsg_job)
2478 struct fc_bsg_request *bsg_request = bsg_job->request;
2479 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2481 struct fc_rport *rport;
2482 struct Scsi_Host *host;
2483 scsi_qla_host_t *vha;
2485 /* In case no data transferred. */
2486 bsg_reply->reply_payload_rcv_len = 0;
/* Rport-directed ELS: derive the host from the remote port. */
2488 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
2489 rport = fc_bsg_to_rport(bsg_job);
2490 host = rport_to_shost(rport);
2491 vha = shost_priv(host);
2493 host = fc_bsg_to_shost(bsg_job);
2494 vha = shost_priv(host);
2497 if (qla2x00_reset_active(vha)) {
2498 ql_dbg(ql_dbg_user, vha, 0x709f,
2499 "BSG: ISP abort active/needed -- cmd=%d.\n",
2500 bsg_request->msgcode);
2504 ql_dbg(ql_dbg_user, vha, 0x7000,
2505 "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
2507 switch (bsg_request->msgcode) {
2508 case FC_BSG_RPT_ELS:
2509 case FC_BSG_HST_ELS_NOLOGIN:
2510 ret = qla2x00_process_els(bsg_job);
2513 ret = qla2x00_process_ct(bsg_job);
2515 case FC_BSG_HST_VENDOR:
2516 ret = qla2x00_process_vendor_specific(bsg_job);
2518 case FC_BSG_HST_ADD_RPORT:
2519 case FC_BSG_HST_DEL_RPORT:
2522 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
2529 qla24xx_bsg_timeout(struct bsg_job *bsg_job)
2531 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2532 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2533 struct qla_hw_data *ha = vha->hw;
2536 unsigned long flags;
2537 struct req_que *req;
2539 /* find the bsg job from the active list of commands */
2540 spin_lock_irqsave(&ha->hardware_lock, flags);
2541 for (que = 0; que < ha->max_req_queues; que++) {
2542 req = ha->req_q_map[que];
2546 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
2547 sp = req->outstanding_cmds[cnt];
2549 if (((sp->type == SRB_CT_CMD) ||
2550 (sp->type == SRB_ELS_CMD_HST) ||
2551 (sp->type == SRB_FXIOCB_BCMD))
2552 && (sp->u.bsg_job == bsg_job)) {
2553 req->outstanding_cmds[cnt] = NULL;
2554 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2555 if (ha->isp_ops->abort_command(sp)) {
2556 ql_log(ql_log_warn, vha, 0x7089,
2557 "mbx abort_command "
2559 bsg_job->req->errors =
2560 bsg_reply->result = -EIO;
2562 ql_dbg(ql_dbg_user, vha, 0x708a,
2563 "mbx abort_command "
2565 bsg_job->req->errors =
2566 bsg_reply->result = 0;
2568 spin_lock_irqsave(&ha->hardware_lock, flags);
2574 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2575 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
2576 bsg_job->req->errors = bsg_reply->result = -ENXIO;
2580 spin_unlock_irqrestore(&ha->hardware_lock, flags);