2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
9 #include <linux/utsname.h>
11 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
12 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
13 static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
14 static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
15 static int qla2x00_sns_rft_id(scsi_qla_host_t *);
16 static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
17 static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
18 static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
19 static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
20 static int qla_async_rsnn_nn(scsi_qla_host_t *);
23 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
27 * Returns a pointer to the @vha's ms_iocb.
30 qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
32 struct qla_hw_data *ha = vha->hw;
33 ms_iocb_entry_t *ms_pkt;
35 ms_pkt = (ms_iocb_entry_t *)arg->iocb;
36 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
38 ms_pkt->entry_type = MS_IOCB_TYPE;
39 ms_pkt->entry_count = 1;
40 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
41 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
42 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
43 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
44 ms_pkt->total_dsd_count = cpu_to_le16(2);
45 ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
46 ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
48 put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);
49 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
51 put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address);
52 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
54 vha->qla_stats.control_requests++;
60 * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
64 * Returns a pointer to the @ha's ms_iocb.
67 qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
69 struct qla_hw_data *ha = vha->hw;
70 struct ct_entry_24xx *ct_pkt;
72 ct_pkt = (struct ct_entry_24xx *)arg->iocb;
73 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
75 ct_pkt->entry_type = CT_IOCB_TYPE;
76 ct_pkt->entry_count = 1;
77 ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
78 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
79 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
80 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
81 ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
82 ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
84 put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address);
85 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
87 put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address);
88 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
89 ct_pkt->vp_index = vha->vp_idx;
91 vha->qla_stats.control_requests++;
97 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
98 * @p: CT request buffer
100 * @rsp_size: response size in bytes
102 * Returns a pointer to the intitialized @ct_req.
104 static inline struct ct_sns_req *
105 qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
107 memset(p, 0, sizeof(struct ct_sns_pkt));
109 p->p.req.header.revision = 0x01;
110 p->p.req.header.gs_type = 0xFC;
111 p->p.req.header.gs_subtype = 0x02;
112 p->p.req.command = cpu_to_be16(cmd);
113 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
119 qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
120 struct ct_sns_rsp *ct_rsp, const char *routine)
123 uint16_t comp_status;
124 struct qla_hw_data *ha = vha->hw;
125 bool lid_is_sns = false;
127 rval = QLA_FUNCTION_FAILED;
128 if (ms_pkt->entry_status != 0) {
129 ql_dbg(ql_dbg_disc, vha, 0x2031,
130 "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
131 routine, ms_pkt->entry_status, vha->d_id.b.domain,
132 vha->d_id.b.area, vha->d_id.b.al_pa);
134 if (IS_FWI2_CAPABLE(ha))
135 comp_status = le16_to_cpu(
136 ((struct ct_entry_24xx *)ms_pkt)->comp_status);
138 comp_status = le16_to_cpu(ms_pkt->status);
139 switch (comp_status) {
141 case CS_DATA_UNDERRUN:
142 case CS_DATA_OVERRUN: /* Overrun? */
143 if (ct_rsp->header.response !=
144 cpu_to_be16(CT_ACCEPT_RESPONSE)) {
145 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
146 "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
147 routine, vha->d_id.b.domain,
148 vha->d_id.b.area, vha->d_id.b.al_pa,
149 comp_status, ct_rsp->header.response);
150 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
152 offsetof(typeof(*ct_rsp), rsp));
153 rval = QLA_INVALID_COMMAND;
157 case CS_PORT_LOGGED_OUT:
158 if (IS_FWI2_CAPABLE(ha)) {
159 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
163 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
168 ql_dbg(ql_dbg_async, vha, 0x502b,
169 "%s failed, Name server has logged out",
171 rval = QLA_NOT_LOGGED_IN;
172 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
173 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
177 rval = QLA_FUNCTION_TIMEOUT;
180 ql_dbg(ql_dbg_disc, vha, 0x2033,
181 "%s failed, completion status (%x) on port_id: "
182 "%02x%02x%02x.\n", routine, comp_status,
183 vha->d_id.b.domain, vha->d_id.b.area,
192 * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
194 * @fcport: fcport entry to updated
196 * Returns 0 on success.
199 qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
203 ms_iocb_entry_t *ms_pkt;
204 struct ct_sns_req *ct_req;
205 struct ct_sns_rsp *ct_rsp;
206 struct qla_hw_data *ha = vha->hw;
209 if (IS_QLA2100(ha) || IS_QLA2200(ha))
210 return qla2x00_sns_ga_nxt(vha, fcport);
212 arg.iocb = ha->ms_iocb;
213 arg.req_dma = ha->ct_sns_dma;
214 arg.rsp_dma = ha->ct_sns_dma;
215 arg.req_size = GA_NXT_REQ_SIZE;
216 arg.rsp_size = GA_NXT_RSP_SIZE;
217 arg.nport_handle = NPH_SNS;
220 /* Prepare common MS IOCB */
221 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
223 /* Prepare CT request */
224 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
226 ct_rsp = &ha->ct_sns->p.rsp;
228 /* Prepare CT arguments -- port_id */
229 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
231 /* Execute MS IOCB */
232 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
233 sizeof(ms_iocb_entry_t));
234 if (rval != QLA_SUCCESS) {
236 ql_dbg(ql_dbg_disc, vha, 0x2062,
237 "GA_NXT issue IOCB failed (%d).\n", rval);
238 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
240 rval = QLA_FUNCTION_FAILED;
242 /* Populate fc_port_t entry. */
243 fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id);
245 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
247 memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
250 fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
251 FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER;
253 if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
254 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
255 fcport->d_id.b.domain = 0xf0;
257 ql_dbg(ql_dbg_disc, vha, 0x2063,
258 "GA_NXT entry - nn %8phN pn %8phN "
259 "port_id=%02x%02x%02x.\n",
260 fcport->node_name, fcport->port_name,
261 fcport->d_id.b.domain, fcport->d_id.b.area,
262 fcport->d_id.b.al_pa);
269 qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
271 return vha->hw->max_fibre_devices * 4 + 16;
275 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
277 * @list: switch info entries to populate
279 * NOTE: Non-Nx_Ports are not requested.
281 * Returns 0 on success.
284 qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
289 ms_iocb_entry_t *ms_pkt;
290 struct ct_sns_req *ct_req;
291 struct ct_sns_rsp *ct_rsp;
293 struct ct_sns_gid_pt_data *gid_data;
294 struct qla_hw_data *ha = vha->hw;
295 uint16_t gid_pt_rsp_size;
298 if (IS_QLA2100(ha) || IS_QLA2200(ha))
299 return qla2x00_sns_gid_pt(vha, list);
302 gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
304 arg.iocb = ha->ms_iocb;
305 arg.req_dma = ha->ct_sns_dma;
306 arg.rsp_dma = ha->ct_sns_dma;
307 arg.req_size = GID_PT_REQ_SIZE;
308 arg.rsp_size = gid_pt_rsp_size;
309 arg.nport_handle = NPH_SNS;
312 /* Prepare common MS IOCB */
313 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
315 /* Prepare CT request */
316 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
317 ct_rsp = &ha->ct_sns->p.rsp;
319 /* Prepare CT arguments -- port_type */
320 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
322 /* Execute MS IOCB */
323 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
324 sizeof(ms_iocb_entry_t));
325 if (rval != QLA_SUCCESS) {
327 ql_dbg(ql_dbg_disc, vha, 0x2055,
328 "GID_PT issue IOCB failed (%d).\n", rval);
329 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
331 rval = QLA_FUNCTION_FAILED;
333 /* Set port IDs in switch info list. */
334 for (i = 0; i < ha->max_fibre_devices; i++) {
335 gid_data = &ct_rsp->rsp.gid_pt.entries[i];
336 list[i].d_id = be_to_port_id(gid_data->port_id);
337 memset(list[i].fabric_port_name, 0, WWN_SIZE);
338 list[i].fp_speed = PORT_SPEED_UNKNOWN;
341 if (gid_data->control_byte & BIT_7) {
342 list[i].d_id.b.rsvd_1 = gid_data->control_byte;
348 * If we've used all available slots, then the switch is
349 * reporting back more devices than we can handle with this
350 * single call. Return a failed status, and let GA_NXT handle
353 if (i == ha->max_fibre_devices)
354 rval = QLA_FUNCTION_FAILED;
361 * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
363 * @list: switch info entries to populate
365 * Returns 0 on success.
368 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
370 int rval = QLA_SUCCESS;
373 ms_iocb_entry_t *ms_pkt;
374 struct ct_sns_req *ct_req;
375 struct ct_sns_rsp *ct_rsp;
376 struct qla_hw_data *ha = vha->hw;
379 if (IS_QLA2100(ha) || IS_QLA2200(ha))
380 return qla2x00_sns_gpn_id(vha, list);
382 arg.iocb = ha->ms_iocb;
383 arg.req_dma = ha->ct_sns_dma;
384 arg.rsp_dma = ha->ct_sns_dma;
385 arg.req_size = GPN_ID_REQ_SIZE;
386 arg.rsp_size = GPN_ID_RSP_SIZE;
387 arg.nport_handle = NPH_SNS;
389 for (i = 0; i < ha->max_fibre_devices; i++) {
391 /* Prepare common MS IOCB */
392 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
394 /* Prepare CT request */
395 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
397 ct_rsp = &ha->ct_sns->p.rsp;
399 /* Prepare CT arguments -- port_id */
400 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
402 /* Execute MS IOCB */
403 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
404 sizeof(ms_iocb_entry_t));
405 if (rval != QLA_SUCCESS) {
407 ql_dbg(ql_dbg_disc, vha, 0x2056,
408 "GPN_ID issue IOCB failed (%d).\n", rval);
410 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
411 "GPN_ID") != QLA_SUCCESS) {
412 rval = QLA_FUNCTION_FAILED;
416 memcpy(list[i].port_name,
417 ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
420 /* Last device exit. */
421 if (list[i].d_id.b.rsvd_1 != 0)
429 * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
431 * @list: switch info entries to populate
433 * Returns 0 on success.
436 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
438 int rval = QLA_SUCCESS;
440 struct qla_hw_data *ha = vha->hw;
441 ms_iocb_entry_t *ms_pkt;
442 struct ct_sns_req *ct_req;
443 struct ct_sns_rsp *ct_rsp;
446 if (IS_QLA2100(ha) || IS_QLA2200(ha))
447 return qla2x00_sns_gnn_id(vha, list);
449 arg.iocb = ha->ms_iocb;
450 arg.req_dma = ha->ct_sns_dma;
451 arg.rsp_dma = ha->ct_sns_dma;
452 arg.req_size = GNN_ID_REQ_SIZE;
453 arg.rsp_size = GNN_ID_RSP_SIZE;
454 arg.nport_handle = NPH_SNS;
456 for (i = 0; i < ha->max_fibre_devices; i++) {
458 /* Prepare common MS IOCB */
459 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
461 /* Prepare CT request */
462 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
464 ct_rsp = &ha->ct_sns->p.rsp;
466 /* Prepare CT arguments -- port_id */
467 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
469 /* Execute MS IOCB */
470 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
471 sizeof(ms_iocb_entry_t));
472 if (rval != QLA_SUCCESS) {
474 ql_dbg(ql_dbg_disc, vha, 0x2057,
475 "GNN_ID issue IOCB failed (%d).\n", rval);
477 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
478 "GNN_ID") != QLA_SUCCESS) {
479 rval = QLA_FUNCTION_FAILED;
483 memcpy(list[i].node_name,
484 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
486 ql_dbg(ql_dbg_disc, vha, 0x2058,
487 "GID_PT entry - nn %8phN pn %8phN "
488 "portid=%02x%02x%02x.\n",
489 list[i].node_name, list[i].port_name,
490 list[i].d_id.b.domain, list[i].d_id.b.area,
491 list[i].d_id.b.al_pa);
494 /* Last device exit. */
495 if (list[i].d_id.b.rsvd_1 != 0)
502 static void qla2x00_async_sns_sp_done(void *s, int rc)
505 struct scsi_qla_host *vha = sp->vha;
506 struct ct_sns_pkt *ct_sns;
507 struct qla_work_evt *e;
510 if (rc == QLA_SUCCESS) {
511 ql_dbg(ql_dbg_disc, vha, 0x204f,
512 "Async done-%s exiting normally.\n",
514 } else if (rc == QLA_FUNCTION_TIMEOUT) {
515 ql_dbg(ql_dbg_disc, vha, 0x204f,
516 "Async done-%s timeout\n", sp->name);
518 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
519 memset(ct_sns, 0, sizeof(*ct_sns));
521 if (sp->retry_count > 3)
524 ql_dbg(ql_dbg_disc, vha, 0x204f,
525 "Async done-%s fail rc %x. Retry count %d\n",
526 sp->name, rc, sp->retry_count);
528 e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
532 del_timer(&sp->u.iocb_cmd.timer);
534 qla2x00_post_work(vha, e);
539 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
542 /* please ignore kernel warning. otherwise, we have mem leak. */
543 if (sp->u.iocb_cmd.u.ctarg.req) {
544 dma_free_coherent(&vha->hw->pdev->dev,
545 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
546 sp->u.iocb_cmd.u.ctarg.req,
547 sp->u.iocb_cmd.u.ctarg.req_dma);
548 sp->u.iocb_cmd.u.ctarg.req = NULL;
551 if (sp->u.iocb_cmd.u.ctarg.rsp) {
552 dma_free_coherent(&vha->hw->pdev->dev,
553 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
554 sp->u.iocb_cmd.u.ctarg.rsp,
555 sp->u.iocb_cmd.u.ctarg.rsp_dma);
556 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
565 qla2x00_post_work(vha, e);
569 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
572 * Returns 0 on success.
575 qla2x00_rft_id(scsi_qla_host_t *vha)
577 struct qla_hw_data *ha = vha->hw;
579 if (IS_QLA2100(ha) || IS_QLA2200(ha))
580 return qla2x00_sns_rft_id(vha);
582 return qla_async_rftid(vha, &vha->d_id);
585 static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
587 int rval = QLA_MEMORY_ALLOC_FAILED;
588 struct ct_sns_req *ct_req;
590 struct ct_sns_pkt *ct_sns;
592 if (!vha->flags.online)
595 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
599 sp->type = SRB_CT_PTHRU_CMD;
601 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
603 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
604 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
606 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
607 if (!sp->u.iocb_cmd.u.ctarg.req) {
608 ql_log(ql_log_warn, vha, 0xd041,
609 "%s: Failed to allocate ct_sns request.\n",
614 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
615 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
617 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
618 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
619 ql_log(ql_log_warn, vha, 0xd042,
620 "%s: Failed to allocate ct_sns request.\n",
624 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
625 memset(ct_sns, 0, sizeof(*ct_sns));
626 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
628 /* Prepare CT request */
629 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
631 /* Prepare CT arguments -- port_id, FC-4 types */
632 ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
633 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
635 if (vha->flags.nvme_enabled)
636 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
638 sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
639 sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
640 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
641 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
642 sp->done = qla2x00_async_sns_sp_done;
644 ql_dbg(ql_dbg_disc, vha, 0xffff,
645 "Async-%s - hdl=%x portid %06x.\n",
646 sp->name, sp->handle, d_id->b24);
648 rval = qla2x00_start_sp(sp);
649 if (rval != QLA_SUCCESS) {
650 ql_dbg(ql_dbg_disc, vha, 0x2043,
651 "RFT_ID issue IOCB failed (%d).\n", rval);
662 * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
666 * Returns 0 on success.
669 qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
671 struct qla_hw_data *ha = vha->hw;
673 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
674 ql_dbg(ql_dbg_disc, vha, 0x2046,
675 "RFF_ID call not supported on ISP2100/ISP2200.\n");
676 return (QLA_SUCCESS);
679 return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
683 static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
684 u8 fc4feature, u8 fc4type)
686 int rval = QLA_MEMORY_ALLOC_FAILED;
687 struct ct_sns_req *ct_req;
689 struct ct_sns_pkt *ct_sns;
691 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
695 sp->type = SRB_CT_PTHRU_CMD;
697 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
699 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
700 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
702 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
703 if (!sp->u.iocb_cmd.u.ctarg.req) {
704 ql_log(ql_log_warn, vha, 0xd041,
705 "%s: Failed to allocate ct_sns request.\n",
710 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
711 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
713 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
714 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
715 ql_log(ql_log_warn, vha, 0xd042,
716 "%s: Failed to allocate ct_sns request.\n",
720 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
721 memset(ct_sns, 0, sizeof(*ct_sns));
722 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
724 /* Prepare CT request */
725 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
727 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
728 ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
729 ct_req->req.rff_id.fc4_feature = fc4feature;
730 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
732 sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
733 sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
734 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
735 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
736 sp->done = qla2x00_async_sns_sp_done;
738 ql_dbg(ql_dbg_disc, vha, 0xffff,
739 "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
740 sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
742 rval = qla2x00_start_sp(sp);
743 if (rval != QLA_SUCCESS) {
744 ql_dbg(ql_dbg_disc, vha, 0x2047,
745 "RFF_ID issue IOCB failed (%d).\n", rval);
758 * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
761 * Returns 0 on success.
764 qla2x00_rnn_id(scsi_qla_host_t *vha)
766 struct qla_hw_data *ha = vha->hw;
768 if (IS_QLA2100(ha) || IS_QLA2200(ha))
769 return qla2x00_sns_rnn_id(vha);
771 return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
774 static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
777 int rval = QLA_MEMORY_ALLOC_FAILED;
778 struct ct_sns_req *ct_req;
780 struct ct_sns_pkt *ct_sns;
782 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
786 sp->type = SRB_CT_PTHRU_CMD;
788 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
790 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
791 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
793 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
794 if (!sp->u.iocb_cmd.u.ctarg.req) {
795 ql_log(ql_log_warn, vha, 0xd041,
796 "%s: Failed to allocate ct_sns request.\n",
801 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
802 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
804 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
805 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
806 ql_log(ql_log_warn, vha, 0xd042,
807 "%s: Failed to allocate ct_sns request.\n",
811 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
812 memset(ct_sns, 0, sizeof(*ct_sns));
813 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
815 /* Prepare CT request */
816 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
818 /* Prepare CT arguments -- port_id, node_name */
819 ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id);
820 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
822 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
823 sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
824 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
826 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
827 sp->done = qla2x00_async_sns_sp_done;
829 ql_dbg(ql_dbg_disc, vha, 0xffff,
830 "Async-%s - hdl=%x portid %06x\n",
831 sp->name, sp->handle, d_id->b24);
833 rval = qla2x00_start_sp(sp);
834 if (rval != QLA_SUCCESS) {
835 ql_dbg(ql_dbg_disc, vha, 0x204d,
836 "RNN_ID issue IOCB failed (%d).\n", rval);
849 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
851 struct qla_hw_data *ha = vha->hw;
854 snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
855 ha->mr.fw_version, qla2x00_version_str);
858 "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
859 ha->fw_major_version, ha->fw_minor_version,
860 ha->fw_subminor_version, qla2x00_version_str);
864 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
867 * Returns 0 on success.
870 qla2x00_rsnn_nn(scsi_qla_host_t *vha)
872 struct qla_hw_data *ha = vha->hw;
874 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
875 ql_dbg(ql_dbg_disc, vha, 0x2050,
876 "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
877 return (QLA_SUCCESS);
880 return qla_async_rsnn_nn(vha);
883 static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
885 int rval = QLA_MEMORY_ALLOC_FAILED;
886 struct ct_sns_req *ct_req;
888 struct ct_sns_pkt *ct_sns;
890 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
894 sp->type = SRB_CT_PTHRU_CMD;
895 sp->name = "rsnn_nn";
896 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
898 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
899 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
901 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
902 if (!sp->u.iocb_cmd.u.ctarg.req) {
903 ql_log(ql_log_warn, vha, 0xd041,
904 "%s: Failed to allocate ct_sns request.\n",
909 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
910 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
912 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
913 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
914 ql_log(ql_log_warn, vha, 0xd042,
915 "%s: Failed to allocate ct_sns request.\n",
919 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
920 memset(ct_sns, 0, sizeof(*ct_sns));
921 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
923 /* Prepare CT request */
924 ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
926 /* Prepare CT arguments -- node_name, symbolic node_name, size */
927 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
929 /* Prepare the Symbolic Node Name */
930 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
931 sizeof(ct_req->req.rsnn_nn.sym_node_name));
932 ct_req->req.rsnn_nn.name_len =
933 (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
936 sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
937 sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
938 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
940 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
941 sp->done = qla2x00_async_sns_sp_done;
943 ql_dbg(ql_dbg_disc, vha, 0xffff,
944 "Async-%s - hdl=%x.\n",
945 sp->name, sp->handle);
947 rval = qla2x00_start_sp(sp);
948 if (rval != QLA_SUCCESS) {
949 ql_dbg(ql_dbg_disc, vha, 0x2043,
950 "RFT_ID issue IOCB failed (%d).\n", rval);
963 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
966 * @scmd_len: Subcommand length
967 * @data_size: response size in bytes
969 * Returns a pointer to the @ha's sns_cmd.
971 static inline struct sns_cmd_pkt *
972 qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
976 struct sns_cmd_pkt *sns_cmd;
977 struct qla_hw_data *ha = vha->hw;
979 sns_cmd = ha->sns_cmd;
980 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
981 wc = data_size / 2; /* Size in 16bit words. */
982 sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
983 put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address);
984 sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
985 sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
986 wc = (data_size - 16) / 4; /* Size in 32bit words. */
987 sns_cmd->p.cmd.size = cpu_to_le16(wc);
989 vha->qla_stats.control_requests++;
995 * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
997 * @fcport: fcport entry to updated
999 * This command uses the old Exectute SNS Command mailbox routine.
1001 * Returns 0 on success.
1004 qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
1006 int rval = QLA_SUCCESS;
1007 struct qla_hw_data *ha = vha->hw;
1008 struct sns_cmd_pkt *sns_cmd;
1011 /* Prepare SNS command request. */
1012 sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
1013 GA_NXT_SNS_DATA_SIZE);
1015 /* Prepare SNS command arguments -- port_id. */
1016 sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
1017 sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
1018 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
1020 /* Execute SNS command. */
1021 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
1022 sizeof(struct sns_cmd_pkt));
1023 if (rval != QLA_SUCCESS) {
1025 ql_dbg(ql_dbg_disc, vha, 0x205f,
1026 "GA_NXT Send SNS failed (%d).\n", rval);
1027 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
1028 sns_cmd->p.gan_data[9] != 0x02) {
1029 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
1030 "GA_NXT failed, rejected request ga_nxt_rsp:\n");
1031 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
1032 sns_cmd->p.gan_data, 16);
1033 rval = QLA_FUNCTION_FAILED;
1035 /* Populate fc_port_t entry. */
1036 fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
1037 fcport->d_id.b.area = sns_cmd->p.gan_data[18];
1038 fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
1040 memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
1041 memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
1043 if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
1044 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
1045 fcport->d_id.b.domain = 0xf0;
1047 ql_dbg(ql_dbg_disc, vha, 0x2061,
1048 "GA_NXT entry - nn %8phN pn %8phN "
1049 "port_id=%02x%02x%02x.\n",
1050 fcport->node_name, fcport->port_name,
1051 fcport->d_id.b.domain, fcport->d_id.b.area,
1052 fcport->d_id.b.al_pa);
1059 * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
1061 * @list: switch info entries to populate
1063 * This command uses the old Exectute SNS Command mailbox routine.
1065 * NOTE: Non-Nx_Ports are not requested.
1067 * Returns 0 on success.
1070 qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
1073 struct qla_hw_data *ha = vha->hw;
1076 struct sns_cmd_pkt *sns_cmd;
1077 uint16_t gid_pt_sns_data_size;
1079 gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
1082 /* Prepare SNS command request. */
1083 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
1084 gid_pt_sns_data_size);
1086 /* Prepare SNS command arguments -- port_type. */
1087 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
1089 /* Execute SNS command. */
1090 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
1091 sizeof(struct sns_cmd_pkt));
1092 if (rval != QLA_SUCCESS) {
1094 ql_dbg(ql_dbg_disc, vha, 0x206d,
1095 "GID_PT Send SNS failed (%d).\n", rval);
1096 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
1097 sns_cmd->p.gid_data[9] != 0x02) {
1098 ql_dbg(ql_dbg_disc, vha, 0x202f,
1099 "GID_PT failed, rejected request, gid_rsp:\n");
1100 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
1101 sns_cmd->p.gid_data, 16);
1102 rval = QLA_FUNCTION_FAILED;
1104 /* Set port IDs in switch info list. */
1105 for (i = 0; i < ha->max_fibre_devices; i++) {
1106 entry = &sns_cmd->p.gid_data[(i * 4) + 16];
1107 list[i].d_id.b.domain = entry[1];
1108 list[i].d_id.b.area = entry[2];
1109 list[i].d_id.b.al_pa = entry[3];
1111 /* Last one exit. */
1112 if (entry[0] & BIT_7) {
1113 list[i].d_id.b.rsvd_1 = entry[0];
1119 * If we've used all available slots, then the switch is
1120 * reporting back more devices that we can handle with this
1121 * single call. Return a failed status, and let GA_NXT handle
1124 if (i == ha->max_fibre_devices)
1125 rval = QLA_FUNCTION_FAILED;
1132 * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
1134 * @list: switch info entries to populate
1136 * This command uses the old Exectute SNS Command mailbox routine.
1138 * Returns 0 on success.
1141 qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1143 int rval = QLA_SUCCESS;
1144 struct qla_hw_data *ha = vha->hw;
1146 struct sns_cmd_pkt *sns_cmd;
1148 for (i = 0; i < ha->max_fibre_devices; i++) {
1150 /* Prepare SNS command request. */
1151 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
1152 GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
1154 /* Prepare SNS command arguments -- port_id. */
1155 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1156 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1157 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1159 /* Execute SNS command. */
1160 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1161 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1162 if (rval != QLA_SUCCESS) {
1164 ql_dbg(ql_dbg_disc, vha, 0x2032,
1165 "GPN_ID Send SNS failed (%d).\n", rval);
1166 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
1167 sns_cmd->p.gpn_data[9] != 0x02) {
1168 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
1169 "GPN_ID failed, rejected request, gpn_rsp:\n");
1170 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
1171 sns_cmd->p.gpn_data, 16);
1172 rval = QLA_FUNCTION_FAILED;
1175 memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
1179 /* Last device exit. */
1180 if (list[i].d_id.b.rsvd_1 != 0)
1188 * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
1190 * @list: switch info entries to populate
1192 * This command uses the old Exectute SNS Command mailbox routine.
1194 * Returns 0 on success.
1197 qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
1199 int rval = QLA_SUCCESS;
1200 struct qla_hw_data *ha = vha->hw;
1202 struct sns_cmd_pkt *sns_cmd;
1204 for (i = 0; i < ha->max_fibre_devices; i++) {
1206 /* Prepare SNS command request. */
1207 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
1208 GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
1210 /* Prepare SNS command arguments -- port_id. */
1211 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1212 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1213 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1215 /* Execute SNS command. */
1216 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1217 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1218 if (rval != QLA_SUCCESS) {
1220 ql_dbg(ql_dbg_disc, vha, 0x203f,
1221 "GNN_ID Send SNS failed (%d).\n", rval);
1222 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
1223 sns_cmd->p.gnn_data[9] != 0x02) {
1224 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
1225 "GNN_ID failed, rejected request, gnn_rsp:\n");
1226 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
1227 sns_cmd->p.gnn_data, 16);
1228 rval = QLA_FUNCTION_FAILED;
1231 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
1234 ql_dbg(ql_dbg_disc, vha, 0x206e,
1235 "GID_PT entry - nn %8phN pn %8phN "
1236 "port_id=%02x%02x%02x.\n",
1237 list[i].node_name, list[i].port_name,
1238 list[i].d_id.b.domain, list[i].d_id.b.area,
1239 list[i].d_id.b.al_pa);
1242 /* Last device exit. */
1243 if (list[i].d_id.b.rsvd_1 != 0)
1251 * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
1254 * This command uses the old Execute SNS Command mailbox routine.
1256 * Returns 0 on success.
1259 qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1262 struct qla_hw_data *ha = vha->hw;
1263 struct sns_cmd_pkt *sns_cmd;
1266 /* Prepare SNS command request. */
1267 sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
1268 RFT_ID_SNS_DATA_SIZE);
1270 /* Prepare SNS command arguments -- port_id, FC-4 types */
1271 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1272 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1273 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1275 sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */
1277 /* Execute SNS command. */
1278 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
1279 sizeof(struct sns_cmd_pkt));
1280 if (rval != QLA_SUCCESS) {
1282 ql_dbg(ql_dbg_disc, vha, 0x2060,
1283 "RFT_ID Send SNS failed (%d).\n", rval);
1284 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1285 sns_cmd->p.rft_data[9] != 0x02) {
1286 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1287 "RFT_ID failed, rejected request rft_rsp:\n");
1288 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1289 sns_cmd->p.rft_data, 16);
1290 rval = QLA_FUNCTION_FAILED;
1292 ql_dbg(ql_dbg_disc, vha, 0x2073,
1293 "RFT_ID exiting normally.\n");
1300 * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
1303 * This command uses the old Execute SNS Command mailbox routine.
1305 * Returns 0 on success.
1308 qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1311 struct qla_hw_data *ha = vha->hw;
1312 struct sns_cmd_pkt *sns_cmd;
1315 /* Prepare SNS command request. */
1316 sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
1317 RNN_ID_SNS_DATA_SIZE);
1319 /* Prepare SNS command arguments -- port_id, nodename. */
1320 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1321 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1322 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1324 sns_cmd->p.cmd.param[4] = vha->node_name[7];
1325 sns_cmd->p.cmd.param[5] = vha->node_name[6];
1326 sns_cmd->p.cmd.param[6] = vha->node_name[5];
1327 sns_cmd->p.cmd.param[7] = vha->node_name[4];
1328 sns_cmd->p.cmd.param[8] = vha->node_name[3];
1329 sns_cmd->p.cmd.param[9] = vha->node_name[2];
1330 sns_cmd->p.cmd.param[10] = vha->node_name[1];
1331 sns_cmd->p.cmd.param[11] = vha->node_name[0];
1333 /* Execute SNS command. */
1334 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
1335 sizeof(struct sns_cmd_pkt));
1336 if (rval != QLA_SUCCESS) {
1338 ql_dbg(ql_dbg_disc, vha, 0x204a,
1339 "RNN_ID Send SNS failed (%d).\n", rval);
1340 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1341 sns_cmd->p.rnn_data[9] != 0x02) {
1342 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1343 "RNN_ID failed, rejected request, rnn_rsp:\n");
1344 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1345 sns_cmd->p.rnn_data, 16);
1346 rval = QLA_FUNCTION_FAILED;
1348 ql_dbg(ql_dbg_disc, vha, 0x204c,
1349 "RNN_ID exiting normally.\n");
1356 * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
1359 * Returns 0 on success.
1362 qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1365 uint16_t mb[MAILBOX_REGISTER_COUNT];
1366 struct qla_hw_data *ha = vha->hw;
1369 if (vha->flags.management_server_logged_in)
1372 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1374 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
1375 if (rval == QLA_MEMORY_ALLOC_FAILED)
1376 ql_dbg(ql_dbg_disc, vha, 0x2085,
1377 "Failed management_server login: loopid=%x "
1378 "rval=%d\n", vha->mgmt_svr_loop_id, rval);
1380 ql_dbg(ql_dbg_disc, vha, 0x2024,
1381 "Failed management_server login: loopid=%x "
1382 "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1383 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
1385 ret = QLA_FUNCTION_FAILED;
1387 vha->flags.management_server_logged_in = 1;
1393 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1395 * @req_size: request size in bytes
1396 * @rsp_size: response size in bytes
1398 * Returns a pointer to the @ha's ms_iocb.
1401 qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1404 ms_iocb_entry_t *ms_pkt;
1405 struct qla_hw_data *ha = vha->hw;
1407 ms_pkt = ha->ms_iocb;
1408 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1410 ms_pkt->entry_type = MS_IOCB_TYPE;
1411 ms_pkt->entry_count = 1;
1412 SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1413 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
1414 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1415 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
1416 ms_pkt->total_dsd_count = cpu_to_le16(2);
1417 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1418 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1420 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address);
1421 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1423 put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address);
1424 ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
1430 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1432 * @req_size: request size in bytes
1433 * @rsp_size: response size in bytes
1435 * Returns a pointer to the @ha's ms_iocb.
1438 qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1441 struct ct_entry_24xx *ct_pkt;
1442 struct qla_hw_data *ha = vha->hw;
1444 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1445 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1447 ct_pkt->entry_type = CT_IOCB_TYPE;
1448 ct_pkt->entry_count = 1;
1449 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
1450 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1451 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
1452 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
1453 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1454 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1456 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address);
1457 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1459 put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address);
1460 ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
1461 ct_pkt->vp_index = vha->vp_idx;
1467 qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1469 struct qla_hw_data *ha = vha->hw;
1470 ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1471 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1473 if (IS_FWI2_CAPABLE(ha)) {
1474 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1475 ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1477 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1478 ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1483 * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for FDMI query.
1484 * @p: CT request buffer
1486 * @rsp_size: response size in bytes
1488 * Returns a pointer to the initialized @ct_req.
1490 static inline struct ct_sns_req *
1491 qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
1494 memset(p, 0, sizeof(struct ct_sns_pkt));
1496 p->p.req.header.revision = 0x01;
1497 p->p.req.header.gs_type = 0xFA;
1498 p->p.req.header.gs_subtype = 0x10;
1499 p->p.req.command = cpu_to_be16(cmd);
1500 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
1506 * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
1509 * Returns 0 on success.
1512 qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1517 ms_iocb_entry_t *ms_pkt;
1518 struct ct_sns_req *ct_req;
1519 struct ct_sns_rsp *ct_rsp;
1521 struct ct_fdmi_hba_attr *eiter;
1522 struct qla_hw_data *ha = vha->hw;
1525 /* Prepare common MS IOCB */
1526 /* Request size adjusted after CT preparation */
1527 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1529 /* Prepare CT request */
1530 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, RHBA_RSP_SIZE);
1531 ct_rsp = &ha->ct_sns->p.rsp;
1533 /* Prepare FDMI command arguments -- attribute block, attributes. */
1534 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
1535 ct_req->req.rhba.entry_count = cpu_to_be32(1);
1536 memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
1537 size = 2 * WWN_SIZE + 4 + 4;
1540 ct_req->req.rhba.attrs.count =
1541 cpu_to_be32(FDMI_HBA_ATTR_COUNT);
1542 entries = &ct_req->req;
1545 eiter = entries + size;
1546 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1547 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1548 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1549 size += 4 + WWN_SIZE;
1551 ql_dbg(ql_dbg_disc, vha, 0x2025,
1552 "NodeName = %8phN.\n", eiter->a.node_name);
1555 eiter = entries + size;
1556 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1557 alen = strlen(QLA2XXX_MANUFACTURER);
1558 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1559 "%s", "QLogic Corporation");
1560 alen += 4 - (alen & 3);
1561 eiter->len = cpu_to_be16(4 + alen);
1564 ql_dbg(ql_dbg_disc, vha, 0x2026,
1565 "Manufacturer = %s.\n", eiter->a.manufacturer);
1567 /* Serial number. */
1568 eiter = entries + size;
1569 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1570 if (IS_FWI2_CAPABLE(ha))
1571 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
1572 sizeof(eiter->a.serial_num));
1574 sn = ((ha->serial0 & 0x1f) << 16) |
1575 (ha->serial2 << 8) | ha->serial1;
1576 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
1577 "%c%05d", 'A' + sn / 100000, sn % 100000);
1579 alen = strlen(eiter->a.serial_num);
1580 alen += 4 - (alen & 3);
1581 eiter->len = cpu_to_be16(4 + alen);
1584 ql_dbg(ql_dbg_disc, vha, 0x2027,
1585 "Serial no. = %s.\n", eiter->a.serial_num);
1588 eiter = entries + size;
1589 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
1590 snprintf(eiter->a.model, sizeof(eiter->a.model),
1591 "%s", ha->model_number);
1592 alen = strlen(eiter->a.model);
1593 alen += 4 - (alen & 3);
1594 eiter->len = cpu_to_be16(4 + alen);
1597 ql_dbg(ql_dbg_disc, vha, 0x2028,
1598 "Model Name = %s.\n", eiter->a.model);
1600 /* Model description. */
1601 eiter = entries + size;
1602 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1603 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
1604 "%s", ha->model_desc);
1605 alen = strlen(eiter->a.model_desc);
1606 alen += 4 - (alen & 3);
1607 eiter->len = cpu_to_be16(4 + alen);
1610 ql_dbg(ql_dbg_disc, vha, 0x2029,
1611 "Model Desc = %s.\n", eiter->a.model_desc);
1613 /* Hardware version. */
1614 eiter = entries + size;
1615 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
1616 if (!IS_FWI2_CAPABLE(ha)) {
1617 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1618 "HW:%s", ha->adapter_id);
1619 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
1620 sizeof(eiter->a.hw_version))) {
1622 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
1623 sizeof(eiter->a.hw_version))) {
1626 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1627 "HW:%s", ha->adapter_id);
1629 alen = strlen(eiter->a.hw_version);
1630 alen += 4 - (alen & 3);
1631 eiter->len = cpu_to_be16(4 + alen);
1634 ql_dbg(ql_dbg_disc, vha, 0x202a,
1635 "Hardware ver = %s.\n", eiter->a.hw_version);
1637 /* Driver version. */
1638 eiter = entries + size;
1639 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1640 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
1641 "%s", qla2x00_version_str);
1642 alen = strlen(eiter->a.driver_version);
1643 alen += 4 - (alen & 3);
1644 eiter->len = cpu_to_be16(4 + alen);
1647 ql_dbg(ql_dbg_disc, vha, 0x202b,
1648 "Driver ver = %s.\n", eiter->a.driver_version);
1650 /* Option ROM version. */
1651 eiter = entries + size;
1652 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1653 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
1654 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1655 alen = strlen(eiter->a.orom_version);
1656 alen += 4 - (alen & 3);
1657 eiter->len = cpu_to_be16(4 + alen);
1660 ql_dbg(ql_dbg_disc, vha , 0x202c,
1661 "Optrom vers = %s.\n", eiter->a.orom_version);
1663 /* Firmware version */
1664 eiter = entries + size;
1665 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1666 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1667 sizeof(eiter->a.fw_version));
1668 alen = strlen(eiter->a.fw_version);
1669 alen += 4 - (alen & 3);
1670 eiter->len = cpu_to_be16(4 + alen);
1673 ql_dbg(ql_dbg_disc, vha, 0x202d,
1674 "Firmware vers = %s.\n", eiter->a.fw_version);
1676 /* Update MS request size. */
1677 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1679 ql_dbg(ql_dbg_disc, vha, 0x202e,
1680 "RHBA identifier = %8phN size=%d.\n",
1681 ct_req->req.rhba.hba_identifier, size);
1682 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
1685 /* Execute MS IOCB */
1686 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1687 sizeof(ms_iocb_entry_t));
1688 if (rval != QLA_SUCCESS) {
1690 ql_dbg(ql_dbg_disc, vha, 0x2030,
1691 "RHBA issue IOCB failed (%d).\n", rval);
1692 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
1694 rval = QLA_FUNCTION_FAILED;
1695 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1696 ct_rsp->header.explanation_code ==
1697 CT_EXPL_ALREADY_REGISTERED) {
1698 ql_dbg(ql_dbg_disc, vha, 0x2034,
1699 "HBA already registered.\n");
1700 rval = QLA_ALREADY_REGISTERED;
1702 ql_dbg(ql_dbg_disc, vha, 0x20ad,
1703 "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
1704 ct_rsp->header.reason_code,
1705 ct_rsp->header.explanation_code);
1708 ql_dbg(ql_dbg_disc, vha, 0x2035,
1709 "RHBA exiting normally.\n");
1716 * qla2x00_fdmi_rpa() - perform RPA registration
1719 * Returns 0 on success.
1722 qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1726 struct qla_hw_data *ha = vha->hw;
1727 ms_iocb_entry_t *ms_pkt;
1728 struct ct_sns_req *ct_req;
1729 struct ct_sns_rsp *ct_rsp;
1731 struct ct_fdmi_port_attr *eiter;
1732 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
1733 struct new_utsname *p_sysid = NULL;
1736 /* Prepare common MS IOCB */
1737 /* Request size adjusted after CT preparation */
1738 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
1740 /* Prepare CT request */
1741 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
1743 ct_rsp = &ha->ct_sns->p.rsp;
1745 /* Prepare FDMI command arguments -- attribute block, attributes. */
1746 memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
1747 size = WWN_SIZE + 4;
1750 ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
1751 entries = &ct_req->req;
1754 eiter = entries + size;
1755 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
1756 eiter->len = cpu_to_be16(4 + 32);
1757 eiter->a.fc4_types[2] = 0x01;
1760 ql_dbg(ql_dbg_disc, vha, 0x2039,
1761 "FC4_TYPES=%02x %02x.\n",
1762 eiter->a.fc4_types[2],
1763 eiter->a.fc4_types[1]);
1765 /* Supported speed. */
1766 eiter = entries + size;
1767 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1768 eiter->len = cpu_to_be16(4 + 4);
1769 if (IS_CNA_CAPABLE(ha))
1770 eiter->a.sup_speed = cpu_to_be32(
1771 FDMI_PORT_SPEED_10GB);
1772 else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1773 eiter->a.sup_speed = cpu_to_be32(
1774 FDMI_PORT_SPEED_32GB|
1775 FDMI_PORT_SPEED_16GB|
1776 FDMI_PORT_SPEED_8GB);
1777 else if (IS_QLA2031(ha))
1778 eiter->a.sup_speed = cpu_to_be32(
1779 FDMI_PORT_SPEED_16GB|
1780 FDMI_PORT_SPEED_8GB|
1781 FDMI_PORT_SPEED_4GB);
1782 else if (IS_QLA25XX(ha))
1783 eiter->a.sup_speed = cpu_to_be32(
1784 FDMI_PORT_SPEED_8GB|
1785 FDMI_PORT_SPEED_4GB|
1786 FDMI_PORT_SPEED_2GB|
1787 FDMI_PORT_SPEED_1GB);
1788 else if (IS_QLA24XX_TYPE(ha))
1789 eiter->a.sup_speed = cpu_to_be32(
1790 FDMI_PORT_SPEED_4GB|
1791 FDMI_PORT_SPEED_2GB|
1792 FDMI_PORT_SPEED_1GB);
1793 else if (IS_QLA23XX(ha))
1794 eiter->a.sup_speed = cpu_to_be32(
1795 FDMI_PORT_SPEED_2GB|
1796 FDMI_PORT_SPEED_1GB);
1798 eiter->a.sup_speed = cpu_to_be32(
1799 FDMI_PORT_SPEED_1GB);
1802 ql_dbg(ql_dbg_disc, vha, 0x203a,
1803 "Supported_Speed=%x.\n", eiter->a.sup_speed);
1805 /* Current speed. */
1806 eiter = entries + size;
1807 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1808 eiter->len = cpu_to_be16(4 + 4);
1809 switch (ha->link_data_rate) {
1810 case PORT_SPEED_1GB:
1811 eiter->a.cur_speed =
1812 cpu_to_be32(FDMI_PORT_SPEED_1GB);
1814 case PORT_SPEED_2GB:
1815 eiter->a.cur_speed =
1816 cpu_to_be32(FDMI_PORT_SPEED_2GB);
1818 case PORT_SPEED_4GB:
1819 eiter->a.cur_speed =
1820 cpu_to_be32(FDMI_PORT_SPEED_4GB);
1822 case PORT_SPEED_8GB:
1823 eiter->a.cur_speed =
1824 cpu_to_be32(FDMI_PORT_SPEED_8GB);
1826 case PORT_SPEED_10GB:
1827 eiter->a.cur_speed =
1828 cpu_to_be32(FDMI_PORT_SPEED_10GB);
1830 case PORT_SPEED_16GB:
1831 eiter->a.cur_speed =
1832 cpu_to_be32(FDMI_PORT_SPEED_16GB);
1834 case PORT_SPEED_32GB:
1835 eiter->a.cur_speed =
1836 cpu_to_be32(FDMI_PORT_SPEED_32GB);
1839 eiter->a.cur_speed =
1840 cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
1845 ql_dbg(ql_dbg_disc, vha, 0x203b,
1846 "Current_Speed=%x.\n", eiter->a.cur_speed);
1848 /* Max frame size. */
1849 eiter = entries + size;
1850 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1851 eiter->len = cpu_to_be16(4 + 4);
1852 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
1853 le16_to_cpu(icb24->frame_payload_size) :
1854 le16_to_cpu(ha->init_cb->frame_payload_size);
1855 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
1858 ql_dbg(ql_dbg_disc, vha, 0x203c,
1859 "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
1861 /* OS device name. */
1862 eiter = entries + size;
1863 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1864 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
1865 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
1866 alen = strlen(eiter->a.os_dev_name);
1867 alen += 4 - (alen & 3);
1868 eiter->len = cpu_to_be16(4 + alen);
1871 ql_dbg(ql_dbg_disc, vha, 0x204b,
1872 "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
1875 eiter = entries + size;
1876 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
1877 p_sysid = utsname();
1879 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1880 "%s", p_sysid->nodename);
1882 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1883 "%s", fc_host_system_hostname(vha->host));
1885 alen = strlen(eiter->a.host_name);
1886 alen += 4 - (alen & 3);
1887 eiter->len = cpu_to_be16(4 + alen);
1890 ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
1892 /* Update MS request size. */
1893 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1895 ql_dbg(ql_dbg_disc, vha, 0x203e,
1896 "RPA portname %016llx, size = %d.\n",
1897 wwn_to_u64(ct_req->req.rpa.port_name), size);
1898 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
1901 /* Execute MS IOCB */
1902 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1903 sizeof(ms_iocb_entry_t));
1904 if (rval != QLA_SUCCESS) {
1906 ql_dbg(ql_dbg_disc, vha, 0x2040,
1907 "RPA issue IOCB failed (%d).\n", rval);
1908 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1910 rval = QLA_FUNCTION_FAILED;
1911 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1912 ct_rsp->header.explanation_code ==
1913 CT_EXPL_ALREADY_REGISTERED) {
1914 ql_dbg(ql_dbg_disc, vha, 0x20cd,
1915 "RPA already registered.\n");
1916 rval = QLA_ALREADY_REGISTERED;
1920 ql_dbg(ql_dbg_disc, vha, 0x2041,
1921 "RPA exiting normally.\n");
1928 * qla2x00_fdmiv2_rhba() - perform RHBA FDMI v2 registration
1931 * Returns 0 on success.
1934 qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
1938 ms_iocb_entry_t *ms_pkt;
1939 struct ct_sns_req *ct_req;
1940 struct ct_sns_rsp *ct_rsp;
1942 struct ct_fdmiv2_hba_attr *eiter;
1943 struct qla_hw_data *ha = vha->hw;
1944 struct new_utsname *p_sysid = NULL;
1947 /* Prepare common MS IOCB */
1948 /* Request size adjusted after CT preparation */
1949 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1951 /* Prepare CT request */
1952 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
1954 ct_rsp = &ha->ct_sns->p.rsp;
1956 /* Prepare FDMI command arguments -- attribute block, attributes. */
1957 memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
1958 ct_req->req.rhba2.entry_count = cpu_to_be32(1);
1959 memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
1960 size = 2 * WWN_SIZE + 4 + 4;
1963 ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
1964 entries = &ct_req->req;
1967 eiter = entries + size;
1968 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1969 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1970 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1971 size += 4 + WWN_SIZE;
1973 ql_dbg(ql_dbg_disc, vha, 0x207d,
1974 "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
1977 eiter = entries + size;
1978 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1979 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1980 "%s", "QLogic Corporation");
1981 eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0';
1982 alen = strlen(eiter->a.manufacturer);
1983 alen += 4 - (alen & 3);
1984 eiter->len = cpu_to_be16(4 + alen);
1987 ql_dbg(ql_dbg_disc, vha, 0x20a5,
1988 "Manufacturer = %s.\n", eiter->a.manufacturer);
1990 /* Serial number. */
1991 eiter = entries + size;
1992 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1993 if (IS_FWI2_CAPABLE(ha))
1994 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
1995 sizeof(eiter->a.serial_num));
1997 sn = ((ha->serial0 & 0x1f) << 16) |
1998 (ha->serial2 << 8) | ha->serial1;
1999 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
2000 "%c%05d", 'A' + sn / 100000, sn % 100000);
2002 alen = strlen(eiter->a.serial_num);
2003 alen += 4 - (alen & 3);
2004 eiter->len = cpu_to_be16(4 + alen);
2007 ql_dbg(ql_dbg_disc, vha, 0x20a6,
2008 "Serial no. = %s.\n", eiter->a.serial_num);
2011 eiter = entries + size;
2012 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
2013 snprintf(eiter->a.model, sizeof(eiter->a.model),
2014 "%s", ha->model_number);
2015 alen = strlen(eiter->a.model);
2016 alen += 4 - (alen & 3);
2017 eiter->len = cpu_to_be16(4 + alen);
2020 ql_dbg(ql_dbg_disc, vha, 0x20a7,
2021 "Model Name = %s.\n", eiter->a.model);
2023 /* Model description. */
2024 eiter = entries + size;
2025 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
2026 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
2027 "%s", ha->model_desc);
2028 alen = strlen(eiter->a.model_desc);
2029 alen += 4 - (alen & 3);
2030 eiter->len = cpu_to_be16(4 + alen);
2033 ql_dbg(ql_dbg_disc, vha, 0x20a8,
2034 "Model Desc = %s.\n", eiter->a.model_desc);
2036 /* Hardware version. */
2037 eiter = entries + size;
2038 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
2039 if (!IS_FWI2_CAPABLE(ha)) {
2040 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2041 "HW:%s", ha->adapter_id);
2042 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
2043 sizeof(eiter->a.hw_version))) {
2045 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
2046 sizeof(eiter->a.hw_version))) {
2049 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2050 "HW:%s", ha->adapter_id);
2052 alen = strlen(eiter->a.hw_version);
2053 alen += 4 - (alen & 3);
2054 eiter->len = cpu_to_be16(4 + alen);
2057 ql_dbg(ql_dbg_disc, vha, 0x20a9,
2058 "Hardware ver = %s.\n", eiter->a.hw_version);
2060 /* Driver version. */
2061 eiter = entries + size;
2062 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
2063 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
2064 "%s", qla2x00_version_str);
2065 alen = strlen(eiter->a.driver_version);
2066 alen += 4 - (alen & 3);
2067 eiter->len = cpu_to_be16(4 + alen);
2070 ql_dbg(ql_dbg_disc, vha, 0x20aa,
2071 "Driver ver = %s.\n", eiter->a.driver_version);
2073 /* Option ROM version. */
2074 eiter = entries + size;
2075 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
2076 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
2077 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2078 alen = strlen(eiter->a.orom_version);
2079 alen += 4 - (alen & 3);
2080 eiter->len = cpu_to_be16(4 + alen);
2083 ql_dbg(ql_dbg_disc, vha , 0x20ab,
2084 "Optrom version = %d.%02d.\n", eiter->a.orom_version[1],
2085 eiter->a.orom_version[0]);
2087 /* Firmware version */
2088 eiter = entries + size;
2089 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
2090 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
2091 sizeof(eiter->a.fw_version));
2092 alen = strlen(eiter->a.fw_version);
2093 alen += 4 - (alen & 3);
2094 eiter->len = cpu_to_be16(4 + alen);
2097 ql_dbg(ql_dbg_disc, vha, 0x20ac,
2098 "Firmware vers = %s.\n", eiter->a.fw_version);
2100 /* OS Name and Version */
2101 eiter = entries + size;
2102 eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
2103 p_sysid = utsname();
2105 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2107 p_sysid->sysname, p_sysid->release, p_sysid->version);
2109 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2110 "%s %s", "Linux", fc_host_system_hostname(vha->host));
2112 alen = strlen(eiter->a.os_version);
2113 alen += 4 - (alen & 3);
2114 eiter->len = cpu_to_be16(4 + alen);
2117 ql_dbg(ql_dbg_disc, vha, 0x20ae,
2118 "OS Name and Version = %s.\n", eiter->a.os_version);
2120 /* MAX CT Payload Length */
2121 eiter = entries + size;
2122 eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
2123 eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size);
2124 eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
2125 eiter->len = cpu_to_be16(4 + 4);
2128 ql_dbg(ql_dbg_disc, vha, 0x20af,
2129 "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
2131 /* Node Sybolic Name */
2132 eiter = entries + size;
2133 eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
2134 qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
2135 sizeof(eiter->a.sym_name));
2136 alen = strlen(eiter->a.sym_name);
2137 alen += 4 - (alen & 3);
2138 eiter->len = cpu_to_be16(4 + alen);
2141 ql_dbg(ql_dbg_disc, vha, 0x20b0,
2142 "Symbolic Name = %s.\n", eiter->a.sym_name);
2145 eiter = entries + size;
2146 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
2147 eiter->a.vendor_id = cpu_to_be32(0x1077);
2148 eiter->len = cpu_to_be16(4 + 4);
2151 ql_dbg(ql_dbg_disc, vha, 0x20b1,
2152 "Vendor Id = %x.\n", eiter->a.vendor_id);
2155 eiter = entries + size;
2156 eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
2157 eiter->a.num_ports = cpu_to_be32(1);
2158 eiter->len = cpu_to_be16(4 + 4);
2161 ql_dbg(ql_dbg_disc, vha, 0x20b2,
2162 "Port Num = %x.\n", eiter->a.num_ports);
2165 eiter = entries + size;
2166 eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
2167 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2168 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2169 size += 4 + WWN_SIZE;
2171 ql_dbg(ql_dbg_disc, vha, 0x20b3,
2172 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
2175 eiter = entries + size;
2176 eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
2177 snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
2178 "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2179 alen = strlen(eiter->a.bios_name);
2180 alen += 4 - (alen & 3);
2181 eiter->len = cpu_to_be16(4 + alen);
2184 ql_dbg(ql_dbg_disc, vha, 0x20b4,
2185 "BIOS Name = %s\n", eiter->a.bios_name);
2187 /* Vendor Identifier */
2188 eiter = entries + size;
2189 eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
2190 snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
2192 alen = strlen(eiter->a.vendor_identifier);
2193 alen += 4 - (alen & 3);
2194 eiter->len = cpu_to_be16(4 + alen);
2197 ql_dbg(ql_dbg_disc, vha, 0x201b,
2198 "Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
2200 /* Update MS request size. */
2201 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2203 ql_dbg(ql_dbg_disc, vha, 0x20b5,
2204 "RHBA identifier = %016llx.\n",
2205 wwn_to_u64(ct_req->req.rhba2.hba_identifier));
2206 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
2209 /* Execute MS IOCB */
2210 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2211 sizeof(ms_iocb_entry_t));
2212 if (rval != QLA_SUCCESS) {
2214 ql_dbg(ql_dbg_disc, vha, 0x20b7,
2215 "RHBA issue IOCB failed (%d).\n", rval);
2216 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
2218 rval = QLA_FUNCTION_FAILED;
2220 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2221 ct_rsp->header.explanation_code ==
2222 CT_EXPL_ALREADY_REGISTERED) {
2223 ql_dbg(ql_dbg_disc, vha, 0x20b8,
2224 "HBA already registered.\n");
2225 rval = QLA_ALREADY_REGISTERED;
2227 ql_dbg(ql_dbg_disc, vha, 0x2016,
2228 "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2229 ct_rsp->header.reason_code,
2230 ct_rsp->header.explanation_code);
2233 ql_dbg(ql_dbg_disc, vha, 0x20b9,
2234 "RHBA FDMI V2 exiting normally.\n");
2241 * qla2x00_fdmi_dhba() -
2244 * Returns 0 on success.
2247 qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
2250 struct qla_hw_data *ha = vha->hw;
2251 ms_iocb_entry_t *ms_pkt;
2252 struct ct_sns_req *ct_req;
2253 struct ct_sns_rsp *ct_rsp;
2256 /* Prepare common MS IOCB */
2257 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
2260 /* Prepare CT request */
2261 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
2262 ct_rsp = &ha->ct_sns->p.rsp;
2264 /* Prepare FDMI command arguments -- portname. */
2265 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
2267 ql_dbg(ql_dbg_disc, vha, 0x2036,
2268 "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
2270 /* Execute MS IOCB */
2271 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2272 sizeof(ms_iocb_entry_t));
2273 if (rval != QLA_SUCCESS) {
2275 ql_dbg(ql_dbg_disc, vha, 0x2037,
2276 "DHBA issue IOCB failed (%d).\n", rval);
2277 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
2279 rval = QLA_FUNCTION_FAILED;
2281 ql_dbg(ql_dbg_disc, vha, 0x2038,
2282 "DHBA exiting normally.\n");
2289 * qla2x00_fdmiv2_rpa() -
2292 * Returns 0 on success.
2295 qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
2299 struct qla_hw_data *ha = vha->hw;
2300 ms_iocb_entry_t *ms_pkt;
2301 struct ct_sns_req *ct_req;
2302 struct ct_sns_rsp *ct_rsp;
2304 struct ct_fdmiv2_port_attr *eiter;
2305 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
2306 struct new_utsname *p_sysid = NULL;
2309 /* Prepare common MS IOCB */
2310 /* Request size adjusted after CT preparation */
2311 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
2313 /* Prepare CT request */
2314 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, RPA_RSP_SIZE);
2315 ct_rsp = &ha->ct_sns->p.rsp;
2317 /* Prepare FDMI command arguments -- attribute block, attributes. */
2318 memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
2319 size = WWN_SIZE + 4;
2322 ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
2323 entries = &ct_req->req;
2326 eiter = entries + size;
2327 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
2328 eiter->len = cpu_to_be16(4 + 32);
2329 eiter->a.fc4_types[2] = 0x01;
2332 ql_dbg(ql_dbg_disc, vha, 0x20ba,
2333 "FC4_TYPES=%02x %02x.\n",
2334 eiter->a.fc4_types[2],
2335 eiter->a.fc4_types[1]);
2337 if (vha->flags.nvme_enabled) {
2338 eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
2339 ql_dbg(ql_dbg_disc, vha, 0x211f,
2340 "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2341 eiter->a.fc4_types[6]);
2344 /* Supported speed. */
2345 eiter = entries + size;
2346 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
2347 eiter->len = cpu_to_be16(4 + 4);
2348 if (IS_CNA_CAPABLE(ha))
2349 eiter->a.sup_speed = cpu_to_be32(
2350 FDMI_PORT_SPEED_10GB);
2351 else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
2352 eiter->a.sup_speed = cpu_to_be32(
2353 FDMI_PORT_SPEED_32GB|
2354 FDMI_PORT_SPEED_16GB|
2355 FDMI_PORT_SPEED_8GB);
2356 else if (IS_QLA2031(ha))
2357 eiter->a.sup_speed = cpu_to_be32(
2358 FDMI_PORT_SPEED_16GB|
2359 FDMI_PORT_SPEED_8GB|
2360 FDMI_PORT_SPEED_4GB);
2361 else if (IS_QLA25XX(ha))
2362 eiter->a.sup_speed = cpu_to_be32(
2363 FDMI_PORT_SPEED_8GB|
2364 FDMI_PORT_SPEED_4GB|
2365 FDMI_PORT_SPEED_2GB|
2366 FDMI_PORT_SPEED_1GB);
2367 else if (IS_QLA24XX_TYPE(ha))
2368 eiter->a.sup_speed = cpu_to_be32(
2369 FDMI_PORT_SPEED_4GB|
2370 FDMI_PORT_SPEED_2GB|
2371 FDMI_PORT_SPEED_1GB);
2372 else if (IS_QLA23XX(ha))
2373 eiter->a.sup_speed = cpu_to_be32(
2374 FDMI_PORT_SPEED_2GB|
2375 FDMI_PORT_SPEED_1GB);
2377 eiter->a.sup_speed = cpu_to_be32(
2378 FDMI_PORT_SPEED_1GB);
2381 ql_dbg(ql_dbg_disc, vha, 0x20bb,
2382 "Supported Port Speed = %x.\n", eiter->a.sup_speed);
2384 /* Current speed. */
2385 eiter = entries + size;
2386 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
2387 eiter->len = cpu_to_be16(4 + 4);
2388 switch (ha->link_data_rate) {
2389 case PORT_SPEED_1GB:
2390 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
2392 case PORT_SPEED_2GB:
2393 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
2395 case PORT_SPEED_4GB:
2396 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
2398 case PORT_SPEED_8GB:
2399 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
2401 case PORT_SPEED_10GB:
2402 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
2404 case PORT_SPEED_16GB:
2405 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
2407 case PORT_SPEED_32GB:
2408 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
2411 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
2416 ql_dbg(ql_dbg_disc, vha, 0x2017,
2417 "Current_Speed = %x.\n", eiter->a.cur_speed);
2419 /* Max frame size. */
2420 eiter = entries + size;
2421 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
2422 eiter->len = cpu_to_be16(4 + 4);
2423 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
2424 le16_to_cpu(icb24->frame_payload_size) :
2425 le16_to_cpu(ha->init_cb->frame_payload_size);
2426 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
2429 ql_dbg(ql_dbg_disc, vha, 0x20bc,
2430 "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
2432 /* OS device name. */
2433 eiter = entries + size;
2434 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
2435 alen = strlen(QLA2XXX_DRIVER_NAME);
2436 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
2437 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
2438 alen += 4 - (alen & 3);
2439 eiter->len = cpu_to_be16(4 + alen);
2442 ql_dbg(ql_dbg_disc, vha, 0x20be,
2443 "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
2446 eiter = entries + size;
2447 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
2448 p_sysid = utsname();
2450 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2451 "%s", p_sysid->nodename);
2453 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2454 "%s", fc_host_system_hostname(vha->host));
2456 alen = strlen(eiter->a.host_name);
2457 alen += 4 - (alen & 3);
2458 eiter->len = cpu_to_be16(4 + alen);
2461 ql_dbg(ql_dbg_disc, vha, 0x201a,
2462 "HostName=%s.\n", eiter->a.host_name);
2465 eiter = entries + size;
2466 eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
2467 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
2468 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2469 size += 4 + WWN_SIZE;
2471 ql_dbg(ql_dbg_disc, vha, 0x20c0,
2472 "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
2475 eiter = entries + size;
2476 eiter->type = cpu_to_be16(FDMI_PORT_NAME);
2477 memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
2478 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2479 size += 4 + WWN_SIZE;
2481 ql_dbg(ql_dbg_disc, vha, 0x20c1,
2482 "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
2484 /* Port Symbolic Name */
2485 eiter = entries + size;
2486 eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
2487 qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
2488 sizeof(eiter->a.port_sym_name));
2489 alen = strlen(eiter->a.port_sym_name);
2490 alen += 4 - (alen & 3);
2491 eiter->len = cpu_to_be16(4 + alen);
2494 ql_dbg(ql_dbg_disc, vha, 0x20c2,
2495 "port symbolic name = %s\n", eiter->a.port_sym_name);
2498 eiter = entries + size;
2499 eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
2500 eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
2501 eiter->len = cpu_to_be16(4 + 4);
2504 ql_dbg(ql_dbg_disc, vha, 0x20c3,
2505 "Port Type = %x.\n", eiter->a.port_type);
2507 /* Class of Service */
2508 eiter = entries + size;
2509 eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
2510 eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
2511 eiter->len = cpu_to_be16(4 + 4);
2514 ql_dbg(ql_dbg_disc, vha, 0x20c4,
2515 "Supported COS = %08x\n", eiter->a.port_supported_cos);
2517 /* Port Fabric Name */
2518 eiter = entries + size;
2519 eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
2520 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2521 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2522 size += 4 + WWN_SIZE;
2524 ql_dbg(ql_dbg_disc, vha, 0x20c5,
2525 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
2528 eiter = entries + size;
2529 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
2530 eiter->a.port_fc4_type[0] = 0;
2531 eiter->a.port_fc4_type[1] = 0;
2532 eiter->a.port_fc4_type[2] = 1;
2533 eiter->a.port_fc4_type[3] = 0;
2534 eiter->len = cpu_to_be16(4 + 32);
2537 ql_dbg(ql_dbg_disc, vha, 0x20c6,
2538 "Port Active FC4 Type = %02x %02x.\n",
2539 eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
2541 if (vha->flags.nvme_enabled) {
2542 eiter->a.port_fc4_type[4] = 0;
2543 eiter->a.port_fc4_type[5] = 0;
2544 eiter->a.port_fc4_type[6] = 1; /* NVMe type 28h */
2545 ql_dbg(ql_dbg_disc, vha, 0x2120,
2546 "NVME Port Active FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2547 eiter->a.port_fc4_type[6]);
2551 eiter = entries + size;
2552 eiter->type = cpu_to_be16(FDMI_PORT_STATE);
2553 eiter->a.port_state = cpu_to_be32(1);
2554 eiter->len = cpu_to_be16(4 + 4);
2557 ql_dbg(ql_dbg_disc, vha, 0x20c7,
2558 "Port State = %x.\n", eiter->a.port_state);
2560 /* Number of Ports */
2561 eiter = entries + size;
2562 eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
2563 eiter->a.num_ports = cpu_to_be32(1);
2564 eiter->len = cpu_to_be16(4 + 4);
2567 ql_dbg(ql_dbg_disc, vha, 0x20c8,
2568 "Number of ports = %x.\n", eiter->a.num_ports);
2571 eiter = entries + size;
2572 eiter->type = cpu_to_be16(FDMI_PORT_ID);
2573 eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
2574 eiter->len = cpu_to_be16(4 + 4);
2577 ql_dbg(ql_dbg_disc, vha, 0x201c,
2578 "Port Id = %x.\n", eiter->a.port_id);
2580 /* Update MS request size. */
2581 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2583 ql_dbg(ql_dbg_disc, vha, 0x2018,
2584 "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
2585 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
2588 /* Execute MS IOCB */
2589 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2590 sizeof(ms_iocb_entry_t));
2591 if (rval != QLA_SUCCESS) {
2593 ql_dbg(ql_dbg_disc, vha, 0x20cb,
2594 "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
2595 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
2597 rval = QLA_FUNCTION_FAILED;
2598 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2599 ct_rsp->header.explanation_code ==
2600 CT_EXPL_ALREADY_REGISTERED) {
2601 ql_dbg(ql_dbg_disc, vha, 0x20ce,
2602 "RPA FDMI v2 already registered\n");
2603 rval = QLA_ALREADY_REGISTERED;
2605 ql_dbg(ql_dbg_disc, vha, 0x2020,
2606 "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2607 ct_rsp->header.reason_code,
2608 ct_rsp->header.explanation_code);
2611 ql_dbg(ql_dbg_disc, vha, 0x20cc,
2612 "RPA FDMI V2 exiting normally.\n");
/*
 * qla2x00_fdmi_register() - register HBA and port attributes with the
 * fabric's FDMI management server.
 * @vha: HA context
 *
 * Logs in to the management server, then issues the FDMI v2 RHBA/RPA
 * registrations; a v1 RHBA/RPA sequence also appears below.  An
 * "already registered" response is handled by deregistering (DHBA)
 * and retrying the registration.
 * NOTE(review): the conditions selecting between the v2 and v1 paths
 * are elided from this view -- confirm ordering against full source.
 *
 * Returns 0 on success.
 */
2619  * qla2x00_fdmi_register() -
2622  * Returns 0 on success.
2625 qla2x00_fdmi_register(scsi_qla_host_t *vha)
2627 int rval = QLA_FUNCTION_FAILED;
2628 struct qla_hw_data *ha = vha->hw;
/* FDMI is not supported on the oldest ISP parts. */
2630 if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
2632 return QLA_FUNCTION_FAILED;
/* Must be logged in to the management server first. */
2634 rval = qla2x00_mgmt_svr_login(vha);
/* FDMI v2 host registration. */
2638 rval = qla2x00_fdmiv2_rhba(vha);
/* Only an "already registered" status is recoverable here. */
2640 if (rval != QLA_ALREADY_REGISTERED)
/* Deregister the stale entry, then register again. */
2643 rval = qla2x00_fdmi_dhba(vha);
2647 rval = qla2x00_fdmiv2_rhba(vha);
/* Port-attribute (RPA) registration follows the HBA registration. */
2651 rval = qla2x00_fdmiv2_rpa(vha);
/* FDMI v1 registration sequence (same shape as the v2 path above). */
2658 rval = qla2x00_fdmi_rhba(vha);
2660 if (rval != QLA_ALREADY_REGISTERED)
2663 rval = qla2x00_fdmi_dhba(vha);
2667 rval = qla2x00_fdmi_rhba(vha);
2671 rval = qla2x00_fdmi_rpa(vha);
/*
 * qla2x00_gfpn_id() - for each entry in @list, query the name server
 * for the fabric port name (WWPN of the attached switch port) and
 * store it in list[i].fabric_port_name.  Uses the shared ha->ct_sns
 * DMA buffer for both request and response.
 */
2677  * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
2679  * @list: switch info entries to populate
2681  * Returns 0 on success.
2684 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
2686 int rval = QLA_SUCCESS;
2688 struct qla_hw_data *ha = vha->hw;
2689 ms_iocb_entry_t *ms_pkt;
2690 struct ct_sns_req *ct_req;
2691 struct ct_sns_rsp *ct_rsp;
/* GFPN_ID results are only consumed by iIDMA-capable adapters. */
2694 if (!IS_IIDMA_CAPABLE(ha))
2695 return QLA_FUNCTION_FAILED;
/* CT passthrough arguments shared by every query in the loop. */
2697 arg.iocb = ha->ms_iocb;
2698 arg.req_dma = ha->ct_sns_dma;
2699 arg.rsp_dma = ha->ct_sns_dma;
2700 arg.req_size = GFPN_ID_REQ_SIZE;
2701 arg.rsp_size = GFPN_ID_RSP_SIZE;
2702 arg.nport_handle = NPH_SNS;
2704 for (i = 0; i < ha->max_fibre_devices; i++) {
2706 /* Prepare common MS IOCB */
2707 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2709 /* Prepare CT request */
2710 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
2712 ct_rsp = &ha->ct_sns->p.rsp;
2714 /* Prepare CT arguments -- port_id */
2715 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2717 /* Execute MS IOCB */
2718 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2719 sizeof(ms_iocb_entry_t));
2720 if (rval != QLA_SUCCESS) {
2722 ql_dbg(ql_dbg_disc, vha, 0x2023,
2723 "GFPN_ID issue IOCB failed (%d).\n", rval);
2725 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2726 "GFPN_ID") != QLA_SUCCESS) {
2727 rval = QLA_FUNCTION_FAILED;
2730 /* Save fabric portname */
2731 memcpy(list[i].fabric_port_name,
2732 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
/* rsvd_1 is set only on the sentinel/last entry of the list. */
2735 /* Last device exit. */
2736 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * qla24xx_prep_ct_fm_req() - zero @p and fill in the CT_IU preamble
 * for a Fabric Management service request.
 * @p: CT request/response packet to initialize
 * @cmd: GS command code (e.g. GPSC_CMD)
 *
 * Returns a pointer to the prepared CT request (return statement is
 * outside this view).
 */
2744 static inline struct ct_sns_req *
2745 qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
2748 memset(p, 0, sizeof(struct ct_sns_pkt));
2750 p->p.req.header.revision = 0x01;
/* gs_type 0xFA / subtype 0x01 = Fabric Management service. */
2751 p->p.req.header.gs_type = 0xFA;
2752 p->p.req.header.gs_subtype = 0x01;
2753 p->p.req.command = cpu_to_be16(cmd);
/* Per CT_IU: max response size in 4-byte words, minus 16-byte header. */
2754 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
/*
 * qla2x00_port_speed_capability() - map a GPSC speed value to the
 * driver's PORT_SPEED_* constant; anything unrecognized falls through
 * to PORT_SPEED_UNKNOWN.
 * NOTE(review): the case labels matching @speed are elided from this
 * view -- confirm the mapping against FC-GS GPSC speed bit values.
 */
2760 qla2x00_port_speed_capability(uint16_t speed)
2764 return PORT_SPEED_1GB;
2766 return PORT_SPEED_2GB;
2768 return PORT_SPEED_4GB;
2770 return PORT_SPEED_10GB;
2772 return PORT_SPEED_8GB;
2774 return PORT_SPEED_16GB;
2776 return PORT_SPEED_32GB;
2778 return PORT_SPEED_64GB;
/* Default: unknown/unsupported speed value. */
2780 return PORT_SPEED_UNKNOWN;
/*
 * qla2x00_gpsc() - query the Fabric Management server for each port's
 * speed capabilities and record the result as a PORT_SPEED_* value.
 * Requires a management-server login; disables further GPSC queries
 * when the switch reports the command unsupported.
 */
2785  * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
2787  * @list: switch info entries to populate
2789  * Returns 0 on success.
2792 qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
2796 struct qla_hw_data *ha = vha->hw;
2797 ms_iocb_entry_t *ms_pkt;
2798 struct ct_sns_req *ct_req;
2799 struct ct_sns_rsp *ct_rsp;
/* GPSC feeds iIDMA; skip entirely when not capable or unsupported. */
2802 if (!IS_IIDMA_CAPABLE(ha))
2803 return QLA_FUNCTION_FAILED;
2804 if (!ha->flags.gpsc_supported)
2805 return QLA_FUNCTION_FAILED;
2807 rval = qla2x00_mgmt_svr_login(vha);
/* CT passthrough args; GPSC goes to the management server handle. */
2811 arg.iocb = ha->ms_iocb;
2812 arg.req_dma = ha->ct_sns_dma;
2813 arg.rsp_dma = ha->ct_sns_dma;
2814 arg.req_size = GPSC_REQ_SIZE;
2815 arg.rsp_size = GPSC_RSP_SIZE;
2816 arg.nport_handle = vha->mgmt_svr_loop_id;
2818 for (i = 0; i < ha->max_fibre_devices; i++) {
2820 /* Prepare common MS IOCB */
2821 ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
2823 /* Prepare CT request */
2824 ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
2826 ct_rsp = &ha->ct_sns->p.rsp;
2828 /* Prepare CT arguments -- port_name */
2829 memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
2832 /* Execute MS IOCB */
2833 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2834 sizeof(ms_iocb_entry_t));
2835 if (rval != QLA_SUCCESS) {
2837 ql_dbg(ql_dbg_disc, vha, 0x2059,
2838 "GPSC issue IOCB failed (%d).\n", rval);
2839 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2840 "GPSC")) != QLA_SUCCESS) {
2841 /* FM command unsupported? */
2842 if (rval == QLA_INVALID_COMMAND &&
2843 (ct_rsp->header.reason_code ==
2844 CT_REASON_INVALID_COMMAND_CODE ||
2845 ct_rsp->header.reason_code ==
2846 CT_REASON_COMMAND_UNSUPPORTED)) {
2847 ql_dbg(ql_dbg_disc, vha, 0x205a,
2848 "GPSC command unsupported, disabling "
/* Remember the switch cannot do GPSC; stop asking. */
2850 ha->flags.gpsc_supported = 0;
2851 rval = QLA_FUNCTION_FAILED;
2854 rval = QLA_FUNCTION_FAILED;
/*
 * NOTE(review): this writes list->fp_speed (element 0) while the
 * loop iterates list[i]; looks like it should be list[i].fp_speed.
 * Confirm against full source before changing.
 */
2856 list->fp_speed = qla2x00_port_speed_capability(
2857 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2858 ql_dbg(ql_dbg_disc, vha, 0x205b,
2859 "GPSC ext entry - fpn "
2860 "%8phN speeds=%04x speed=%04x.\n",
2861 list[i].fabric_port_name,
2862 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2863 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2866 /* Last device exit. */
2867 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * qla2x00_gff_id() - query the name server for each port's FC-4
 * features and classify list[i].fc4_type as FCP_SCSI, OTHER, or (by
 * default) UNKNOWN.  Also captures the NVMe feature nibble.
 */
2875  * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
2878  * @list: switch info entries to populate
2882 qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
2887 ms_iocb_entry_t *ms_pkt;
2888 struct ct_sns_req *ct_req;
2889 struct ct_sns_rsp *ct_rsp;
2890 struct qla_hw_data *ha = vha->hw;
2891 uint8_t fcp_scsi_features = 0;
2894 for (i = 0; i < ha->max_fibre_devices; i++) {
2895 /* Set default FC4 Type as UNKNOWN so the default is to
2896 * Process this port */
2897 list[i].fc4_type = FC4_TYPE_UNKNOWN;
2899 /* Do not attempt GFF_ID if we are not FWI_2 capable */
2900 if (!IS_FWI2_CAPABLE(ha))
/* CT passthrough arguments for this GFF_ID query. */
2903 arg.iocb = ha->ms_iocb;
2904 arg.req_dma = ha->ct_sns_dma;
2905 arg.rsp_dma = ha->ct_sns_dma;
2906 arg.req_size = GFF_ID_REQ_SIZE;
2907 arg.rsp_size = GFF_ID_RSP_SIZE;
2908 arg.nport_handle = NPH_SNS;
2910 /* Prepare common MS IOCB */
2911 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2913 /* Prepare CT request */
2914 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
2916 ct_rsp = &ha->ct_sns->p.rsp;
2918 /* Prepare CT arguments -- port_id */
2919 ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2921 /* Execute MS IOCB */
2922 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2923 sizeof(ms_iocb_entry_t));
2925 if (rval != QLA_SUCCESS) {
2926 ql_dbg(ql_dbg_disc, vha, 0x205c,
2927 "GFF_ID issue IOCB failed (%d).\n", rval);
2928 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2929 "GFF_ID") != QLA_SUCCESS) {
2930 ql_dbg(ql_dbg_disc, vha, 0x205d,
2931 "GFF_ID IOCB status had a failure status code.\n");
/* FC-4 features: low nibble of the FCP-SCSI feature byte. */
2934 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
2935 fcp_scsi_features &= 0x0f;
2937 if (fcp_scsi_features)
2938 list[i].fc4_type = FC4_TYPE_FCP_SCSI;
2940 list[i].fc4_type = FC4_TYPE_OTHER;
/* NVMe (type 28h) feature nibble. */
2943 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
2944 list[i].fc4f_nvme &= 0xf;
2947 /* Last device exit. */
2948 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * qla24xx_post_gpsc_work() - queue an async GPSC query for @fcport on
 * the driver work queue; marks the port FCF_ASYNC_ACTIVE once queued.
 * Returns QLA_FUNCTION_FAILED when no work element can be allocated.
 */
2953 int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
2955 struct qla_work_evt *e;
2957 e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
2959 return QLA_FUNCTION_FAILED;
2961 e->u.fcport.fcport = fcport;
2962 fcport->flags |= FCF_ASYNC_ACTIVE;
2963 return qla2x00_post_work(vha, e);
/*
 * qla24xx_handle_gpsc_event() - GPSC-done event handler.  Bails out
 * if the port is being deleted or if the login/RSCN generation
 * changed while the query was in flight; otherwise schedules iIDMA
 * work to apply the reported speed.
 */
2966 void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
2968 struct fc_port *fcport = ea->fcport;
2970 ql_dbg(ql_dbg_disc, vha, 0x20d8,
2971 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
2972 __func__, fcport->port_name, fcport->disc_state,
2973 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
2974 ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id);
/* Port is going away; nothing to apply. */
2976 if (fcport->disc_state == DSC_DELETE_PEND)
/* gen2 snapshot vs current login generation: result is stale. */
2979 if (ea->sp->gen2 != fcport->login_gen) {
2980 /* target side must have changed it. */
2981 ql_dbg(ql_dbg_disc, vha, 0x20d3,
2982 "%s %8phC generation changed\n",
2983 __func__, fcport->port_name);
/* gen1 snapshot vs current RSCN generation: another RSCN arrived. */
2985 } else if (ea->sp->gen1 != fcport->rscn_gen) {
2989 qla_post_iidma_work(vha, fcport);
/*
 * qla24xx_async_gpsc_sp_done() - completion callback for the async
 * GPSC CT passthrough.  Clears the async flags, handles timeout /
 * entry-status / command-unsupported outcomes, records the port speed
 * on success, and forwards an FCME_GPSC_DONE event.
 */
2992 static void qla24xx_async_gpsc_sp_done(void *s, int res)
2995 struct scsi_qla_host *vha = sp->vha;
2996 struct qla_hw_data *ha = vha->hw;
2997 fc_port_t *fcport = sp->fcport;
2998 struct ct_sns_rsp *ct_rsp;
2999 struct event_arg ea;
3001 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3003 ql_dbg(ql_dbg_disc, vha, 0x2053,
3004 "Async done-%s res %x, WWPN %8phC \n",
3005 sp->name, res, fcport->port_name);
3007 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3009 if (res == QLA_FUNCTION_TIMEOUT)
3012 if (res == (DID_ERROR << 16)) {
3013 /* entry status error */
/* Switch rejected the command outright: stop issuing GPSC. */
3016 if ((ct_rsp->header.reason_code ==
3017 CT_REASON_INVALID_COMMAND_CODE) ||
3018 (ct_rsp->header.reason_code ==
3019 CT_REASON_COMMAND_UNSUPPORTED)) {
3020 ql_dbg(ql_dbg_disc, vha, 0x2019,
3021 "GPSC command unsupported, disabling query.\n");
3022 ha->flags.gpsc_supported = 0;
/* Success: translate the reported speed for this port. */
3026 fcport->fp_speed = qla2x00_port_speed_capability(
3027 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
3029 ql_dbg(ql_dbg_disc, vha, 0x2054,
3030 "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
3031 sp->name, fcport->fabric_port_name,
3032 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
3033 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
3035 memset(&ea, 0, sizeof(ea));
3036 ea.event = FCME_GPSC_DONE;
3040 qla2x00_fcport_event_handler(vha, &ea);
/*
 * qla24xx_async_gpsc() - build and fire an asynchronous GPSC CT
 * passthrough SRB for @fcport, reusing the port's preallocated
 * ct_desc DMA buffer for both request and response.  The completion
 * path is qla24xx_async_gpsc_sp_done().
 * Returns QLA_SUCCESS when the SRB was started.
 */
3046 int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
3048 int rval = QLA_FUNCTION_FAILED;
3049 struct ct_sns_req *ct_req;
/* Only one async CT per port at a time, and only while online. */
3052 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3055 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3059 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot generations so the done path can detect staleness. */
3061 sp->gen1 = fcport->rscn_gen;
3062 sp->gen2 = fcport->login_gen;
3064 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3066 /* CT_IU preamble */
3067 ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
3071 memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
/* Request and response share the per-port ct_desc buffer. */
3074 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3075 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3076 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3077 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3078 sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
3079 sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
/* GPSC is a Fabric Management command: use the mgmt server handle. */
3080 sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
3082 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3083 sp->done = qla24xx_async_gpsc_sp_done;
3085 ql_dbg(ql_dbg_disc, vha, 0x205e,
3086 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
3087 sp->name, fcport->port_name, sp->handle,
3088 fcport->loop_id, fcport->d_id.b.domain,
3089 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3091 rval = qla2x00_start_sp(sp);
3092 if (rval != QLA_SUCCESS)
/* Error unwind: clear the async flags set above. */
3098 fcport->flags &= ~FCF_ASYNC_SENT;
3100 fcport->flags &= ~FCF_ASYNC_ACTIVE;
/*
 * qla24xx_post_gpnid_work() - queue a GPN_ID (WWPN-by-port-ID) query
 * on the driver work queue.  Refuses new work while the driver is
 * unloading; fails when no work element can be allocated.
 */
3104 int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
3106 struct qla_work_evt *e;
3108 if (test_bit(UNLOADING, &vha->dpc_flags))
3111 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID)
3113 return QLA_FUNCTION_FAILED;
3115 e->u.gpnid.id = *id;
3116 return qla2x00_post_work(vha, e);
/*
 * qla24xx_sp_unmap() - free the DMA buffers attached to an SRB,
 * dispatching on sp->type (ELS PLOGI payloads vs. CT passthrough
 * request/response buffers).  Pointers are NULLed after freeing to
 * guard against double-free.
 */
3119 void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
3121 struct srb_iocb *c = &sp->u.iocb_cmd;
/* ELS PLOGI case: free tx (request) and rx (response) payloads. */
3125 if (c->u.els_plogi.els_plogi_pyld)
3126 dma_free_coherent(&vha->hw->pdev->dev,
3127 c->u.els_plogi.tx_size,
3128 c->u.els_plogi.els_plogi_pyld,
3129 c->u.els_plogi.els_plogi_pyld_dma);
3131 if (c->u.els_plogi.els_resp_pyld)
3132 dma_free_coherent(&vha->hw->pdev->dev,
3133 c->u.els_plogi.rx_size,
3134 c->u.els_plogi.els_resp_pyld,
3135 c->u.els_plogi.els_resp_pyld_dma);
3137 case SRB_CT_PTHRU_CMD:
/* CT passthrough: free request then response buffers. */
3139 if (sp->u.iocb_cmd.u.ctarg.req) {
3140 dma_free_coherent(&vha->hw->pdev->dev,
3141 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3142 sp->u.iocb_cmd.u.ctarg.req,
3143 sp->u.iocb_cmd.u.ctarg.req_dma);
3144 sp->u.iocb_cmd.u.ctarg.req = NULL;
3147 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3148 dma_free_coherent(&vha->hw->pdev->dev,
3149 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3150 sp->u.iocb_cmd.u.ctarg.rsp,
3151 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3152 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * qla24xx_handle_gpnid_event() - act on a completed GPN_ID query.
 *
 * Cable-disconnected path: schedule deletion of the fcport matching
 * the queried Nport ID.  Cable-connected path: look the WWPN up in
 * the port list; resolve Nport-ID conflicts by deleting the stale
 * session, then revalidate/login/update the existing fcport according
 * to its disc_state -- or create a brand-new session if the WWPN is
 * unknown.
 */
3160 void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3162 fc_port_t *fcport, *conflict, *t;
3165 ql_dbg(ql_dbg_disc, vha, 0xffff,
3166 "%s %d port_id: %06x\n",
3167 __func__, __LINE__, ea->id.b24);
3170 /* cable is disconnected */
3171 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
3172 if (fcport->d_id.b24 == ea->id.b24)
3173 fcport->scan_state = QLA_FCPORT_SCAN;
3175 qlt_schedule_sess_for_deletion(fcport);
3178 /* cable is connected */
3179 fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
/* Known WWPN: purge any other fcport squatting on this Nport ID. */
3181 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3183 if ((conflict->d_id.b24 == ea->id.b24) &&
3184 (fcport != conflict))
3186 * 2 fcports with conflict Nport ID or
3187 * an existing fcport is having nport ID
3188 * conflict with new fcport.
3191 conflict->scan_state = QLA_FCPORT_SCAN;
3193 qlt_schedule_sess_for_deletion(conflict);
3196 fcport->scan_needed = 0;
3198 fcport->scan_state = QLA_FCPORT_FOUND;
3199 fcport->flags |= FCF_FABRIC_DEVICE;
/* Re-arm the login retry budget if it was exhausted. */
3200 if (fcport->login_retry == 0) {
3201 fcport->login_retry =
3202 vha->hw->login_retry_count;
3203 ql_dbg(ql_dbg_disc, vha, 0xffff,
3204 "Port login retry %8phN, lid 0x%04x cnt=%d.\n",
3205 fcport->port_name, fcport->loop_id,
3206 fcport->login_retry);
3208 switch (fcport->disc_state) {
3209 case DSC_LOGIN_COMPLETE:
3210 /* recheck session is still intact. */
3211 ql_dbg(ql_dbg_disc, vha, 0x210d,
3212 "%s %d %8phC revalidate session with ADISC\n",
3213 __func__, __LINE__, fcport->port_name);
3214 data[0] = data[1] = 0;
3215 qla2x00_post_async_adisc_work(vha, fcport,
3219 ql_dbg(ql_dbg_disc, vha, 0x210d,
3220 "%s %d %8phC login\n", __func__, __LINE__,
/* Adopt the (possibly new) Nport ID before logging in. */
3222 fcport->d_id = ea->id;
3223 qla24xx_fcport_handle_login(vha, fcport);
3225 case DSC_DELETE_PEND:
3226 fcport->d_id = ea->id;
3229 fcport->d_id = ea->id;
/* Unknown WWPN: clear any conflicting session, then create one. */
3233 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3235 if (conflict->d_id.b24 == ea->id.b24) {
3236 /* 2 fcports with conflict Nport ID or
3237 * an existing fcport is having nport ID
3238 * conflict with new fcport.
3240 ql_dbg(ql_dbg_disc, vha, 0xffff,
3241 "%s %d %8phC DS %d\n",
3243 conflict->port_name,
3244 conflict->disc_state);
3246 conflict->scan_state = QLA_FCPORT_SCAN;
3247 qlt_schedule_sess_for_deletion(conflict);
3251 /* create new fcport */
3252 ql_dbg(ql_dbg_disc, vha, 0x2065,
3253 "%s %d %8phC post new sess\n",
3254 __func__, __LINE__, ea->port_name);
3255 qla24xx_post_newsess_work(vha, &ea->id,
3256 ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
/*
 * qla2x00_async_gpnid_sp_done() - completion callback for the async
 * GPN_ID query.  Unlinks the SRB from vha->gpnid_list, re-posts the
 * query on timeout or when another RSCN arrived meanwhile (gen1),
 * otherwise forwards an FCME_GPNID_DONE event.  Buffer teardown is
 * deferred to a QLA_EVT_UNMAP work item; the inline frees below are
 * the fallback when that work item cannot be allocated.
 */
3261 static void qla2x00_async_gpnid_sp_done(void *s, int res)
3264 struct scsi_qla_host *vha = sp->vha;
3265 struct ct_sns_req *ct_req =
3266 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3267 struct ct_sns_rsp *ct_rsp =
3268 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3269 struct event_arg ea;
3270 struct qla_work_evt *e;
3271 unsigned long flags;
3274 ql_dbg(ql_dbg_disc, vha, 0x2066,
3275 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
3276 sp->name, res, sp->gen1, &ct_req->req.port_id.port_id,
3277 ct_rsp->rsp.gpn_id.port_name);
3279 ql_dbg(ql_dbg_disc, vha, 0x2066,
3280 "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
3281 sp->name, sp->gen1, &ct_req->req.port_id.port_id,
3282 ct_rsp->rsp.gpn_id.port_name);
/* Build the event payload from the CT request/response pair. */
3284 memset(&ea, 0, sizeof(ea));
3285 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
3287 ea.id = be_to_port_id(ct_req->req.port_id.port_id);
3289 ea.event = FCME_GPNID_DONE;
/* Remove this query from the in-flight gpnid_list. */
3291 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3292 list_del(&sp->elem);
3293 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3296 if (res == QLA_FUNCTION_TIMEOUT) {
3297 qla24xx_post_gpnid_work(sp->vha, &ea.id);
/* gen1 set => another RSCN for this ID arrived; query again. */
3301 } else if (sp->gen1) {
3302 /* There was another RSCN for this Nport ID */
3303 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3308 qla2x00_fcport_event_handler(vha, &ea);
3310 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
3312 /* please ignore kernel warning. otherwise, we have mem leak. */
3313 if (sp->u.iocb_cmd.u.ctarg.req) {
3314 dma_free_coherent(&vha->hw->pdev->dev,
3315 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3316 sp->u.iocb_cmd.u.ctarg.req,
3317 sp->u.iocb_cmd.u.ctarg.req_dma);
3318 sp->u.iocb_cmd.u.ctarg.req = NULL;
3320 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3321 dma_free_coherent(&vha->hw->pdev->dev,
3322 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3323 sp->u.iocb_cmd.u.ctarg.rsp,
3324 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3325 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3333 qla2x00_post_work(vha, e);
/*
 * qla24xx_async_gpnid() - issue an asynchronous GPN_ID (WWPN by Nport
 * ID) name-server query.  Deduplicates against queries already in
 * flight on vha->gpnid_list, allocates dedicated request/response
 * DMA buffers, and starts the CT passthrough SRB.  On start failure
 * the SRB is unlinked and the buffers freed here.
 */
3336 /* Get WWPN with Nport ID. */
3337 int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3339 int rval = QLA_FUNCTION_FAILED;
3340 struct ct_sns_req *ct_req;
3342 struct ct_sns_pkt *ct_sns;
3343 unsigned long flags;
3345 if (!vha->flags.online)
3348 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
3352 sp->type = SRB_CT_PTHRU_CMD;
3354 sp->u.iocb_cmd.u.ctarg.id = *id;
3356 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Skip if a GPN_ID for the same Nport ID is already in flight. */
3358 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3359 list_for_each_entry(tsp, &vha->gpnid_list, elem) {
3360 if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
3362 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3367 list_add_tail(&sp->elem, &vha->gpnid_list);
3368 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
/* Dedicated DMA buffers; freed in the done path / unmap work. */
3370 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3371 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
3373 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
3374 if (!sp->u.iocb_cmd.u.ctarg.req) {
3375 ql_log(ql_log_warn, vha, 0xd041,
3376 "Failed to allocate ct_sns request.\n");
3380 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
3381 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
3383 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
3384 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
3385 ql_log(ql_log_warn, vha, 0xd042,
3386 "Failed to allocate ct_sns request.\n");
3390 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
3391 memset(ct_sns, 0, sizeof(*ct_sns));
3393 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3394 /* CT_IU preamble */
3395 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
3398 ct_req->req.port_id.port_id = port_id_to_be_id(*id);
3400 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
3401 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
3402 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3404 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3405 sp->done = qla2x00_async_gpnid_sp_done;
3407 ql_dbg(ql_dbg_disc, vha, 0x2067,
3408 "Async-%s hdl=%x ID %3phC.\n", sp->name,
3409 sp->handle, &ct_req->req.port_id.port_id);
3411 rval = qla2x00_start_sp(sp);
3412 if (rval != QLA_SUCCESS)
/*
 * Error unwind.
 * NOTE(review): list_del here is under vport_slock while the
 * add/del above use tgt.sess_lock -- confirm which lock is meant
 * to protect gpnid_list.
 */
3418 spin_lock_irqsave(&vha->hw->vport_slock, flags);
3419 list_del(&sp->elem);
3420 spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
3422 if (sp->u.iocb_cmd.u.ctarg.req) {
3423 dma_free_coherent(&vha->hw->pdev->dev,
3424 sizeof(struct ct_sns_pkt),
3425 sp->u.iocb_cmd.u.ctarg.req,
3426 sp->u.iocb_cmd.u.ctarg.req_dma);
3427 sp->u.iocb_cmd.u.ctarg.req = NULL;
3429 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3430 dma_free_coherent(&vha->hw->pdev->dev,
3431 sizeof(struct ct_sns_pkt),
3432 sp->u.iocb_cmd.u.ctarg.rsp,
3433 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3434 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * qla24xx_handle_gffid_event() - GFF_ID-done event handler: continue
 * discovery for the port by posting GNL (get name list) work.
 */
3442 void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3444 fc_port_t *fcport = ea->fcport;
3446 qla24xx_post_gnl_work(vha, fcport);
/*
 * qla24xx_async_gffid_sp_done() - completion callback for the async
 * GFF_ID query.  Extracts the FCP-SCSI and NVMe FC-4 feature nibbles
 * from the response into the fcport, then forwards an FCME_GFFID_DONE
 * event.
 */
3449 void qla24xx_async_gffid_sp_done(void *s, int res)
3452 struct scsi_qla_host *vha = sp->vha;
3453 fc_port_t *fcport = sp->fcport;
3454 struct ct_sns_rsp *ct_rsp;
3455 struct event_arg ea;
3457 ql_dbg(ql_dbg_disc, vha, 0x2133,
3458 "Async done-%s res %x ID %x. %8phC\n",
3459 sp->name, res, fcport->d_id.b24, fcport->port_name);
3461 fcport->flags &= ~FCF_ASYNC_SENT;
3462 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3464 * FC-GS-7, 5.2.3.12 FC-4 Features - format
3465 * The format of the FC-4 Features object, as defined by the FC-4,
3466 * Shall be an array of 4-bit values, one for each type code value
/* FCP-SCSI feature nibble (FC-4 type 08h). */
3469 if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
3472 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
3473 fcport->fc4_type &= 0xf;
/* NVMe feature nibble (FC-4 type 28h). */
3476 if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
3477 /* w5 [00:03]/28h */
3479 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
3480 fcport->fc4f_nvme &= 0xf;
3484 memset(&ea, 0, sizeof(ea));
3486 ea.fcport = sp->fcport;
3488 ea.event = FCME_GFFID_DONE;
3490 qla2x00_fcport_event_handler(vha, &ea);
/*
 * qla24xx_async_gffid() - build and fire an asynchronous GFF_ID CT
 * passthrough SRB for @fcport, reusing the port's preallocated
 * ct_desc DMA buffer.  Completion is qla24xx_async_gffid_sp_done().
 * Returns QLA_SUCCESS when the SRB was started; clears
 * FCF_ASYNC_SENT on the error path.
 */
3494 /* Get FC4 Feature with Nport ID. */
3495 int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
3497 int rval = QLA_FUNCTION_FAILED;
3498 struct ct_sns_req *ct_req;
/* One async CT per port at a time, and only while online. */
3501 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3504 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3508 fcport->flags |= FCF_ASYNC_SENT;
3509 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot generations for staleness checks at completion. */
3511 sp->gen1 = fcport->rscn_gen;
3512 sp->gen2 = fcport->login_gen;
3514 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3515 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3517 /* CT_IU preamble */
3518 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
/* GFF_ID argument is the 3-byte Nport ID, big-endian byte order. */
3521 ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
3522 ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
3523 ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
3525 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3526 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3527 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3528 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3529 sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
3530 sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
3531 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3533 sp->done = qla24xx_async_gffid_sp_done;
3535 ql_dbg(ql_dbg_disc, vha, 0x2132,
3536 "Async-%s hdl=%x %8phC.\n", sp->name,
3537 sp->handle, fcport->port_name);
3539 rval = qla2x00_start_sp(sp);
3540 if (rval != QLA_SUCCESS)
/* Error unwind: allow a future retry. */
3546 fcport->flags &= ~FCF_ASYNC_SENT;
3550 /* GPN_FT + GNN_FT*/
/*
 * qla2x00_is_a_vp() - return whether @wwn matches the port name of
 * one of this HBA's own virtual ports; walks ha->vp_list under
 * vport_slock.  Fast-path exit when no vhosts exist.
 */
3551 static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
3553 struct qla_hw_data *ha = vha->hw;
3554 scsi_qla_host_t *vp;
3555 unsigned long flags;
3559 if (!ha->num_vhosts)
3562 spin_lock_irqsave(&ha->vport_slock, flags);
3563 list_for_each_entry(vp, &ha->vp_list, list) {
3564 twwn = wwn_to_u64(vp->port_name);
3570 spin_unlock_irqrestore(&ha->vport_slock, flags);
/*
 * qla24xx_async_gnnft_done() - post-process a completed fabric scan
 * (GPN_FT/GNN_FT results in vha->scan.l).
 *
 * Aborts/retries the scan on chip reset; otherwise marks every fcport
 * QLA_FCPORT_SCAN, then walks the scan results: deduplicates Nport
 * IDs, skips our own port / reserved domains / our own vports,
 * matches entries to existing fcports (marking them FOUND, fixing up
 * changed Nport IDs) and creates sessions for new WWPNs.  Finally,
 * ports not re-found are logged out / scheduled for deletion (FCP2
 * devices keep their login), re-found ports are (re)logged-in, the
 * SRB buffers are unmapped, and a rescan is requested if anything is
 * still pending.
 */
3575 void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3580 struct fab_scan_rp *rp, *trp;
3581 unsigned long flags;
3583 u16 dup = 0, dup_cnt = 0;
3585 ql_dbg(ql_dbg_disc, vha, 0xffff,
3586 "%s enter\n", __func__);
/* Scan data predates a chip reset: retry or give up. */
3588 if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
3589 ql_dbg(ql_dbg_disc, vha, 0xffff,
3590 "%s scan stop due to chip reset %x/%x\n",
3591 sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
3597 vha->scan.scan_retry++;
3598 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3599 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3600 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3602 ql_dbg(ql_dbg_disc, vha, 0xffff,
3603 "Fabric scan failed on all retries.\n");
3607 vha->scan.scan_retry = 0;
/* Assume gone until re-found in the scan results below. */
3609 list_for_each_entry(fcport, &vha->vp_fcports, list)
3610 fcport->scan_state = QLA_FCPORT_SCAN;
3612 for (i = 0; i < vha->hw->max_fibre_devices; i++) {
3616 rp = &vha->scan.l[i];
3619 wwn = wwn_to_u64(rp->port_name);
3623 /* Remove duplicate NPORT ID entries from switch data base */
3624 for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
3625 trp = &vha->scan.l[k];
3626 if (rp->id.b24 == trp->id.b24) {
3629 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
3631 "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n",
3632 rp->id.b24, rp->port_name, trp->port_name);
3633 memset(trp, 0, sizeof(*trp));
/* Skip our own port. */
3637 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
3640 /* Bypass reserved domain fields. */
3641 if ((rp->id.b.domain & 0xf0) == 0xf0)
3644 /* Bypass virtual ports of the same host. */
3645 if (qla2x00_is_a_vp(vha, wwn))
/* Match against existing fcports by WWPN. */
3648 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3649 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
3651 fcport->scan_needed = 0;
3652 fcport->scan_state = QLA_FCPORT_FOUND;
3655 * If device was not a fabric device before.
3657 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3658 qla2x00_clear_loop_id(fcport);
3659 fcport->flags |= FCF_FABRIC_DEVICE;
/* Nport ID changed: tear down the old session first. */
3660 } else if (fcport->d_id.b24 != rp->id.b24) {
3661 qlt_schedule_sess_for_deletion(fcport);
3663 fcport->d_id.b24 = rp->id.b24;
/* WWPN not known yet: create a new session. */
3668 ql_dbg(ql_dbg_disc, vha, 0xffff,
3669 "%s %d %8phC post new sess\n",
3670 __func__, __LINE__, rp->port_name);
3671 qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
3672 rp->node_name, NULL, rp->fc4type);
3677 ql_log(ql_log_warn, vha, 0xffff,
3678 "Detected %d duplicate NPORT ID(s) from switch data base\n",
3683 * Logout all previous fabric dev marked lost, except FCP2 devices.
3685 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3686 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3687 fcport->scan_needed = 0;
/* Not re-found by this scan: schedule deletion if appropriate. */
3691 if (fcport->scan_state != QLA_FCPORT_FOUND) {
3692 fcport->scan_needed = 0;
3693 if ((qla_dual_mode_enabled(vha) ||
3694 qla_ini_mode_enabled(vha)) &&
3695 atomic_read(&fcport->state) == FCS_ONLINE) {
3696 if (fcport->loop_id != FC_NO_LOOP_ID) {
/* FCP2 (e.g. tape) devices: keep the login across loss. */
3697 if (fcport->flags & FCF_FCP2_DEVICE)
3698 fcport->logout_on_delete = 0;
3700 ql_dbg(ql_dbg_disc, vha, 0x20f0,
3701 "%s %d %8phC post del sess\n",
3705 qlt_schedule_sess_for_deletion(fcport);
/* Re-found or not yet logged in: (re)start the login. */
3710 if (fcport->scan_needed ||
3711 fcport->disc_state != DSC_LOGIN_COMPLETE) {
3712 if (fcport->login_retry == 0) {
3713 fcport->login_retry =
3714 vha->hw->login_retry_count;
3715 ql_dbg(ql_dbg_disc, vha, 0x20a3,
3716 "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
3717 fcport->port_name, fcport->loop_id,
3718 fcport->login_retry);
3720 fcport->scan_needed = 0;
3721 qla24xx_fcport_handle_login(vha, fcport);
/* Release scan SRB buffers and clear the scanning flag. */
3728 qla24xx_sp_unmap(vha, sp);
3729 spin_lock_irqsave(&vha->work_lock, flags);
3730 vha->scan.scan_flags &= ~SF_SCANNING;
3731 spin_unlock_irqrestore(&vha->work_lock, flags);
/* Anything still pending triggers another loop resync/scan. */
3734 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3735 if (fcport->scan_needed) {
3736 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3737 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/*
 * qla2x00_post_gnnft_gpnft_done_work - queue GPNFT/GNNFT completion work.
 *
 * Defers scan-completion processing out of (potentially) interrupt
 * context onto the driver work queue. Only QLA_EVT_GPNFT_DONE and
 * QLA_EVT_GNNFT_DONE events are accepted.
 *
 * Returns QLA_PARAMETER_ERROR for any other event code,
 * QLA_FUNCTION_FAILED if the work element cannot be allocated,
 * otherwise the result of qla2x00_post_work().
 *
 * NOTE(review): lines are elided here (e.g. the NULL check after
 * qla2x00_alloc_work and the sp assignment into the event).
 */
3744 static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
3747 struct qla_work_evt *e;
3749 if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
3750 return QLA_PARAMETER_ERROR;
3752 e = qla2x00_alloc_work(vha, cmd);
3754 return QLA_FUNCTION_FAILED;
3758 return qla2x00_post_work(vha, e);
/*
 * qla2x00_post_nvme_gpnft_work - queue a follow-up GPN_FT scan for NVMe.
 *
 * After the FCP scan pass, this schedules a second GPN_FT query with
 * fc4_type forced to FC4_TYPE_NVME so FC-NVMe-capable ports are also
 * discovered. Only the QLA_EVT_GPNFT event code is accepted.
 *
 * Returns QLA_PARAMETER_ERROR for any other event code,
 * QLA_FUNCTION_FAILED if the work element cannot be allocated,
 * otherwise the result of qla2x00_post_work().
 *
 * NOTE(review): the NULL check after qla2x00_alloc_work and the sp
 * assignment appear elided from this excerpt.
 */
3761 static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
3764 struct qla_work_evt *e;
3766 if (cmd != QLA_EVT_GPNFT)
3767 return QLA_PARAMETER_ERROR;
3769 e = qla2x00_alloc_work(vha, cmd);
3771 return QLA_FUNCTION_FAILED;
3773 e->u.gpnft.fc4_type = FC4_TYPE_NVME;
3776 return qla2x00_post_work(vha, e);
/*
 * qla2x00_find_free_fcp_nvme_slot - merge a GPN_FT/GNN_FT response into
 * the host's fabric scan list (vha->scan.l).
 *
 * Each response entry carries a 3-byte port ID and an 8-byte WWN.
 * For FCP results: GPN_FT entries populate fresh slots with port name,
 * ID and FS_FC4TYPE_FCP; GNN_FT entries are matched by port ID and the
 * node name is copied into the matching slot. For NVMe results: a port
 * already present (matched by port name) gets FS_FC4TYPE_NVME OR'ed in
 * (dual FCP+NVMe port); otherwise a free slot (zero port name) is
 * claimed for the NVMe-only port.
 *
 * NOTE(review): loop indices, slot-advance and break statements are
 * elided from this excerpt; comments describe only the visible logic.
 */
3779 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
3782 struct qla_hw_data *ha = vha->hw;
3783 int num_fibre_dev = ha->max_fibre_devices;
3784 struct ct_sns_req *ct_req =
3785 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3786 struct ct_sns_gpnft_rsp *ct_rsp =
3787 (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3788 struct ct_sns_gpn_ft_data *d;
3789 struct fab_scan_rp *rp;
/* Distinguish GPN_FT vs GNN_FT by the CT command in the request. */
3790 u16 cmd = be16_to_cpu(ct_req->command);
/* sp->gen2 was stashed with the fc4 type when the scan was issued. */
3791 u8 fc4_type = sp->gen2;
3798 for (i = 0; i < num_fibre_dev; i++) {
3799 d = &ct_rsp->entries[i];
/* Reassemble the 24-bit NPORT ID from the 3 response bytes. */
3802 id.b.domain = d->port_id[0];
3803 id.b.area = d->port_id[1];
3804 id.b.al_pa = d->port_id[2];
3805 wwn = wwn_to_u64(d->port_name);
/* Empty/invalid response entry: nothing to record. */
3807 if (id.b24 == 0 || wwn == 0)
3810 if (fc4_type == FC4_TYPE_FCP_SCSI) {
3811 if (cmd == GPN_FT_CMD) {
/* GPN_FT: record port name + FCP capability in the next slot. */
3812 rp = &vha->scan.l[j];
3814 memcpy(rp->port_name, d->port_name, 8);
3816 rp->fc4type = FS_FC4TYPE_FCP;
/* GNN_FT: find the slot with the same port ID and fill in
 * the node name from this response. */
3818 for (k = 0; k < num_fibre_dev; k++) {
3819 rp = &vha->scan.l[k];
3820 if (id.b24 == rp->id.b24) {
3821 memcpy(rp->node_name,
3828 /* Search if the fibre device supports FC4_TYPE_NVME */
3829 if (cmd == GPN_FT_CMD) {
3832 for (k = 0; k < num_fibre_dev; k++) {
3833 rp = &vha->scan.l[k];
3834 if (!memcmp(rp->port_name,
3837 * Supports FC-NVMe & FCP
3839 rp->fc4type |= FS_FC4TYPE_NVME;
3845 /* We found new FC-NVMe only port */
3847 for (k = 0; k < num_fibre_dev; k++) {
3848 rp = &vha->scan.l[k];
/* Slot already occupied (non-zero port name): keep looking. */
3849 if (wwn_to_u64(rp->port_name)) {
3853 memcpy(rp->port_name,
/* GNN_FT for NVMe: match by port ID and copy node name. */
3862 for (k = 0; k < num_fibre_dev; k++) {
3863 rp = &vha->scan.l[k];
3864 if (id.b24 == rp->id.b24) {
3865 memcpy(rp->node_name,
/*
 * qla2x00_async_gpnft_gnnft_sp_done - SRB completion callback shared by
 * the async GPN_FT and GNN_FT name-server queries.
 *
 * May run in interrupt context, so all heavy-weight completion handling
 * (buffer teardown, fcport reconciliation) is deferred to work-queue
 * events rather than done inline.
 *
 * NOTE(review): the error-path condition, variable declarations and
 * several closing braces are elided from this excerpt.
 */
3875 static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
3878 struct scsi_qla_host *vha = sp->vha;
3879 struct ct_sns_req *ct_req =
3880 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3881 u16 cmd = be16_to_cpu(ct_req->command);
3882 u8 fc4_type = sp->gen2;
3883 unsigned long flags;
3886 /* gen2 field is holding the fc4type */
3887 ql_dbg(ql_dbg_disc, vha, 0xffff,
3888 "Async done-%s res %x FC4Type %x\n",
3889 sp->name, res, sp->gen2);
3891 del_timer(&sp->u.iocb_cmd.timer);
/* Failure path (condition elided): defer cleanup to the work queue. */
3894 unsigned long flags;
3895 const char *name = sp->name;
3898 * We are in an Interrupt context, queue up this
3899 * sp for GNNFT_DONE work. This will allow all
3900 * the resource to get freed up.
3902 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3903 QLA_EVT_GNNFT_DONE);
3905 /* Cleanup here to prevent memory leak */
3906 qla24xx_sp_unmap(vha, sp);
/* Mark scan over and count the failed attempt. */
3908 spin_lock_irqsave(&vha->work_lock, flags);
3909 vha->scan.scan_flags &= ~SF_SCANNING;
3910 vha->scan.scan_retry++;
3911 spin_unlock_irqrestore(&vha->work_lock, flags);
3913 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3914 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3915 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3916 qla2xxx_wake_dpc(vha);
3918 ql_dbg(ql_dbg_disc, vha, 0xffff,
3919 "Async done-%s rescan failed on all retries.\n",
/* Success: fold this response into the scan list. */
3926 qla2x00_find_free_fcp_nvme_slot(vha, sp);
/* FCP GNN_FT finished and NVMe is enabled: chain a second scan
 * pass for FC-NVMe ports before declaring the scan done. */
3928 if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
3929 cmd == GNN_FT_CMD) {
3930 spin_lock_irqsave(&vha->work_lock, flags);
3931 vha->scan.scan_flags &= ~SF_SCANNING;
3932 spin_unlock_irqrestore(&vha->work_lock, flags);
3935 rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
/* Posting failed (check elided): unmap and force a loop resync. */
3937 qla24xx_sp_unmap(vha, sp);
3938 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3939 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/* Otherwise queue the matching completion event: GPN_FT completion
 * triggers the GNN_FT stage; GNN_FT completion triggers reconciliation. */
3944 if (cmd == GPN_FT_CMD) {
3945 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3946 QLA_EVT_GPNFT_DONE);
3948 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3949 QLA_EVT_GNNFT_DONE);
/* Posting failed (check elided): unmap and force a loop resync. */
3953 qla24xx_sp_unmap(vha, sp);
3954 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3955 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3961 * Get WWNN list for fc4_type
3963 * It is assumed the same SRB is re-used from GPNFT to avoid
3964 * mem free & re-alloc
/*
 * qla24xx_async_gnnft - issue an async GNN_FT (Get Node Names) query.
 *
 * Second stage of the fabric scan: reuses the SRB and DMA buffers from
 * the preceding GPN_FT query (see the note above) to fetch node names
 * for the given fc4_type. On any failure the DMA buffers are freed and,
 * if no other scan is pending, scan_work is rescheduled.
 *
 * NOTE(review): this excerpt elides several lines (early returns,
 * rval checks, sp->name assignment, closing braces).
 */
3966 static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
3969 int rval = QLA_FUNCTION_FAILED;
3970 struct ct_sns_req *ct_req;
3971 struct ct_sns_pkt *ct_sns;
3972 unsigned long flags;
/* Host offline: drop SF_SCANNING and bail (return elided). */
3974 if (!vha->flags.online) {
3975 spin_lock_irqsave(&vha->work_lock, flags);
3976 vha->scan.scan_flags &= ~SF_SCANNING;
3977 spin_unlock_irqrestore(&vha->work_lock, flags);
/* The reused SRB must already carry both CT buffers from GPN_FT. */
3981 if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
3982 ql_log(ql_log_warn, vha, 0xffff,
3983 "%s: req %p rsp %p are not setup\n",
3984 __func__, sp->u.iocb_cmd.u.ctarg.req,
3985 sp->u.iocb_cmd.u.ctarg.rsp);
3986 spin_lock_irqsave(&vha->work_lock, flags);
3987 vha->scan.scan_flags &= ~SF_SCANNING;
3988 spin_unlock_irqrestore(&vha->work_lock, flags);
3990 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3991 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3995 ql_dbg(ql_dbg_disc, vha, 0xfffff,
3996 "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
3997 __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
3998 sp->u.iocb_cmd.u.ctarg.req_size);
4000 sp->type = SRB_CT_PTHRU_CMD;
/* Stash reset generation and fc4 type for the completion handler. */
4002 sp->gen1 = vha->hw->base_qpair->chip_reset;
4003 sp->gen2 = fc4_type;
4005 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4006 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Scrub the reused buffers before building the new CT request. */
4008 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4009 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4011 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4012 /* CT_IU preamble */
4013 ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
4014 sp->u.iocb_cmd.u.ctarg.rsp_size);
4017 ct_req->req.gpn_ft.port_type = fc4_type;
4019 sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
4020 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4022 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4024 ql_dbg(ql_dbg_disc, vha, 0xffff,
4025 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4026 sp->handle, ct_req->req.gpn_ft.port_type);
4028 rval = qla2x00_start_sp(sp);
4029 if (rval != QLA_SUCCESS) {
/* Error/cleanup path: release both coherent DMA buffers. */
4036 if (sp->u.iocb_cmd.u.ctarg.req) {
4037 dma_free_coherent(&vha->hw->pdev->dev,
4038 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4039 sp->u.iocb_cmd.u.ctarg.req,
4040 sp->u.iocb_cmd.u.ctarg.req_dma);
4041 sp->u.iocb_cmd.u.ctarg.req = NULL;
4043 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4044 dma_free_coherent(&vha->hw->pdev->dev,
4045 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4046 sp->u.iocb_cmd.u.ctarg.rsp,
4047 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4048 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/* If nothing else is scanning/queued, retry via delayed scan_work. */
4053 spin_lock_irqsave(&vha->work_lock, flags);
4054 vha->scan.scan_flags &= ~SF_SCANNING;
4055 if (vha->scan.scan_flags == 0) {
4056 ql_dbg(ql_dbg_disc, vha, 0xffff,
4057 "%s: schedule\n", __func__);
4058 vha->scan.scan_flags |= SF_QUEUED;
4059 schedule_delayed_work(&vha->scan.scan_work, 5);
4061 spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * qla24xx_async_gpnft_done - GPN_FT stage complete; start the GNN_FT
 * stage, reusing the same SRB (sp->gen2 still holds the fc4 type).
 */
4067 void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
4069 ql_dbg(ql_dbg_disc, vha, 0xffff,
4070 "%s enter\n", __func__);
4071 qla24xx_async_gnnft(vha, sp, sp->gen2);
4074 /* Get WWPN list for certain fc4_type */
/*
 * qla24xx_async_gpnft - issue an async GPN_FT (Get Port Names) query.
 *
 * First stage of the fabric scan. For the FCP pass (@sp == NULL) a new
 * SRB plus coherent DMA request/response buffers are allocated; for the
 * NVMe follow-up pass the caller supplies the SRB from the FCP scan.
 * SF_SCANNING guards against concurrent scans. On failure the buffers
 * are freed and, if no other scan is pending, scan_work is rescheduled.
 *
 * NOTE(review): this excerpt elides several lines (early returns,
 * sp->name assignment, dma_alloc_coherent arguments, closing braces).
 */
4075 int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
4077 int rval = QLA_FUNCTION_FAILED;
4078 struct ct_sns_req *ct_req;
4079 struct ct_sns_pkt *ct_sns;
4081 unsigned long flags;
4083 ql_dbg(ql_dbg_disc, vha, 0xffff,
4084 "%s enter\n", __func__);
4086 if (!vha->flags.online)
/* Claim the scan: only one GPN_FT/GNN_FT sequence runs at a time. */
4089 spin_lock_irqsave(&vha->work_lock, flags);
4090 if (vha->scan.scan_flags & SF_SCANNING) {
4091 spin_unlock_irqrestore(&vha->work_lock, flags);
4092 ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
4095 vha->scan.scan_flags |= SF_SCANNING;
4096 spin_unlock_irqrestore(&vha->work_lock, flags);
4098 if (fc4_type == FC4_TYPE_FCP_SCSI) {
4099 ql_dbg(ql_dbg_disc, vha, 0xffff,
4100 "%s: Performing FCP Scan\n", __func__);
/* FCP pass should never be handed an SRB; free any stray one. */
4103 sp->free(sp); /* should not happen */
4105 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
/* SRB allocation failed (check elided): back out SF_SCANNING. */
4107 spin_lock_irqsave(&vha->work_lock, flags);
4108 vha->scan.scan_flags &= ~SF_SCANNING;
4109 spin_unlock_irqrestore(&vha->work_lock, flags);
4113 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
4114 sizeof(struct ct_sns_pkt),
4115 &sp->u.iocb_cmd.u.ctarg.req_dma,
4117 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
4118 if (!sp->u.iocb_cmd.u.ctarg.req) {
4119 ql_log(ql_log_warn, vha, 0xffff,
4120 "Failed to allocate ct_sns request.\n");
4121 spin_lock_irqsave(&vha->work_lock, flags);
4122 vha->scan.scan_flags &= ~SF_SCANNING;
4123 spin_unlock_irqrestore(&vha->work_lock, flags);
4127 sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
/* Response sized for the maximum number of fabric devices. */
4129 rspsz = sizeof(struct ct_sns_gpnft_rsp) +
4130 ((vha->hw->max_fibre_devices - 1) *
4131 sizeof(struct ct_sns_gpn_ft_data));
4133 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
4135 &sp->u.iocb_cmd.u.ctarg.rsp_dma,
4137 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
4138 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
4139 ql_log(ql_log_warn, vha, 0xffff,
4140 "Failed to allocate ct_sns request.\n");
4141 spin_lock_irqsave(&vha->work_lock, flags);
4142 vha->scan.scan_flags &= ~SF_SCANNING;
4143 spin_unlock_irqrestore(&vha->work_lock, flags);
/* Unwind the request buffer allocated above. */
4144 dma_free_coherent(&vha->hw->pdev->dev,
4145 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4146 sp->u.iocb_cmd.u.ctarg.req,
4147 sp->u.iocb_cmd.u.ctarg.req_dma);
4148 sp->u.iocb_cmd.u.ctarg.req = NULL;
4152 sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
4154 ql_dbg(ql_dbg_disc, vha, 0xffff,
4155 "%s scan list size %d\n", __func__, vha->scan.size);
/* Fresh scan: clear the accumulated scan list. */
4157 memset(vha->scan.l, 0, vha->scan.size);
/* NVMe pass with no caller-provided SRB (check elided): bail. */
4159 ql_dbg(ql_dbg_disc, vha, 0xffff,
4160 "NVME scan did not provide SP\n");
4164 sp->type = SRB_CT_PTHRU_CMD;
/* Stash reset generation and fc4 type for the completion handler. */
4166 sp->gen1 = vha->hw->base_qpair->chip_reset;
4167 sp->gen2 = fc4_type;
4169 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4170 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4172 rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
4173 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4174 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4176 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4177 /* CT_IU preamble */
4178 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
4181 ct_req->req.gpn_ft.port_type = fc4_type;
4183 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4185 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4187 ql_dbg(ql_dbg_disc, vha, 0xffff,
4188 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4189 sp->handle, ct_req->req.gpn_ft.port_type);
4191 rval = qla2x00_start_sp(sp);
4192 if (rval != QLA_SUCCESS) {
/* Error/cleanup path: release both coherent DMA buffers. */
4199 if (sp->u.iocb_cmd.u.ctarg.req) {
4200 dma_free_coherent(&vha->hw->pdev->dev,
4201 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4202 sp->u.iocb_cmd.u.ctarg.req,
4203 sp->u.iocb_cmd.u.ctarg.req_dma);
4204 sp->u.iocb_cmd.u.ctarg.req = NULL;
4206 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4207 dma_free_coherent(&vha->hw->pdev->dev,
4208 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4209 sp->u.iocb_cmd.u.ctarg.rsp,
4210 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4211 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/* If nothing else is scanning/queued, retry via delayed scan_work. */
4216 spin_lock_irqsave(&vha->work_lock, flags);
4217 vha->scan.scan_flags &= ~SF_SCANNING;
4218 if (vha->scan.scan_flags == 0) {
4219 ql_dbg(ql_dbg_disc, vha, 0xffff,
4220 "%s: schedule\n", __func__);
4221 vha->scan.scan_flags |= SF_QUEUED;
4222 schedule_delayed_work(&vha->scan.scan_work, 5);
4224 spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * qla_scan_work_fn - delayed-work handler that retries a fabric scan.
 *
 * Scheduled (via schedule_delayed_work) when a scan could not be
 * started or completed. Requests a loop resync from the DPC thread,
 * wakes it, then clears the SF_QUEUED marker under work_lock.
 */
4230 void qla_scan_work_fn(struct work_struct *work)
4232 struct fab_scan *s = container_of(to_delayed_work(work),
4233 struct fab_scan, scan_work);
/* Recover the owning host from the embedded fab_scan structure. */
4234 struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
4236 unsigned long flags;
4238 ql_dbg(ql_dbg_disc, vha, 0xffff,
4239 "%s: schedule loop resync\n", __func__);
4240 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4241 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4242 qla2xxx_wake_dpc(vha);
4243 spin_lock_irqsave(&vha->work_lock, flags);
4244 vha->scan.scan_flags &= ~SF_QUEUED;
4245 spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * qla24xx_handle_gnnid_event - GNN_ID completed; continue discovery by
 * posting GNL (Get Name List) work for the fcport.
 */
4249 void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4251 qla24xx_post_gnl_work(vha, ea->fcport);
/*
 * qla2x00_async_gnnid_sp_done - completion callback for the async
 * GNN_ID (Get Node Name by port ID) query.
 *
 * Copies the node name from the CT response into the fcport, clears
 * FCF_ASYNC_SENT, and dispatches an FCME_GNNID_DONE event to the
 * discovery state machine.
 *
 * NOTE(review): ea field assignments and sp->free appear elided here.
 */
4254 static void qla2x00_async_gnnid_sp_done(void *s, int res)
4257 struct scsi_qla_host *vha = sp->vha;
4258 fc_port_t *fcport = sp->fcport;
/* Node name as returned in the GNN_ID CT response payload. */
4259 u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
4260 struct event_arg ea;
4263 fcport->flags &= ~FCF_ASYNC_SENT;
4264 wwnn = wwn_to_u64(node_name);
4266 memcpy(fcport->node_name, node_name, WWN_SIZE);
4268 memset(&ea, 0, sizeof(ea));
4272 ea.event = FCME_GNNID_DONE;
4274 ql_dbg(ql_dbg_disc, vha, 0x204f,
4275 "Async done-%s res %x, WWPN %8phC %8phC\n",
4276 sp->name, res, fcport->port_name, fcport->node_name);
4278 qla2x00_fcport_event_handler(vha, &ea);
/*
 * qla24xx_async_gnnid - issue an async GNN_ID query for one fcport.
 *
 * Builds a CT passthrough asking the name server for the node name of
 * fcport->d_id. Request and response share the fcport's preallocated
 * ct_sns DMA buffer. Sets FCF_ASYNC_SENT while in flight and moves
 * disc_state to DSC_GNN_ID.
 *
 * Returns QLA_SUCCESS if the SRB was started, otherwise an error code
 * (cleanup path partially elided from this excerpt).
 */
4283 int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4285 int rval = QLA_FUNCTION_FAILED;
4286 struct ct_sns_req *ct_req;
/* Don't issue while offline or with another async op outstanding. */
4289 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4292 fcport->disc_state = DSC_GNN_ID;
4293 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4297 fcport->flags |= FCF_ASYNC_SENT;
4298 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot generations so the completion can detect staleness. */
4300 sp->gen1 = fcport->rscn_gen;
4301 sp->gen2 = fcport->login_gen;
4303 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4304 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4306 /* CT_IU preamble */
4307 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
4311 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
4314 /* req & rsp use the same buffer */
4315 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4316 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4317 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4318 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4319 sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
4320 sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
4321 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4323 sp->done = qla2x00_async_gnnid_sp_done;
4325 ql_dbg(ql_dbg_disc, vha, 0xffff,
4326 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4327 sp->name, fcport->port_name,
4328 sp->handle, fcport->loop_id, fcport->d_id.b24);
4330 rval = qla2x00_start_sp(sp);
4331 if (rval != QLA_SUCCESS)
/* Failure path: allow a future async op to be issued. */
4337 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * qla24xx_post_gnnid_work - queue a QLA_EVT_GNNID work item for @fcport.
 *
 * Refuses when the loop is neither READY nor UP, or the driver is
 * unloading (return statement elided from this excerpt). Returns
 * QLA_FUNCTION_FAILED on allocation failure, otherwise the result of
 * qla2x00_post_work().
 */
4342 int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4344 struct qla_work_evt *e;
4347 ls = atomic_read(&vha->loop_state);
4348 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4349 test_bit(UNLOADING, &vha->dpc_flags))
4352 e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
4354 return QLA_FUNCTION_FAILED;
4356 e->u.fcport.fcport = fcport;
4357 return qla2x00_post_work(vha, e);
/*
 * qla24xx_handle_gfpnid_event - GFPN_ID completed; decide next step.
 *
 * Ignores the result if the fcport is being deleted or if the login or
 * RSCN generation changed while the query was in flight (the snapshots
 * live in ea->sp->gen2 / gen1). Otherwise continues discovery by
 * posting GPSC (Get Port Speed Capabilities) work.
 */
4361 void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4363 fc_port_t *fcport = ea->fcport;
4365 ql_dbg(ql_dbg_disc, vha, 0xffff,
4366 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
4367 __func__, fcport->port_name, fcport->disc_state,
4368 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
4369 fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
4371 if (fcport->disc_state == DSC_DELETE_PEND)
4374 if (ea->sp->gen2 != fcport->login_gen) {
4375 /* target side must have changed it. */
4376 ql_dbg(ql_dbg_disc, vha, 0x20d3,
4377 "%s %8phC generation changed\n",
4378 __func__, fcport->port_name);
4380 } else if (ea->sp->gen1 != fcport->rscn_gen) {
4384 qla24xx_post_gpsc_work(vha, fcport);
/*
 * qla2x00_async_gfpnid_sp_done - completion callback for the async
 * GFPN_ID (Get Fabric Port Name by port ID) query.
 *
 * Copies the fabric port name from the CT response into the fcport and
 * dispatches an FCME_GFPNID_DONE event to the discovery state machine.
 *
 * NOTE(review): ea field assignments and sp->free appear elided here.
 */
4387 static void qla2x00_async_gfpnid_sp_done(void *s, int res)
4390 struct scsi_qla_host *vha = sp->vha;
4391 fc_port_t *fcport = sp->fcport;
/* Fabric port name as returned in the GFPN_ID CT response payload. */
4392 u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
4393 struct event_arg ea;
4396 wwn = wwn_to_u64(fpn);
4398 memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
4400 memset(&ea, 0, sizeof(ea));
4404 ea.event = FCME_GFPNID_DONE;
4406 ql_dbg(ql_dbg_disc, vha, 0x204f,
4407 "Async done-%s res %x, WWPN %8phC %8phC\n",
4408 sp->name, res, fcport->port_name, fcport->fabric_port_name);
4410 qla2x00_fcport_event_handler(vha, &ea);
/*
 * qla24xx_async_gfpnid - issue an async GFPN_ID query for one fcport.
 *
 * Builds a CT passthrough asking the name server for the fabric port
 * name of fcport->d_id. Request and response share the fcport's
 * preallocated ct_sns DMA buffer.
 *
 * Returns QLA_SUCCESS if the SRB was started, otherwise an error code
 * (cleanup path partially elided from this excerpt).
 */
4415 int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4417 int rval = QLA_FUNCTION_FAILED;
4418 struct ct_sns_req *ct_req;
/* Don't issue while offline or with another async op outstanding. */
4421 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4424 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4428 sp->type = SRB_CT_PTHRU_CMD;
4429 sp->name = "gfpnid";
/* Snapshot generations so the completion can detect staleness. */
4430 sp->gen1 = fcport->rscn_gen;
4431 sp->gen2 = fcport->login_gen;
4433 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4434 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4436 /* CT_IU preamble */
4437 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
4441 ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
4444 /* req & rsp use the same buffer */
4445 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4446 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4447 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4448 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4449 sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
4450 sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
4451 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4453 sp->done = qla2x00_async_gfpnid_sp_done;
4455 ql_dbg(ql_dbg_disc, vha, 0xffff,
4456 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4457 sp->name, fcport->port_name,
4458 sp->handle, fcport->loop_id, fcport->d_id.b24);
4460 rval = qla2x00_start_sp(sp);
4461 if (rval != QLA_SUCCESS)
/* Failure path: allow a future async op to be issued. */
4468 fcport->flags &= ~FCF_ASYNC_SENT;
4473 int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4475 struct qla_work_evt *e;
4478 ls = atomic_read(&vha->loop_state);
4479 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4480 test_bit(UNLOADING, &vha->dpc_flags))
4483 e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
4485 return QLA_FUNCTION_FAILED;
4487 e->u.fcport.fcport = fcport;
4488 return qla2x00_post_work(vha, e);