1 // SPDX-License-Identifier: GPL-2.0-only
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
7 #include "qla_target.h"
9 #include <linux/delay.h>
10 #include <linux/gfp.h>
13 #define IS_PPCARCH true
15 #define IS_PPCARCH false
/*
 * Lookup table mapping selected mailbox command opcodes to short names,
 * used by mb_to_str() when formatting debug/log messages.
 * NOTE(review): listing is fragmentary — interior lines (struct fields,
 * array declarator, terminator) are missing from this excerpt.
 */
18 static struct mb_cmd_name {
22 {MBC_GET_PORT_DATABASE, "GPDB"},
23 {MBC_GET_ID_LIST, "GIDList"},
24 {MBC_GET_LINK_PRIV_STATS, "Stats"},
25 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
/*
 * mb_to_str() - return the short printable name for mailbox opcode @cmd.
 * Scans the mb_str[] table above; presumably falls back to a default
 * string when no entry matches — fallback lines are missing from this
 * excerpt, so confirm against the full source.
 */
28 static const char *mb_to_str(uint16_t cmd)
31 struct mb_cmd_name *e;
33 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
/*
 * Table of mailbox commands that is_rom_cmd() treats as permitted even
 * while an ISP abort is pending (see the ABORT_ISP_ACTIVE check in
 * qla2x00_mailbox_command()). Commands not listed here are rejected with
 * QLA_FUNCTION_TIMEOUT during abort processing.
 */
41 static struct rom_cmd {
45 { MBC_EXECUTE_FIRMWARE },
46 { MBC_READ_RAM_WORD },
47 { MBC_MAILBOX_REGISTER_TEST },
48 { MBC_VERIFY_CHECKSUM },
49 { MBC_GET_FIRMWARE_VERSION },
50 { MBC_LOAD_RISC_RAM },
51 { MBC_DUMP_RISC_RAM },
52 { MBC_LOAD_RISC_RAM_EXTENDED },
53 { MBC_DUMP_RISC_RAM_EXTENDED },
54 { MBC_WRITE_RAM_WORD_EXTENDED },
55 { MBC_READ_RAM_EXTENDED },
56 { MBC_GET_RESOURCE_COUNTS },
57 { MBC_SET_FIRMWARE_OPTION },
58 { MBC_MID_INITIALIZE_FIRMWARE },
59 { MBC_GET_FIRMWARE_STATE },
60 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
61 { MBC_GET_RETRY_COUNT },
62 { MBC_TRACE_CONTROL },
63 { MBC_INITIALIZE_MULTIQ },
64 { MBC_IOCB_COMMAND_A64 },
65 { MBC_GET_ADAPTER_LOOP_ID },
67 { MBC_SET_RNID_PARAMS },
68 { MBC_GET_RNID_PARAMS },
69 { MBC_GET_SET_ZIO_THRESHOLD },
/*
 * is_rom_cmd() - return non-zero when @cmd appears in rom_cmds[],
 * i.e. the command is allowed to proceed while ISP abort is active.
 * Body of the lookup loop is missing from this excerpt.
 */
72 static int is_rom_cmd(uint16_t cmd)
77 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
/*
 * NOTE(review): this excerpt is fragmentary — many interior lines are
 * missing (dropped braces/else-branches/declarations), and the byte
 * sequence "®" below is encoding damage for "&reg" (mojibake of the
 * register-block pointer). Compare against the full qla_mbx.c before
 * relying on control flow shown here.
 */
87 * qla2x00_mailbox_command
88 * Issue mailbox command and waits for completion.
91 * ha = adapter block pointer.
92 * mcp = driver internal mbx struct pointer.
95 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
98 * 0 : QLA_SUCCESS = cmd performed successfully
99 * 1 : QLA_FUNCTION_FAILED (error encountered)
100 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
106 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
109 unsigned long flags = 0;
111 uint8_t abort_active, eeh_delay;
113 uint16_t command = 0;
115 __le16 __iomem *optr;
118 unsigned long wait_time;
119 struct qla_hw_data *ha = vha->hw;
120 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
124 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
/* Early-exit guards: bail with a timeout status on any dead/failed device. */
126 if (ha->pdev->error_state == pci_channel_io_perm_failure) {
127 ql_log(ql_log_warn, vha, 0x1001,
128 "PCI channel failed permanently, exiting.\n");
129 return QLA_FUNCTION_TIMEOUT;
132 if (vha->device_flags & DFLG_DEV_FAILED) {
133 ql_log(ql_log_warn, vha, 0x1002,
134 "Device in failed state, exiting.\n");
135 return QLA_FUNCTION_TIMEOUT;
138 /* if PCI error, then avoid mbx processing.*/
139 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
140 test_bit(UNLOADING, &base_vha->dpc_flags)) {
141 ql_log(ql_log_warn, vha, 0xd04e,
142 "PCI error, exiting.\n");
143 return QLA_FUNCTION_TIMEOUT;
147 io_lock_on = base_vha->flags.init_done;
150 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
/* Snapshot the reset generation; compared later to detect a chip reset
 * that happened while we were waiting. */
151 chip_reset = ha->chip_reset;
153 if (ha->flags.pci_channel_io_perm_failure) {
154 ql_log(ql_log_warn, vha, 0x1003,
155 "Perm failure on EEH timeout MBX, exiting.\n");
156 return QLA_FUNCTION_TIMEOUT;
159 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
160 /* Setting Link-Down error */
161 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
162 ql_log(ql_log_warn, vha, 0x1004,
163 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
164 return QLA_FUNCTION_TIMEOUT;
167 /* check if ISP abort is active and return cmd with timeout */
168 if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
169 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
170 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
171 !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) {
172 ql_log(ql_log_info, vha, 0x1005,
173 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
175 return QLA_FUNCTION_TIMEOUT;
178 atomic_inc(&ha->num_pend_mbx_stage1);
180 * Wait for active mailbox commands to finish by waiting at most tov
181 * seconds. This is to serialize actual issuing of mailbox cmds during
182 * non ISP abort time.
184 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
185 /* Timeout occurred. Return error. */
186 ql_log(ql_log_warn, vha, 0xd035,
187 "Cmd access timeout, cmd=0x%x, Exiting.\n",
190 atomic_dec(&ha->num_pend_mbx_stage1);
191 return QLA_FUNCTION_TIMEOUT;
193 atomic_dec(&ha->num_pend_mbx_stage1);
194 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
195 ha->flags.eeh_busy) {
196 ql_log(ql_log_warn, vha, 0xd035,
197 "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
198 ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
204 /* Save mailbox command for debug */
207 ql_dbg(ql_dbg_mbx, vha, 0x1006,
208 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
210 spin_lock_irqsave(&ha->hardware_lock, flags);
212 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
213 ha->flags.mbox_busy) {
215 spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Claim the mailbox; released on every exit path below. */
218 ha->flags.mbox_busy = 1;
220 /* Load mailbox registers. */
222 optr = ®->isp82.mailbox_in[0];
223 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
224 optr = ®->isp24.mailbox0;
226 optr = MAILBOX_REG(ha, ®->isp, 0);
229 command = mcp->mb[0];
230 mboxes = mcp->out_mb;
232 ql_dbg(ql_dbg_mbx, vha, 0x1111,
233 "Mailbox registers (OUT):\n");
/* Write each requested outbound register (out_mb bitmask), zeroing the
 * rest; QLA2200 registers 8+ live at a second MAILBOX_REG window. */
234 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
235 if (IS_QLA2200(ha) && cnt == 8)
236 optr = MAILBOX_REG(ha, ®->isp, 8);
237 if (mboxes & BIT_0) {
238 ql_dbg(ql_dbg_mbx, vha, 0x1112,
239 "mbox[%d]<-0x%04x\n", cnt, *iptr);
240 wrt_reg_word(optr, *iptr);
242 wrt_reg_word(optr, 0);
250 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
251 "I/O Address = %p.\n", optr);
253 /* Issue set host interrupt command to send cmd out. */
254 ha->flags.mbox_int = 0;
255 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
257 /* Unlock mbx registers and wait for interrupt */
258 ql_dbg(ql_dbg_mbx, vha, 0x100f,
259 "Going to unlock irq & waiting for interrupts. "
260 "jiffies=%lx.\n", jiffies);
262 /* Wait for mbx cmd completion until timeout */
263 atomic_inc(&ha->num_pend_mbx_stage2);
/* Interrupt-driven path: ring the host-interrupt doorbell, then sleep on
 * mbx_intr_comp until the ISR completes it or the timeout fires. */
264 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
265 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
268 wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING);
269 else if (IS_FWI2_CAPABLE(ha))
270 wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT);
272 wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT);
273 spin_unlock_irqrestore(&ha->hardware_lock, flags);
276 atomic_inc(&ha->num_pend_mbx_stage3);
277 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
279 ql_dbg(ql_dbg_mbx, vha, 0x117a,
280 "cmd=%x Timeout.\n", command);
281 spin_lock_irqsave(&ha->hardware_lock, flags);
282 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
283 spin_unlock_irqrestore(&ha->hardware_lock, flags);
285 if (chip_reset != ha->chip_reset) {
286 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
288 spin_lock_irqsave(&ha->hardware_lock, flags);
289 ha->flags.mbox_busy = 0;
290 spin_unlock_irqrestore(&ha->hardware_lock,
292 atomic_dec(&ha->num_pend_mbx_stage2);
293 atomic_dec(&ha->num_pend_mbx_stage3);
297 } else if (ha->flags.purge_mbox ||
298 chip_reset != ha->chip_reset) {
299 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
301 spin_lock_irqsave(&ha->hardware_lock, flags);
302 ha->flags.mbox_busy = 0;
303 spin_unlock_irqrestore(&ha->hardware_lock, flags);
304 atomic_dec(&ha->num_pend_mbx_stage2);
305 atomic_dec(&ha->num_pend_mbx_stage3);
309 atomic_dec(&ha->num_pend_mbx_stage3);
311 if (time_after(jiffies, wait_time + 5 * HZ))
312 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
313 command, jiffies_to_msecs(jiffies - wait_time));
/* Polling path: ring the doorbell, then spin calling qla2x00_poll()
 * until mbox_int is set or the tov deadline passes. */
315 ql_dbg(ql_dbg_mbx, vha, 0x1011,
316 "Cmd=%x Polling Mode.\n", command);
318 if (IS_P3P_TYPE(ha)) {
319 if (rd_reg_dword(®->isp82.hint) &
320 HINT_MBX_INT_PENDING) {
321 ha->flags.mbox_busy = 0;
322 spin_unlock_irqrestore(&ha->hardware_lock,
324 atomic_dec(&ha->num_pend_mbx_stage2);
325 ql_dbg(ql_dbg_mbx, vha, 0x1012,
326 "Pending mailbox timeout, exiting.\n");
328 rval = QLA_FUNCTION_TIMEOUT;
331 wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING);
332 } else if (IS_FWI2_CAPABLE(ha))
333 wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT);
335 wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT);
336 spin_unlock_irqrestore(&ha->hardware_lock, flags);
338 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
339 while (!ha->flags.mbox_int) {
340 if (ha->flags.purge_mbox ||
341 chip_reset != ha->chip_reset) {
342 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
344 spin_lock_irqsave(&ha->hardware_lock, flags);
345 ha->flags.mbox_busy = 0;
346 spin_unlock_irqrestore(&ha->hardware_lock,
348 atomic_dec(&ha->num_pend_mbx_stage2);
353 if (time_after(jiffies, wait_time))
356 /* Check for pending interrupts. */
357 qla2x00_poll(ha->rsp_q_map[0]);
359 if (!ha->flags.mbox_int &&
361 command == MBC_LOAD_RISC_RAM_EXTENDED))
364 ql_dbg(ql_dbg_mbx, vha, 0x1013,
366 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
368 atomic_dec(&ha->num_pend_mbx_stage2);
370 /* Check whether we timed out */
371 if (ha->flags.mbox_int) {
374 ql_dbg(ql_dbg_mbx, vha, 0x1014,
375 "Cmd=%x completed.\n", command);
377 /* Got interrupt. Clear the flag. */
378 ha->flags.mbox_int = 0;
379 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
381 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
382 spin_lock_irqsave(&ha->hardware_lock, flags);
383 ha->flags.mbox_busy = 0;
384 spin_unlock_irqrestore(&ha->hardware_lock, flags);
386 /* Setting Link-Down error */
387 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
389 rval = QLA_FUNCTION_FAILED;
390 ql_log(ql_log_warn, vha, 0xd048,
391 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
/* Firmware status word: anything but MBS_COMMAND_COMPLETE is a failure. */
395 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
396 ql_dbg(ql_dbg_mbx, vha, 0x11ff,
397 "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
398 MBS_COMMAND_COMPLETE);
399 rval = QLA_FUNCTION_FAILED;
402 /* Load return mailbox registers. */
404 iptr = (uint16_t *)&ha->mailbox_out[0];
407 ql_dbg(ql_dbg_mbx, vha, 0x1113,
408 "Mailbox registers (IN):\n");
409 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
410 if (mboxes & BIT_0) {
412 ql_dbg(ql_dbg_mbx, vha, 0x1114,
413 "mbox[%d]->0x%04x\n", cnt, *iptr2);
/* Timeout path: dump register state for triage before error handling. */
423 uint32_t ictrl, host_status, hccr;
426 if (IS_FWI2_CAPABLE(ha)) {
427 mb[0] = rd_reg_word(®->isp24.mailbox0);
428 mb[1] = rd_reg_word(®->isp24.mailbox1);
429 mb[2] = rd_reg_word(®->isp24.mailbox2);
430 mb[3] = rd_reg_word(®->isp24.mailbox3);
431 mb[7] = rd_reg_word(®->isp24.mailbox7);
432 ictrl = rd_reg_dword(®->isp24.ictrl);
433 host_status = rd_reg_dword(®->isp24.host_status);
434 hccr = rd_reg_dword(®->isp24.hccr);
436 ql_log(ql_log_warn, vha, 0xd04c,
437 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
438 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
439 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
440 mb[7], host_status, hccr);
444 mb[0] = RD_MAILBOX_REG(ha, ®->isp, 0);
445 ictrl = rd_reg_word(®->isp.ictrl);
446 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
447 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
448 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
451 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
453 /* Capture FW dump only, if PCI device active */
454 if (!pci_channel_offline(vha->hw->pdev)) {
455 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
456 if (w == 0xffff || ictrl == 0xffffffff ||
457 (chip_reset != ha->chip_reset)) {
458 /* This is special case if there is unload
459 * of driver happening and if PCI device go
460 * into bad state due to PCI error condition
461 * then only PCI ERR flag would be set.
462 * we will do premature exit for above case.
464 spin_lock_irqsave(&ha->hardware_lock, flags);
465 ha->flags.mbox_busy = 0;
466 spin_unlock_irqrestore(&ha->hardware_lock,
468 rval = QLA_FUNCTION_TIMEOUT;
472 /* Attempt to capture firmware dump for further
473 * analysis of the current firmware state. We do not
474 * need to do this if we are intentionally generating
477 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
478 qla2xxx_dump_fw(vha);
479 rval = QLA_FUNCTION_TIMEOUT;
482 spin_lock_irqsave(&ha->hardware_lock, flags);
483 ha->flags.mbox_busy = 0;
484 spin_unlock_irqrestore(&ha->hardware_lock, flags);
489 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
490 ql_dbg(ql_dbg_mbx, vha, 0x101a,
491 "Checking for additional resp interrupt.\n");
493 /* polling mode for non isp_abort commands. */
494 qla2x00_poll(ha->rsp_q_map[0]);
/* On timeout (except deliberate MBC_GEN_SYSTEM_ERROR), either schedule an
 * ISP abort for the DPC thread or, when already on the DPC thread, run
 * abort_isp() directly. */
497 if (rval == QLA_FUNCTION_TIMEOUT &&
498 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
499 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
500 ha->flags.eeh_busy) {
501 /* not in dpc. schedule it for dpc to take over. */
502 ql_dbg(ql_dbg_mbx, vha, 0x101b,
503 "Timeout, schedule isp_abort_needed.\n");
505 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
506 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
507 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
508 if (IS_QLA82XX(ha)) {
509 ql_dbg(ql_dbg_mbx, vha, 0x112a,
510 "disabling pause transmit on port "
513 QLA82XX_CRB_NIU + 0x98,
514 CRB_NIU_XG_PAUSE_CTL_P0|
515 CRB_NIU_XG_PAUSE_CTL_P1);
517 ql_log(ql_log_info, base_vha, 0x101c,
518 "Mailbox cmd timeout occurred, cmd=0x%x, "
519 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
520 "abort.\n", command, mcp->mb[0],
523 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
524 qla2xxx_wake_dpc(vha);
526 } else if (current == ha->dpc_thread) {
527 /* call abort directly since we are in the DPC thread */
528 ql_dbg(ql_dbg_mbx, vha, 0x101d,
529 "Timeout, calling abort_isp.\n");
531 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
532 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
533 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
534 if (IS_QLA82XX(ha)) {
535 ql_dbg(ql_dbg_mbx, vha, 0x112b,
536 "disabling pause transmit on port "
539 QLA82XX_CRB_NIU + 0x98,
540 CRB_NIU_XG_PAUSE_CTL_P0|
541 CRB_NIU_XG_PAUSE_CTL_P1);
543 ql_log(ql_log_info, base_vha, 0x101e,
544 "Mailbox cmd timeout occurred, cmd=0x%x, "
545 "mb[0]=0x%x. Scheduling ISP abort ",
546 command, mcp->mb[0]);
548 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
549 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
550 /* Allow next mbx cmd to come in. */
551 complete(&ha->mbx_cmd_comp);
552 if (ha->isp_ops->abort_isp(vha) &&
553 !ha->flags.eeh_busy) {
554 /* Failed. retry later. */
555 set_bit(ISP_ABORT_NEEDED,
558 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
559 ql_dbg(ql_dbg_mbx, vha, 0x101f,
560 "Finished abort_isp.\n");
567 /* Allow next mbx cmd to come in. */
568 complete(&ha->mbx_cmd_comp);
571 if (rval == QLA_ABORTED) {
572 ql_log(ql_log_info, vha, 0xd035,
573 "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
576 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
577 pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
578 dev_name(&ha->pdev->dev), 0x1020+0x800,
582 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
583 if (mboxes & BIT_0) {
584 printk(" mb[%u]=%x", i, mcp->mb[i]);
587 pr_warn(" cmd=%x ****\n", command);
589 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
590 ql_dbg(ql_dbg_mbx, vha, 0x1198,
591 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
592 rd_reg_dword(®->isp24.host_status),
593 rd_reg_dword(®->isp24.ictrl),
594 rd_reg_dword(®->isp24.istatus));
596 ql_dbg(ql_dbg_mbx, vha, 0x1206,
597 "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
598 rd_reg_word(®->isp.ctrl_status),
599 rd_reg_word(®->isp.ictrl),
600 rd_reg_word(®->isp.istatus));
603 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
607 while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
609 * The caller of this mailbox encountered a PCI error.
610 * Hold the thread until PCIE link reset complete to make
611 * sure caller does not unmap dma while recovery is
/*
 * qla2x00_load_ram() - load firmware into RISC RAM via mailbox command.
 * @vha: virtual host adapter
 * @req_dma: DMA address of the firmware segment to load
 * @risc_addr: RISC RAM destination address
 * @risc_code_size: segment size (units depend on ISP generation — not
 *                  shown here; confirm against full source)
 *
 * Uses MBC_LOAD_RISC_RAM_EXTENDED when the destination needs >16 bits of
 * address or the ISP is FWI2-capable, otherwise the legacy
 * MBC_LOAD_RISC_RAM. Fragment: some lines are missing from this excerpt.
 */
621 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
622 uint32_t risc_code_size)
625 struct qla_hw_data *ha = vha->hw;
627 mbx_cmd_t *mcp = &mc;
629 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
630 "Entered %s.\n", __func__);
632 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
633 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
634 mcp->mb[8] = MSW(risc_addr);
635 mcp->out_mb = MBX_8|MBX_0;
637 mcp->mb[0] = MBC_LOAD_RISC_RAM;
/* Common registers: low address word + 64-bit DMA address split across
 * mb[2]/mb[3] (low dword) and mb[6]/mb[7] (high dword). */
640 mcp->mb[1] = LSW(risc_addr);
641 mcp->mb[2] = MSW(req_dma);
642 mcp->mb[3] = LSW(req_dma);
643 mcp->mb[6] = MSW(MSD(req_dma));
644 mcp->mb[7] = LSW(MSD(req_dma));
645 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
646 if (IS_FWI2_CAPABLE(ha)) {
647 mcp->mb[4] = MSW(risc_code_size);
648 mcp->mb[5] = LSW(risc_code_size);
649 mcp->out_mb |= MBX_5|MBX_4;
651 mcp->mb[4] = LSW(risc_code_size);
652 mcp->out_mb |= MBX_4;
655 mcp->in_mb = MBX_1|MBX_0;
656 mcp->tov = MBX_TOV_SECONDS;
658 rval = qla2x00_mailbox_command(vha, mcp);
660 if (rval != QLA_SUCCESS) {
661 ql_dbg(ql_dbg_mbx, vha, 0x1023,
662 "Failed=%x mb[0]=%x mb[1]=%x.\n",
663 rval, mcp->mb[0], mcp->mb[1]);
666 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
667 "Done %s.\n", __func__);
/* Feature bits passed in/out through the EXECUTE FIRMWARE mailboxes. */
673 #define NVME_ENABLE_FLAG BIT_3
674 #define EDIF_HW_SUPPORT BIT_10
678 * Start adapter firmware.
681 * ha = adapter block pointer.
682 * TARGET_QUEUE_LOCK must be released.
683 * ADAPTER_STATE_LOCK must be released.
686 * qla2x00 local function return status code.
/*
 * qla2x00_execute_fw() - issue MBC_EXECUTE_FIRMWARE to start the loaded
 * firmware at @risc_addr, then record capability data the firmware
 * returns (ability mask, max/min link speed, eDIF HW support).
 * Fragment: several lines (declarations, retry loop plumbing) are
 * missing from this excerpt.
 */
692 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
695 struct qla_hw_data *ha = vha->hw;
697 mbx_cmd_t *mcp = &mc;
699 #define EXE_FW_FORCE_SEMAPHORE BIT_7
702 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
703 "Entered %s.\n", __func__);
706 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
709 if (IS_FWI2_CAPABLE(ha)) {
710 mcp->mb[1] = MSW(risc_addr);
711 mcp->mb[2] = LSW(risc_addr);
/* Long-range transceiver detected: program distance into the FW options. */
717 if (ha->flags.lr_detected) {
719 if (IS_BPM_RANGE_CAPABLE(ha))
721 ha->lr_distance << LR_DIST_FW_POS;
724 if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
725 mcp->mb[4] |= NVME_ENABLE_FLAG;
727 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
728 struct nvram_81xx *nv = ha->nvram;
729 /* set minimum speed if specified in nvram */
730 if (nv->min_supported_speed >= 2 &&
731 nv->min_supported_speed <= 5) {
733 mcp->mb[11] |= nv->min_supported_speed & 0xF;
734 mcp->out_mb |= MBX_11;
736 vha->min_supported_speed =
737 nv->min_supported_speed;
741 mcp->mb[11] |= BIT_4;
744 if (ha->flags.exlogins_enabled)
745 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
747 if (ha->flags.exchoffld_enabled)
748 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
751 mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
753 mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
754 mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1;
/* Legacy (non-FWI2) path: 16-bit start address only. */
756 mcp->mb[1] = LSW(risc_addr);
757 mcp->out_mb |= MBX_1;
758 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
760 mcp->out_mb |= MBX_2;
764 mcp->tov = MBX_TOV_SECONDS;
766 rval = qla2x00_mailbox_command(vha, mcp);
768 if (rval != QLA_SUCCESS) {
/* QLA28xx semaphore-stuck error (mb[1]==0x27): retry once with the
 * force-semaphore bit set. */
769 if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
770 mcp->mb[1] == 0x27 && retry) {
773 ql_dbg(ql_dbg_async, vha, 0x1026,
774 "Exe FW: force semaphore.\n");
780 ql_dbg(ql_dbg_async, vha, 0x509d,
781 "Exe FW retry: mb[0]=%x retry[%d]\n", mcp->mb[0], retry);
784 ql_dbg(ql_dbg_mbx, vha, 0x1026,
785 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
790 if (!IS_FWI2_CAPABLE(ha))
/* Success: harvest firmware-reported capabilities. */
793 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
794 ql_dbg(ql_dbg_mbx, vha, 0x119a,
795 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
796 ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
797 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
798 ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
799 ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
800 ha->max_supported_speed == 0 ? "16Gps" :
801 ha->max_supported_speed == 1 ? "32Gps" :
802 ha->max_supported_speed == 2 ? "64Gps" : "unknown");
803 if (vha->min_supported_speed) {
804 ha->min_supported_speed = mcp->mb[5] &
805 (BIT_0 | BIT_1 | BIT_2);
806 ql_dbg(ql_dbg_mbx, vha, 0x119c,
807 "min_supported_speed=%s.\n",
808 ha->min_supported_speed == 6 ? "64Gps" :
809 ha->min_supported_speed == 5 ? "32Gps" :
810 ha->min_supported_speed == 4 ? "16Gps" :
811 ha->min_supported_speed == 3 ? "8Gps" :
812 ha->min_supported_speed == 2 ? "4Gps" : "unknown");
816 if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) {
817 ha->flags.edif_hw = 1;
818 ql_log(ql_log_info, vha, 0xffff,
819 "%s: edif HW\n", __func__);
823 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
824 "Done %s.\n", __func__);
830 * qla_get_exlogin_status
831 * Get extended login status
832 * uses the memory offload control/status Mailbox
835 * ha: adapter state pointer.
836 * fwopt: firmware options
839 * qla2x00 local function status
/* Sub-opcode for MBC_GET_MEM_OFFLOAD_CNTRL_STAT: fetch ex-login stats. */
844 #define FETCH_XLOGINS_STAT 0x8
/*
 * On success, returns the required buffer size in *buf_sz (mb[4]) and
 * the extended-login count in *ex_logins_cnt (mb[10]).
 */
846 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
847 uint16_t *ex_logins_cnt)
851 mbx_cmd_t *mcp = &mc;
853 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
854 "Entered %s\n", __func__);
856 memset(mcp->mb, 0 , sizeof(mcp->mb));
857 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
858 mcp->mb[1] = FETCH_XLOGINS_STAT;
859 mcp->out_mb = MBX_1|MBX_0;
860 mcp->in_mb = MBX_10|MBX_4|MBX_0;
861 mcp->tov = MBX_TOV_SECONDS;
864 rval = qla2x00_mailbox_command(vha, mcp);
865 if (rval != QLA_SUCCESS) {
866 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
868 *buf_sz = mcp->mb[4];
869 *ex_logins_cnt = mcp->mb[10];
871 ql_log(ql_log_info, vha, 0x1190,
872 "buffer size 0x%x, exchange login count=%d\n",
873 mcp->mb[4], mcp->mb[10]);
875 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
876 "Done %s.\n", __func__);
883 * qla_set_exlogin_mem_cfg
884 * set extended login memory configuration
885 * Mbx needs to be issued before init_cb is set
888 * ha: adapter state pointer.
889 * buffer: buffer pointer
890 * phys_addr: physical address of buffer
891 * size: size of buffer
892 * TARGET_QUEUE_LOCK must be released
893 * ADAPTER_STATE_LOCK must be release
896 * qla2x00 local function status code.
/* Sub-opcode for MBC_GET_MEM_OFFLOAD_CNTRL_STAT: configure ex-login memory. */
901 #define CONFIG_XLOGINS_MEM 0x9
/*
 * Hands the pre-allocated extended-login buffer (@phys_addr,
 * ha->exlogin_size) to the firmware. 64-bit DMA address is split across
 * mb[2]/mb[3] (low) and mb[6]/mb[7] (high).
 */
903 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
907 mbx_cmd_t *mcp = &mc;
908 struct qla_hw_data *ha = vha->hw;
910 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
911 "Entered %s.\n", __func__);
913 memset(mcp->mb, 0 , sizeof(mcp->mb));
914 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
915 mcp->mb[1] = CONFIG_XLOGINS_MEM;
916 mcp->mb[2] = MSW(phys_addr);
917 mcp->mb[3] = LSW(phys_addr);
918 mcp->mb[6] = MSW(MSD(phys_addr));
919 mcp->mb[7] = LSW(MSD(phys_addr));
920 mcp->mb[8] = MSW(ha->exlogin_size);
921 mcp->mb[9] = LSW(ha->exlogin_size);
922 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
923 mcp->in_mb = MBX_11|MBX_0;
924 mcp->tov = MBX_TOV_SECONDS;
926 rval = qla2x00_mailbox_command(vha, mcp);
927 if (rval != QLA_SUCCESS) {
928 ql_dbg(ql_dbg_mbx, vha, 0x111b,
929 "EXlogin Failed=%x. MB0=%x MB11=%x\n",
930 rval, mcp->mb[0], mcp->mb[11]);
932 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
933 "Done %s.\n", __func__);
940 * qla_get_exchoffld_status
941 * Get exchange offload status
942 * uses the memory offload control/status Mailbox
945 * ha: adapter state pointer.
946 * fwopt: firmware options
949 * qla2x00 local function status
/* Sub-opcode for MBC_GET_MEM_OFFLOAD_CNTRL_STAT: fetch exch-offload stats. */
954 #define FETCH_XCHOFFLD_STAT 0x2
/*
 * On success, returns required buffer size in *buf_sz (mb[4]) and the
 * exchange-offload count in *ex_logins_cnt (mb[10]). Mirrors
 * qla_get_exlogin_status() with a different sub-opcode.
 */
956 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
957 uint16_t *ex_logins_cnt)
961 mbx_cmd_t *mcp = &mc;
963 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
964 "Entered %s\n", __func__);
966 memset(mcp->mb, 0 , sizeof(mcp->mb));
967 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
968 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
969 mcp->out_mb = MBX_1|MBX_0;
970 mcp->in_mb = MBX_10|MBX_4|MBX_0;
971 mcp->tov = MBX_TOV_SECONDS;
974 rval = qla2x00_mailbox_command(vha, mcp);
975 if (rval != QLA_SUCCESS) {
976 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
978 *buf_sz = mcp->mb[4];
979 *ex_logins_cnt = mcp->mb[10];
981 ql_log(ql_log_info, vha, 0x118e,
982 "buffer size 0x%x, exchange offload count=%d\n",
983 mcp->mb[4], mcp->mb[10]);
985 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
986 "Done %s.\n", __func__);
993 * qla_set_exchoffld_mem_cfg
994 * Set exchange offload memory configuration
995 * Mbx needs to be issued before init_cb is set
998 * ha: adapter state pointer.
999 * buffer: buffer pointer
1000 * phys_addr: physical address of buffer
1001 * size: size of buffer
1002 * TARGET_QUEUE_LOCK must be released
1003 * ADAPTER_STATE_LOCK must be release
1006 * qla2x00 local function status code.
/* Sub-opcode for MBC_GET_MEM_OFFLOAD_CNTRL_STAT: configure offload memory. */
1011 #define CONFIG_XCHOFFLD_MEM 0x3
/*
 * Hands the pre-allocated exchange-offload buffer (ha->exchoffld_buf_dma,
 * ha->exchoffld_size) to the firmware; same register layout as
 * qla_set_exlogin_mem_cfg().
 */
1013 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
1017 mbx_cmd_t *mcp = &mc;
1018 struct qla_hw_data *ha = vha->hw;
1020 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
1021 "Entered %s.\n", __func__);
1023 memset(mcp->mb, 0 , sizeof(mcp->mb));
1024 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
1025 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
1026 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
1027 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
1028 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
1029 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
1030 mcp->mb[8] = MSW(ha->exchoffld_size);
1031 mcp->mb[9] = LSW(ha->exchoffld_size);
1032 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1033 mcp->in_mb = MBX_11|MBX_0;
1034 mcp->tov = MBX_TOV_SECONDS;
1036 rval = qla2x00_mailbox_command(vha, mcp);
1037 if (rval != QLA_SUCCESS) {
1039 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
1041 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
1042 "Done %s.\n", __func__);
1049 * qla2x00_get_fw_version
1050 * Get firmware version.
1053 * ha: adapter state pointer.
1054 * major: pointer for major number.
1055 * minor: pointer for minor number.
1056 * subminor: pointer for subminor number.
1059 * qla2x00 local function return status code.
/*
 * Issues MBC_GET_FIRMWARE_VERSION and populates ha->fw_* / mpi/phy/pep
 * version fields plus feature flags (NVMe, NVMe2, eDIF, SCM, secure FW)
 * from the returned mailbox registers. The set of valid inbound
 * registers (in_mb) depends on the ISP generation. Fragment: some lines
 * are missing from this excerpt.
 */
1065 qla2x00_get_fw_version(scsi_qla_host_t *vha)
1069 mbx_cmd_t *mcp = &mc;
1070 struct qla_hw_data *ha = vha->hw;
1072 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1073 "Entered %s.\n", __func__);
1075 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1076 mcp->out_mb = MBX_0;
1077 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1078 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1079 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1080 if (IS_FWI2_CAPABLE(ha))
1081 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1082 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1084 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1085 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
1088 mcp->tov = MBX_TOV_SECONDS;
1089 rval = qla2x00_mailbox_command(vha, mcp);
1090 if (rval != QLA_SUCCESS)
1093 /* Return mailbox data. */
1094 ha->fw_major_version = mcp->mb[1];
1095 ha->fw_minor_version = mcp->mb[2];
1096 ha->fw_subminor_version = mcp->mb[3];
1097 ha->fw_attributes = mcp->mb[6];
1098 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1099 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
1101 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1103 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1104 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1105 ha->mpi_version[1] = mcp->mb[11] >> 8;
1106 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1107 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1108 ha->phy_version[0] = mcp->mb[8] & 0xff;
1109 ha->phy_version[1] = mcp->mb[9] >> 8;
1110 ha->phy_version[2] = mcp->mb[9] & 0xff;
1113 if (IS_FWI2_CAPABLE(ha)) {
1114 ha->fw_attributes_h = mcp->mb[15];
1115 ha->fw_attributes_ext[0] = mcp->mb[16];
1116 ha->fw_attributes_ext[1] = mcp->mb[17];
1117 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1118 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1119 __func__, mcp->mb[15], mcp->mb[6]);
1120 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1121 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1122 __func__, mcp->mb[17], mcp->mb[16]);
1124 if (ha->fw_attributes_h & 0x4)
1125 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1126 "%s: Firmware supports Extended Login 0x%x\n",
1127 __func__, ha->fw_attributes_h);
1129 if (ha->fw_attributes_h & 0x8)
1130 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1131 "%s: Firmware supports Exchange Offload 0x%x\n",
1132 __func__, ha->fw_attributes_h);
1135 * FW supports nvme and driver load parameter requested nvme.
1136 * BIT 26 of fw_attributes indicates NVMe support.
1138 if ((ha->fw_attributes_h &
1139 (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
1141 if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
1142 vha->flags.nvme_first_burst = 1;
1144 vha->flags.nvme_enabled = 1;
1145 ql_log(ql_log_info, vha, 0xd302,
1146 "%s: FC-NVMe is Enabled (0x%x)\n",
1147 __func__, ha->fw_attributes_h);
1150 /* BIT_13 of Extended FW Attributes informs about NVMe2 support */
1151 if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
1152 ql_log(ql_log_info, vha, 0xd302,
1153 "Firmware supports NVMe2 0x%x\n",
1154 ha->fw_attributes_ext[0]);
1155 vha->flags.nvme2_enabled = 1;
/* eDIF needs HW support (set in qla2x00_execute_fw), module param, and
 * the firmware attribute bit. */
1158 if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable &&
1159 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) {
1160 ha->flags.edif_enabled = 1;
1161 ql_log(ql_log_info, vha, 0xffff,
1162 "%s: edif is enabled\n", __func__);
1166 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1167 ha->serdes_version[0] = mcp->mb[7] & 0xff;
1168 ha->serdes_version[1] = mcp->mb[8] >> 8;
1169 ha->serdes_version[2] = mcp->mb[8] & 0xff;
1170 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1171 ha->mpi_version[1] = mcp->mb[11] >> 8;
1172 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1173 ha->pep_version[0] = mcp->mb[13] & 0xff;
1174 ha->pep_version[1] = mcp->mb[14] >> 8;
1175 ha->pep_version[2] = mcp->mb[14] & 0xff;
1176 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1177 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1178 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1179 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1180 if (IS_QLA28XX(ha)) {
1181 if (mcp->mb[16] & BIT_10)
1182 ha->flags.secure_fw = 1;
1184 ql_log(ql_log_info, vha, 0xffff,
1185 "Secure Flash Update in FW: %s\n",
1186 (ha->flags.secure_fw) ? "Supported" :
1190 if (ha->flags.scm_supported_a &&
1191 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
1192 ha->flags.scm_supported_f = 1;
1193 ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
1195 ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
1196 (ha->flags.scm_supported_f) ? "Supported" :
1199 if (vha->flags.nvme2_enabled) {
1200 /* set BIT_15 of special feature control block for SLER */
1201 ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
1202 /* set BIT_14 of special feature control block for PI CTRL*/
1203 ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
1208 if (rval != QLA_SUCCESS) {
1210 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1213 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1214 "Done %s.\n", __func__);
1220 * qla2x00_get_fw_options
1221 * Get firmware options.
1224 * ha = adapter block pointer.
1225 * fwopt = pointer for firmware options.
1228 * qla2x00 local function return status code.
/*
 * Issues MBC_GET_FIRMWARE_OPTION and copies mailbox registers 0-3 into
 * fwopts[0..3]. Caller must supply at least a 4-element array.
 */
1234 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1238 mbx_cmd_t *mcp = &mc;
1240 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1241 "Entered %s.\n", __func__);
1243 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1244 mcp->out_mb = MBX_0;
1245 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1246 mcp->tov = MBX_TOV_SECONDS;
1248 rval = qla2x00_mailbox_command(vha, mcp);
1250 if (rval != QLA_SUCCESS) {
1252 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1254 fwopts[0] = mcp->mb[0];
1255 fwopts[1] = mcp->mb[1];
1256 fwopts[2] = mcp->mb[2];
1257 fwopts[3] = mcp->mb[3];
1259 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1260 "Done %s.\n", __func__);
1268 * qla2x00_set_fw_options
1269 * Set firmware options.
1272 * ha = adapter block pointer.
1273 * fwopt = pointer for firmware options.
1276 * qla2x00 local function return status code.
/*
 * Issues MBC_SET_FIRMWARE_OPTION with fwopts[1..3] (plus fwopts[10..11]
 * on legacy ISPs, fwopts[10] on FWI2) and writes the firmware status
 * word back into fwopts[0].
 */
1282 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1286 mbx_cmd_t *mcp = &mc;
1288 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1289 "Entered %s.\n", __func__);
1291 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1292 mcp->mb[1] = fwopts[1];
1293 mcp->mb[2] = fwopts[2];
1294 mcp->mb[3] = fwopts[3];
1295 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1297 if (IS_FWI2_CAPABLE(vha->hw)) {
1298 mcp->in_mb |= MBX_1;
1299 mcp->mb[10] = fwopts[10];
1300 mcp->out_mb |= MBX_10;
1302 mcp->mb[10] = fwopts[10];
1303 mcp->mb[11] = fwopts[11];
1304 mcp->mb[12] = 0; /* Undocumented, but used */
1305 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1307 mcp->tov = MBX_TOV_SECONDS;
1309 rval = qla2x00_mailbox_command(vha, mcp);
1311 fwopts[0] = mcp->mb[0];
1313 if (rval != QLA_SUCCESS) {
1315 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1316 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1319 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1320 "Done %s.\n", __func__);
1327 * qla2x00_mbx_reg_test
1328 *	Mailbox register wrap test.
1331 *	ha = adapter block pointer.
1332 *	TARGET_QUEUE_LOCK must be released.
1333 *	ADAPTER_STATE_LOCK must be released.
1336 *	qla2x00 local function return status code.
/*
 * Diagnostic: writes fixed bit patterns into mailbox registers 1-7 and
 * verifies the firmware echoes exactly the same values back.  Any
 * mismatch is reported as QLA_FUNCTION_FAILED.
 */
1342 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1346 mbx_cmd_t *mcp = &mc;
1348 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1349 "Entered %s.\n", __func__);
1351 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
/* Alternating/complementary test patterns for the wrap test. */
1352 mcp->mb[1] = 0xAAAA;
1353 mcp->mb[2] = 0x5555;
1354 mcp->mb[3] = 0xAA55;
1355 mcp->mb[4] = 0x55AA;
1356 mcp->mb[5] = 0xA5A5;
1357 mcp->mb[6] = 0x5A5A;
1358 mcp->mb[7] = 0x2525;
1359 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1360 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1361 mcp->tov = MBX_TOV_SECONDS;
1363 rval = qla2x00_mailbox_command(vha, mcp);
/* Command completed -- now verify every register round-tripped intact. */
1365 if (rval == QLA_SUCCESS) {
1366 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1367 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1368 rval = QLA_FUNCTION_FAILED;
1369 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1370 mcp->mb[7] != 0x2525)
1371 rval = QLA_FUNCTION_FAILED;
1374 if (rval != QLA_SUCCESS) {
1376 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1380 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1381 "Done %s.\n", __func__);
1388 * qla2x00_verify_checksum
1389 *	Verify firmware checksum.
1392 *	ha = adapter block pointer.
1393 *	TARGET_QUEUE_LOCK must be released.
1394 *	ADAPTER_STATE_LOCK must be released.
1397 *	qla2x00 local function return status code.
/*
 * Asks the firmware to verify its checksum starting at risc_addr.
 * FWI2-capable (ISP24xx+) parts take a 32-bit RISC address split
 * across mb[1] (MSW) and mb[2] (LSW); legacy parts take only the
 * low word in mb[1].
 */
1403 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1407 mbx_cmd_t *mcp = &mc;
1409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1410 "Entered %s.\n", __func__);
1412 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1413 mcp->out_mb = MBX_0;
1415 if (IS_FWI2_CAPABLE(vha->hw)) {
1416 mcp->mb[1] = MSW(risc_addr);
1417 mcp->mb[2] = LSW(risc_addr);
1418 mcp->out_mb |= MBX_2|MBX_1;
1419 mcp->in_mb |= MBX_2|MBX_1;
1421 mcp->mb[1] = LSW(risc_addr);
1422 mcp->out_mb |= MBX_1;
1423 mcp->in_mb |= MBX_1;
1426 mcp->tov = MBX_TOV_SECONDS;
1428 rval = qla2x00_mailbox_command(vha, mcp);
1430 if (rval != QLA_SUCCESS) {
/* On failure report the checksum the firmware computed (mb[2:1] or mb[1]). */
1431 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1432 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1433 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1435 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1436 "Done %s.\n", __func__);
1443 * qla2x00_issue_iocb
1444 *	Issue IOCB using mailbox command
1447 *	ha = adapter state pointer.
1448 *	buffer = buffer pointer.
1449 *	phys_addr = physical address of buffer.
1450 *	size = size of buffer.
1451 *	TARGET_QUEUE_LOCK must be released.
1452 *	ADAPTER_STATE_LOCK must be released.
1455 *	qla2x00 local function return status code.
/*
 * Executes a single pre-built IOCB via MBC_IOCB_COMMAND_A64.  'buffer'
 * is the caller's virtual pointer to the IOCB and 'phys_addr' its DMA
 * address (64-bit, split across mb[2,3,6,7]).  Rejected up front with
 * QLA_INVALID_COMMAND if firmware is not yet running.
 */
1461 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1462 dma_addr_t phys_addr, size_t size, uint32_t tov)
1466 mbx_cmd_t *mcp = &mc;
1468 if (!vha->hw->flags.fw_started)
1469 return QLA_INVALID_COMMAND;
1471 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1472 "Entered %s.\n", __func__);
1474 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1476 mcp->mb[2] = MSW(LSD(phys_addr));
1477 mcp->mb[3] = LSW(LSD(phys_addr));
1478 mcp->mb[6] = MSW(MSD(phys_addr));
1479 mcp->mb[7] = LSW(MSD(phys_addr));
1480 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1481 mcp->in_mb = MBX_1|MBX_0;
1484 rval = qla2x00_mailbox_command(vha, mcp);
1486 if (rval != QLA_SUCCESS) {
1488 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
/* Success: firmware has updated the IOCB in place; sanitize its status. */
1490 sts_entry_t *sts_entry = buffer;
1492 /* Mask reserved bits. */
1493 sts_entry->entry_status &=
1494 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1495 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1496 "Done %s (status=%x).\n", __func__,
1497 sts_entry->entry_status);
/* Convenience wrapper: issue an IOCB with the default mailbox timeout. */
1504 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1507 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1512 * qla2x00_abort_command
1513 *	Abort command aborts a specified IOCB.
1516 *	ha = adapter block pointer.
1517 *	sp = SB structure pointer.
1520 *	qla2x00 local function return status code.
/*
 * Issues MBC_ABORT_COMMAND for the IOCB associated with 'sp'.  The
 * firmware identifies the command by its outstanding-commands handle,
 * which is located by scanning the request queue under hardware_lock.
 */
1526 qla2x00_abort_command(srb_t *sp)
1528 unsigned long flags = 0;
1530 uint32_t handle = 0;
1532 mbx_cmd_t *mcp = &mc;
1533 fc_port_t *fcport = sp->fcport;
1534 scsi_qla_host_t *vha = fcport->vha;
1535 struct qla_hw_data *ha = vha->hw;
1536 struct req_que *req;
1537 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1539 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1540 "Entered %s.\n", __func__);
1543 req = sp->qpair->req;
/* Find sp's handle; index 0 is reserved, so the scan starts at 1. */
1547 spin_lock_irqsave(&ha->hardware_lock, flags);
1548 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1549 if (req->outstanding_cmds[handle] == sp)
1552 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1554 if (handle == req->num_outstanding_cmds) {
1555 /* command not found */
1556 return QLA_FUNCTION_FAILED;
1559 mcp->mb[0] = MBC_ABORT_COMMAND;
/* Loop ID placement depends on extended-ID support (mb[1] vs mb[1]<<8). */
1560 if (HAS_EXTENDED_IDS(ha))
1561 mcp->mb[1] = fcport->loop_id;
1563 mcp->mb[1] = fcport->loop_id << 8;
1564 mcp->mb[2] = (uint16_t)handle;
1565 mcp->mb[3] = (uint16_t)(handle >> 16); /* upper half of 32-bit handle */
1566 mcp->mb[6] = (uint16_t)cmd->device->lun;
1567 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1569 mcp->tov = MBX_TOV_SECONDS;
1571 rval = qla2x00_mailbox_command(vha, mcp);
1573 if (rval != QLA_SUCCESS) {
1574 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1576 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1577 "Done %s.\n", __func__);
/*
 * Issues MBC_ABORT_TARGET for 'fcport', then sends a marker IOCB so the
 * firmware resynchronizes command ordering for that target.
 * NOTE(review): the assignment of 'vha' (presumably vha = fcport->vha)
 * is elided from this listing -- confirm against the full source.
 */
1584 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1588 mbx_cmd_t *mcp = &mc;
1589 scsi_qla_host_t *vha;
1593 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1594 "Entered %s.\n", __func__);
1596 mcp->mb[0] = MBC_ABORT_TARGET;
1597 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1598 if (HAS_EXTENDED_IDS(vha->hw)) {
1599 mcp->mb[1] = fcport->loop_id;
1601 mcp->out_mb |= MBX_10;
1603 mcp->mb[1] = fcport->loop_id << 8;
1605 mcp->mb[2] = vha->hw->loop_reset_delay;
1606 mcp->mb[9] = vha->vp_idx; /* virtual-port index */
1609 mcp->tov = MBX_TOV_SECONDS;
1611 rval = qla2x00_mailbox_command(vha, mcp);
1612 if (rval != QLA_SUCCESS) {
1613 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1614 "Failed=%x.\n", rval);
1617 /* Issue marker IOCB. */
1618 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
1620 if (rval2 != QLA_SUCCESS) {
1621 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1622 "Failed to issue marker IOCB (%x).\n", rval2);
1624 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1625 "Done %s.\n", __func__);
/*
 * Issues MBC_LUN_RESET for LUN 'l' on 'fcport', then a marker IOCB for
 * that LUN.  Mirrors qla2x00_abort_target() but scoped to one LUN.
 * NOTE(review): the assignment of 'vha' (presumably vha = fcport->vha)
 * is elided from this listing -- confirm against the full source.
 */
1632 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1636 mbx_cmd_t *mcp = &mc;
1637 scsi_qla_host_t *vha;
1641 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1642 "Entered %s.\n", __func__);
1644 mcp->mb[0] = MBC_LUN_RESET;
1645 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1646 if (HAS_EXTENDED_IDS(vha->hw))
1647 mcp->mb[1] = fcport->loop_id;
1649 mcp->mb[1] = fcport->loop_id << 8;
1650 mcp->mb[2] = (u32)l; /* target LUN */
1652 mcp->mb[9] = vha->vp_idx;
1655 mcp->tov = MBX_TOV_SECONDS;
1657 rval = qla2x00_mailbox_command(vha, mcp);
1658 if (rval != QLA_SUCCESS) {
1659 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1662 /* Issue marker IOCB. */
1663 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
1665 if (rval2 != QLA_SUCCESS) {
1666 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1667 "Failed to issue marker IOCB (%x).\n", rval2);
1669 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1670 "Done %s.\n", __func__);
1677 * qla2x00_get_adapter_id
1678 *	Get adapter ID and topology.
1681 *	ha = adapter block pointer.
1682 *	id = pointer for loop ID.
1683 *	al_pa = pointer for AL_PA.
1684 *	area = pointer for area.
1685 *	domain = pointer for domain.
1686 *	top = pointer for topology.
1687 *	TARGET_QUEUE_LOCK must be released.
1688 *	ADAPTER_STATE_LOCK must be released.
1691 *	qla2x00 local function return status code.
/*
 * Issues MBC_GET_ADAPTER_LOOP_ID and unpacks the adapter's addressing
 * info: AL_PA/area from mb[2], domain from mb[3], switch capabilities
 * from mb[7].  Additionally harvests CNA FCoE parameters (mb[9..13]),
 * an FA-WWN port name (mb[16..19]) and, on 27xx/28xx, BB credit and
 * SCM (EDC/RDF) status.
 */
1697 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1698 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1702 mbx_cmd_t *mcp = &mc;
1704 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1705 "Entered %s.\n", __func__);
1707 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1708 mcp->mb[9] = vha->vp_idx;
1709 mcp->out_mb = MBX_9|MBX_0;
1710 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1711 if (IS_CNA_CAPABLE(vha->hw))
1712 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; /* FCoE VLAN/FCF/MAC */
1713 if (IS_FWI2_CAPABLE(vha->hw))
1714 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16; /* FA-WWN words */
1715 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
1716 mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23;
1718 mcp->tov = MBX_TOV_SECONDS;
1720 rval = qla2x00_mailbox_command(vha, mcp);
/* Map specific firmware completion codes to distinct driver statuses. */
1721 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1722 rval = QLA_COMMAND_ERROR;
1723 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1724 rval = QLA_INVALID_COMMAND;
/* Unpack adapter addressing from the returned mailbox registers. */
1728 *al_pa = LSB(mcp->mb[2]);
1729 *area = MSB(mcp->mb[2]);
1730 *domain = LSB(mcp->mb[3]);
1732 *sw_cap = mcp->mb[7];
1734 if (rval != QLA_SUCCESS) {
1736 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1738 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1739 "Done %s.\n", __func__);
/* CNA adapters: cache the FCoE VLAN, FCF index, and VN_Port MAC. */
1741 if (IS_CNA_CAPABLE(vha->hw)) {
1742 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1743 vha->fcoe_fcf_idx = mcp->mb[10];
1744 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1745 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1746 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1747 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1748 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1749 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1751 /* If FA-WWN supported */
1752 if (IS_FAWWN_CAPABLE(vha->hw)) {
1753 if (mcp->mb[7] & BIT_14) { /* fabric assigned a WWN */
1754 vha->port_name[0] = MSB(mcp->mb[16]);
1755 vha->port_name[1] = LSB(mcp->mb[16]);
1756 vha->port_name[2] = MSB(mcp->mb[17]);
1757 vha->port_name[3] = LSB(mcp->mb[17]);
1758 vha->port_name[4] = MSB(mcp->mb[18]);
1759 vha->port_name[5] = LSB(mcp->mb[18]);
1760 vha->port_name[6] = MSB(mcp->mb[19]);
1761 vha->port_name[7] = LSB(mcp->mb[19]);
1762 fc_host_port_name(vha->host) =
1763 wwn_to_u64(vha->port_name);
1764 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1765 "FA-WWN acquired %016llx\n",
1766 wwn_to_u64(vha->port_name));
/* 27xx/28xx: record buffer-to-buffer credit and SCM negotiation state. */
1770 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
1771 vha->bbcr = mcp->mb[15];
1772 if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) {
1773 ql_log(ql_log_info, vha, 0x11a4,
1774 "SCM: EDC ELS completed, flags 0x%x\n",
1777 if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) {
1778 vha->hw->flags.scm_enabled = 1;
1779 vha->scm_fabric_connection_flags |=
1780 SCM_FLAG_RDF_COMPLETED;
1781 ql_log(ql_log_info, vha, 0x11a5,
1782 "SCM: RDF ELS completed, flags 0x%x\n",
1792 * qla2x00_get_retry_cnt
1793 *	Get current firmware login retry count and delay.
1796 *	ha = adapter block pointer.
1797 *	retry_cnt = pointer to login retry count.
1798 *	tov = pointer to login timeout value.
1801 *	qla2x00 local function return status code.
/*
 * Issues MBC_GET_RETRY_COUNT.  The firmware's retry count (mb[1]) and
 * R_A_TOV (mb[3], in 100ms units) only replace the caller's current
 * values when the firmware's product retry*tov is larger.
 */
1807 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1813 mbx_cmd_t *mcp = &mc;
1815 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1816 "Entered %s.\n", __func__);
1818 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1819 mcp->out_mb = MBX_0;
1820 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1821 mcp->tov = MBX_TOV_SECONDS;
1823 rval = qla2x00_mailbox_command(vha, mcp);
1825 if (rval != QLA_SUCCESS) {
1827 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1828 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1830 /* Convert returned data and check our values. */
1831 *r_a_tov = mcp->mb[3] / 2;
1832 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1833 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1834 /* Update to the larger values */
1835 *retry_cnt = (uint8_t)mcp->mb[1];
1839 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1840 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1847 * qla2x00_init_firmware
1848 *	Initialize adapter firmware.
1851 *	ha = adapter block pointer.
1852 *	dptr = Initialization control block pointer.
1853 *	size = size of initialization control block.
1854 *	TARGET_QUEUE_LOCK must be released.
1855 *	ADAPTER_STATE_LOCK must be released.
1858 *	qla2x00 local function return status code.
/*
 * Passes the (already DMA-mapped) initialization control block to the
 * firmware.  Uses MBC_MID_INITIALIZE_FIRMWARE when NPIV is supported,
 * otherwise MBC_INITIALIZE_FIRMWARE.  Also attaches the extended init
 * CB (mb[10..14]) and the special-feature CB for SCM/NVMe-2
 * (mb[15..19]) when those features are enabled.
 */
1864 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1868 mbx_cmd_t *mcp = &mc;
1869 struct qla_hw_data *ha = vha->hw;
1871 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1872 "Entered %s.\n", __func__);
/* P3P (82xx) with doorbell writes enabled: poke the NX doorbell first. */
1874 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1875 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1876 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1878 if (ha->flags.npiv_supported)
1879 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1881 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
/* 64-bit DMA address of the init CB, split across mb[2,3,6,7]. */
1884 mcp->mb[2] = MSW(ha->init_cb_dma);
1885 mcp->mb[3] = LSW(ha->init_cb_dma);
1886 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1887 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1888 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1889 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1891 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1892 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1893 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1894 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1895 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1896 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
/* Special-feature CB (SLER/PI etc.) for SCM- or NVMe2-enabled HBAs. */
1899 if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
1900 mcp->mb[1] |= BIT_1;
1901 mcp->mb[16] = MSW(ha->sf_init_cb_dma);
1902 mcp->mb[17] = LSW(ha->sf_init_cb_dma);
1903 mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
1904 mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
1905 mcp->mb[15] = sizeof(*ha->sf_init_cb);
1906 mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15;
1909 /* 1 and 2 should normally be captured. */
1910 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1911 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
1912 /* mb3 is additional info about the installed SFP. */
1913 mcp->in_mb |= MBX_3;
1914 mcp->buf_size = size;
1915 mcp->flags = MBX_DMA_OUT;
1916 mcp->tov = MBX_TOV_SECONDS;
1917 rval = qla2x00_mailbox_command(vha, mcp);
1919 if (rval != QLA_SUCCESS) {
/* Failure: dump the control blocks we handed to the firmware. */
1921 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1922 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
1923 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1925 ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
1926 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1927 0x0104d, ha->init_cb, sizeof(*ha->init_cb));
1929 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1930 ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
1931 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1932 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
1935 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1936 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1937 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1938 "Invalid SFP/Validation Failed\n");
1940 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1941 "Done %s.\n", __func__);
1949 * qla2x00_get_port_database
1950 *	Issue normal/enhanced get port database mailbox command
1951 *	and copy device name as necessary.
1954 *	ha = adapter state pointer.
1955 *	dev = structure pointer.
1956 *	opt = enhanced cmd option byte.
1959 *	qla2x00 local function return status code.
/*
 * Fetches the port database entry for 'fcport' into a DMA buffer and
 * validates/refreshes the driver's view of the remote port: login
 * state, WWNs, port ID, port type (target vs initiator) and class of
 * service.  Two decode paths: 24xx-format (FWI2) vs legacy format.
 */
1965 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1969 mbx_cmd_t *mcp = &mc;
1970 port_database_t *pd;
1971 struct port_database_24xx *pd24;
1973 struct qla_hw_data *ha = vha->hw;
1975 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1976 "Entered %s.\n", __func__);
/* DMA buffer receives whichever port-database format the HBA returns. */
1979 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1981 ql_log(ql_log_warn, vha, 0x1050,
1982 "Failed to allocate port database structure.\n");
1984 return QLA_MEMORY_ALLOC_FAILED;
1987 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1988 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1989 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1990 mcp->mb[2] = MSW(pd_dma);
1991 mcp->mb[3] = LSW(pd_dma);
1992 mcp->mb[6] = MSW(MSD(pd_dma));
1993 mcp->mb[7] = LSW(MSD(pd_dma));
1994 mcp->mb[9] = vha->vp_idx;
1995 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1997 if (IS_FWI2_CAPABLE(ha)) {
1998 mcp->mb[1] = fcport->loop_id;
2000 mcp->out_mb |= MBX_10|MBX_1;
2001 mcp->in_mb |= MBX_1;
2002 } else if (HAS_EXTENDED_IDS(ha)) {
2003 mcp->mb[1] = fcport->loop_id;
2005 mcp->out_mb |= MBX_10|MBX_1;
2007 mcp->mb[1] = fcport->loop_id << 8 | opt;
2008 mcp->out_mb |= MBX_1;
2010 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
2011 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
2012 mcp->flags = MBX_DMA_IN;
/* Generous timeout: login can take a while (2.5x login_timeout). */
2013 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2014 rval = qla2x00_mailbox_command(vha, mcp);
2015 if (rval != QLA_SUCCESS)
/* --- 24xx-format decode path --- */
2018 if (IS_FWI2_CAPABLE(ha)) {
2020 u8 current_login_state, last_login_state;
2022 pd24 = (struct port_database_24xx *) pd;
2024 /* Check for logged in state. */
/* NVMe targets keep their login state in the high nibble. */
2025 if (NVME_TARGET(ha, fcport)) {
2026 current_login_state = pd24->current_login_state >> 4;
2027 last_login_state = pd24->last_login_state >> 4;
2029 current_login_state = pd24->current_login_state & 0xf;
2030 last_login_state = pd24->last_login_state & 0xf;
2032 fcport->current_login_state = pd24->current_login_state;
2033 fcport->last_login_state = pd24->last_login_state;
2035 /* Check for logged in state. */
2036 if (current_login_state != PDS_PRLI_COMPLETE &&
2037 last_login_state != PDS_PRLI_COMPLETE) {
2038 ql_dbg(ql_dbg_mbx, vha, 0x119a,
2039 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
2040 current_login_state, last_login_state,
2042 rval = QLA_FUNCTION_FAILED;
/* A changed WWPN means the remote port was swapped mid-session. */
2048 if (fcport->loop_id == FC_NO_LOOP_ID ||
2049 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2050 memcmp(fcport->port_name, pd24->port_name, 8))) {
2051 /* We lost the device mid way. */
2052 rval = QLA_NOT_LOGGED_IN;
2056 /* Names are little-endian. */
2057 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
2058 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
2060 /* Get port_id of device. */
2061 fcport->d_id.b.domain = pd24->port_id[0];
2062 fcport->d_id.b.area = pd24->port_id[1];
2063 fcport->d_id.b.al_pa = pd24->port_id[2];
2064 fcport->d_id.b.rsvd_1 = 0;
2066 /* If not target must be initiator or unknown type. */
2067 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
2068 fcport->port_type = FCT_INITIATOR;
2070 fcport->port_type = FCT_TARGET;
2072 /* Passback COS information. */
2073 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
2074 FC_COS_CLASS2 : FC_COS_CLASS3;
2076 if (pd24->prli_svc_param_word_3[0] & BIT_7)
2077 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
/* --- legacy-format decode path (else branch) --- */
2081 /* Check for logged in state. */
2082 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
2083 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
2084 ql_dbg(ql_dbg_mbx, vha, 0x100a,
2085 "Unable to verify login-state (%x/%x) - "
2086 "portid=%02x%02x%02x.\n", pd->master_state,
2087 pd->slave_state, fcport->d_id.b.domain,
2088 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2089 rval = QLA_FUNCTION_FAILED;
2093 if (fcport->loop_id == FC_NO_LOOP_ID ||
2094 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2095 memcmp(fcport->port_name, pd->port_name, 8))) {
2096 /* We lost the device mid way. */
2097 rval = QLA_NOT_LOGGED_IN;
2101 /* Names are little-endian. */
2102 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
2103 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
2105 /* Get port_id of device. */
2106 fcport->d_id.b.domain = pd->port_id[0];
/* Legacy port database stores the area byte at index 3 (not 1). */
2107 fcport->d_id.b.area = pd->port_id[3];
2108 fcport->d_id.b.al_pa = pd->port_id[2];
2109 fcport->d_id.b.rsvd_1 = 0;
2111 /* If not target must be initiator or unknown type. */
2112 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2113 fcport->port_type = FCT_INITIATOR;
2115 fcport->port_type = FCT_TARGET;
2117 /* Passback COS information. */
2118 fcport->supported_classes = (pd->options & BIT_4) ?
2119 FC_COS_CLASS2 : FC_COS_CLASS3;
2123 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2126 if (rval != QLA_SUCCESS) {
2127 ql_dbg(ql_dbg_mbx, vha, 0x1052,
2128 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
2129 mcp->mb[0], mcp->mb[1]);
2131 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2132 "Done %s.\n", __func__);
/*
 * Fetches the raw 24xx-format port database entry for 'nport_handle'
 * directly into the caller's 'pdb' buffer (streaming DMA mapping,
 * unmapped before return).  No interpretation is performed here.
 * NOTE(review): the dma_mapping_error() check line appears elided
 * from this listing -- confirm against the full source.
 */
2139 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
2140 struct port_database_24xx *pdb)
2143 mbx_cmd_t *mcp = &mc;
2147 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115,
2148 "Entered %s.\n", __func__);
2150 memset(pdb, 0, sizeof(*pdb));
2152 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
2153 sizeof(*pdb), DMA_FROM_DEVICE);
2155 ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
2156 return QLA_MEMORY_ALLOC_FAILED;
2159 mcp->mb[0] = MBC_GET_PORT_DATABASE;
2160 mcp->mb[1] = nport_handle;
/* 64-bit buffer address split across mb[2,3,6,7]. */
2161 mcp->mb[2] = MSW(LSD(pdb_dma));
2162 mcp->mb[3] = LSW(LSD(pdb_dma));
2163 mcp->mb[6] = MSW(MSD(pdb_dma));
2164 mcp->mb[7] = LSW(MSD(pdb_dma));
2167 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2168 mcp->in_mb = MBX_1|MBX_0;
2169 mcp->buf_size = sizeof(*pdb);
2170 mcp->flags = MBX_DMA_IN;
2171 mcp->tov = vha->hw->login_timeout * 2;
2172 rval = qla2x00_mailbox_command(vha, mcp);
2174 if (rval != QLA_SUCCESS) {
2175 ql_dbg(ql_dbg_mbx, vha, 0x111a,
2176 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2177 rval, mcp->mb[0], mcp->mb[1]);
2179 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b,
2180 "Done %s.\n", __func__);
2183 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma,
2184 sizeof(*pdb), DMA_FROM_DEVICE);
2190 * qla2x00_get_firmware_state
2191 *	Get adapter firmware state.
2194 *	ha = adapter block pointer.
2195 *	dptr = pointer for firmware state.
2196 *	TARGET_QUEUE_LOCK must be released.
2197 *	ADAPTER_STATE_LOCK must be released.
2200 *	qla2x00 local function return status code.
/*
 * Issues MBC_GET_FIRMWARE_STATE and returns mb[1..6] in states[0..5].
 * FWI2-capable parts report six state words (including SFP info in
 * states[2] and D-Port status in states[5]); legacy parts only one.
 */
2206 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2210 mbx_cmd_t *mcp = &mc;
2211 struct qla_hw_data *ha = vha->hw;
2213 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2214 "Entered %s.\n", __func__);
2216 if (!ha->flags.fw_started)
2217 return QLA_FUNCTION_FAILED;
2219 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2220 mcp->out_mb = MBX_0;
2221 if (IS_FWI2_CAPABLE(vha->hw))
2222 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2224 mcp->in_mb = MBX_1|MBX_0;
2225 mcp->tov = MBX_TOV_SECONDS;
2227 rval = qla2x00_mailbox_command(vha, mcp);
2229 /* Return firmware states. */
2230 states[0] = mcp->mb[1];
2231 if (IS_FWI2_CAPABLE(vha->hw)) {
2232 states[1] = mcp->mb[2];
2233 states[2] = mcp->mb[3]; /* SFP info */
2234 states[3] = mcp->mb[4];
2235 states[4] = mcp->mb[5];
2236 states[5] = mcp->mb[6]; /* DPORT status */
2239 if (rval != QLA_SUCCESS) {
2241 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2243 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2244 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2245 ql_dbg(ql_dbg_mbx, vha, 0x119e,
2246 "Invalid SFP/Validation Failed\n");
2248 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2249 "Done %s.\n", __func__);
2256 * qla2x00_get_port_name
2257 *	Issue get port name mailbox command.
2258 *	Returned name is in big endian format.
2261 *	ha = adapter block pointer.
2262 *	loop_id = loop ID of device.
2263 *	name = pointer for name.
2264 *	TARGET_QUEUE_LOCK must be released.
2265 *	ADAPTER_STATE_LOCK must be released.
2268 *	qla2x00 local function return status code.
/*
 * Issues MBC_GET_PORT_NAME for 'loop_id' and unpacks the 8-byte WWPN
 * from mb[2,3,6,7] into 'name' in big-endian byte order.
 */
2274 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2279 mbx_cmd_t *mcp = &mc;
2281 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2282 "Entered %s.\n", __func__);
2284 mcp->mb[0] = MBC_GET_PORT_NAME;
2285 mcp->mb[9] = vha->vp_idx;
2286 mcp->out_mb = MBX_9|MBX_1|MBX_0;
/* Extended-ID adapters take the loop ID in mb[1]; legacy packs it with opt. */
2287 if (HAS_EXTENDED_IDS(vha->hw)) {
2288 mcp->mb[1] = loop_id;
2290 mcp->out_mb |= MBX_10;
2292 mcp->mb[1] = loop_id << 8 | opt;
2295 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2296 mcp->tov = MBX_TOV_SECONDS;
2298 rval = qla2x00_mailbox_command(vha, mcp);
2300 if (rval != QLA_SUCCESS) {
2302 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2305 /* This function returns name in big endian. */
2306 name[0] = MSB(mcp->mb[2]);
2307 name[1] = LSB(mcp->mb[2]);
2308 name[2] = MSB(mcp->mb[3]);
2309 name[3] = LSB(mcp->mb[3]);
2310 name[4] = MSB(mcp->mb[6]);
2311 name[5] = LSB(mcp->mb[6]);
2312 name[6] = MSB(mcp->mb[7]);
2313 name[7] = LSB(mcp->mb[7]);
2316 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2317 "Done %s.\n", __func__);
2324 * qla24xx_link_initialization
2325 *	Issue link initialization mailbox command.
2328 *	ha = adapter block pointer.
2329 *	TARGET_QUEUE_LOCK must be released.
2330 *	ADAPTER_STATE_LOCK must be released.
2333 *	qla2x00 local function return status code.
/*
 * Issues MBC_LINK_INITIALIZATION on FWI2-capable, non-CNA adapters.
 * mb[1] selects loop (BIT_6) vs point-to-point (BIT_5) behavior based
 * on the current operating mode.
 */
2339 qla24xx_link_initialize(scsi_qla_host_t *vha)
2343 mbx_cmd_t *mcp = &mc;
2345 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2346 "Entered %s.\n", __func__);
/* Not supported on pre-FWI2 or converged (CNA) adapters. */
2348 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2349 return QLA_FUNCTION_FAILED;
2351 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2353 if (vha->hw->operating_mode == LOOP)
2354 mcp->mb[1] |= BIT_6;
2356 mcp->mb[1] |= BIT_5;
2359 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2361 mcp->tov = MBX_TOV_SECONDS;
2363 rval = qla2x00_mailbox_command(vha, mcp);
2365 if (rval != QLA_SUCCESS) {
2366 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2368 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2369 "Done %s.\n", __func__);
2377 *	Issue LIP reset mailbox command.
2380 *	ha = adapter block pointer.
2381 *	TARGET_QUEUE_LOCK must be released.
2382 *	ADAPTER_STATE_LOCK must be released.
2385 *	qla2x00 local function return status code.
/*
 * Resets the FC link.  Three variants by adapter family:
 *  - CNA:  MBC_LIP_FULL_LOGIN (logout across all FCFs),
 *  - FWI2: MBC_LIP_FULL_LOGIN with a reset delay in mb[3],
 *  - legacy: MBC_LIP_RESET with loop-ID mask in mb[1] and delay in mb[2].
 */
2391 qla2x00_lip_reset(scsi_qla_host_t *vha)
2395 mbx_cmd_t *mcp = &mc;
2397 ql_dbg(ql_dbg_disc, vha, 0x105a,
2398 "Entered %s.\n", __func__);
2400 if (IS_CNA_CAPABLE(vha->hw)) {
2401 /* Logout across all FCFs. */
2402 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2405 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2406 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2407 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2410 mcp->mb[3] = vha->hw->loop_reset_delay;
2411 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2413 mcp->mb[0] = MBC_LIP_RESET;
2414 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
/* Loop-ID mask position differs with extended-ID support. */
2415 if (HAS_EXTENDED_IDS(vha->hw)) {
2416 mcp->mb[1] = 0x00ff;
2418 mcp->out_mb |= MBX_10;
2420 mcp->mb[1] = 0xff00;
2422 mcp->mb[2] = vha->hw->loop_reset_delay;
2426 mcp->tov = MBX_TOV_SECONDS;
2428 rval = qla2x00_mailbox_command(vha, mcp);
2430 if (rval != QLA_SUCCESS) {
2432 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2435 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2436 "Done %s.\n", __func__);
2447 *	ha = adapter block pointer.
2448 *	sns = pointer for command.
2449 *	cmd_size = command size.
2450 *	buf_size = response/command size.
2451 *	TARGET_QUEUE_LOCK must be released.
2452 *	ADAPTER_STATE_LOCK must be released.
2455 *	qla2x00 local function return status code.
/*
 * Sends a Simple Name Server (SNS) command.  The same DMA buffer at
 * 'sns_phys_address' carries both the request (cmd_size words out) and
 * the response (buf_size in), hence MBX_DMA_OUT|MBX_DMA_IN.
 */
2461 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2462 uint16_t cmd_size, size_t buf_size)
2466 mbx_cmd_t *mcp = &mc;
2468 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2469 "Entered %s.\n", __func__);
2471 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2472 "Retry cnt=%d ratov=%d total tov=%d.\n",
2473 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2475 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2476 mcp->mb[1] = cmd_size;
2477 mcp->mb[2] = MSW(sns_phys_address);
2478 mcp->mb[3] = LSW(sns_phys_address);
2479 mcp->mb[6] = MSW(MSD(sns_phys_address));
2480 mcp->mb[7] = LSW(MSD(sns_phys_address));
2481 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2482 mcp->in_mb = MBX_0|MBX_1;
2483 mcp->buf_size = buf_size;
2484 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
/* 2.5x login_timeout: name-server queries may ride out a login storm. */
2485 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2486 rval = qla2x00_mailbox_command(vha, mcp);
2488 if (rval != QLA_SUCCESS) {
2490 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2491 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2492 rval, mcp->mb[0], mcp->mb[1]);
2495 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2496 "Done %s.\n", __func__);
/*
 * FWI2 (ISP24xx+) fabric login: builds a LOGINOUT_PORT IOCB (PLOGI,
 * optionally conditional / PRLI-skipped) and issues it via
 * qla2x00_issue_iocb_timeout().  Results are translated back into
 * legacy mailbox-style status codes in mb[] so callers of the 2x00
 * interface work unchanged (mb[0] = MBS_* code, mb[10] = COS bits).
 */
2503 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2504 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2508 struct logio_entry_24xx *lg;
2511 struct qla_hw_data *ha = vha->hw;
2512 struct req_que *req;
2514 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2515 "Entered %s.\n", __func__);
/* vports with a dedicated qpair use its request queue; else queue 0. */
2517 if (vha->vp_idx && vha->qpair)
2518 req = vha->qpair->req;
2520 req = ha->req_q_map[0];
2522 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2524 ql_log(ql_log_warn, vha, 0x1062,
2525 "Failed to allocate login IOCB.\n");
2526 return QLA_MEMORY_ALLOC_FAILED;
/* Build the login IOCB. */
2529 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2530 lg->entry_count = 1;
2531 lg->handle = make_handle(req->id, lg->handle);
2532 lg->nport_handle = cpu_to_le16(loop_id);
2533 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2535 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2537 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2538 lg->port_id[0] = al_pa;
2539 lg->port_id[1] = area;
2540 lg->port_id[2] = domain;
2541 lg->vp_index = vha->vp_idx;
/* Timeout derived from fabric R_A_TOV (stored in 100ms units). */
2542 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2543 (ha->r_a_tov / 10 * 2) + 2);
2544 if (rval != QLA_SUCCESS) {
2545 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2546 "Failed to issue login IOCB (%x).\n", rval);
2547 } else if (lg->entry_status != 0) {
2548 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2549 "Failed to complete IOCB -- error status (%x).\n",
2551 rval = QLA_FUNCTION_FAILED;
2552 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2553 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2554 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2556 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2557 "Failed to complete IOCB -- completion status (%x) "
2558 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
/* Translate IOCB sub-status into legacy mailbox status codes. */
2562 case LSC_SCODE_PORTID_USED:
2563 mb[0] = MBS_PORT_ID_USED;
2564 mb[1] = LSW(iop[1]);
2566 case LSC_SCODE_NPORT_USED:
2567 mb[0] = MBS_LOOP_ID_USED;
2569 case LSC_SCODE_NOLINK:
2570 case LSC_SCODE_NOIOCB:
2571 case LSC_SCODE_NOXCB:
2572 case LSC_SCODE_CMD_FAILED:
2573 case LSC_SCODE_NOFABRIC:
2574 case LSC_SCODE_FW_NOT_READY:
2575 case LSC_SCODE_NOT_LOGGED_IN:
2576 case LSC_SCODE_NOPCB:
2577 case LSC_SCODE_ELS_REJECT:
2578 case LSC_SCODE_CMD_PARAM_ERR:
2579 case LSC_SCODE_NONPORT:
2580 case LSC_SCODE_LOGGED_IN:
2581 case LSC_SCODE_NOFLOGI_ACC:
2583 mb[0] = MBS_COMMAND_ERROR;
/* Login succeeded -- report completion and class-of-service bits. */
2587 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2588 "Done %s.\n", __func__);
2590 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2592 mb[0] = MBS_COMMAND_COMPLETE;
2594 if (iop[0] & BIT_4) {
2600 /* Passback COS information. */
2602 if (lg->io_parameter[7] || lg->io_parameter[8])
2603 mb[10] |= BIT_0;	/* Class 2. */
2604 if (lg->io_parameter[9] || lg->io_parameter[10])
2605 mb[10] |= BIT_1;	/* Class 3. */
2606 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2607 mb[10] |= BIT_7;	/* Confirmed Completion
2612 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2618 * qla2x00_login_fabric
2619 *	Issue login fabric port mailbox command.
2622 *	ha = adapter block pointer.
2623 *	loop_id = device loop ID.
2624 *	domain = device domain.
2625 *	area = device area.
2626 *	al_pa = device AL_PA.
2627 *	status = pointer for return status.
2628 *	opt = command options.
2629 *	TARGET_QUEUE_LOCK must be released.
2630 *	ADAPTER_STATE_LOCK must be released.
2633 *	qla2x00 local function return status code.
/*
 * Legacy (pre-FWI2) fabric login via MBC_LOGIN_FABRIC_PORT.  The
 * 24-bit port ID is carried in mb[2] (domain) and mb[3] (area|al_pa).
 * Certain firmware completion codes (0x4001..0x4006) still return
 * useful mailbox data, so they are passed through to the caller
 * rather than treated as hard failures.
 */
2639 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2640 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2644 mbx_cmd_t *mcp = &mc;
2645 struct qla_hw_data *ha = vha->hw;
2647 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2648 "Entered %s.\n", __func__);
2650 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2651 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2652 if (HAS_EXTENDED_IDS(ha)) {
2653 mcp->mb[1] = loop_id;
2655 mcp->out_mb |= MBX_10;
2657 mcp->mb[1] = (loop_id << 8) | opt;
2659 mcp->mb[2] = domain;
2660 mcp->mb[3] = area << 8 | al_pa;
2662 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2663 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2665 rval = qla2x00_mailbox_command(vha, mcp);
2667 /* Return mailbox statuses. */
2674 /* COS retrieved from Get-Port-Database mailbox command. */
2678 if (rval != QLA_SUCCESS) {
2679 /* RLU tmp code: need to change main mailbox_command function to
2680 * return ok even when the mailbox completion value is not
2681 * SUCCESS. The caller needs to be responsible to interpret
2682 * the return values of this mailbox command if we're not
2683 * to change too much of the existing code.
/* mb[0] codes 0x4001/2/3/5/6 carry interpretable data for the caller. */
2685 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2686 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2687 mcp->mb[0] == 0x4006)
2691 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2692 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2693 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2696 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2697 "Done %s.\n", __func__);
2704 * qla2x00_login_local_device
2705 * Issue login loop port mailbox command.
2708 * ha = adapter block pointer.
2709 * loop_id = device loop ID.
2710 * opt = command options.
2713 * Return status code.
2720 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2721 uint16_t *mb_ret, uint8_t opt)
2725 mbx_cmd_t *mcp = &mc;
2726 struct qla_hw_data *ha = vha->hw;
2728 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2729 "Entered %s.\n", __func__);
/* FWI2-capable ISPs (24xx+) use the IOCB-based fabric login path. */
2731 if (IS_FWI2_CAPABLE(ha))
2732 return qla24xx_login_fabric(vha, fcport->loop_id,
2733 fcport->d_id.b.domain, fcport->d_id.b.area,
2734 fcport->d_id.b.al_pa, mb_ret, opt);
2736 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2737 if (HAS_EXTENDED_IDS(ha))
2738 mcp->mb[1] = fcport->loop_id;
2740 mcp->mb[1] = fcport->loop_id << 8;
2742 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2743 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2744 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2746 rval = qla2x00_mailbox_command(vha, mcp);
/* Copy raw mailbox results out for the caller (optional buffer). */
2748 /* Return mailbox statuses. */
2749 if (mb_ret != NULL) {
2750 mb_ret[0] = mcp->mb[0];
2751 mb_ret[1] = mcp->mb[1];
2752 mb_ret[6] = mcp->mb[6];
2753 mb_ret[7] = mcp->mb[7];
2756 if (rval != QLA_SUCCESS) {
2757 /* AV tmp code: need to change main mailbox_command function to
2758 * return ok even when the mailbox completion value is not
2759 * SUCCESS. The caller needs to be responsible to interpret
2760 * the return values of this mailbox command if we're not
2761 * to change too much of the existing code.
2763 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2766 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2767 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2768 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2771 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2772 "Done %s.\n", __func__);
/* Log out a fabric port on ISP24xx+ by building a LOGO logio IOCB in a
 * DMA-pool buffer and issuing it with a timeout derived from R_A_TOV. */
2779 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2780 uint8_t area, uint8_t al_pa)
2783 struct logio_entry_24xx *lg;
2785 struct qla_hw_data *ha = vha->hw;
2786 struct req_que *req;
2788 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2789 "Entered %s.\n", __func__);
2791 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2793 ql_log(ql_log_warn, vha, 0x106e,
2794 "Failed to allocate logout IOCB.\n");
2795 return QLA_MEMORY_ALLOC_FAILED;
2799 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2800 lg->entry_count = 1;
2801 lg->handle = make_handle(req->id, lg->handle);
2802 lg->nport_handle = cpu_to_le16(loop_id);
/* Implicit LOGO: firmware tears down the session without ELS traffic. */
2804 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
/* port_id is stored little-endian byte order: al_pa, area, domain. */
2806 lg->port_id[0] = al_pa;
2807 lg->port_id[1] = area;
2808 lg->port_id[2] = domain;
2809 lg->vp_index = vha->vp_idx;
2810 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2811 (ha->r_a_tov / 10 * 2) + 2);
2812 if (rval != QLA_SUCCESS) {
2813 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2814 "Failed to issue logout IOCB (%x).\n", rval);
2815 } else if (lg->entry_status != 0) {
2816 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2817 "Failed to complete IOCB -- error status (%x).\n",
2819 rval = QLA_FUNCTION_FAILED;
2820 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2821 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2822 "Failed to complete IOCB -- completion status (%x) "
2823 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2824 le32_to_cpu(lg->io_parameter[0]),
2825 le32_to_cpu(lg->io_parameter[1]));
2828 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2829 "Done %s.\n", __func__);
2832 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2838 * qla2x00_fabric_logout
2839 * Issue logout fabric port mailbox command.
2842 * ha = adapter block pointer.
2843 * loop_id = device loop ID.
2844 * TARGET_QUEUE_LOCK must be released.
2845 * ADAPTER_STATE_LOCK must be released.
2848 * qla2x00 local function return status code.
2854 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2855 uint8_t area, uint8_t al_pa)
2859 mbx_cmd_t *mcp = &mc;
2861 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2862 "Entered %s.\n", __func__);
2864 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2865 mcp->out_mb = MBX_1|MBX_0;
/* As with fabric login: extended IDs use mb[1] directly, legacy
 * firmware carries the loop ID in the high byte of mb[1]. */
2866 if (HAS_EXTENDED_IDS(vha->hw)) {
2867 mcp->mb[1] = loop_id;
2869 mcp->out_mb |= MBX_10;
2871 mcp->mb[1] = loop_id << 8;
2874 mcp->in_mb = MBX_1|MBX_0;
2875 mcp->tov = MBX_TOV_SECONDS;
2877 rval = qla2x00_mailbox_command(vha, mcp);
2879 if (rval != QLA_SUCCESS) {
2881 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2882 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2885 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2886 "Done %s.\n", __func__);
2893 * qla2x00_full_login_lip
2894 * Issue full login LIP mailbox command.
2897 * ha = adapter block pointer.
2898 * TARGET_QUEUE_LOCK must be released.
2899 * ADAPTER_STATE_LOCK must be released.
2902 * qla2x00 local function return status code.
2908 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2912 mbx_cmd_t *mcp = &mc;
2914 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2915 "Entered %s.\n", __func__);
2917 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
/* BIT_4 in mb[1] is only meaningful on FWI2-capable (24xx+) firmware. */
2918 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
2921 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2923 mcp->tov = MBX_TOV_SECONDS;
2925 rval = qla2x00_mailbox_command(vha, mcp);
2927 if (rval != QLA_SUCCESS) {
2929 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2932 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2933 "Done %s.\n", __func__);
2940 * qla2x00_get_id_list
2943 * ha = adapter block pointer.
2946 * qla2x00 local function return status code.
2952 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2957 mbx_cmd_t *mcp = &mc;
2959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2960 "Entered %s.\n", __func__);
2962 if (id_list == NULL)
2963 return QLA_FUNCTION_FAILED;
2965 mcp->mb[0] = MBC_GET_ID_LIST;
2966 mcp->out_mb = MBX_0;
/* The 64-bit DMA address is split across different mailbox registers
 * depending on firmware generation (FWI2 vs. legacy layout). */
2967 if (IS_FWI2_CAPABLE(vha->hw)) {
2968 mcp->mb[2] = MSW(id_list_dma);
2969 mcp->mb[3] = LSW(id_list_dma);
2970 mcp->mb[6] = MSW(MSD(id_list_dma));
2971 mcp->mb[7] = LSW(MSD(id_list_dma));
2973 mcp->mb[9] = vha->vp_idx;
2974 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2976 mcp->mb[1] = MSW(id_list_dma);
2977 mcp->mb[2] = LSW(id_list_dma);
2978 mcp->mb[3] = MSW(MSD(id_list_dma));
2979 mcp->mb[6] = LSW(MSD(id_list_dma));
2980 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2982 mcp->in_mb = MBX_1|MBX_0;
2983 mcp->tov = MBX_TOV_SECONDS;
2985 rval = qla2x00_mailbox_command(vha, mcp);
2987 if (rval != QLA_SUCCESS) {
2989 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
/* mb[1] returns the number of ID-list entries written to the buffer. */
2991 *entries = mcp->mb[1];
2992 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2993 "Done %s.\n", __func__);
3000 * qla2x00_get_resource_cnts
3001 * Get current firmware resource counts.
3004 * ha = adapter block pointer.
3007 * qla2x00 local function return status code.
3013 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
3015 struct qla_hw_data *ha = vha->hw;
3018 mbx_cmd_t *mcp = &mc;
3020 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
3021 "Entered %s.\n", __func__);
3023 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
3024 mcp->out_mb = MBX_0;
3025 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
/* mb[12] (FCF count) is only returned by 81xx/83xx/27xx/28xx firmware. */
3026 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
3027 IS_QLA27XX(ha) || IS_QLA28XX(ha))
3028 mcp->in_mb |= MBX_12;
3029 mcp->tov = MBX_TOV_SECONDS;
3031 rval = qla2x00_mailbox_command(vha, mcp);
3033 if (rval != QLA_SUCCESS) {
3035 ql_dbg(ql_dbg_mbx, vha, 0x107d,
3036 "Failed mb[0]=%x.\n", mcp->mb[0]);
3038 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
3039 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
3040 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
3041 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
3042 mcp->mb[11], mcp->mb[12]);
/* Cache the exchange (XCB) and IOCB counts in the HA structure. */
3044 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
3045 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
3046 ha->cur_fw_xcb_count = mcp->mb[3];
3047 ha->orig_fw_xcb_count = mcp->mb[6];
3048 ha->cur_fw_iocb_count = mcp->mb[7];
3049 ha->orig_fw_iocb_count = mcp->mb[10];
3050 if (ha->flags.npiv_supported)
3051 ha->max_npiv_vports = mcp->mb[11];
3052 if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3053 ha->fw_max_fcf_count = mcp->mb[12];
3060 * qla2x00_get_fcal_position_map
3061 * Get FCAL (LILP) position map using mailbox command
3064 * ha = adapter state pointer.
3065 * pos_map = buffer pointer (can be NULL).
3068 * qla2x00 local function return status code.
3074 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map,
3079 mbx_cmd_t *mcp = &mc;
3081 dma_addr_t pmap_dma;
3082 struct qla_hw_data *ha = vha->hw;
3084 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
3085 "Entered %s.\n", __func__);
/* Bounce buffer from the per-HBA DMA pool; freed before returning. */
3087 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
3089 ql_log(ql_log_warn, vha, 0x1080,
3090 "Memory alloc failed.\n");
3091 return QLA_MEMORY_ALLOC_FAILED;
3094 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
3095 mcp->mb[2] = MSW(pmap_dma);
3096 mcp->mb[3] = LSW(pmap_dma);
3097 mcp->mb[6] = MSW(MSD(pmap_dma));
3098 mcp->mb[7] = LSW(MSD(pmap_dma));
3099 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3100 mcp->in_mb = MBX_1|MBX_0;
3101 mcp->buf_size = FCAL_MAP_SIZE;
3102 mcp->flags = MBX_DMA_IN;
3103 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
3104 rval = qla2x00_mailbox_command(vha, mcp);
3106 if (rval == QLA_SUCCESS) {
3107 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
3108 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
3109 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
3110 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
/* pmap[0] holds the map entry count (LILP frame format). */
3114 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
3116 *num_entries = pmap[0];
3118 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
3120 if (rval != QLA_SUCCESS) {
3121 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
3123 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
3124 "Done %s.\n", __func__);
3131 * qla2x00_get_link_status
3134 * ha = adapter block pointer.
3135 * loop_id = device loop ID.
3136 * ret_buf = pointer to link status return buffer.
3140 * BIT_0 = mem alloc error.
3141 * BIT_1 = mailbox error.
3144 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
3145 struct link_statistics *stats, dma_addr_t stats_dma)
3149 mbx_cmd_t *mcp = &mc;
3150 uint32_t *iter = (uint32_t *)stats;
/* Only the dwords up to link_up_cnt are byte-swapped below. */
3151 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
3152 struct qla_hw_data *ha = vha->hw;
3154 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
3155 "Entered %s.\n", __func__);
3157 mcp->mb[0] = MBC_GET_LINK_STATUS;
3158 mcp->mb[2] = MSW(LSD(stats_dma));
3159 mcp->mb[3] = LSW(LSD(stats_dma));
3160 mcp->mb[6] = MSW(MSD(stats_dma));
3161 mcp->mb[7] = LSW(MSD(stats_dma));
3162 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
/* Loop-ID placement differs by firmware generation (see login paths). */
3164 if (IS_FWI2_CAPABLE(ha)) {
3165 mcp->mb[1] = loop_id;
3168 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
3169 mcp->in_mb |= MBX_1;
3170 } else if (HAS_EXTENDED_IDS(ha)) {
3171 mcp->mb[1] = loop_id;
3173 mcp->out_mb |= MBX_10|MBX_1;
3175 mcp->mb[1] = loop_id << 8;
3176 mcp->out_mb |= MBX_1;
3178 mcp->tov = MBX_TOV_SECONDS;
3179 mcp->flags = IOCTL_CMD;
3180 rval = qla2x00_mailbox_command(vha, mcp);
3182 if (rval == QLA_SUCCESS) {
3183 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3184 ql_dbg(ql_dbg_mbx, vha, 0x1085,
3185 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3186 rval = QLA_FUNCTION_FAILED;
3188 /* Re-endianize - firmware data is le32. */
3189 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3190 "Done %s.\n", __func__);
3191 for ( ; dwords--; iter++)
3196 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
/* Fetch ISP24xx+ private link statistics into a caller-supplied DMA
 * buffer via the sleepable qla24xx_send_mb_cmd() path, then convert the
 * whole le32 result buffer to host endianness in place. */
3203 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3204 dma_addr_t stats_dma, uint16_t options)
3208 mbx_cmd_t *mcp = &mc;
3209 uint32_t *iter = (uint32_t *)stats;
3210 ushort dwords = sizeof(*stats)/sizeof(*iter);
3212 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3213 "Entered %s.\n", __func__);
3215 memset(&mc, 0, sizeof(mc));
3216 mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3217 mc.mb[2] = MSW(LSD(stats_dma));
3218 mc.mb[3] = LSW(LSD(stats_dma));
3219 mc.mb[6] = MSW(MSD(stats_dma));
3220 mc.mb[7] = LSW(MSD(stats_dma));
3222 mc.mb[9] = vha->vp_idx;
3223 mc.mb[10] = options;
3225 rval = qla24xx_send_mb_cmd(vha, &mc);
/* mcp aliases mc, so mcp->mb[] reflects the completed command. */
3227 if (rval == QLA_SUCCESS) {
3228 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3229 ql_dbg(ql_dbg_mbx, vha, 0x1089,
3230 "Failed mb[0]=%x.\n", mcp->mb[0]);
3231 rval = QLA_FUNCTION_FAILED;
3233 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3234 "Done %s.\n", __func__);
3235 /* Re-endianize - firmware data is le32. */
3236 for ( ; dwords--; iter++)
3241 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
/* Abort an outstanding SRB by locating its handle in the queue-pair's
 * outstanding-commands table and issuing an ABORT IOCB for it. */
3248 qla24xx_abort_command(srb_t *sp)
3251 unsigned long flags = 0;
3253 struct abort_entry_24xx *abt;
3256 fc_port_t *fcport = sp->fcport;
3257 struct scsi_qla_host *vha = fcport->vha;
3258 struct qla_hw_data *ha = vha->hw;
3259 struct req_que *req;
3260 struct qla_qpair *qpair = sp->qpair;
3262 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3263 "Entered %s.\n", __func__);
3266 req = sp->qpair->req;
3268 return QLA_ERR_NO_QPAIR;
/* Module parameter: prefer the asynchronous abort path when enabled. */
3270 if (ql2xasynctmfenable)
3271 return qla24xx_async_abort_command(sp);
/* Search the outstanding-commands table under the qpair lock to find
 * the firmware handle belonging to this SRB. */
3273 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3274 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3275 if (req->outstanding_cmds[handle] == sp)
3278 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3279 if (handle == req->num_outstanding_cmds) {
3280 /* Command not found. */
3281 return QLA_ERR_NOT_FOUND;
3284 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3286 ql_log(ql_log_warn, vha, 0x108d,
3287 "Failed to allocate abort IOCB.\n");
3288 return QLA_MEMORY_ALLOC_FAILED;
3291 abt->entry_type = ABORT_IOCB_TYPE;
3292 abt->entry_count = 1;
3293 abt->handle = make_handle(req->id, abt->handle);
3294 abt->nport_handle = cpu_to_le16(fcport->loop_id);
3295 abt->handle_to_abort = make_handle(req->id, handle);
3296 abt->port_id[0] = fcport->d_id.b.al_pa;
3297 abt->port_id[1] = fcport->d_id.b.area;
3298 abt->port_id[2] = fcport->d_id.b.domain;
3299 abt->vp_index = fcport->vha->vp_idx;
3301 abt->req_que_no = cpu_to_le16(req->id);
3302 /* Need to pass original sp */
3303 qla_nvme_abort_set_option(abt, sp);
3305 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3306 if (rval != QLA_SUCCESS) {
3307 ql_dbg(ql_dbg_mbx, vha, 0x108e,
3308 "Failed to issue IOCB (%x).\n", rval);
3309 } else if (abt->entry_status != 0) {
3310 ql_dbg(ql_dbg_mbx, vha, 0x108f,
3311 "Failed to complete IOCB -- error status (%x).\n",
3313 rval = QLA_FUNCTION_FAILED;
/* On completion the firmware overwrites nport_handle with the
 * completion status; 0 means the abort completed cleanly. */
3314 } else if (abt->nport_handle != cpu_to_le16(0)) {
3315 ql_dbg(ql_dbg_mbx, vha, 0x1090,
3316 "Failed to complete IOCB -- completion status (%x).\n",
3317 le16_to_cpu(abt->nport_handle));
3318 if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
3319 rval = QLA_FUNCTION_PARAMETER_ERROR;
3321 rval = QLA_FUNCTION_FAILED;
3323 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3324 "Done %s.\n", __func__);
3326 if (rval == QLA_SUCCESS)
3327 qla_nvme_abort_process_comp_status(abt, sp);
3329 qla_wait_nvme_release_cmd_kref(sp);
3331 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
/* Overlay of the outbound TMF IOCB and the inbound status IOCB -- the
 * same DMA buffer is reused for both directions (accessed as tsk->p.tsk
 * and the status entry by __qla24xx_issue_tmf below). */
3336 struct tsk_mgmt_cmd {
3338 struct tsk_mgmt_entry tsk;
3339 struct sts_entry_24xx sts;
/* Issue a task-management function (target reset or LUN reset) as an
 * ISP24xx TMF IOCB, validate the returned status entry, then send a
 * marker IOCB so the firmware resynchronizes the affected ID/LUN. */
3344 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3345 uint64_t l, int tag)
3348 struct tsk_mgmt_cmd *tsk;
3349 struct sts_entry_24xx *sts;
3351 scsi_qla_host_t *vha;
3352 struct qla_hw_data *ha;
3353 struct req_que *req;
3354 struct qla_qpair *qpair;
3360 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3361 "Entered %s.\n", __func__);
3363 if (vha->vp_idx && vha->qpair) {
3369 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3371 ql_log(ql_log_warn, vha, 0x1093,
3372 "Failed to allocate task management IOCB.\n");
3373 return QLA_MEMORY_ALLOC_FAILED;
3376 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3377 tsk->p.tsk.entry_count = 1;
3378 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle);
3379 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
/* Timeout in 100ms units: twice the resource-allocation timeout. */
3380 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3381 tsk->p.tsk.control_flags = cpu_to_le32(type);
3382 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3383 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3384 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3385 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
/* Only LUN reset addresses a specific LUN; swap to FCP wire order. */
3386 if (type == TCF_LUN_RESET) {
3387 int_to_scsilun(l, &tsk->p.tsk.lun);
3388 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3389 sizeof(tsk->p.tsk.lun));
3393 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3394 if (rval != QLA_SUCCESS) {
3395 ql_dbg(ql_dbg_mbx, vha, 0x1094,
3396 "Failed to issue %s reset IOCB (%x).\n", name, rval);
3397 } else if (sts->entry_status != 0) {
3398 ql_dbg(ql_dbg_mbx, vha, 0x1095,
3399 "Failed to complete IOCB -- error status (%x).\n",
3401 rval = QLA_FUNCTION_FAILED;
3402 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3403 ql_dbg(ql_dbg_mbx, vha, 0x1096,
3404 "Failed to complete IOCB -- completion status (%x).\n",
3405 le16_to_cpu(sts->comp_status));
3406 rval = QLA_FUNCTION_FAILED;
/* A valid FCP response-info field byte 3 carries the TMF response code. */
3407 } else if (le16_to_cpu(sts->scsi_status) &
3408 SS_RESPONSE_INFO_LEN_VALID) {
3409 if (le32_to_cpu(sts->rsp_data_len) < 4) {
3410 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3411 "Ignoring inconsistent data length -- not enough "
3412 "response info (%d).\n",
3413 le32_to_cpu(sts->rsp_data_len));
3414 } else if (sts->data[3]) {
3415 ql_dbg(ql_dbg_mbx, vha, 0x1098,
3416 "Failed to complete IOCB -- response (%x).\n",
3418 rval = QLA_FUNCTION_FAILED;
3422 /* Issue marker IOCB. */
3423 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
3424 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
3425 if (rval2 != QLA_SUCCESS) {
3426 ql_dbg(ql_dbg_mbx, vha, 0x1099,
3427 "Failed to issue marker IOCB (%x).\n", rval2);
3429 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3430 "Done %s.\n", __func__);
3433 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
/* Target reset entry point: use the async TMF path when enabled and the
 * hardware is FWI2-capable, else fall back to the synchronous IOCB. */
3439 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3441 struct qla_hw_data *ha = fcport->vha->hw;
3443 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3444 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3446 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
/* LUN reset entry point: mirrors qla24xx_abort_target() but issues a
 * TCF_LUN_RESET task-management function for the given LUN. */
3450 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3452 struct qla_hw_data *ha = fcport->vha->hw;
3454 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3455 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3457 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
/* Ask the firmware to generate a system error (for diagnostics); only
 * supported on 23xx and FWI2-capable adapters. */
3461 qla2x00_system_error(scsi_qla_host_t *vha)
3465 mbx_cmd_t *mcp = &mc;
3466 struct qla_hw_data *ha = vha->hw;
3468 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3469 return QLA_FUNCTION_FAILED;
3471 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3472 "Entered %s.\n", __func__);
3474 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3475 mcp->out_mb = MBX_0;
3479 rval = qla2x00_mailbox_command(vha, mcp);
3481 if (rval != QLA_SUCCESS) {
3482 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3484 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3485 "Done %s.\n", __func__);
/* Write one SerDes register word; supported on 25xx/2031/27xx/28xx. */
3492 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3496 mbx_cmd_t *mcp = &mc;
3498 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3499 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3500 return QLA_FUNCTION_FAILED;
3502 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3503 "Entered %s.\n", __func__);
3505 mcp->mb[0] = MBC_WRITE_SERDES;
/* 2031 SerDes registers are 8 bits wide -- mask the data down. */
3507 if (IS_QLA2031(vha->hw))
3508 mcp->mb[2] = data & 0xff;
3513 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3515 mcp->tov = MBX_TOV_SECONDS;
3517 rval = qla2x00_mailbox_command(vha, mcp);
3519 if (rval != QLA_SUCCESS) {
3520 ql_dbg(ql_dbg_mbx, vha, 0x1183,
3521 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3523 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3524 "Done %s.\n", __func__);
/* Read one SerDes register word; counterpart of
 * qla2x00_write_serdes_word() with the same chip gating. */
3531 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3535 mbx_cmd_t *mcp = &mc;
3537 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3538 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3539 return QLA_FUNCTION_FAILED;
3541 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3542 "Entered %s.\n", __func__);
3544 mcp->mb[0] = MBC_READ_SERDES;
3547 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3548 mcp->in_mb = MBX_1|MBX_0;
3549 mcp->tov = MBX_TOV_SECONDS;
3551 rval = qla2x00_mailbox_command(vha, mcp);
/* 2031 returns an 8-bit register value in the low byte of mb[1]. */
3553 if (IS_QLA2031(vha->hw))
3554 *data = mcp->mb[1] & 0xff;
3558 if (rval != QLA_SUCCESS) {
3559 ql_dbg(ql_dbg_mbx, vha, 0x1186,
3560 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3562 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3563 "Done %s.\n", __func__);
/* Write a 32-bit SerDes register on ISP8044 via the combined
 * set/get-ethernet-SerDes mailbox command (32-bit addr and data are
 * split LSW/MSW across mb[3..6]). */
3570 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3574 mbx_cmd_t *mcp = &mc;
3576 if (!IS_QLA8044(vha->hw))
3577 return QLA_FUNCTION_FAILED;
3579 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3580 "Entered %s.\n", __func__);
3582 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3583 mcp->mb[1] = HCS_WRITE_SERDES;
3584 mcp->mb[3] = LSW(addr);
3585 mcp->mb[4] = MSW(addr);
3586 mcp->mb[5] = LSW(data);
3587 mcp->mb[6] = MSW(data);
3588 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3590 mcp->tov = MBX_TOV_SECONDS;
3592 rval = qla2x00_mailbox_command(vha, mcp);
3594 if (rval != QLA_SUCCESS) {
3595 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3596 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3598 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3599 "Done %s.\n", __func__);
/* Read a 32-bit SerDes register on ISP8044; the value comes back split
 * across mb[2] (MSW) and mb[1] (LSW). */
3606 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3610 mbx_cmd_t *mcp = &mc;
3612 if (!IS_QLA8044(vha->hw))
3613 return QLA_FUNCTION_FAILED;
3615 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3616 "Entered %s.\n", __func__);
3618 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3619 mcp->mb[1] = HCS_READ_SERDES;
3620 mcp->mb[3] = LSW(addr);
3621 mcp->mb[4] = MSW(addr);
3622 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3623 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3624 mcp->tov = MBX_TOV_SECONDS;
3626 rval = qla2x00_mailbox_command(vha, mcp);
/* NOTE(review): *data is assembled even on failure -- callers appear
 * expected to check the return value before trusting it. */
3628 *data = mcp->mb[2] << 16 | mcp->mb[1];
3630 if (rval != QLA_SUCCESS) {
3631 ql_dbg(ql_dbg_mbx, vha, 0x118a,
3632 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3634 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3635 "Done %s.\n", __func__);
3642 * qla2x00_set_serdes_params() -
3644 * @sw_em_1g: serial link options
3645 * @sw_em_2g: serial link options
3646 * @sw_em_4g: serial link options
3651 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3652 uint16_t sw_em_2g, uint16_t sw_em_4g)
3656 mbx_cmd_t *mcp = &mc;
3658 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3659 "Entered %s.\n", __func__);
3661 mcp->mb[0] = MBC_SERDES_PARAMS;
/* BIT_15 flags each per-rate emphasis value as "valid/override". */
3663 mcp->mb[2] = sw_em_1g | BIT_15;
3664 mcp->mb[3] = sw_em_2g | BIT_15;
3665 mcp->mb[4] = sw_em_4g | BIT_15;
3666 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3668 mcp->tov = MBX_TOV_SECONDS;
3670 rval = qla2x00_mailbox_command(vha, mcp);
3672 if (rval != QLA_SUCCESS) {
3674 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3675 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3678 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3679 "Done %s.\n", __func__);
/* Halt the firmware on FWI2-capable adapters. A MBS_INVALID_COMMAND
 * reply is mapped to QLA_INVALID_COMMAND so callers can distinguish
 * "firmware does not support stop" from a genuine failure. */
3686 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3690 mbx_cmd_t *mcp = &mc;
3692 if (!IS_FWI2_CAPABLE(vha->hw))
3693 return QLA_FUNCTION_FAILED;
3695 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3696 "Entered %s.\n", __func__);
3698 mcp->mb[0] = MBC_STOP_FIRMWARE;
3700 mcp->out_mb = MBX_1|MBX_0;
3704 rval = qla2x00_mailbox_command(vha, mcp);
3706 if (rval != QLA_SUCCESS) {
3707 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3708 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3709 rval = QLA_INVALID_COMMAND;
3711 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3712 "Done %s.\n", __func__);
/* Enable Extended Firmware Trace (EFT) into a pre-allocated DMA buffer
 * via the TRACE_CONTROL mailbox command. */
3719 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3724 mbx_cmd_t *mcp = &mc;
3726 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3727 "Entered %s.\n", __func__);
3729 if (!IS_FWI2_CAPABLE(vha->hw))
3730 return QLA_FUNCTION_FAILED;
/* Skip mailbox traffic entirely if the PCI channel is in EEH recovery. */
3732 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3733 return QLA_FUNCTION_FAILED;
3735 mcp->mb[0] = MBC_TRACE_CONTROL;
3736 mcp->mb[1] = TC_EFT_ENABLE;
3737 mcp->mb[2] = LSW(eft_dma);
3738 mcp->mb[3] = MSW(eft_dma);
3739 mcp->mb[4] = LSW(MSD(eft_dma));
3740 mcp->mb[5] = MSW(MSD(eft_dma));
3741 mcp->mb[6] = buffers;
3742 mcp->mb[7] = TC_AEN_DISABLE;
3743 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3744 mcp->in_mb = MBX_1|MBX_0;
3745 mcp->tov = MBX_TOV_SECONDS;
3747 rval = qla2x00_mailbox_command(vha, mcp);
3748 if (rval != QLA_SUCCESS) {
3749 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3750 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3751 rval, mcp->mb[0], mcp->mb[1]);
3753 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3754 "Done %s.\n", __func__);
/* Disable Extended Firmware Trace; counterpart of
 * qla2x00_enable_eft_trace() with the same capability/EEH guards. */
3761 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3765 mbx_cmd_t *mcp = &mc;
3767 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3768 "Entered %s.\n", __func__);
3770 if (!IS_FWI2_CAPABLE(vha->hw))
3771 return QLA_FUNCTION_FAILED;
3773 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3774 return QLA_FUNCTION_FAILED;
3776 mcp->mb[0] = MBC_TRACE_CONTROL;
3777 mcp->mb[1] = TC_EFT_DISABLE;
3778 mcp->out_mb = MBX_1|MBX_0;
3779 mcp->in_mb = MBX_1|MBX_0;
3780 mcp->tov = MBX_TOV_SECONDS;
3782 rval = qla2x00_mailbox_command(vha, mcp);
3783 if (rval != QLA_SUCCESS) {
3784 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3785 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3786 rval, mcp->mb[0], mcp->mb[1]);
3788 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3789 "Done %s.\n", __func__);
/* Enable Fibre Channel Event (FCE) tracing into a DMA buffer; on
 * success the first 8 result mailboxes are copied back to the caller. */
3796 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3797 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3801 mbx_cmd_t *mcp = &mc;
3803 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3804 "Entered %s.\n", __func__);
/* FCE is only available on 25xx/81xx/83xx/27xx/28xx parts. */
3806 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3807 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
3808 !IS_QLA28XX(vha->hw))
3809 return QLA_FUNCTION_FAILED;
3811 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3812 return QLA_FUNCTION_FAILED;
3814 mcp->mb[0] = MBC_TRACE_CONTROL;
3815 mcp->mb[1] = TC_FCE_ENABLE;
3816 mcp->mb[2] = LSW(fce_dma);
3817 mcp->mb[3] = MSW(fce_dma);
3818 mcp->mb[4] = LSW(MSD(fce_dma));
3819 mcp->mb[5] = MSW(MSD(fce_dma));
3820 mcp->mb[6] = buffers;
3821 mcp->mb[7] = TC_AEN_DISABLE;
3823 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3824 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3825 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3827 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3828 mcp->tov = MBX_TOV_SECONDS;
3830 rval = qla2x00_mailbox_command(vha, mcp);
3831 if (rval != QLA_SUCCESS) {
3832 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3833 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3834 rval, mcp->mb[0], mcp->mb[1]);
3836 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3837 "Done %s.\n", __func__);
3840 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
/* Disable FCE tracing and return the firmware write (*wr) and read
 * (*rd) trace pointers, each reassembled from four 16-bit mailboxes. */
3849 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3853 mbx_cmd_t *mcp = &mc;
3855 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3856 "Entered %s.\n", __func__);
3858 if (!IS_FWI2_CAPABLE(vha->hw))
3859 return QLA_FUNCTION_FAILED;
3861 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3862 return QLA_FUNCTION_FAILED;
3864 mcp->mb[0] = MBC_TRACE_CONTROL;
3865 mcp->mb[1] = TC_FCE_DISABLE;
3866 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3867 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3868 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3870 mcp->tov = MBX_TOV_SECONDS;
3872 rval = qla2x00_mailbox_command(vha, mcp);
3873 if (rval != QLA_SUCCESS) {
3874 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3875 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3876 rval, mcp->mb[0], mcp->mb[1]);
3878 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3879 "Done %s.\n", __func__);
/* Write pointer in mb[2..5], read pointer in mb[6..9] (LSW first). */
3882 *wr = (uint64_t) mcp->mb[5] << 48 |
3883 (uint64_t) mcp->mb[4] << 32 |
3884 (uint64_t) mcp->mb[3] << 16 |
3885 (uint64_t) mcp->mb[2];
3887 *rd = (uint64_t) mcp->mb[9] << 48 |
3888 (uint64_t) mcp->mb[8] << 32 |
3889 (uint64_t) mcp->mb[7] << 16 |
3890 (uint64_t) mcp->mb[6];
/* Query the iIDMA (intelligent interleaved DMA) port speed for a
 * fabric port; mb[2]/mb[3] cleared on output selects "get" mode. */
3897 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3898 uint16_t *port_speed, uint16_t *mb)
3902 mbx_cmd_t *mcp = &mc;
3904 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3905 "Entered %s.\n", __func__);
3907 if (!IS_IIDMA_CAPABLE(vha->hw))
3908 return QLA_FUNCTION_FAILED;
3910 mcp->mb[0] = MBC_PORT_PARAMS;
3911 mcp->mb[1] = loop_id;
3912 mcp->mb[2] = mcp->mb[3] = 0;
3913 mcp->mb[9] = vha->vp_idx;
3914 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3915 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3916 mcp->tov = MBX_TOV_SECONDS;
3918 rval = qla2x00_mailbox_command(vha, mcp);
3920 /* Return mailbox statuses. */
3927 if (rval != QLA_SUCCESS) {
3928 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3930 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3931 "Done %s.\n", __func__);
/* Current port speed is reported in mb[3]. */
3933 *port_speed = mcp->mb[3];
/* Set the iIDMA port speed for a fabric port; the speed code is masked
 * to the 6 bits the PORT_PARAMS command defines. */
3940 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3941 uint16_t port_speed, uint16_t *mb)
3945 mbx_cmd_t *mcp = &mc;
3947 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3948 "Entered %s.\n", __func__);
3950 if (!IS_IIDMA_CAPABLE(vha->hw))
3951 return QLA_FUNCTION_FAILED;
3953 mcp->mb[0] = MBC_PORT_PARAMS;
3954 mcp->mb[1] = loop_id;
3956 mcp->mb[3] = port_speed & 0x3F;
3957 mcp->mb[9] = vha->vp_idx;
3958 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3959 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3960 mcp->tov = MBX_TOV_SECONDS;
3962 rval = qla2x00_mailbox_command(vha, mcp);
3964 /* Return mailbox statuses. */
3971 if (rval != QLA_SUCCESS) {
3972 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3973 "Failed=%x.\n", rval);
3975 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3976 "Done %s.\n", __func__);
/*
 * qla24xx_report_id_acquisition() - process a Report-ID-Acquisition (RIDA)
 * IOCB delivered by the firmware on the response queue.
 *
 * @vha:         adapter (base physical port) context
 * @rptid_entry: RIDA IOCB payload from firmware
 *
 * Dispatches on rptid_entry->format:
 *   0 - loop (NL) topology: record topology and update the host map.
 *   1 - fabric / point-to-point: decode TOPO bits, handle N2N WWPN
 *       comparison, FA-WWN adoption and VP-ID acquisition for vports.
 *   2 - N2N direct connect: adopt firmware-assigned port IDs and
 *       schedule session teardown/relogin as needed.
 *
 * Runs in response-queue (interrupt) context, so heavier reconfiguration
 * is deferred to the DPC thread via dpc_flags bits.
 */
3983 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3984 struct vp_rpt_id_entry_24xx *rptid_entry)
3986 struct qla_hw_data *ha = vha->hw;
3987 scsi_qla_host_t *vp = NULL;
3988 unsigned long flags;
3991 struct fc_port *fcport;
3993 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3994 "Entered %s.\n", __func__);
/* Bail out early on a bad IOCB (error path body elided in this view). */
3996 if (rptid_entry->entry_status != 0)
/* Firmware reports the 24-bit port ID little-endian byte-wise. */
3999 id.b.domain = rptid_entry->port_id[2];
4000 id.b.area = rptid_entry->port_id[1];
4001 id.b.al_pa = rptid_entry->port_id[0];
4003 ha->flags.n2n_ae = 0;
4005 if (rptid_entry->format == 0) {
/* Format 0: arbitrated-loop topology. */
4007 ql_dbg(ql_dbg_async, vha, 0x10b7,
4008 "Format 0 : Number of VPs setup %d, number of "
4009 "VPs acquired %d.\n", rptid_entry->vp_setup,
4010 rptid_entry->vp_acquired);
4011 ql_dbg(ql_dbg_async, vha, 0x10b8,
4012 "Primary port id %02x%02x%02x.\n",
4013 rptid_entry->port_id[2], rptid_entry->port_id[1],
4014 rptid_entry->port_id[0]);
4015 ha->current_topology = ISP_CFG_NL;
4016 qla_update_host_map(vha, id);
4018 } else if (rptid_entry->format == 1) {
/* Format 1: VP enabled - fabric or point-to-point. */
4020 ql_dbg(ql_dbg_async, vha, 0x10b9,
4021 "Format 1: VP[%d] enabled - status %d - with "
4022 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
4023 rptid_entry->vp_status,
4024 rptid_entry->port_id[2], rptid_entry->port_id[1],
4025 rptid_entry->port_id[0]);
4026 ql_dbg(ql_dbg_async, vha, 0x5075,
4027 "Format 1: Remote WWPN %8phC.\n",
4028 rptid_entry->u.f1.port_name);
4030 ql_dbg(ql_dbg_async, vha, 0x5075,
4031 "Format 1: WWPN %8phC.\n",
/* Topology bits select N2N vs FL vs F handling below. */
4034 switch (rptid_entry->u.f1.flags & TOPO_MASK) {
4036 ha->current_topology = ISP_CFG_N;
/* Mark all known ports for rescan while deciding N2N roles. */
4037 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4038 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4039 fcport->scan_state = QLA_FCPORT_SCAN;
4040 fcport->n2n_flag = 0;
/*
 * N2N role election: the port with the numerically larger WWPN
 * claims al_pa 1 and initiates login ("bigger" side).
 */
4043 if (wwn_to_u64(vha->port_name) >
4044 wwn_to_u64(rptid_entry->u.f1.port_name)) {
4046 vha->d_id.b.al_pa = 1;
4047 ha->flags.n2n_bigger = 1;
4050 ql_dbg(ql_dbg_async, vha, 0x5075,
4051 "Format 1: assign local id %x remote id %x\n",
4052 vha->d_id.b24, id.b24);
4054 ql_dbg(ql_dbg_async, vha, 0x5075,
4055 "Format 1: Remote login - Waiting for WWPN %8phC.\n",
4056 rptid_entry->u.f1.port_name);
4057 ha->flags.n2n_bigger = 0;
4060 fcport = qla2x00_find_fcport_by_wwpn(vha,
4061 rptid_entry->u.f1.port_name, 1);
4062 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
/* Known peer: refresh its N2N login bookkeeping. */
4066 fcport->plogi_nack_done_deadline = jiffies + HZ;
4067 fcport->dm_login_expire = jiffies +
4068 QLA_N2N_WAIT_TIME * HZ;
4069 fcport->scan_state = QLA_FCPORT_FOUND;
4070 fcport->n2n_flag = 1;
4071 fcport->keep_nport_handle = 1;
4072 fcport->login_retry = vha->hw->login_retry_count;
4073 fcport->fc4_type = FS_FC4TYPE_FCP;
4074 if (vha->flags.nvme_enabled)
4075 fcport->fc4_type |= FS_FC4TYPE_NVME;
4077 if (wwn_to_u64(vha->port_name) >
4078 wwn_to_u64(fcport->port_name)) {
/* We initiate: act based on the peer's discovery state. */
4082 switch (fcport->disc_state) {
4084 set_bit(RELOGIN_NEEDED,
4087 case DSC_DELETE_PEND:
4090 qlt_schedule_sess_for_deletion(fcport);
/* Unknown peer: queue creation of a new session in DPC context. */
4094 qla24xx_post_newsess_work(vha, &id,
4095 rptid_entry->u.f1.port_name,
4096 rptid_entry->u.f1.node_name,
4101 /* if our portname is higher then initiate N2N login */
4103 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
4106 ha->current_topology = ISP_CFG_FL;
4109 ha->current_topology = ISP_CFG_F;
4115 ha->flags.gpsc_supported = 1;
4116 ha->current_topology = ISP_CFG_F;
4117 /* buffer to buffer credit flag */
4118 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
4120 if (rptid_entry->vp_idx == 0) {
4121 if (rptid_entry->vp_status == VP_STAT_COMPL) {
4122 /* FA-WWN is only for physical port */
4123 if (qla_ini_mode_enabled(vha) &&
4124 ha->flags.fawwpn_enabled &&
4125 (rptid_entry->u.f1.flags &
/* Adopt the fabric-assigned WWPN on the physical port. */
4127 memcpy(vha->port_name,
4128 rptid_entry->u.f1.port_name,
4132 qla_update_host_map(vha, id);
/* Re-register FC4 types and FDMI after an ID change. */
4135 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
4136 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4138 if (rptid_entry->vp_status != VP_STAT_COMPL &&
4139 rptid_entry->vp_status != VP_STAT_ID_CHG) {
4140 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
4141 "Could not acquire ID for VP[%d].\n",
4142 rptid_entry->vp_idx);
/* Locate the virtual port matching the reported VP index. */
4147 spin_lock_irqsave(&ha->vport_slock, flags);
4148 list_for_each_entry(vp, &ha->vp_list, list) {
4149 if (rptid_entry->vp_idx == vp->vp_idx) {
4154 spin_unlock_irqrestore(&ha->vport_slock, flags);
4159 qla_update_host_map(vp, id);
4162 * Cannot configure here as we are still sitting on the
4163 * response queue. Handle it in dpc context.
4165 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
4166 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
4167 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
4169 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
4170 qla2xxx_wake_dpc(vha);
4171 } else if (rptid_entry->format == 2) {
/* Format 2: N2N direct connect - firmware assigned both port IDs. */
4172 ql_dbg(ql_dbg_async, vha, 0x505f,
4173 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
4174 rptid_entry->port_id[2], rptid_entry->port_id[1],
4175 rptid_entry->port_id[0]);
4177 ql_dbg(ql_dbg_async, vha, 0x5075,
4178 "N2N: Remote WWPN %8phC.\n",
4179 rptid_entry->u.f2.port_name);
4181 /* N2N. direct connect */
4182 ha->current_topology = ISP_CFG_N;
4183 ha->flags.rida_fmt2 = 1;
4184 vha->d_id.b.domain = rptid_entry->port_id[2];
4185 vha->d_id.b.area = rptid_entry->port_id[1];
4186 vha->d_id.b.al_pa = rptid_entry->port_id[0];
4188 ha->flags.n2n_ae = 1;
4189 spin_lock_irqsave(&ha->vport_slock, flags);
4190 qla_update_vp_map(vha, SET_AL_PA);
4191 spin_unlock_irqrestore(&ha->vport_slock, flags);
4193 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4194 fcport->scan_state = QLA_FCPORT_SCAN;
4195 fcport->n2n_flag = 0;
4198 fcport = qla2x00_find_fcport_by_wwpn(vha,
4199 rptid_entry->u.f2.port_name, 1);
/* Known peer: take the remote N-port ID supplied by firmware. */
4202 fcport->login_retry = vha->hw->login_retry_count;
4203 fcport->plogi_nack_done_deadline = jiffies + HZ;
4204 fcport->scan_state = QLA_FCPORT_FOUND;
4205 fcport->keep_nport_handle = 1;
4206 fcport->n2n_flag = 1;
4207 fcport->d_id.b.domain =
4208 rptid_entry->u.f2.remote_nport_id[2];
4209 fcport->d_id.b.area =
4210 rptid_entry->u.f2.remote_nport_id[1];
4211 fcport->d_id.b.al_pa =
4212 rptid_entry->u.f2.remote_nport_id[0];
4215 * For the case where remote port sending PRLO, FW
4216 * sends up RIDA Format 2 as an indication of session
4217 * loss. In other word, FW state change from PRLI
4218 * complete back to PLOGI complete. Delete the
4219 * session and let relogin drive the reconnect.
4221 if (atomic_read(&fcport->state) == FCS_ONLINE)
4222 qlt_schedule_sess_for_deletion(fcport);
4228 * qla24xx_modify_vp_config
4229 * Change VP configuration for vha
4232 * vha = adapter block pointer.
4235 * qla2xxx local function return status code.
/*
 * qla24xx_modify_vp_config() - issue a VP Config IOCB to update the
 * node/port names and options of virtual port @vha.
 *
 * Allocates the IOCB from the adapter's DMA pool, fills it in (including
 * target-mode adjustments via qlt_modify_vp_config()), submits it through
 * the base physical port, and checks the completion status.
 *
 * Return: QLA_SUCCESS, QLA_MEMORY_ALLOC_FAILED, or QLA_FUNCTION_FAILED.
 */
4241 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4244 struct vp_config_entry_24xx *vpmod;
4245 dma_addr_t vpmod_dma;
4246 struct qla_hw_data *ha = vha->hw;
4247 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4249 /* This can be called by the parent */
4251 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4252 "Entered %s.\n", __func__);
4254 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4256 ql_log(ql_log_warn, vha, 0x10bc,
4257 "Failed to allocate modify VP IOCB.\n");
4258 return QLA_MEMORY_ALLOC_FAILED;
/* Build the Modify-VP IOCB for this single VP index. */
4261 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4262 vpmod->entry_count = 1;
4263 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4264 vpmod->vp_count = 1;
4265 vpmod->vp_index1 = vha->vp_idx;
4266 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
/* Let target mode adjust options before the names are copied in. */
4268 qlt_modify_vp_config(vha, vpmod);
4270 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4271 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4272 vpmod->entry_count = 1;
4274 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4275 if (rval != QLA_SUCCESS) {
4276 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4277 "Failed to issue VP config IOCB (%x).\n", rval);
4278 } else if (vpmod->comp_status != 0) {
4279 ql_dbg(ql_dbg_mbx, vha, 0x10be,
4280 "Failed to complete IOCB -- error status (%x).\n",
4281 vpmod->comp_status);
4282 rval = QLA_FUNCTION_FAILED;
/*
 * NOTE(review): with CS_COMPLETE == 0 this branch appears unreachable
 * (the "!= 0" check above already caught non-zero status) -- confirm
 * against the CS_* definitions before relying on it.
 */
4283 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4284 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4285 "Failed to complete IOCB -- completion status (%x).\n",
4286 le16_to_cpu(vpmod->comp_status));
4287 rval = QLA_FUNCTION_FAILED;
4290 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4291 "Done %s.\n", __func__);
4292 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4294 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4300 * qla2x00_send_change_request
4301 * Receive or disable RSCN request from fabric controller
4304 * ha = adapter block pointer
4305 * format = registration format:
4307 * 1 - Fabric detected registration
4308 * 2 - N_port detected registration
4309 * 3 - Full registration
4310 * FF - clear registration
4311 * vp_idx = Virtual port index
4314 * qla2x00 local function return status code.
/*
 * qla2x00_send_change_request() - register/deregister for RSCN
 * notifications with the fabric controller (MBC_SEND_CHANGE_REQUEST).
 *
 * @vha:    adapter context
 * @format: registration format (see header comment above; 0xFF clears)
 * @vp_idx: virtual port index (mb[9])
 *
 * Return: qla2x00 mailbox status code (tail of function elided in view).
 */
4321 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4326 mbx_cmd_t *mcp = &mc;
4328 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4329 "Entered %s.\n", __func__);
4331 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4332 mcp->mb[1] = format;
4333 mcp->mb[9] = vp_idx;
4334 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4335 mcp->in_mb = MBX_0|MBX_1;
4336 mcp->tov = MBX_TOV_SECONDS;
4338 rval = qla2x00_mailbox_command(vha, mcp);
4340 if (rval == QLA_SUCCESS) {
/* A successful mailbox exchange can still report command failure. */
4341 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
/*
 * qla2x00_dump_ram() - dump RISC RAM to a host DMA buffer.
 *
 * @vha:      adapter context
 * @req_dma:  DMA address of the destination buffer
 * @addr:     RISC RAM start address (word address)
 * @size:     transfer size (units per ISP generation; split MSW/LSW on FWI2)
 *
 * Uses MBC_DUMP_RISC_RAM_EXTENDED when the address needs >16 bits or the
 * ISP is FWI2-capable, otherwise the legacy MBC_DUMP_RISC_RAM.
 */
4351 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4356 mbx_cmd_t *mcp = &mc;
4358 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4359 "Entered %s.\n", __func__);
4361 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4362 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4363 mcp->mb[8] = MSW(addr);
4365 mcp->out_mb = MBX_10|MBX_8|MBX_0;
4367 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4368 mcp->out_mb = MBX_0;
/* Common: low address word plus 64-bit DMA address in mb[2,3,6,7]. */
4370 mcp->mb[1] = LSW(addr);
4371 mcp->mb[2] = MSW(req_dma);
4372 mcp->mb[3] = LSW(req_dma);
4373 mcp->mb[6] = MSW(MSD(req_dma));
4374 mcp->mb[7] = LSW(MSD(req_dma));
4375 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4376 if (IS_FWI2_CAPABLE(vha->hw)) {
/* FWI2 takes a 32-bit size split across mb[4]/mb[5]. */
4377 mcp->mb[4] = MSW(size);
4378 mcp->mb[5] = LSW(size);
4379 mcp->out_mb |= MBX_5|MBX_4;
4381 mcp->mb[4] = LSW(size);
4382 mcp->out_mb |= MBX_4;
4386 mcp->tov = MBX_TOV_SECONDS;
4388 rval = qla2x00_mailbox_command(vha, mcp);
4390 if (rval != QLA_SUCCESS) {
4391 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4392 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4394 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4395 "Done %s.\n", __func__);
4400 /* 84XX Support **************************************************************/
/*
 * DMA-able container for CS84xx Verify-Chip management: request and
 * response share the same buffer (overlaid -- union wrapper elided in
 * this view).
 */
4402 struct cs84xx_mgmt_cmd {
4404 struct verify_chip_entry_84xx req;
4405 struct verify_chip_rsp_84xx rsp;
/*
 * qla84xx_verify_chip() - issue a Verify-Chip IOCB to the CS84xx
 * (Menlo) device, optionally updating its operational firmware.
 *
 * @vha:    adapter context
 * @status: out; status[0] = completion status, status[1] = failure code
 *          when status[0] == CS_VCS_CHIP_FAILURE.
 *
 * On a failed update the options are retried without VCO_FORCE_UPDATE
 * (retry loop structure elided in this view). On success the new OP
 * firmware version is cached under cs84xx->access_lock.
 */
4410 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4413 struct cs84xx_mgmt_cmd *mn;
4416 unsigned long flags;
4417 struct qla_hw_data *ha = vha->hw;
4419 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4420 "Entered %s.\n", __func__);
4422 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4424 return QLA_MEMORY_ALLOC_FAILED;
4428 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4429 /* Diagnostic firmware? */
4430 /* options |= MENLO_DIAG_FW; */
4431 /* We update the firmware with only one data sequence. */
4432 options |= VCO_END_OF_DATA;
4436 memset(mn, 0, sizeof(*mn));
4437 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4438 mn->p.req.entry_count = 1;
4439 mn->p.req.options = cpu_to_le16(options);
4441 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4442 "Dump of Verify Request.\n");
4443 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
/* 120s timeout: chip verify/update is much slower than normal IOCBs. */
4446 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4447 if (rval != QLA_SUCCESS) {
4448 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4449 "Failed to issue verify IOCB (%x).\n", rval);
4453 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4454 "Dump of Verify Response.\n");
4455 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4458 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4459 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4460 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4461 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4462 "cs=%x fc=%x.\n", status[0], status[1]);
4464 if (status[0] != CS_COMPLETE) {
4465 rval = QLA_FUNCTION_FAILED;
4466 if (!(options & VCO_DONT_UPDATE_FW)) {
4467 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4468 "Firmware update failed. Retrying "
4469 "without update firmware.\n");
4470 options |= VCO_DONT_UPDATE_FW;
4471 options &= ~VCO_FORCE_UPDATE;
4475 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4476 "Firmware updated to %x.\n",
4477 le32_to_cpu(mn->p.rsp.fw_ver));
4479 /* NOTE: we only update OP firmware. */
4480 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4481 ha->cs84xx->op_fw_version =
4482 le32_to_cpu(mn->p.rsp.fw_ver);
4483 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4489 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4491 if (rval != QLA_SUCCESS) {
4492 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4493 "Failed=%x.\n", rval);
4495 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4496 "Done %s.\n", __func__);
/*
 * qla25xx_init_req_que() - initialize a multiqueue request queue in
 * firmware via MBC_INITIALIZE_MULTIQ.
 *
 * @vha: adapter context
 * @req: request queue to register (DMA base, length, id, options...)
 *
 * Enables shadow registers where supported (BIT_13) and zeroes the
 * hardware in/out pointers before handing the queue to firmware.
 */
4503 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4506 unsigned long flags;
4508 mbx_cmd_t *mcp = &mc;
4509 struct qla_hw_data *ha = vha->hw;
/* No firmware running yet: nothing to initialize. */
4511 if (!ha->flags.fw_started)
4514 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4515 "Entered %s.\n", __func__);
4517 if (IS_SHADOW_REG_CAPABLE(ha))
4518 req->options |= BIT_13;
4520 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4521 mcp->mb[1] = req->options;
/* 64-bit queue DMA base split across mb[2,3,6,7]. */
4522 mcp->mb[2] = MSW(LSD(req->dma));
4523 mcp->mb[3] = LSW(LSD(req->dma));
4524 mcp->mb[6] = MSW(MSD(req->dma));
4525 mcp->mb[7] = LSW(MSD(req->dma));
4526 mcp->mb[5] = req->length;
4528 mcp->mb[10] = req->rsp->id;
4529 mcp->mb[12] = req->qos;
4530 mcp->mb[11] = req->vp_idx;
4531 mcp->mb[13] = req->rid;
4532 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4535 mcp->mb[4] = req->id;
4536 /* que in ptr index */
4538 /* que out ptr index */
4539 mcp->mb[9] = *req->out_ptr = 0;
4540 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4541 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4543 mcp->flags = MBX_DMA_OUT;
4544 mcp->tov = MBX_TOV_SECONDS * 2;
4546 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4548 mcp->in_mb |= MBX_1;
4549 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4550 mcp->out_mb |= MBX_15;
4551 /* debug q create issue in SR-IOV */
4552 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
/* Reset hardware queue pointers unless firmware owns them (BIT_0). */
4555 spin_lock_irqsave(&ha->hardware_lock, flags);
4556 if (!(req->options & BIT_0)) {
4557 wrt_reg_dword(req->req_q_in, 0);
4558 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4559 wrt_reg_dword(req->req_q_out, 0);
4561 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4563 rval = qla2x00_mailbox_command(vha, mcp);
4564 if (rval != QLA_SUCCESS) {
4565 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4566 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4568 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4569 "Done %s.\n", __func__);
/*
 * qla25xx_init_rsp_que() - initialize a multiqueue response queue in
 * firmware via MBC_INITIALIZE_MULTIQ; mirror image of
 * qla25xx_init_req_que() for the response side.
 *
 * @vha: adapter context
 * @rsp: response queue to register (DMA base, length, id, MSI-X entry...)
 */
4576 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4579 unsigned long flags;
4581 mbx_cmd_t *mcp = &mc;
4582 struct qla_hw_data *ha = vha->hw;
4584 if (!ha->flags.fw_started)
4587 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4588 "Entered %s.\n", __func__);
4590 if (IS_SHADOW_REG_CAPABLE(ha))
4591 rsp->options |= BIT_13;
4593 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4594 mcp->mb[1] = rsp->options;
/* 64-bit queue DMA base split across mb[2,3,6,7]. */
4595 mcp->mb[2] = MSW(LSD(rsp->dma));
4596 mcp->mb[3] = LSW(LSD(rsp->dma));
4597 mcp->mb[6] = MSW(MSD(rsp->dma));
4598 mcp->mb[7] = LSW(MSD(rsp->dma));
4599 mcp->mb[5] = rsp->length;
4600 mcp->mb[14] = rsp->msix->entry;
4601 mcp->mb[13] = rsp->rid;
4602 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4605 mcp->mb[4] = rsp->id;
4606 /* que in ptr index */
4607 mcp->mb[8] = *rsp->in_ptr = 0;
4608 /* que out ptr index */
4610 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4611 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4613 mcp->flags = MBX_DMA_OUT;
4614 mcp->tov = MBX_TOV_SECONDS * 2;
4616 if (IS_QLA81XX(ha)) {
4617 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4618 mcp->in_mb |= MBX_1;
4619 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4620 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4621 mcp->in_mb |= MBX_1;
4622 /* debug q create issue in SR-IOV */
4623 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
/* Reset hardware queue pointers unless firmware owns them (BIT_0). */
4626 spin_lock_irqsave(&ha->hardware_lock, flags);
4627 if (!(rsp->options & BIT_0)) {
4628 wrt_reg_dword(rsp->rsp_q_out, 0);
4629 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4630 wrt_reg_dword(rsp->rsp_q_in, 0);
4633 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4635 rval = qla2x00_mailbox_command(vha, mcp);
4636 if (rval != QLA_SUCCESS) {
4637 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4638 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4640 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4641 "Done %s.\n", __func__);
/*
 * qla81xx_idc_ack() - acknowledge an Inter-Driver Communication (IDC)
 * message by echoing the received registers back via MBC_IDC_ACK.
 *
 * @vha: adapter context
 * @mb:  array of QLA_IDC_ACK_REGS 16-bit values copied into mb[1..7]
 */
4648 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4652 mbx_cmd_t *mcp = &mc;
4654 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4655 "Entered %s.\n", __func__);
4657 mcp->mb[0] = MBC_IDC_ACK;
4658 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4659 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4661 mcp->tov = MBX_TOV_SECONDS;
4663 rval = qla2x00_mailbox_command(vha, mcp);
4665 if (rval != QLA_SUCCESS) {
4666 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4667 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4669 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4670 "Done %s.\n", __func__);
/*
 * qla81xx_fac_get_sector_size() - query the flash sector size through
 * the Flash Access Control (FAC) mailbox interface.
 *
 * @vha:         adapter context
 * @sector_size: out; sector size reported in mb[1] on success
 *
 * Only valid on ISP81xx/83xx/27xx/28xx parts.
 */
4677 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4681 mbx_cmd_t *mcp = &mc;
4683 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4684 "Entered %s.\n", __func__);
4686 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4687 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4688 return QLA_FUNCTION_FAILED;
4690 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4691 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4692 mcp->out_mb = MBX_1|MBX_0;
4693 mcp->in_mb = MBX_1|MBX_0;
4694 mcp->tov = MBX_TOV_SECONDS;
4696 rval = qla2x00_mailbox_command(vha, mcp);
4698 if (rval != QLA_SUCCESS) {
4699 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4700 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4701 rval, mcp->mb[0], mcp->mb[1]);
4703 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4704 "Done %s.\n", __func__);
4705 *sector_size = mcp->mb[1];
/*
 * qla81xx_fac_do_write_enable() - set or clear flash write protection
 * via the FAC mailbox interface.
 *
 * @vha:    adapter context
 * @enable: non-zero to enable writes, zero to write-protect
 *
 * Only valid on ISP81xx/83xx/27xx/28xx parts.
 */
4712 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4716 mbx_cmd_t *mcp = &mc;
4718 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4719 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4720 return QLA_FUNCTION_FAILED;
4722 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4723 "Entered %s.\n", __func__);
4725 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4726 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4727 FAC_OPT_CMD_WRITE_PROTECT;
4728 mcp->out_mb = MBX_1|MBX_0;
4729 mcp->in_mb = MBX_1|MBX_0;
4730 mcp->tov = MBX_TOV_SECONDS;
4732 rval = qla2x00_mailbox_command(vha, mcp);
4734 if (rval != QLA_SUCCESS) {
4735 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4736 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4737 rval, mcp->mb[0], mcp->mb[1]);
4739 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4740 "Done %s.\n", __func__);
/*
 * qla81xx_fac_erase_sector() - erase a range of flash sectors through
 * the FAC mailbox interface.
 *
 * @vha:    adapter context
 * @start:  first sector address (LSW in mb[2], MSW in mb[3])
 * @finish: last sector address  (LSW in mb[4], MSW in mb[5])
 *
 * Only valid on ISP81xx/83xx/27xx/28xx parts.
 */
4747 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4751 mbx_cmd_t *mcp = &mc;
4753 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4754 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4755 return QLA_FUNCTION_FAILED;
4757 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4758 "Entered %s.\n", __func__);
4760 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4761 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4762 mcp->mb[2] = LSW(start);
4763 mcp->mb[3] = MSW(start);
4764 mcp->mb[4] = LSW(finish);
4765 mcp->mb[5] = MSW(finish);
4766 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4767 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4768 mcp->tov = MBX_TOV_SECONDS;
4770 rval = qla2x00_mailbox_command(vha, mcp);
4772 if (rval != QLA_SUCCESS) {
4773 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4774 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4775 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4777 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4778 "Done %s.\n", __func__);
/*
 * qla81xx_fac_semaphore_access() - lock or unlock the flash access
 * semaphore via the FAC mailbox interface.
 *
 * @vha:  adapter context
 * @lock: non-zero to take the semaphore, zero to release it
 *
 * Silently succeeds (QLA_SUCCESS) on ISPs without FAC support.
 */
4785 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
4787 int rval = QLA_SUCCESS;
4789 mbx_cmd_t *mcp = &mc;
4790 struct qla_hw_data *ha = vha->hw;
4792 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4793 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4796 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4797 "Entered %s.\n", __func__);
4799 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4800 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
4801 FAC_OPT_CMD_UNLOCK_SEMAPHORE);
4802 mcp->out_mb = MBX_1|MBX_0;
4803 mcp->in_mb = MBX_1|MBX_0;
4804 mcp->tov = MBX_TOV_SECONDS;
4806 rval = qla2x00_mailbox_command(vha, mcp);
4808 if (rval != QLA_SUCCESS) {
4809 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4810 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4811 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4813 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4814 "Done %s.\n", __func__);
/*
 * qla81xx_restart_mpi_firmware() - ask the ISP to restart the MPI
 * (management processor) firmware via MBC_RESTART_MPI_FW.
 *
 * @vha: adapter context
 */
4821 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4825 mbx_cmd_t *mcp = &mc;
4827 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4828 "Entered %s.\n", __func__);
4830 mcp->mb[0] = MBC_RESTART_MPI_FW;
4831 mcp->out_mb = MBX_0;
4832 mcp->in_mb = MBX_0|MBX_1;
4833 mcp->tov = MBX_TOV_SECONDS;
4835 rval = qla2x00_mailbox_command(vha, mcp);
4837 if (rval != QLA_SUCCESS) {
4838 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4839 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4840 rval, mcp->mb[0], mcp->mb[1]);
4842 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4843 "Done %s.\n", __func__);
/*
 * qla82xx_set_driver_version() - report the driver version string to
 * P3P (82xx) firmware, packed directly into mailbox registers mb[4..15]
 * (two characters per register) via MBC_SET_RNID_PARAMS /
 * RNID_TYPE_SET_VERSION.
 *
 * @vha:     adapter context
 * @version: NUL-terminated driver version string
 */
4850 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4854 mbx_cmd_t *mcp = &mc;
4858 struct qla_hw_data *ha = vha->hw;
4860 if (!IS_P3P_TYPE(ha))
4861 return QLA_FUNCTION_FAILED;
4863 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4864 "Entered %s.\n", __func__);
/* Reinterpret the char string as little-endian 16-bit words. */
4866 str = (__force __le16 *)version;
4867 len = strlen(version);
4869 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4870 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4871 mcp->out_mb = MBX_1|MBX_0;
/* Copy version text into mb[4..15], then zero-fill the remainder. */
4872 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4873 mcp->mb[i] = le16_to_cpup(str);
4874 mcp->out_mb |= 1<<i;
4876 for (; i < 16; i++) {
4878 mcp->out_mb |= 1<<i;
4880 mcp->in_mb = MBX_1|MBX_0;
4881 mcp->tov = MBX_TOV_SECONDS;
4883 rval = qla2x00_mailbox_command(vha, mcp);
4885 if (rval != QLA_SUCCESS) {
4886 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4887 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4889 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4890 "Done %s.\n", __func__);
/*
 * qla25xx_set_driver_version() - report the driver version string to
 * FWI2 firmware through a DMA buffer (MBC_SET_RNID_PARAMS /
 * RNID_TYPE_SET_VERSION), for ISPs newer than 24xx/81xx.
 *
 * @vha:     adapter context
 * @version: NUL-terminated driver version string
 *
 * The buffer starts with the 4-byte header "\x7\x3\x11\x0" followed by
 * the (possibly truncated) version text.
 */
4897 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4901 mbx_cmd_t *mcp = &mc;
4906 struct qla_hw_data *ha = vha->hw;
4908 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4910 return QLA_FUNCTION_FAILED;
4912 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4913 "Entered %s.\n", __func__);
4915 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4917 ql_log(ql_log_warn, vha, 0x117f,
4918 "Failed to allocate driver version param.\n");
4919 return QLA_MEMORY_ALLOC_FAILED;
/* 4-byte RNID "set version" header, then zeroed payload area. */
4922 memcpy(str, "\x7\x3\x11\x0", 4);
4924 len = dwlen * 4 - 4;
4925 memset(str + 4, 0, len);
/* Truncate to the actual version length if shorter than the buffer. */
4926 if (len > strlen(version))
4927 len = strlen(version);
4928 memcpy(str + 4, version, len);
4930 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4931 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4932 mcp->mb[2] = MSW(LSD(str_dma));
4933 mcp->mb[3] = LSW(LSD(str_dma));
4934 mcp->mb[6] = MSW(MSD(str_dma));
4935 mcp->mb[7] = LSW(MSD(str_dma));
4936 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4937 mcp->in_mb = MBX_1|MBX_0;
4938 mcp->tov = MBX_TOV_SECONDS;
4940 rval = qla2x00_mailbox_command(vha, mcp);
4942 if (rval != QLA_SUCCESS) {
4943 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4944 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4946 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4947 "Done %s.\n", __func__);
4950 dma_pool_free(ha->s_dma_pool, str, str_dma);
/*
 * qla24xx_get_port_login_templ() - fetch the port login (PLOGI) payload
 * template from firmware via MBC_GET_RNID_PARAMS / RNID_TYPE_PORT_LOGIN.
 *
 * @vha:     adapter context
 * @buf_dma: DMA address of @buf
 * @buf:     destination buffer; on success contents are byte-swapped
 *           in place from firmware (little-endian) to CPU order
 * @bufsiz:  buffer size in bytes (firmware is given bufsiz/4 dwords)
 */
4956 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4957 void *buf, uint16_t bufsiz)
4961 mbx_cmd_t *mcp = &mc;
4964 if (!IS_FWI2_CAPABLE(vha->hw))
4965 return QLA_FUNCTION_FAILED;
4967 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4968 "Entered %s.\n", __func__);
4970 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4971 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4972 mcp->mb[2] = MSW(buf_dma);
4973 mcp->mb[3] = LSW(buf_dma);
4974 mcp->mb[6] = MSW(MSD(buf_dma));
4975 mcp->mb[7] = LSW(MSD(buf_dma));
4976 mcp->mb[8] = bufsiz/4;
4977 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4978 mcp->in_mb = MBX_1|MBX_0;
4979 mcp->tov = MBX_TOV_SECONDS;
4981 rval = qla2x00_mailbox_command(vha, mcp);
4983 if (rval != QLA_SUCCESS) {
4984 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4985 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4987 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4988 "Done %s.\n", __func__);
/* Convert the template dwords to CPU byte order in place. */
4989 bp = (uint32_t *) buf;
4990 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4991 *bp = le32_to_cpu((__force __le32)*bp);
/* Maximum number of PUREX ELS opcodes the driver registers. */
4997 #define PUREX_CMD_COUNT 4
/*
 * qla25xx_set_els_cmds_supported() - tell firmware which unsolicited
 * (PUREX) ELS commands should be passed up to the driver, via
 * MBC_SET_RNID_PARAMS / RNID_TYPE_ELS_CMD with a DMA bitmap.
 *
 * Registers RDP, FPIN (when SCM is supported) and AUTH ELS (when EDIF
 * is enabled); each opcode sets one bit in the ELS command bitmap.
 */
4999 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
5003 mbx_cmd_t *mcp = &mc;
5004 uint8_t *els_cmd_map;
5005 uint8_t active_cnt = 0;
5006 dma_addr_t els_cmd_map_dma;
5007 uint8_t cmd_opcode[PUREX_CMD_COUNT];
5008 uint8_t i, index, purex_bit;
5009 struct qla_hw_data *ha = vha->hw;
5011 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) &&
5012 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5015 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197,
5016 "Entered %s.\n", __func__);
5018 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
5019 &els_cmd_map_dma, GFP_KERNEL);
5021 ql_log(ql_log_warn, vha, 0x7101,
5022 "Failed to allocate RDP els command param.\n");
5023 return QLA_MEMORY_ALLOC_FAILED;
5026 /* List of Purex ELS */
5027 if (ql2xrdpenable) {
5028 cmd_opcode[active_cnt] = ELS_RDP;
5031 if (ha->flags.scm_supported_f) {
5032 cmd_opcode[active_cnt] = ELS_FPIN;
5035 if (ha->flags.edif_enabled) {
5036 cmd_opcode[active_cnt] = ELS_AUTH_ELS;
/* Set one bit per registered opcode: byte = op/8, bit = op%8. */
5040 for (i = 0; i < active_cnt; i++) {
5041 index = cmd_opcode[i] / 8;
5042 purex_bit = cmd_opcode[i] % 8;
5043 els_cmd_map[index] |= 1 << purex_bit;
5046 mcp->mb[0] = MBC_SET_RNID_PARAMS;
5047 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8;
5048 mcp->mb[2] = MSW(LSD(els_cmd_map_dma));
5049 mcp->mb[3] = LSW(LSD(els_cmd_map_dma));
5050 mcp->mb[6] = MSW(MSD(els_cmd_map_dma));
5051 mcp->mb[7] = LSW(MSD(els_cmd_map_dma));
5052 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5053 mcp->in_mb = MBX_1|MBX_0;
5054 mcp->tov = MBX_TOV_SECONDS;
5055 mcp->flags = MBX_DMA_OUT;
5056 mcp->buf_size = ELS_CMD_MAP_SIZE;
5057 rval = qla2x00_mailbox_command(vha, mcp);
5059 if (rval != QLA_SUCCESS) {
5060 ql_dbg(ql_dbg_mbx, vha, 0x118d,
5061 "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]);
5063 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
5064 "Done %s.\n", __func__);
5067 dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
5068 els_cmd_map, els_cmd_map_dma);
/*
 * qla2x00_read_asic_temperature() - read the ASIC temperature via
 * MBC_GET_RNID_PARAMS / RNID_TYPE_ASIC_TEMP.
 *
 * @vha:  adapter context
 * @temp: out; temperature value (assignment from mb[1] elided in view)
 */
5074 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
5078 mbx_cmd_t *mcp = &mc;
5080 if (!IS_FWI2_CAPABLE(vha->hw))
5081 return QLA_FUNCTION_FAILED;
5083 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
5084 "Entered %s.\n", __func__);
5086 mcp->mb[0] = MBC_GET_RNID_PARAMS;
5087 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
5088 mcp->out_mb = MBX_1|MBX_0;
5089 mcp->in_mb = MBX_1|MBX_0;
5090 mcp->tov = MBX_TOV_SECONDS;
5092 rval = qla2x00_mailbox_command(vha, mcp);
5095 if (rval != QLA_SUCCESS) {
5096 ql_dbg(ql_dbg_mbx, vha, 0x115a,
5097 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
5099 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
5100 "Done %s.\n", __func__);
/*
 * qla2x00_read_sfp() - read SFP transceiver data into a DMA buffer via
 * MBC_READ_SFP.
 *
 * @vha:     adapter context
 * @sfp_dma: DMA address of @sfp
 * @sfp:     destination buffer (post-read byte handling elided in view)
 * @dev:     SFP device address
 * @off:     register offset within the device
 * @len:     transfer length
 * @opt:     access options passed to firmware
 *
 * Returns QLA_INTERFACE_ERROR when firmware reports no SFP present
 * (MBS_COMMAND_ERROR with mb[1] == 0x22).
 */
5107 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5108 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5112 mbx_cmd_t *mcp = &mc;
5113 struct qla_hw_data *ha = vha->hw;
5115 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
5116 "Entered %s.\n", __func__);
5118 if (!IS_FWI2_CAPABLE(ha))
5119 return QLA_FUNCTION_FAILED;
5124 mcp->mb[0] = MBC_READ_SFP;
5126 mcp->mb[2] = MSW(LSD(sfp_dma));
5127 mcp->mb[3] = LSW(LSD(sfp_dma));
5128 mcp->mb[6] = MSW(MSD(sfp_dma));
5129 mcp->mb[7] = LSW(MSD(sfp_dma));
5133 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5134 mcp->in_mb = MBX_1|MBX_0;
5135 mcp->tov = MBX_TOV_SECONDS;
5137 rval = qla2x00_mailbox_command(vha, mcp);
5142 if (rval != QLA_SUCCESS) {
5143 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
5144 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5145 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
5146 /* sfp is not there */
5147 rval = QLA_INTERFACE_ERROR;
5150 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
5151 "Done %s.\n", __func__);
/*
 * qla2x00_write_sfp() - write SFP transceiver data from a DMA buffer
 * via MBC_WRITE_SFP; mirror of qla2x00_read_sfp().
 *
 * @vha:     adapter context
 * @sfp_dma: DMA address of @sfp
 * @sfp:     source buffer (pre-write byte handling elided in view)
 * @dev:     SFP device address
 * @off:     register offset within the device
 * @len:     transfer length
 * @opt:     access options passed to firmware
 */
5158 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5159 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5163 mbx_cmd_t *mcp = &mc;
5164 struct qla_hw_data *ha = vha->hw;
5166 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
5167 "Entered %s.\n", __func__);
5169 if (!IS_FWI2_CAPABLE(ha))
5170 return QLA_FUNCTION_FAILED;
5178 mcp->mb[0] = MBC_WRITE_SFP;
5180 mcp->mb[2] = MSW(LSD(sfp_dma));
5181 mcp->mb[3] = LSW(LSD(sfp_dma));
5182 mcp->mb[6] = MSW(MSD(sfp_dma));
5183 mcp->mb[7] = LSW(MSD(sfp_dma));
5187 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5188 mcp->in_mb = MBX_1|MBX_0;
5189 mcp->tov = MBX_TOV_SECONDS;
5191 rval = qla2x00_mailbox_command(vha, mcp);
5193 if (rval != QLA_SUCCESS) {
5194 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
5195 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5197 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
5198 "Done %s.\n", __func__);
/*
 * qla2x00_get_xgmac_stats() - fetch XGMAC statistics into a DMA buffer
 * via MBC_GET_XGMAC_STATS (CNA adapters only).
 *
 * @vha:           adapter context
 * @stats_dma:     DMA address of the statistics buffer
 * @size_in_bytes: buffer size in bytes (firmware is given dwords)
 * @actual_size:   out; number of bytes firmware actually returned
 *                 (mb[2] dwords converted back to bytes)
 */
5205 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
5206 uint16_t size_in_bytes, uint16_t *actual_size)
5210 mbx_cmd_t *mcp = &mc;
5212 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
5213 "Entered %s.\n", __func__);
5215 if (!IS_CNA_CAPABLE(vha->hw))
5216 return QLA_FUNCTION_FAILED;
5218 mcp->mb[0] = MBC_GET_XGMAC_STATS;
5219 mcp->mb[2] = MSW(stats_dma);
5220 mcp->mb[3] = LSW(stats_dma);
5221 mcp->mb[6] = MSW(MSD(stats_dma));
5222 mcp->mb[7] = LSW(MSD(stats_dma));
5223 mcp->mb[8] = size_in_bytes >> 2;
5224 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
5225 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5226 mcp->tov = MBX_TOV_SECONDS;
5228 rval = qla2x00_mailbox_command(vha, mcp);
5230 if (rval != QLA_SUCCESS) {
5231 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
5232 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5233 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5235 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
5236 "Done %s.\n", __func__);
5239 *actual_size = mcp->mb[2] << 2;
/*
 * qla2x00_get_dcbx_params() - fetch the DCBX TLV parameter block into a
 * DMA buffer via MBC_GET_DCBX_PARAMS (CNA adapters only).
 *
 * @vha:     adapter context
 * @tlv_dma: DMA address of the destination TLV buffer
 */
5246 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
5251 mbx_cmd_t *mcp = &mc;
5253 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
5254 "Entered %s.\n", __func__);
5256 if (!IS_CNA_CAPABLE(vha->hw))
5257 return QLA_FUNCTION_FAILED;
5259 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
5261 mcp->mb[2] = MSW(tlv_dma);
5262 mcp->mb[3] = LSW(tlv_dma);
5263 mcp->mb[6] = MSW(MSD(tlv_dma));
5264 mcp->mb[7] = LSW(MSD(tlv_dma));
5266 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5267 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5268 mcp->tov = MBX_TOV_SECONDS;
5270 rval = qla2x00_mailbox_command(vha, mcp);
5272 if (rval != QLA_SUCCESS) {
5273 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
5274 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5275 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5277 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
5278 "Done %s.\n", __func__);
/*
 * qla2x00_read_ram_word() - read one 32-bit word of RISC RAM via
 * MBC_READ_RAM_EXTENDED.
 *
 * @vha:       adapter context
 * @risc_addr: RISC RAM address (LSW in mb[1], MSW in mb[8])
 * @data:      out; word assembled from mb[3] (high) and mb[2] (low)
 */
5285 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
5289 mbx_cmd_t *mcp = &mc;
5291 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
5292 "Entered %s.\n", __func__);
5294 if (!IS_FWI2_CAPABLE(vha->hw))
5295 return QLA_FUNCTION_FAILED;
5297 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
5298 mcp->mb[1] = LSW(risc_addr);
5299 mcp->mb[8] = MSW(risc_addr);
5300 mcp->out_mb = MBX_8|MBX_1|MBX_0;
5301 mcp->in_mb = MBX_3|MBX_2|MBX_0;
5302 mcp->tov = MBX_TOV_SECONDS;
5304 rval = qla2x00_mailbox_command(vha, mcp);
5305 if (rval != QLA_SUCCESS) {
5306 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
5307 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5309 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
5310 "Done %s.\n", __func__);
5311 *data = mcp->mb[3] << 16 | mcp->mb[2];
/*
 * qla2x00_loopback_test() - run a diagnostic loopback
 * (MBC_DIAGNOSTIC_LOOP_BACK) using caller-provided send/receive DMA
 * buffers.
 *
 * @vha:   adapter context
 * @mreq:  test parameters: options, transfer size, iteration count,
 *         send_dma and rcv_dma buffer addresses
 * @mresp: out; raw copy of the 64 bytes of returned mailbox registers
 */
5318 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5323 mbx_cmd_t *mcp = &mc;
5325 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
5326 "Entered %s.\n", __func__);
5328 memset(mcp->mb, 0 , sizeof(mcp->mb));
5329 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
5330 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
5332 /* transfer count */
5333 mcp->mb[10] = LSW(mreq->transfer_size);
5334 mcp->mb[11] = MSW(mreq->transfer_size);
5336 /* send data address */
5337 mcp->mb[14] = LSW(mreq->send_dma);
5338 mcp->mb[15] = MSW(mreq->send_dma);
5339 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5340 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5342 /* receive data address */
5343 mcp->mb[16] = LSW(mreq->rcv_dma);
5344 mcp->mb[17] = MSW(mreq->rcv_dma);
5345 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5346 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5348 /* Iteration count */
5349 mcp->mb[18] = LSW(mreq->iteration_count);
5350 mcp->mb[19] = MSW(mreq->iteration_count);
5352 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5353 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5354 if (IS_CNA_CAPABLE(vha->hw))
5355 mcp->out_mb |= MBX_2;
5356 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5358 mcp->buf_size = mreq->transfer_size;
5359 mcp->tov = MBX_TOV_SECONDS;
5360 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5362 rval = qla2x00_mailbox_command(vha, mcp);
5364 if (rval != QLA_SUCCESS) {
5365 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5366 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5367 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5368 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5370 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5371 "Done %s.\n", __func__);
5374 /* Copy mailbox information */
5375 memcpy( mresp, mcp->mb, 64);
/*
 * qla2x00_echo_test() - run a diagnostic ECHO test
 * (MBC_DIAGNOSTIC_ECHO) using caller-provided send/receive DMA buffers.
 *
 * @vha:   adapter context
 * @mreq:  test parameters: options, transfer size, send_dma and rcv_dma
 * @mresp: out; raw copy of the 64 bytes of returned mailbox registers
 *
 * On CNA adapters the FCoE FCF index is supplied in mb[2].
 */
5380 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5385 mbx_cmd_t *mcp = &mc;
5386 struct qla_hw_data *ha = vha->hw;
5388 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5389 "Entered %s.\n", __func__);
5391 memset(mcp->mb, 0 , sizeof(mcp->mb));
5392 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5393 /* BIT_6 specifies 64bit address */
5394 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5395 if (IS_CNA_CAPABLE(ha)) {
5396 mcp->mb[2] = vha->fcoe_fcf_idx;
5398 mcp->mb[16] = LSW(mreq->rcv_dma);
5399 mcp->mb[17] = MSW(mreq->rcv_dma);
5400 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5401 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5403 mcp->mb[10] = LSW(mreq->transfer_size);
5405 mcp->mb[14] = LSW(mreq->send_dma);
5406 mcp->mb[15] = MSW(mreq->send_dma);
5407 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5408 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5410 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5411 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5412 if (IS_CNA_CAPABLE(ha))
5413 mcp->out_mb |= MBX_2;
/* Returned registers vary by ISP family. */
5416 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5417 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5418 mcp->in_mb |= MBX_1;
5419 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
5421 mcp->in_mb |= MBX_3;
5423 mcp->tov = MBX_TOV_SECONDS;
5424 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5425 mcp->buf_size = mreq->transfer_size;
5427 rval = qla2x00_mailbox_command(vha, mcp);
5429 if (rval != QLA_SUCCESS) {
5430 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5431 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5432 rval, mcp->mb[0], mcp->mb[1]);
5434 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5435 "Done %s.\n", __func__);
5438 /* Copy mailbox information */
5439 memcpy(mresp, mcp->mb, 64);
/*
 * Reset the ISP84xx chip via MBC_ISP84XX_RESET.
 * enable_diagnostic selects whether the chip comes back in diagnostic mode.
 */
5444 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5448 	mbx_cmd_t *mcp = &mc;
5450 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5451 	    "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5453 	mcp->mb[0] = MBC_ISP84XX_RESET;
5454 	mcp->mb[1] = enable_diagnostic;
5455 	mcp->out_mb = MBX_1|MBX_0;
5456 	mcp->in_mb = MBX_1|MBX_0;
5457 	mcp->tov = MBX_TOV_SECONDS;
5458 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5459 	rval = qla2x00_mailbox_command(vha, mcp);
5461 	if (rval != QLA_SUCCESS)
5462 		ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5464 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5465 		    "Done %s.\n", __func__);
/*
 * Write one 32-bit word of RISC RAM at risc_addr via
 * MBC_WRITE_RAM_WORD_EXTENDED.  Only valid on FWI2-capable (ISP24xx+) HBAs.
 */
5471 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5475 	mbx_cmd_t *mcp = &mc;
5477 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5478 	    "Entered %s.\n", __func__);
5480 	if (!IS_FWI2_CAPABLE(vha->hw))
5481 		return QLA_FUNCTION_FAILED;
	/* 32-bit address is split across mb[1] (low) and mb[8] (high). */
5483 	mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5484 	mcp->mb[1] = LSW(risc_addr);
5485 	mcp->mb[2] = LSW(data);
5486 	mcp->mb[3] = MSW(data);
5487 	mcp->mb[8] = MSW(risc_addr);
5488 	mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5489 	mcp->in_mb = MBX_1|MBX_0;
5490 	mcp->tov = MBX_TOV_SECONDS;
5492 	rval = qla2x00_mailbox_command(vha, mcp);
5493 	if (rval != QLA_SUCCESS) {
5494 		ql_dbg(ql_dbg_mbx, vha, 0x1101,
5495 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
5496 		    rval, mcp->mb[0], mcp->mb[1]);
5498 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5499 		    "Done %s.\n", __func__);
/*
 * Write MPI registers by banging MBC_WRITE_MPI_REGISTER directly into the
 * ISP24xx mailbox registers and polling host_status for completion, instead
 * of going through qla2x00_mailbox_command() (used when the normal mailbox
 * path is unavailable).
 */
5506 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5509 	uint32_t stat, timer;
5511 	struct qla_hw_data *ha = vha->hw;
5512 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5516 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5517 	    "Entered %s.\n", __func__);
5519 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5521 	/* Write the MBC data to the registers */
5522 	wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5523 	wrt_reg_word(&reg->mailbox1, mb[0]);
5524 	wrt_reg_word(&reg->mailbox2, mb[1]);
5525 	wrt_reg_word(&reg->mailbox3, mb[2]);
5526 	wrt_reg_word(&reg->mailbox4, mb[3]);
	/* Ring the doorbell to start command execution. */
5528 	wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
5530 	/* Poll for MBC interrupt */
5531 	for (timer = 6000000; timer; timer--) {
5532 		/* Check for pending interrupts. */
5533 		stat = rd_reg_dword(&reg->host_status);
5534 		if (stat & HSRX_RISC_INT) {
			/* Low status codes indicate a mailbox completion. */
5537 			if (stat == 0x1 || stat == 0x2 ||
5538 			    stat == 0x10 || stat == 0x11) {
5539 				set_bit(MBX_INTERRUPT,
5540 				    &ha->mbx_cmd_flags);
5541 				mb0 = rd_reg_word(&reg->mailbox0);
5542 				wrt_reg_dword(&reg->hccr,
5543 				    HCCRX_CLR_RISC_INT);
				/* Read back to flush the posted write. */
5544 				rd_reg_dword(&reg->hccr);
5551 	if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5552 		rval = mb0 & MBS_MASK;
5554 		rval = QLA_FUNCTION_FAILED;
5556 	if (rval != QLA_SUCCESS) {
5557 		ql_dbg(ql_dbg_mbx, vha, 0x1104,
5558 		    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5560 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5561 		    "Done %s.\n", __func__);
5567 /* Set the specified data rate */
/*
 * Program the link speed stored in ha->set_data_rate via MBC_DATA_RATE.
 * Unrecognized speeds fall back to autonegotiation.
 */
5569 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
5573 	mbx_cmd_t *mcp = &mc;
5574 	struct qla_hw_data *ha = vha->hw;
5577 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5578 	    "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5581 	if (!IS_FWI2_CAPABLE(ha))
5582 		return QLA_FUNCTION_FAILED;
5584 	memset(mcp, 0, sizeof(*mcp));
	/* Only these fixed speeds (and auto) are accepted; others autoneg. */
5585 	switch (ha->set_data_rate) {
5586 	case PORT_SPEED_AUTO:
5587 	case PORT_SPEED_4GB:
5588 	case PORT_SPEED_8GB:
5589 	case PORT_SPEED_16GB:
5590 	case PORT_SPEED_32GB:
5591 		val = ha->set_data_rate;
5594 		ql_log(ql_log_warn, vha, 0x1199,
5595 		    "Unrecognized speed setting:%d. Setting Autoneg\n",
5597 		val = ha->set_data_rate = PORT_SPEED_AUTO;
5601 	mcp->mb[0] = MBC_DATA_RATE;
5605 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
5606 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5607 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5608 		mcp->in_mb |= MBX_4|MBX_3;
5609 	mcp->tov = MBX_TOV_SECONDS;
5611 	rval = qla2x00_mailbox_command(vha, mcp);
5612 	if (rval != QLA_SUCCESS) {
5613 		ql_dbg(ql_dbg_mbx, vha, 0x1107,
5614 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		/* mb[1] == 0x7 means rate not reported; skip the log. */
5616 		if (mcp->mb[1] != 0x7)
5617 			ql_dbg(ql_dbg_mbx, vha, 0x1179,
5618 			    "Speed set:0x%x\n", mcp->mb[1]);
5620 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5621 		    "Done %s.\n", __func__);
/*
 * Query the current link data rate (MBC_DATA_RATE / QLA_GET_DATA_RATE) and
 * cache it in ha->link_data_rate.  On 83xx/27xx/28xx also report FEC status
 * from mb[4].
 */
5628 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5632 	mbx_cmd_t *mcp = &mc;
5633 	struct qla_hw_data *ha = vha->hw;
5635 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5636 	    "Entered %s.\n", __func__);
5638 	if (!IS_FWI2_CAPABLE(ha))
5639 		return QLA_FUNCTION_FAILED;
5641 	mcp->mb[0] = MBC_DATA_RATE;
5642 	mcp->mb[1] = QLA_GET_DATA_RATE;
5643 	mcp->out_mb = MBX_1|MBX_0;
5644 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5645 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5646 		mcp->in_mb |= MBX_4|MBX_3;
5647 	mcp->tov = MBX_TOV_SECONDS;
5649 	rval = qla2x00_mailbox_command(vha, mcp);
5650 	if (rval != QLA_SUCCESS) {
5651 		ql_dbg(ql_dbg_mbx, vha, 0x1107,
5652 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	/* mb[1] == 0x7 means "rate unknown"; keep the cached value. */
5654 		if (mcp->mb[1] != 0x7)
5655 			ha->link_data_rate = mcp->mb[1];
5657 		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
5658 			if (mcp->mb[4] & BIT_0)
5659 				ql_log(ql_log_info, vha, 0x11a2,
5660 				    "FEC=enabled (data rate).\n");
5663 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5664 		    "Done %s.\n", __func__);
	/* NOTE(review): link_data_rate is assigned twice here — TODO confirm
	 * against the full file whether both assignments are intentional. */
5665 		if (mcp->mb[1] != 0x7)
5666 			ha->link_data_rate = mcp->mb[1];
/*
 * Fetch the port configuration words (MBC_GET_PORT_CONFIG) into mb[0..3].
 * Supported only on 81xx/83xx/8044/27xx/28xx adapters.
 */
5673 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5677 	mbx_cmd_t *mcp = &mc;
5678 	struct qla_hw_data *ha = vha->hw;
5680 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5681 	    "Entered %s.\n", __func__);
5683 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5684 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5685 		return QLA_FUNCTION_FAILED;
5686 	mcp->mb[0] = MBC_GET_PORT_CONFIG;
5687 	mcp->out_mb = MBX_0;
5688 	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5689 	mcp->tov = MBX_TOV_SECONDS;
5692 	rval = qla2x00_mailbox_command(vha, mcp);
5694 	if (rval != QLA_SUCCESS) {
5695 		ql_dbg(ql_dbg_mbx, vha, 0x110a,
5696 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5698 		/* Copy all bits to preserve original value */
5699 		memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5701 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5702 		    "Done %s.\n", __func__);
/*
 * Write back the four port-configuration words via MBC_SET_PORT_CONFIG.
 * Counterpart of qla81xx_get_port_config().
 */
5708 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5712 	mbx_cmd_t *mcp = &mc;
5714 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5715 	    "Entered %s.\n", __func__);
5717 	mcp->mb[0] = MBC_SET_PORT_CONFIG;
5718 	/* Copy all bits to preserve original setting */
5719 	memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5720 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5722 	mcp->tov = MBX_TOV_SECONDS;
5724 	rval = qla2x00_mailbox_command(vha, mcp);
5726 	if (rval != QLA_SUCCESS) {
5727 		ql_dbg(ql_dbg_mbx, vha, 0x110d,
5728 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5730 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5731 		    "Done %s.\n", __func__);
/*
 * Set the FCP-priority value for a remote port (MBC_PORT_PARAMS).
 * Only supported on ISP24xx/25xx adapters; priority uses the low nibble.
 */
5738 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5743 	mbx_cmd_t *mcp = &mc;
5744 	struct qla_hw_data *ha = vha->hw;
5746 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5747 	    "Entered %s.\n", __func__);
5749 	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5750 		return QLA_FUNCTION_FAILED;
5752 	mcp->mb[0] = MBC_PORT_PARAMS;
5753 	mcp->mb[1] = loop_id;
5754 	if (ha->flags.fcp_prio_enabled)
5758 	mcp->mb[4] = priority & 0xf;
5759 	mcp->mb[9] = vha->vp_idx;
5760 	mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5761 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5762 	mcp->tov = MBX_TOV_SECONDS;
5764 	rval = qla2x00_mailbox_command(vha, mcp);
5772 	if (rval != QLA_SUCCESS) {
5773 		ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5775 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5776 		    "Done %s.\n", __func__);
/*
 * Read the board temperature.  Source depends on the adapter family:
 * 25xx vendor-specific boards read it over the SFP/thermal I2C interface,
 * 82xx/8044 read it from chip registers, everything else uses the ASIC
 * temperature mailbox command.
 */
5783 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5785 	int rval = QLA_FUNCTION_FAILED;
5786 	struct qla_hw_data *ha = vha->hw;
5789 	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5790 		ql_dbg(ql_dbg_mbx, vha, 0x1150,
5791 		    "Thermal not supported by this card.\n");
5795 	if (IS_QLA25XX(ha)) {
		/* Subsystem ID identifies boards with a thermal sensor. */
5796 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5797 		    ha->pdev->subsystem_device == 0x0175) {
5798 			rval = qla2x00_read_sfp(vha, 0, &byte,
5799 			    0x98, 0x1, 1, BIT_13|BIT_0);
5803 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5804 		    ha->pdev->subsystem_device == 0x338e) {
5805 			rval = qla2x00_read_sfp(vha, 0, &byte,
5806 			    0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5810 		ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5811 		    "Thermal not supported by this card.\n");
5815 	if (IS_QLA82XX(ha)) {
5816 		*temp = qla82xx_read_temperature(vha);
5819 	} else if (IS_QLA8044(ha)) {
5820 		*temp = qla8044_read_temperature(vha);
5825 	rval = qla2x00_read_asic_temperature(vha, temp);
/*
 * Enable firmware interrupt delivery on ISP82xx via MBC_TOGGLE_INTERRUPT.
 */
5830 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5833 	struct qla_hw_data *ha = vha->hw;
5835 	mbx_cmd_t *mcp = &mc;
5837 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5838 	    "Entered %s.\n", __func__);
5840 	if (!IS_FWI2_CAPABLE(ha))
5841 		return QLA_FUNCTION_FAILED;
5843 	memset(mcp, 0, sizeof(mbx_cmd_t));
5844 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5847 	mcp->out_mb = MBX_1|MBX_0;
5849 	mcp->tov = MBX_TOV_SECONDS;
5852 	rval = qla2x00_mailbox_command(vha, mcp);
5853 	if (rval != QLA_SUCCESS) {
5854 		ql_dbg(ql_dbg_mbx, vha, 0x1016,
5855 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5857 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5858 		    "Done %s.\n", __func__);
/*
 * Disable firmware interrupt delivery on P3P-type (82xx/8044) adapters via
 * MBC_TOGGLE_INTERRUPT.  Mirror of qla82xx_mbx_intr_enable().
 */
5865 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5868 	struct qla_hw_data *ha = vha->hw;
5870 	mbx_cmd_t *mcp = &mc;
5872 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5873 	    "Entered %s.\n", __func__);
5875 	if (!IS_P3P_TYPE(ha))
5876 		return QLA_FUNCTION_FAILED;
5878 	memset(mcp, 0, sizeof(mbx_cmd_t));
5879 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5882 	mcp->out_mb = MBX_1|MBX_0;
5884 	mcp->tov = MBX_TOV_SECONDS;
5887 	rval = qla2x00_mailbox_command(vha, mcp);
5888 	if (rval != QLA_SUCCESS) {
5889 		ql_dbg(ql_dbg_mbx, vha, 0x100c,
5890 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5892 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5893 		    "Done %s.\n", __func__);
/*
 * Query the firmware for the minidump template size
 * (MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE / RQST_TMPLT_SIZE) and store it in
 * ha->md_template_size.  A zero size is treated as failure.
 */
5900 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5902 	struct qla_hw_data *ha = vha->hw;
5904 	mbx_cmd_t *mcp = &mc;
5905 	int rval = QLA_FUNCTION_FAILED;
5907 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5908 	    "Entered %s.\n", __func__);
5910 	memset(mcp->mb, 0 , sizeof(mcp->mb));
	/* Command and sub-code are 32-bit values split across word registers. */
5911 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5912 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5913 	mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5914 	mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5916 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5917 	mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5918 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5920 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5921 	mcp->tov = MBX_TOV_SECONDS;
5922 	rval = qla2x00_mailbox_command(vha, mcp);
5924 	/* Always copy back return mailbox values. */
5925 	if (rval != QLA_SUCCESS) {
5926 		ql_dbg(ql_dbg_mbx, vha, 0x1120,
5927 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5928 		    (mcp->mb[1] << 16) | mcp->mb[0],
5929 		    (mcp->mb[3] << 16) | mcp->mb[2]);
5931 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5932 		    "Done %s.\n", __func__);
		/* Template size is returned as a 32-bit value in mb[3]:mb[2]. */
5933 		ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5934 		if (!ha->md_template_size) {
5935 			ql_dbg(ql_dbg_mbx, vha, 0x1122,
5936 			    "Null template size obtained.\n");
5937 			rval = QLA_FUNCTION_FAILED;
/*
 * Allocate a coherent DMA buffer of ha->md_template_size bytes and ask the
 * firmware to DMA the minidump template into it
 * (MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE / RQST_TMPLT).
 */
5944 qla82xx_md_get_template(scsi_qla_host_t *vha)
5946 	struct qla_hw_data *ha = vha->hw;
5948 	mbx_cmd_t *mcp = &mc;
5949 	int rval = QLA_FUNCTION_FAILED;
5951 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5952 	    "Entered %s.\n", __func__);
5954 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5955 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5956 	if (!ha->md_tmplt_hdr) {
5957 		ql_log(ql_log_warn, vha, 0x1124,
5958 		    "Unable to allocate memory for Minidump template.\n");
5962 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5963 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5964 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5965 	mcp->mb[2] = LSW(RQST_TMPLT);
5966 	mcp->mb[3] = MSW(RQST_TMPLT);
	/* 64-bit DMA address of the template buffer, low to high word. */
5967 	mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5968 	mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5969 	mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5970 	mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5971 	mcp->mb[8] = LSW(ha->md_template_size);
5972 	mcp->mb[9] = MSW(ha->md_template_size);
5974 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5975 	mcp->tov = MBX_TOV_SECONDS;
5976 	mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5977 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5978 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5979 	rval = qla2x00_mailbox_command(vha, mcp);
5981 	if (rval != QLA_SUCCESS) {
5982 		ql_dbg(ql_dbg_mbx, vha, 0x1125,
5983 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5984 		    ((mcp->mb[1] << 16) | mcp->mb[0]),
5985 		    ((mcp->mb[3] << 16) | mcp->mb[2]));
5987 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5988 		    "Done %s.\n", __func__);
/*
 * ISP8044 variant of minidump-template retrieval: the template is fetched
 * in MINIDUMP_SIZE_36K chunks, passing the running byte offset in
 * mb[10]/mb[11], until md_template_size bytes have been transferred.
 */
5993 qla8044_md_get_template(scsi_qla_host_t *vha)
5995 	struct qla_hw_data *ha = vha->hw;
5997 	mbx_cmd_t *mcp = &mc;
5998 	int rval = QLA_FUNCTION_FAILED;
5999 	int offset = 0, size = MINIDUMP_SIZE_36K;
6001 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
6002 	    "Entered %s.\n", __func__);
6004 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
6005 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
6006 	if (!ha->md_tmplt_hdr) {
6007 		ql_log(ql_log_warn, vha, 0xb11b,
6008 		    "Unable to allocate memory for Minidump template.\n");
6012 	memset(mcp->mb, 0 , sizeof(mcp->mb));
6013 	while (offset < ha->md_template_size) {
6014 		mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
6015 		mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
6016 		mcp->mb[2] = LSW(RQST_TMPLT);
6017 		mcp->mb[3] = MSW(RQST_TMPLT);
		/* Destination address advances by the chunk offset each pass. */
6018 		mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
6019 		mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
6020 		mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
6021 		mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
6022 		mcp->mb[8] = LSW(size);
6023 		mcp->mb[9] = MSW(size);
6024 		mcp->mb[10] = offset & 0x0000FFFF;
		/* NOTE(review): mb registers are 16-bit, so `offset & 0xFFFF0000`
		 * truncates to 0 — looks like it should be MSW(offset).
		 * TODO confirm against firmware spec before changing. */
6025 		mcp->mb[11] = offset & 0xFFFF0000;
6026 		mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
6027 		mcp->tov = MBX_TOV_SECONDS;
6028 		mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
6029 			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6030 		mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6031 		rval = qla2x00_mailbox_command(vha, mcp);
6033 		if (rval != QLA_SUCCESS) {
6034 			ql_dbg(ql_dbg_mbx, vha, 0xb11c,
6035 				"mailbox command FAILED=0x%x, subcode=%x.\n",
6036 				((mcp->mb[1] << 16) | mcp->mb[0]),
6037 				((mcp->mb[3] << 16) | mcp->mb[2]));
6040 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
6041 			    "Done %s.\n", __func__);
6042 			offset = offset + size;
/*
 * Program the LED configuration words (MBC_SET_LED_CONFIG).  81xx uses two
 * words; 8031 uses six.
 */
6048 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6051 	struct qla_hw_data *ha = vha->hw;
6053 	mbx_cmd_t *mcp = &mc;
6055 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6056 		return QLA_FUNCTION_FAILED;
6058 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
6059 	    "Entered %s.\n", __func__);
6061 	memset(mcp, 0, sizeof(mbx_cmd_t));
6062 	mcp->mb[0] = MBC_SET_LED_CONFIG;
6063 	mcp->mb[1] = led_cfg[0];
6064 	mcp->mb[2] = led_cfg[1];
6065 	if (IS_QLA8031(ha)) {
6066 		mcp->mb[3] = led_cfg[2];
6067 		mcp->mb[4] = led_cfg[3];
6068 		mcp->mb[5] = led_cfg[4];
6069 		mcp->mb[6] = led_cfg[5];
6072 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
6074 		mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6076 	mcp->tov = MBX_TOV_SECONDS;
6079 	rval = qla2x00_mailbox_command(vha, mcp);
6080 	if (rval != QLA_SUCCESS) {
6081 		ql_dbg(ql_dbg_mbx, vha, 0x1134,
6082 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6084 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
6085 		    "Done %s.\n", __func__);
/*
 * Read back the LED configuration words (MBC_GET_LED_CONFIG) into led_cfg.
 * Counterpart of qla81xx_set_led_config(); 8031 returns six words.
 */
6092 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6095 	struct qla_hw_data *ha = vha->hw;
6097 	mbx_cmd_t *mcp = &mc;
6099 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6100 		return QLA_FUNCTION_FAILED;
6102 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
6103 	    "Entered %s.\n", __func__);
6105 	memset(mcp, 0, sizeof(mbx_cmd_t));
6106 	mcp->mb[0] = MBC_GET_LED_CONFIG;
6108 	mcp->out_mb = MBX_0;
6109 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
6111 		mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6112 	mcp->tov = MBX_TOV_SECONDS;
6115 	rval = qla2x00_mailbox_command(vha, mcp);
6116 	if (rval != QLA_SUCCESS) {
6117 		ql_dbg(ql_dbg_mbx, vha, 0x1137,
6118 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6120 		led_cfg[0] = mcp->mb[1];
6121 		led_cfg[1] = mcp->mb[2];
6122 		if (IS_QLA8031(ha)) {
6123 			led_cfg[2] = mcp->mb[3];
6124 			led_cfg[3] = mcp->mb[4];
6125 			led_cfg[4] = mcp->mb[5];
6126 			led_cfg[5] = mcp->mb[6];
6128 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
6129 		    "Done %s.\n", __func__);
/*
 * Turn the beacon LED on or off on P3P-type (82xx/8044) adapters via
 * MBC_SET_LED_CONFIG.
 */
6136 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
6139 	struct qla_hw_data *ha = vha->hw;
6141 	mbx_cmd_t *mcp = &mc;
6143 	if (!IS_P3P_TYPE(ha))
6144 		return QLA_FUNCTION_FAILED;
6146 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
6147 	    "Entered %s.\n", __func__);
6149 	memset(mcp, 0, sizeof(mbx_cmd_t));
6150 	mcp->mb[0] = MBC_SET_LED_CONFIG;
6156 	mcp->out_mb = MBX_7|MBX_0;
6158 	mcp->tov = MBX_TOV_SECONDS;
6161 	rval = qla2x00_mailbox_command(vha, mcp);
6162 	if (rval != QLA_SUCCESS) {
6163 		ql_dbg(ql_dbg_mbx, vha, 0x1128,
6164 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6166 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
6167 		    "Done %s.\n", __func__);
/*
 * Write a 32-bit value to a remote (CAMRAM) register address via
 * MBC_WRITE_REMOTE_REG.  83xx/27xx/28xx only.
 */
6174 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
6177 	struct qla_hw_data *ha = vha->hw;
6179 	mbx_cmd_t *mcp = &mc;
6181 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6182 		return QLA_FUNCTION_FAILED;
6184 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
6185 	    "Entered %s.\n", __func__);
6187 	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6188 	mcp->mb[1] = LSW(reg);
6189 	mcp->mb[2] = MSW(reg);
6190 	mcp->mb[3] = LSW(data);
6191 	mcp->mb[4] = MSW(data);
6192 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6194 	mcp->in_mb = MBX_1|MBX_0;
6195 	mcp->tov = MBX_TOV_SECONDS;
6197 	rval = qla2x00_mailbox_command(vha, mcp);
6199 	if (rval != QLA_SUCCESS) {
6200 		ql_dbg(ql_dbg_mbx, vha, 0x1131,
6201 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6203 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
6204 		    "Done %s.\n", __func__);
/*
 * Perform an implicit LOGO of a remote port (MBC_PORT_LOGOUT with BIT_15
 * in mb[10]).  Not supported on the oldest ISP2100/2200 adapters.
 */
6211 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
6214 	struct qla_hw_data *ha = vha->hw;
6216 	mbx_cmd_t *mcp = &mc;
6218 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
6219 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
6220 		    "Implicit LOGO Unsupported.\n");
6221 		return QLA_FUNCTION_FAILED;
6225 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
6226 	    "Entering %s.\n",  __func__);
6228 	/* Perform Implicit LOGO. */
6229 	mcp->mb[0] = MBC_PORT_LOGOUT;
6230 	mcp->mb[1] = fcport->loop_id;
6231 	mcp->mb[10] = BIT_15;
6232 	mcp->out_mb = MBX_10|MBX_1|MBX_0;
6234 	mcp->tov = MBX_TOV_SECONDS;
6236 	rval = qla2x00_mailbox_command(vha, mcp);
6237 	if (rval != QLA_SUCCESS)
6238 		ql_dbg(ql_dbg_mbx, vha, 0x113d,
6239 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6241 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
6242 		    "Done %s.\n", __func__);
/*
 * Read a 32-bit remote (CAMRAM) register via MBC_READ_REMOTE_REG.
 * During soft-reset a read can return the 0xbad0bad0 sentinel, in which
 * case the read is retried for up to ~2 seconds.
 */
6248 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
6252 	mbx_cmd_t *mcp = &mc;
6253 	struct qla_hw_data *ha = vha->hw;
6254 	unsigned long retry_max_time = jiffies + (2 * HZ);
6256 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6257 		return QLA_FUNCTION_FAILED;
6259 	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
6262 	mcp->mb[0] = MBC_READ_REMOTE_REG;
6263 	mcp->mb[1] = LSW(reg);
6264 	mcp->mb[2] = MSW(reg);
6265 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
6266 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
6267 	mcp->tov = MBX_TOV_SECONDS;
6269 	rval = qla2x00_mailbox_command(vha, mcp);
6271 	if (rval != QLA_SUCCESS) {
6272 		ql_dbg(ql_dbg_mbx, vha, 0x114c,
6273 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
6274 		    rval, mcp->mb[0], mcp->mb[1]);
		/* Register value is returned low word in mb[3], high in mb[4]. */
6276 		*data = (mcp->mb[3] | (mcp->mb[4] << 16));
6277 		if (*data == QLA8XXX_BAD_VALUE) {
6279 			 * During soft-reset CAMRAM register reads might
6280 			 * return 0xbad0bad0. So retry for MAX of 2 sec
6281 			 * while reading camram registers.
6283 			if (time_after(jiffies, retry_max_time)) {
6284 				ql_dbg(ql_dbg_mbx, vha, 0x1141,
6285 				    "Failure to read CAMRAM register. "
6286 				    "data=0x%x.\n", *data);
6287 				return QLA_FUNCTION_FAILED;
6292 	ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
/*
 * Restart the NIC-side firmware on ISP83xx (MBC_RESTART_NIC_FIRMWARE).
 * On failure, trigger a firmware dump for post-mortem analysis.
 */
6299 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
6303 	mbx_cmd_t *mcp = &mc;
6304 	struct qla_hw_data *ha = vha->hw;
6306 	if (!IS_QLA83XX(ha))
6307 		return QLA_FUNCTION_FAILED;
6309 	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
6311 	mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
6312 	mcp->out_mb = MBX_0;
6313 	mcp->in_mb = MBX_1|MBX_0;
6314 	mcp->tov = MBX_TOV_SECONDS;
6316 	rval = qla2x00_mailbox_command(vha, mcp);
6318 	if (rval != QLA_SUCCESS) {
6319 		ql_dbg(ql_dbg_mbx, vha, 0x1144,
6320 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
6321 		    rval, mcp->mb[0], mcp->mb[1]);
6322 		qla2xxx_dump_fw(vha);
6324 		ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
/*
 * Issue MBC_SET_ACCESS_CONTROL on ISP8031.  The low byte of `options` is a
 * subcode whose bits select the operation: BIT_2 passes an address range,
 * BIT_5 returns the flash sector size, BIT_3/4 and BIT_6/7 manage
 * flash-lock and driver-lock IDs respectively.
 */
6331 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
6332 	uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
6336 	mbx_cmd_t *mcp = &mc;
6337 	uint8_t subcode = (uint8_t)options;
6338 	struct qla_hw_data *ha = vha->hw;
6340 	if (!IS_QLA8031(ha))
6341 		return QLA_FUNCTION_FAILED;
6343 	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
6345 	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
6346 	mcp->mb[1] = options;
6347 	mcp->out_mb = MBX_1|MBX_0;
6348 	if (subcode & BIT_2) {
6349 		mcp->mb[2] = LSW(start_addr);
6350 		mcp->mb[3] = MSW(start_addr);
6351 		mcp->mb[4] = LSW(end_addr);
6352 		mcp->mb[5] = MSW(end_addr);
6353 		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
6355 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
6356 	if (!(subcode & (BIT_2 | BIT_5)))
6357 		mcp->in_mb |= MBX_4|MBX_3;
6358 	mcp->tov = MBX_TOV_SECONDS;
6360 	rval = qla2x00_mailbox_command(vha, mcp);
6362 	if (rval != QLA_SUCCESS) {
6363 		ql_dbg(ql_dbg_mbx, vha, 0x1147,
6364 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
6365 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
6367 		qla2xxx_dump_fw(vha);
6369 		if (subcode & BIT_5)
6370 			*sector_size = mcp->mb[1];
6371 		else if (subcode & (BIT_6 | BIT_7)) {
6372 			ql_dbg(ql_dbg_mbx, vha, 0x1148,
6373 			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6374 		} else if (subcode & (BIT_3 | BIT_4)) {
6375 			ql_dbg(ql_dbg_mbx, vha, 0x1149,
6376 			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6378 		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
/*
 * Dump MCTP data: DMA `size` words from MCTP RAM at `addr` into the buffer
 * at req_dma using MBC_DUMP_RISC_RAM_EXTENDED with RAM ID 0x40.
 */
6385 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6390 	mbx_cmd_t *mcp = &mc;
6392 	if (!IS_MCTP_CAPABLE(vha->hw))
6393 		return QLA_FUNCTION_FAILED;
6395 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6396 	    "Entered %s.\n", __func__);
6398 	mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6399 	mcp->mb[1] = LSW(addr);
6400 	mcp->mb[2] = MSW(req_dma);
6401 	mcp->mb[3] = LSW(req_dma);
6402 	mcp->mb[4] = MSW(size);
6403 	mcp->mb[5] = LSW(size);
6404 	mcp->mb[6] = MSW(MSD(req_dma));
6405 	mcp->mb[7] = LSW(MSD(req_dma));
6406 	mcp->mb[8] = MSW(addr);
6407 	/* Setting RAM ID to valid */
6408 	/* For MCTP RAM ID is 0x40 */
6409 	mcp->mb[10] = BIT_7 | 0x40;
6411 	mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6415 	mcp->tov = MBX_TOV_SECONDS;
6417 	rval = qla2x00_mailbox_command(vha, mcp);
6419 	if (rval != QLA_SUCCESS) {
6420 		ql_dbg(ql_dbg_mbx, vha, 0x114e,
6421 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6423 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6424 		    "Done %s.\n", __func__);
/*
 * Run D-Port diagnostics (MBC_DPORT_DIAGNOSTICS) and DMA the result into
 * dd_buf.  The caller's buffer is streaming-mapped for the duration of the
 * command and always unmapped before return.  83xx/27xx/28xx only.
 */
6431 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6432 	void *dd_buf, uint size, uint options)
6436 	mbx_cmd_t *mcp = &mc;
6439 	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
6440 	    !IS_QLA28XX(vha->hw))
6441 		return QLA_FUNCTION_FAILED;
6443 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6444 	    "Entered %s.\n", __func__);
6446 	dd_dma = dma_map_single(&vha->hw->pdev->dev,
6447 	    dd_buf, size, DMA_FROM_DEVICE);
6448 	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6449 		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6450 		return QLA_MEMORY_ALLOC_FAILED;
6453 	memset(dd_buf, 0, size);
6455 	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6456 	mcp->mb[1] = options;
6457 	mcp->mb[2] = MSW(LSD(dd_dma));
6458 	mcp->mb[3] = LSW(LSD(dd_dma));
6459 	mcp->mb[6] = MSW(MSD(dd_dma));
6460 	mcp->mb[7] = LSW(MSD(dd_dma));
6462 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6463 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6464 	mcp->buf_size = size;
6465 	mcp->flags = MBX_DMA_IN;
	/* Diagnostics can take longer than a normal mailbox command. */
6466 	mcp->tov = MBX_TOV_SECONDS * 4;
6467 	rval = qla2x00_mailbox_command(vha, mcp);
6469 	if (rval != QLA_SUCCESS) {
6470 		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6472 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6473 		    "Done %s.\n", __func__);
6476 	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6477 	    size, DMA_FROM_DEVICE);
/*
 * V2 variant of D-Port diagnostics: same MBC_DPORT_DIAGNOSTICS flow as
 * qla26xx_dport_diagnostics() but the buffer and options come from a
 * struct qla_dport_diag_v2 and the caller supplies the mbx_cmd_t so the
 * returned mailbox registers remain visible to it.
 */
6483 qla26xx_dport_diagnostics_v2(scsi_qla_host_t *vha,
6484 	struct qla_dport_diag_v2 *dd,  mbx_cmd_t *mcp)
6488 	uint size = sizeof(dd->buf);
6489 	uint16_t options = dd->options;
6491 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6492 	    "Entered %s.\n", __func__);
6494 	dd_dma = dma_map_single(&vha->hw->pdev->dev,
6495 	    dd->buf, size, DMA_FROM_DEVICE);
6496 	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6497 		ql_log(ql_log_warn, vha, 0x1194,
6498 		    "Failed to map dma buffer.\n");
6499 		return QLA_MEMORY_ALLOC_FAILED;
6502 	memset(dd->buf, 0, size);
6504 	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6505 	mcp->mb[1] = options;
6506 	mcp->mb[2] = MSW(LSD(dd_dma));
6507 	mcp->mb[3] = LSW(LSD(dd_dma));
6508 	mcp->mb[6] = MSW(MSD(dd_dma));
6509 	mcp->mb[7] = LSW(MSD(dd_dma));
6511 	mcp->out_mb = MBX_8 | MBX_7 | MBX_6 | MBX_3 | MBX_2 | MBX_1 | MBX_0;
6512 	mcp->in_mb = MBX_3 | MBX_2 | MBX_1 | MBX_0;
6513 	mcp->buf_size = size;
6514 	mcp->flags = MBX_DMA_IN;
	/* Diagnostics can take longer than a normal mailbox command. */
6515 	mcp->tov = MBX_TOV_SECONDS * 4;
6516 	rval = qla2x00_mailbox_command(vha, mcp);
6518 	if (rval != QLA_SUCCESS) {
6519 		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6521 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6522 		    "Done %s.\n", __func__);
6525 	dma_unmap_single(&vha->hw->pdev->dev, dd_dma, size, DMA_FROM_DEVICE);
/*
 * Completion callback for async mailbox SRBs: record the result and wake
 * the waiter in qla24xx_send_mb_cmd().
 */
6530 static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
6532 	sp->u.iocb_cmd.u.mbx.rc = res;
6534 	complete(&sp->u.iocb_cmd.u.mbx.comp);
6535 	/* don't free sp here. Let the caller do the free */
6539  * This mailbox uses the iocb interface to send MB command.
6540  * This allows non-critial (non chip setup) command to go
/*
 * Send a mailbox command through the IOCB (SRB) path instead of the direct
 * mailbox registers: allocate an SRB, submit it, sleep on its completion,
 * and copy the returned mailbox registers back into mcp->mb.
 */
6543 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6545 	int rval = QLA_FUNCTION_FAILED;
6549 	if (!vha->hw->flags.fw_started)
6553 	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6557 	c = &sp->u.iocb_cmd;
6558 	init_completion(&c->u.mbx.comp);
6560 	sp->type = SRB_MB_IOCB;
6561 	sp->name = mb_to_str(mcp->mb[0]);
6562 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
6563 			      qla2x00_async_mb_sp_done);
6565 	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6567 	rval = qla2x00_start_sp(sp);
6568 	if (rval != QLA_SUCCESS) {
6569 		ql_dbg(ql_dbg_mbx, vha, 0x1018,
6570 		    "%s: %s Failed submission. %x.\n",
6571 		    __func__, sp->name, rval);
6575 	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6576 	    sp->name, sp->handle);
	/* Block until qla2x00_async_mb_sp_done() fires. */
6578 	wait_for_completion(&c->u.mbx.comp);
6579 	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6583 	case QLA_FUNCTION_TIMEOUT:
6584 		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6585 		    __func__, sp->name, rval);
6588 		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6589 		    __func__, sp->name);
6592 		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6593 		    __func__, sp->name, rval);
	/* Drop the reference taken by qla2x00_get_sp(). */
6599 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
6606  * NOTE: Do not call this routine from DPC thread
/*
 * Synchronously fetch and parse a remote port's database entry:
 * MBC_GET_PORT_DATABASE via the sleeping IOCB mailbox path, then
 * __qla24xx_parse_gpdb() on the DMA'd port_database_24xx.
 */
6608 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6610 	int rval = QLA_FUNCTION_FAILED;
6612 	struct port_database_24xx *pd;
6613 	struct qla_hw_data *ha = vha->hw;
6616 	if (!vha->hw->flags.fw_started)
6619 	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6621 		ql_log(ql_log_warn, vha, 0xd047,
6622 		    "Failed to allocate port database structure.\n");
6626 	memset(&mc, 0, sizeof(mc));
6627 	mc.mb[0] = MBC_GET_PORT_DATABASE;
6628 	mc.mb[1] = fcport->loop_id;
6629 	mc.mb[2] = MSW(pd_dma);
6630 	mc.mb[3] = LSW(pd_dma);
6631 	mc.mb[6] = MSW(MSD(pd_dma));
6632 	mc.mb[7] = LSW(MSD(pd_dma));
6633 	mc.mb[9] = vha->vp_idx;
6636 	rval = qla24xx_send_mb_cmd(vha, &mc);
6637 	if (rval != QLA_SUCCESS) {
6638 		ql_dbg(ql_dbg_mbx, vha, 0x1193,
6639 		    "%s: %8phC fail\n", __func__, fcport->port_name);
6643 	rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6645 	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6646 	    __func__, fcport->port_name);
6650 	dma_pool_free(ha->s_dma_pool, pd, pd_dma);
/*
 * Parse a port_database_24xx entry into the fc_port: verify the login
 * state, copy names and port_id, and derive port_type / class-of-service /
 * confirmed-completion support from the PRLI service parameters.
 * NVMe targets keep their login state in the high nibble.
 */
6655 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6656 	struct port_database_24xx *pd)
6658 	int rval = QLA_SUCCESS;
6660 	u8 current_login_state, last_login_state;
6662 	if (NVME_TARGET(vha->hw, fcport)) {
6663 		current_login_state = pd->current_login_state >> 4;
6664 		last_login_state = pd->last_login_state >> 4;
6666 		current_login_state = pd->current_login_state & 0xf;
6667 		last_login_state = pd->last_login_state & 0xf;
6670 	/* Check for logged in state. */
6671 	if (current_login_state != PDS_PRLI_COMPLETE) {
6672 		ql_dbg(ql_dbg_mbx, vha, 0x119a,
6673 		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6674 		    current_login_state, last_login_state, fcport->loop_id);
6675 		rval = QLA_FUNCTION_FAILED;
	/* A name mismatch means the remote port changed under us. */
6679 	if (fcport->loop_id == FC_NO_LOOP_ID ||
6680 	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6681 	     memcmp(fcport->port_name, pd->port_name, 8))) {
6682 		/* We lost the device mid way. */
6683 		rval = QLA_NOT_LOGGED_IN;
6687 	/* Names are little-endian. */
6688 	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6689 	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6691 	/* Get port_id of device. */
6692 	fcport->d_id.b.domain = pd->port_id[0];
6693 	fcport->d_id.b.area = pd->port_id[1];
6694 	fcport->d_id.b.al_pa = pd->port_id[2];
6695 	fcport->d_id.b.rsvd_1 = 0;
6697 	ql_dbg(ql_dbg_disc, vha, 0x2062,
6698 	    "%8phC SVC Param w3 %02x%02x",
6700 	    pd->prli_svc_param_word_3[1],
6701 	    pd->prli_svc_param_word_3[0]);
6703 	if (NVME_TARGET(vha->hw, fcport)) {
6704 		fcport->port_type = FCT_NVME;
		/* Cleared bits in SVC param word 3 indicate the role. */
6705 		if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
6706 			fcport->port_type |= FCT_NVME_INITIATOR;
6707 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6708 			fcport->port_type |= FCT_NVME_TARGET;
6709 		if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
6710 			fcport->port_type |= FCT_NVME_DISCOVERY;
6712 		/* If not target must be initiator or unknown type. */
6713 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6714 			fcport->port_type = FCT_INITIATOR;
6716 			fcport->port_type = FCT_TARGET;
6718 	/* Passback COS information. */
6719 	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6720 		FC_COS_CLASS2 : FC_COS_CLASS3;
6722 	if (pd->prli_svc_param_word_3[0] & BIT_7) {
6723 		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6724 		fcport->conf_compl_supported = 1;
6732 * qla24xx_gidlist_wait
6733 * NOTE: don't call this routine from DPC thread.
/*
 * qla24xx_gidlist_wait - issue MBC_GET_ID_LIST and wait for completion.
 *
 * @id_list/@id_list_dma: caller-supplied DMA buffer that receives the list.
 * @entries: out - number of entries, as reported by firmware in mb[1].
 *
 * Bails out early when firmware is not started. Must not be called from
 * the DPC thread (it waits via qla24xx_send_mb_cmd).
 */
6735 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6736 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6738 int rval = QLA_FUNCTION_FAILED;
6741 if (!vha->hw->flags.fw_started)
6744 memset(&mc, 0, sizeof(mc));
6745 mc.mb[0] = MBC_GET_ID_LIST;
/* 64-bit DMA address of the response buffer, split across mb[2,3,6,7]. */
6746 mc.mb[2] = MSW(id_list_dma);
6747 mc.mb[3] = LSW(id_list_dma);
6748 mc.mb[6] = MSW(MSD(id_list_dma));
6749 mc.mb[7] = LSW(MSD(id_list_dma));
/* mb[9]: virtual-port index (NPIV). */
6751 mc.mb[9] = vha->vp_idx;
6753 rval = qla24xx_send_mb_cmd(vha, &mc);
6754 if (rval != QLA_SUCCESS) {
6755 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6756 "%s: fail\n", __func__);
6758 *entries = mc.mb[1];
6759 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6760 "%s: done\n", __func__);
/*
 * qla27xx_set_zio_threshold - program the firmware ZIO threshold.
 *
 * Issues MBC_GET_SET_ZIO_THRESHOLD in "set" mode; @value is the new
 * threshold (carried in an out mailbox register - assignment not visible
 * in this view; presumably mb[1]/mb[2] - confirm against full source).
 * Returns the qla2x00_mailbox_command() status.
 */
6766 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6770 mbx_cmd_t *mcp = &mc;
6772 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6773 "Entered %s\n", __func__);
6775 memset(mcp->mb, 0 , sizeof(mcp->mb));
6776 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6779 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6780 mcp->in_mb = MBX_2 | MBX_0;
6781 mcp->tov = MBX_TOV_SECONDS;
6784 rval = qla2x00_mailbox_command(vha, mcp);
6786 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6787 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
/*
 * qla27xx_get_zio_threshold - read the current firmware ZIO threshold.
 *
 * Issues MBC_GET_SET_ZIO_THRESHOLD in "get" mode (only mb[1|0] out).
 * On success the threshold is presumably copied from a returned mailbox
 * register into *value (store not visible in this view - confirm against
 * full source). Returns the qla2x00_mailbox_command() status.
 */
6792 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6796 mbx_cmd_t *mcp = &mc;
6798 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6799 "Entered %s\n", __func__);
6801 memset(mcp->mb, 0, sizeof(mcp->mb));
6802 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6804 mcp->out_mb = MBX_1 | MBX_0;
6805 mcp->in_mb = MBX_2 | MBX_0;
6806 mcp->tov = MBX_TOV_SECONDS;
6809 rval = qla2x00_mailbox_command(vha, mcp);
6810 if (rval == QLA_SUCCESS)
6813 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6814 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
/*
 * qla2x00_read_sfp_dev - read the SFP transceiver EEPROM into ha->sfp_data,
 * optionally copying up to @count bytes into the caller's @buf.
 *
 * Reads SFP_DEV_SIZE bytes in SFP_BLOCK_SIZE chunks via qla2x00_read_sfp()
 * using the pre-allocated ha->sfp_data DMA buffer. @buf may be NULL when the
 * caller only wants ha->sfp_data populated.
 */
6820 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6822 struct qla_hw_data *ha = vha->hw;
6823 uint16_t iter, addr, offset;
6824 dma_addr_t phys_addr;
6828 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6830 phys_addr = ha->sfp_data_dma;
6831 sfp_data = ha->sfp_data;
6834 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6836 /* Skip to next device address. */
6841 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6842 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6843 if (rval != QLA_SUCCESS) {
6844 ql_log(ql_log_warn, vha, 0x706d,
6845 "Unable to read SFP data (%x/%x/%x).\n", rval,
/* Copy this block out only while the caller's buffer has room. */
6851 if (buf && (c < count)) {
/* Clamp the final copy to whatever space remains in @buf. */
6854 if ((count - c) >= SFP_BLOCK_SIZE)
6855 sz = SFP_BLOCK_SIZE;
6859 memcpy(buf, sfp_data, sz);
6860 buf += SFP_BLOCK_SIZE;
/* Advance DMA address, CPU pointer and device offset to the next block. */
6863 phys_addr += SFP_BLOCK_SIZE;
6864 sfp_data += SFP_BLOCK_SIZE;
6865 offset += SFP_BLOCK_SIZE;
/*
 * qla24xx_res_count_wait - issue MBC_GET_RESOURCE_COUNTS and wait.
 *
 * Copies up to SIZEOF_IOCB_MB_REG bytes of the returned mailbox registers
 * into @out_mb (clamped when @out_mb_sz is larger). Bails out early when
 * firmware is not started.
 */
6871 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6872 uint16_t *out_mb, int out_mb_sz)
6874 int rval = QLA_FUNCTION_FAILED;
6877 if (!vha->hw->flags.fw_started)
6880 memset(&mc, 0, sizeof(mc));
6881 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6883 rval = qla24xx_send_mb_cmd(vha, &mc);
6884 if (rval != QLA_SUCCESS) {
6885 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6886 "%s: fail\n", __func__);
/* Never copy more than the caller asked for, nor more than we have. */
6888 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6889 memcpy(out_mb, mc.mb, out_mb_sz);
6891 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6893 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6894 "%s: done\n", __func__);
/*
 * qla28xx_secure_flash_update - issue MBC_SECURE_FLASH_UPDATE.
 *
 * @opts/@region: update options and target flash region.
 * @len: byte length of the update (split MSW/LSW into mb[3]/mb[4]).
 * @sfub_dma_addr: 64-bit DMA address of the Secure Flash Update Block,
 *	split across mb[5..8]; @sfub_len goes in mb[9].
 *
 * On failure, logs rval plus the returned mb[0..2] status registers.
 */
6900 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
6901 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
6906 mbx_cmd_t *mcp = &mc;
6908 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
6910 mcp->mb[2] = region;
6911 mcp->mb[3] = MSW(len);
6912 mcp->mb[4] = LSW(len);
6913 mcp->mb[5] = MSW(sfub_dma_addr);
6914 mcp->mb[6] = LSW(sfub_dma_addr);
6915 mcp->mb[7] = MSW(MSD(sfub_dma_addr));
6916 mcp->mb[8] = LSW(MSD(sfub_dma_addr));
6917 mcp->mb[9] = sfub_len;
6919 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6920 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6921 mcp->tov = MBX_TOV_SECONDS;
6923 rval = qla2x00_mailbox_command(vha, mcp);
6925 if (rval != QLA_SUCCESS) {
6926 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
6927 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
/*
 * qla2xxx_write_remote_register - write a 32-bit value to a remote
 * (other-function) register via MBC_WRITE_REMOTE_REG.
 *
 * @addr: register address, split LSW/MSW into mb[1]/mb[2].
 * @data: value to write, split LSW/MSW into mb[3]/mb[4].
 */
6934 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6939 mbx_cmd_t *mcp = &mc;
6941 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6942 "Entered %s.\n", __func__);
6944 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6945 mcp->mb[1] = LSW(addr);
6946 mcp->mb[2] = MSW(addr);
6947 mcp->mb[3] = LSW(data);
6948 mcp->mb[4] = MSW(data);
6949 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6950 mcp->in_mb = MBX_1|MBX_0;
6951 mcp->tov = MBX_TOV_SECONDS;
6953 rval = qla2x00_mailbox_command(vha, mcp);
6955 if (rval != QLA_SUCCESS) {
6956 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6957 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6960 "Done %s.\n", __func__);
/*
 * qla2xxx_read_remote_register - read a 32-bit value from a remote
 * (other-function) register via MBC_READ_REMOTE_REG.
 *
 * @addr: register address, split LSW/MSW into mb[1]/mb[2].
 * @data: out - reassembled from returned mb[4] (high) and mb[3] (low).
 *	Note: *data is written before the status check, so it holds
 *	whatever mb[3]/mb[4] contain even on failure.
 */
6966 int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6971 mbx_cmd_t *mcp = &mc;
6973 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6974 "Entered %s.\n", __func__);
6976 mcp->mb[0] = MBC_READ_REMOTE_REG;
6977 mcp->mb[1] = LSW(addr);
6978 mcp->mb[2] = MSW(addr);
6979 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6980 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6981 mcp->tov = MBX_TOV_SECONDS;
6983 rval = qla2x00_mailbox_command(vha, mcp);
6985 *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
6987 if (rval != QLA_SUCCESS) {
6988 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6989 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6991 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6992 "Done %s.\n", __func__);
/*
 * ql26xx_led_config - get or set the FC LED configuration via
 * MBC_SET_GET_FC_LED_CONFIG (ISP2031/27xx/28xx only).
 *
 * @options: BIT_0 set = "set" mode, clear = "get" mode. In set mode,
 *	BIT_1/BIT_2/BIT_3 select which LED registers to program.
 * @led: three-element array; note the mapping is led[2]<->mb[2],
 *	led[0]<->mb[3], led[1]<->mb[4] in both directions.
 */
6999 ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
7001 struct qla_hw_data *ha = vha->hw;
7003 mbx_cmd_t *mcp = &mc;
/* LED config mailbox only exists on 2031/27xx/28xx parts. */
7006 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
7007 return QLA_FUNCTION_FAILED;
7009 ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
7012 mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
7013 mcp->mb[1] = options;
7014 mcp->out_mb = MBX_1|MBX_0;
7015 mcp->in_mb = MBX_1|MBX_0;
/* Set mode: stage only the LED values selected by BIT_1..BIT_3. */
7016 if (options & BIT_0) {
7017 if (options & BIT_1) {
7018 mcp->mb[2] = led[2];
7019 mcp->out_mb |= MBX_2;
7021 if (options & BIT_2) {
7022 mcp->mb[3] = led[0];
7023 mcp->out_mb |= MBX_3;
7025 if (options & BIT_3) {
7026 mcp->mb[4] = led[1];
7027 mcp->out_mb |= MBX_4;
/* Get mode: expect all three LED registers back. */
7030 mcp->in_mb |= MBX_4|MBX_3|MBX_2;
7032 mcp->tov = MBX_TOV_SECONDS;
7034 rval = qla2x00_mailbox_command(vha, mcp);
7036 ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
7037 __func__, rval, mcp->mb[0], mcp->mb[1]);
/* On a successful "set", stop any driver-driven beacon blinking. */
7041 if (options & BIT_0) {
7042 ha->beacon_blink_led = 0;
7043 ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
7045 led[2] = mcp->mb[2];
7046 led[0] = mcp->mb[3];
7047 led[1] = mcp->mb[4];
7048 ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
7049 __func__, led[0], led[1], led[2]);
7056 * qla_no_op_mb(): This MB is used to check if FW is still alive and
7057 * able to generate an interrupt. Otherwise, a timeout will trigger
7059 * @vha: host adapter pointer
/*
 * qla_no_op_mb - send mailbox command 0 (no-op) to verify the firmware
 * is alive and can raise a completion interrupt; failures are logged,
 * not returned (void).
 */
7062 void qla_no_op_mb(struct scsi_qla_host *vha)
7065 mbx_cmd_t *mcp = &mc;
7068 memset(&mc, 0, sizeof(mc));
7069 mcp->mb[0] = 0; // noop cmd= 0
7070 mcp->out_mb = MBX_0;
7074 rval = qla2x00_mailbox_command(vha, mcp);
7077 ql_dbg(ql_dbg_async, vha, 0x7071,
7078 "Failed %s %x\n", __func__, rval);
7082 int qla_mailbox_passthru(scsi_qla_host_t *vha,
7083 uint16_t *mbx_in, uint16_t *mbx_out)
7086 mbx_cmd_t *mcp = &mc;
7089 memset(&mc, 0, sizeof(mc));
7090 /* Receiving all 32 register's contents */
7091 memcpy(&mcp->mb, (char *)mbx_in, (32 * sizeof(uint16_t)));
7093 mcp->out_mb = 0xFFFFFFFF;
7094 mcp->in_mb = 0xFFFFFFFF;
7096 mcp->tov = MBX_TOV_SECONDS;
7100 rval = qla2x00_mailbox_command(vha, mcp);
7102 if (rval != QLA_SUCCESS) {
7103 ql_dbg(ql_dbg_mbx, vha, 0xf0a2,
7104 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
7106 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xf0a3, "Done %s.\n",
7108 /* passing all 32 register's contents */
7109 memcpy(mbx_out, &mcp->mb, 32 * sizeof(uint16_t));