1 // SPDX-License-Identifier: GPL-2.0-only
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
7 #include "qla_target.h"
9 #include <linux/delay.h>
10 #include <linux/gfp.h>
13 #define IS_PPCARCH true
15 #define IS_PPCARCH false
/* Table mapping mailbox command opcodes to short printable names for tracing. */
18 static struct mb_cmd_name {
22 {MBC_GET_PORT_DATABASE, "GPDB"},
23 {MBC_GET_ID_LIST, "GIDList"},
24 {MBC_GET_LINK_PRIV_STATS, "Stats"},
25 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
/*
 * Translate a mailbox command opcode into its printable name by scanning
 * the mb_str[] table above.
 */
28 static const char *mb_to_str(uint16_t cmd)
31 struct mb_cmd_name *e;
33 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
/*
 * Mailbox commands that remain permitted while an ISP abort is pending
 * (consulted by is_rom_cmd() below before rejecting a command).
 */
41 static struct rom_cmd {
45 { MBC_EXECUTE_FIRMWARE },
46 { MBC_READ_RAM_WORD },
47 { MBC_MAILBOX_REGISTER_TEST },
48 { MBC_VERIFY_CHECKSUM },
49 { MBC_GET_FIRMWARE_VERSION },
50 { MBC_LOAD_RISC_RAM },
51 { MBC_DUMP_RISC_RAM },
52 { MBC_LOAD_RISC_RAM_EXTENDED },
53 { MBC_DUMP_RISC_RAM_EXTENDED },
54 { MBC_WRITE_RAM_WORD_EXTENDED },
55 { MBC_READ_RAM_EXTENDED },
56 { MBC_GET_RESOURCE_COUNTS },
57 { MBC_SET_FIRMWARE_OPTION },
58 { MBC_MID_INITIALIZE_FIRMWARE },
59 { MBC_GET_FIRMWARE_STATE },
60 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
61 { MBC_GET_RETRY_COUNT },
62 { MBC_TRACE_CONTROL },
63 { MBC_INITIALIZE_MULTIQ },
64 { MBC_IOCB_COMMAND_A64 },
65 { MBC_GET_ADAPTER_LOOP_ID },
67 { MBC_SET_RNID_PARAMS },
68 { MBC_GET_RNID_PARAMS },
69 { MBC_GET_SET_ZIO_THRESHOLD },
/*
 * Return nonzero when @cmd appears in the rom_cmds[] whitelist, i.e. the
 * command may be issued even while an ISP abort is pending.
 */
72 static int is_rom_cmd(uint16_t cmd)
77 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
87 * qla2x00_mailbox_command
88 * Issues a mailbox command and waits for completion.
91 * ha = adapter block pointer.
92 * mcp = driver internal mbx struct pointer.
95 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
98 * 0 : QLA_SUCCESS = cmd performed success
99 * 1 : QLA_FUNCTION_FAILED (error encountered)
100 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
/*
 * NOTE(review): every "&reg" in this function had been mangled to the
 * single character '(R)' (an HTML-entity "&reg" rendering artifact);
 * restored to "&reg" below.  No other code tokens were changed.
 */
106 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
109 unsigned long flags = 0;
111 uint8_t abort_active, eeh_delay;
113 uint16_t command = 0;
115 __le16 __iomem *optr;
118 unsigned long wait_time;
119 struct qla_hw_data *ha = vha->hw;
120 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
124 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
/* Bail out early if the PCI channel or device is already dead. */
126 if (ha->pdev->error_state == pci_channel_io_perm_failure) {
127 ql_log(ql_log_warn, vha, 0x1001,
128 "PCI channel failed permanently, exiting.\n");
129 return QLA_FUNCTION_TIMEOUT;
132 if (vha->device_flags & DFLG_DEV_FAILED) {
133 ql_log(ql_log_warn, vha, 0x1002,
134 "Device in failed state, exiting.\n");
135 return QLA_FUNCTION_TIMEOUT;
138 /* if PCI error, then avoid mbx processing.*/
139 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
140 test_bit(UNLOADING, &base_vha->dpc_flags)) {
141 ql_log(ql_log_warn, vha, 0xd04e,
142 "PCI error, exiting.\n");
143 return QLA_FUNCTION_TIMEOUT;
147 io_lock_on = base_vha->flags.init_done;
150 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
/* Snapshot chip_reset so a reset occurring mid-command can be detected. */
151 chip_reset = ha->chip_reset;
153 if (ha->flags.pci_channel_io_perm_failure) {
154 ql_log(ql_log_warn, vha, 0x1003,
155 "Perm failure on EEH timeout MBX, exiting.\n");
156 return QLA_FUNCTION_TIMEOUT;
159 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
160 /* Setting Link-Down error */
161 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
162 ql_log(ql_log_warn, vha, 0x1004,
163 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
164 return QLA_FUNCTION_TIMEOUT;
167 /* check if ISP abort is active and return cmd with timeout */
168 if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
169 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
170 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
171 !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) {
172 ql_log(ql_log_info, vha, 0x1005,
173 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
175 return QLA_FUNCTION_TIMEOUT;
/* Stage 1: serialize — only one mailbox command may be in flight. */
178 atomic_inc(&ha->num_pend_mbx_stage1);
180 * Wait for active mailbox commands to finish by waiting at most tov
181 * seconds. This is to serialize actual issuing of mailbox cmds during
182 * non ISP abort time.
184 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
185 /* Timeout occurred. Return error. */
186 ql_log(ql_log_warn, vha, 0xd035,
187 "Cmd access timeout, cmd=0x%x, Exiting.\n",
190 atomic_dec(&ha->num_pend_mbx_stage1);
191 return QLA_FUNCTION_TIMEOUT;
193 atomic_dec(&ha->num_pend_mbx_stage1);
194 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
195 ha->flags.eeh_busy) {
196 ql_log(ql_log_warn, vha, 0xd035,
197 "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
198 ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
204 /* Save mailbox command for debug */
207 ql_dbg(ql_dbg_mbx, vha, 0x1006,
208 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
210 spin_lock_irqsave(&ha->hardware_lock, flags);
212 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
213 ha->flags.mbox_busy) {
215 spin_unlock_irqrestore(&ha->hardware_lock, flags);
218 ha->flags.mbox_busy = 1;
220 /* Load mailbox registers. */
222 optr = &reg->isp82.mailbox_in[0];
223 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
224 optr = &reg->isp24.mailbox0;
226 optr = MAILBOX_REG(ha, &reg->isp, 0);
229 command = mcp->mb[0];
230 mboxes = mcp->out_mb;
232 ql_dbg(ql_dbg_mbx, vha, 0x1111,
233 "Mailbox registers (OUT):\n");
234 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
235 if (IS_QLA2200(ha) && cnt == 8)
236 optr = MAILBOX_REG(ha, &reg->isp, 8);
237 if (mboxes & BIT_0) {
238 ql_dbg(ql_dbg_mbx, vha, 0x1112,
239 "mbox[%d]<-0x%04x\n", cnt, *iptr);
240 wrt_reg_word(optr, *iptr);
242 wrt_reg_word(optr, 0);
250 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
251 "I/O Address = %p.\n", optr);
253 /* Issue set host interrupt command to send cmd out. */
254 ha->flags.mbox_int = 0;
255 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
257 /* Unlock mbx registers and wait for interrupt */
258 ql_dbg(ql_dbg_mbx, vha, 0x100f,
259 "Going to unlock irq & waiting for interrupts. "
260 "jiffies=%lx.\n", jiffies);
262 /* Wait for mbx cmd completion until timeout */
263 atomic_inc(&ha->num_pend_mbx_stage2);
264 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
265 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
268 wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
269 else if (IS_FWI2_CAPABLE(ha))
270 wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
272 wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
273 spin_unlock_irqrestore(&ha->hardware_lock, flags);
276 atomic_inc(&ha->num_pend_mbx_stage3);
277 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
279 ql_dbg(ql_dbg_mbx, vha, 0x117a,
280 "cmd=%x Timeout.\n", command);
281 spin_lock_irqsave(&ha->hardware_lock, flags);
282 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
283 spin_unlock_irqrestore(&ha->hardware_lock, flags);
285 if (chip_reset != ha->chip_reset) {
286 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
288 spin_lock_irqsave(&ha->hardware_lock, flags);
289 ha->flags.mbox_busy = 0;
290 spin_unlock_irqrestore(&ha->hardware_lock,
292 atomic_dec(&ha->num_pend_mbx_stage2);
293 atomic_dec(&ha->num_pend_mbx_stage3);
297 } else if (ha->flags.purge_mbox ||
298 chip_reset != ha->chip_reset) {
299 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
301 spin_lock_irqsave(&ha->hardware_lock, flags);
302 ha->flags.mbox_busy = 0;
303 spin_unlock_irqrestore(&ha->hardware_lock, flags);
304 atomic_dec(&ha->num_pend_mbx_stage2);
305 atomic_dec(&ha->num_pend_mbx_stage3);
309 atomic_dec(&ha->num_pend_mbx_stage3);
311 if (time_after(jiffies, wait_time + 5 * HZ))
312 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
313 command, jiffies_to_msecs(jiffies - wait_time));
/* Polling path: no interrupt delivery; spin on mbox_int with qla2x00_poll(). */
315 ql_dbg(ql_dbg_mbx, vha, 0x1011,
316 "Cmd=%x Polling Mode.\n", command);
318 if (IS_P3P_TYPE(ha)) {
319 if (rd_reg_dword(&reg->isp82.hint) &
320 HINT_MBX_INT_PENDING) {
321 ha->flags.mbox_busy = 0;
322 spin_unlock_irqrestore(&ha->hardware_lock,
324 atomic_dec(&ha->num_pend_mbx_stage2);
325 ql_dbg(ql_dbg_mbx, vha, 0x1012,
326 "Pending mailbox timeout, exiting.\n");
328 rval = QLA_FUNCTION_TIMEOUT;
331 wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
332 } else if (IS_FWI2_CAPABLE(ha))
333 wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
335 wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
336 spin_unlock_irqrestore(&ha->hardware_lock, flags);
338 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
339 while (!ha->flags.mbox_int) {
340 if (ha->flags.purge_mbox ||
341 chip_reset != ha->chip_reset) {
342 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
344 spin_lock_irqsave(&ha->hardware_lock, flags);
345 ha->flags.mbox_busy = 0;
346 spin_unlock_irqrestore(&ha->hardware_lock,
348 atomic_dec(&ha->num_pend_mbx_stage2);
353 if (time_after(jiffies, wait_time))
356 /* Check for pending interrupts. */
357 qla2x00_poll(ha->rsp_q_map[0]);
359 if (!ha->flags.mbox_int &&
361 command == MBC_LOAD_RISC_RAM_EXTENDED))
364 ql_dbg(ql_dbg_mbx, vha, 0x1013,
366 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
368 atomic_dec(&ha->num_pend_mbx_stage2);
370 /* Check whether we timed out */
371 if (ha->flags.mbox_int) {
374 ql_dbg(ql_dbg_mbx, vha, 0x1014,
375 "Cmd=%x completed.\n", command);
377 /* Got interrupt. Clear the flag. */
378 ha->flags.mbox_int = 0;
379 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
381 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
382 spin_lock_irqsave(&ha->hardware_lock, flags);
383 ha->flags.mbox_busy = 0;
384 spin_unlock_irqrestore(&ha->hardware_lock, flags);
386 /* Setting Link-Down error */
387 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
389 rval = QLA_FUNCTION_FAILED;
390 ql_log(ql_log_warn, vha, 0xd048,
391 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
395 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
396 ql_dbg(ql_dbg_mbx, vha, 0x11ff,
397 "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
398 MBS_COMMAND_COMPLETE);
399 rval = QLA_FUNCTION_FAILED;
402 /* Load return mailbox registers. */
404 iptr = (uint16_t *)&ha->mailbox_out[0];
407 ql_dbg(ql_dbg_mbx, vha, 0x1113,
408 "Mailbox registers (IN):\n");
409 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
410 if (mboxes & BIT_0) {
412 ql_dbg(ql_dbg_mbx, vha, 0x1114,
413 "mbox[%d]->0x%04x\n", cnt, *iptr2);
/* Timeout path: dump register state for the failed command. */
423 uint32_t ictrl, host_status, hccr;
426 if (IS_FWI2_CAPABLE(ha)) {
427 mb[0] = rd_reg_word(&reg->isp24.mailbox0);
428 mb[1] = rd_reg_word(&reg->isp24.mailbox1);
429 mb[2] = rd_reg_word(&reg->isp24.mailbox2);
430 mb[3] = rd_reg_word(&reg->isp24.mailbox3);
431 mb[7] = rd_reg_word(&reg->isp24.mailbox7);
432 ictrl = rd_reg_dword(&reg->isp24.ictrl);
433 host_status = rd_reg_dword(&reg->isp24.host_status);
434 hccr = rd_reg_dword(&reg->isp24.hccr);
436 ql_log(ql_log_warn, vha, 0xd04c,
437 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
438 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
439 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
440 mb[7], host_status, hccr);
444 mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
445 ictrl = rd_reg_word(&reg->isp.ictrl);
446 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
447 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
448 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
451 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
453 /* Capture FW dump only, if PCI device active */
454 if (!pci_channel_offline(vha->hw->pdev)) {
455 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
456 if (w == 0xffff || ictrl == 0xffffffff ||
457 (chip_reset != ha->chip_reset)) {
458 /* This is special case if there is unload
459 * of driver happening and if PCI device go
460 * into bad state due to PCI error condition
461 * then only PCI ERR flag would be set.
462 * we will do premature exit for above case.
464 spin_lock_irqsave(&ha->hardware_lock, flags);
465 ha->flags.mbox_busy = 0;
466 spin_unlock_irqrestore(&ha->hardware_lock,
468 rval = QLA_FUNCTION_TIMEOUT;
472 /* Attempt to capture firmware dump for further
473 * analysis of the current firmware state. We do not
474 * need to do this if we are intentionally generating
477 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
478 qla2xxx_dump_fw(vha);
479 rval = QLA_FUNCTION_TIMEOUT;
482 spin_lock_irqsave(&ha->hardware_lock, flags);
483 ha->flags.mbox_busy = 0;
484 spin_unlock_irqrestore(&ha->hardware_lock, flags);
489 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
490 ql_dbg(ql_dbg_mbx, vha, 0x101a,
491 "Checking for additional resp interrupt.\n");
493 /* polling mode for non isp_abort commands. */
494 qla2x00_poll(ha->rsp_q_map[0]);
497 if (rval == QLA_FUNCTION_TIMEOUT &&
498 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
499 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
500 ha->flags.eeh_busy) {
501 /* not in dpc. schedule it for dpc to take over. */
502 ql_dbg(ql_dbg_mbx, vha, 0x101b,
503 "Timeout, schedule isp_abort_needed.\n");
505 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
506 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
507 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
508 if (IS_QLA82XX(ha)) {
509 ql_dbg(ql_dbg_mbx, vha, 0x112a,
510 "disabling pause transmit on port "
513 QLA82XX_CRB_NIU + 0x98,
514 CRB_NIU_XG_PAUSE_CTL_P0|
515 CRB_NIU_XG_PAUSE_CTL_P1);
517 ql_log(ql_log_info, base_vha, 0x101c,
518 "Mailbox cmd timeout occurred, cmd=0x%x, "
519 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
520 "abort.\n", command, mcp->mb[0],
523 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
524 qla2xxx_wake_dpc(vha);
526 } else if (current == ha->dpc_thread) {
527 /* call abort directly since we are in the DPC thread */
528 ql_dbg(ql_dbg_mbx, vha, 0x101d,
529 "Timeout, calling abort_isp.\n");
531 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
532 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
533 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
534 if (IS_QLA82XX(ha)) {
535 ql_dbg(ql_dbg_mbx, vha, 0x112b,
536 "disabling pause transmit on port "
539 QLA82XX_CRB_NIU + 0x98,
540 CRB_NIU_XG_PAUSE_CTL_P0|
541 CRB_NIU_XG_PAUSE_CTL_P1);
543 ql_log(ql_log_info, base_vha, 0x101e,
544 "Mailbox cmd timeout occurred, cmd=0x%x, "
545 "mb[0]=0x%x. Scheduling ISP abort ",
546 command, mcp->mb[0]);
548 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
549 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
550 /* Allow next mbx cmd to come in. */
551 complete(&ha->mbx_cmd_comp);
552 if (ha->isp_ops->abort_isp(vha) &&
553 !ha->flags.eeh_busy) {
554 /* Failed. retry later. */
555 set_bit(ISP_ABORT_NEEDED,
558 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
559 ql_dbg(ql_dbg_mbx, vha, 0x101f,
560 "Finished abort_isp.\n");
567 /* Allow next mbx cmd to come in. */
568 complete(&ha->mbx_cmd_comp);
571 if (rval == QLA_ABORTED) {
572 ql_log(ql_log_info, vha, 0xd035,
573 "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
576 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
577 pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
578 dev_name(&ha->pdev->dev), 0x1020+0x800,
582 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
583 if (mboxes & BIT_0) {
584 printk(" mb[%u]=%x", i, mcp->mb[i]);
587 pr_warn(" cmd=%x ****\n", command);
589 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
590 ql_dbg(ql_dbg_mbx, vha, 0x1198,
591 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
592 rd_reg_dword(&reg->isp24.host_status),
593 rd_reg_dword(&reg->isp24.ictrl),
594 rd_reg_dword(&reg->isp24.istatus));
596 ql_dbg(ql_dbg_mbx, vha, 0x1206,
597 "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
598 rd_reg_word(&reg->isp.ctrl_status),
599 rd_reg_word(&reg->isp.ictrl),
600 rd_reg_word(&reg->isp.istatus));
603 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
607 while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
609 * The caller of this mailbox encountered a PCI error.
610 * Hold the thread until PCIE link reset complete to make
611 * sure caller does not unmap dma while recovery is
/*
 * qla2x00_load_ram
 * Copies firmware from a host DMA buffer (@req_dma) into RISC RAM at
 * @risc_addr using MBC_LOAD_RISC_RAM(_EXTENDED).
 * Returns the qla2x00_mailbox_command() status code.
 */
621 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
622 uint32_t risc_code_size)
625 struct qla_hw_data *ha = vha->hw;
627 mbx_cmd_t *mcp = &mc;
629 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
630 "Entered %s.\n", __func__);
/* Extended variant is required when the address needs >16 bits or on FWI2. */
632 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
633 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
634 mcp->mb[8] = MSW(risc_addr);
635 mcp->out_mb = MBX_8|MBX_0;
637 mcp->mb[0] = MBC_LOAD_RISC_RAM;
640 mcp->mb[1] = LSW(risc_addr);
641 mcp->mb[2] = MSW(req_dma);
642 mcp->mb[3] = LSW(req_dma);
643 mcp->mb[6] = MSW(MSD(req_dma));
644 mcp->mb[7] = LSW(MSD(req_dma));
645 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
/* FWI2 takes a 32-bit length (mb4/mb5); legacy chips a 16-bit one (mb4). */
646 if (IS_FWI2_CAPABLE(ha)) {
647 mcp->mb[4] = MSW(risc_code_size);
648 mcp->mb[5] = LSW(risc_code_size);
649 mcp->out_mb |= MBX_5|MBX_4;
651 mcp->mb[4] = LSW(risc_code_size);
652 mcp->out_mb |= MBX_4;
655 mcp->in_mb = MBX_1|MBX_0;
656 mcp->tov = MBX_TOV_SECONDS;
658 rval = qla2x00_mailbox_command(vha, mcp);
660 if (rval != QLA_SUCCESS) {
661 ql_dbg(ql_dbg_mbx, vha, 0x1023,
662 "Failed=%x mb[0]=%x mb[1]=%x.\n",
663 rval, mcp->mb[0], mcp->mb[1]);
666 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
667 "Done %s.\n", __func__);
673 #define NVME_ENABLE_FLAG BIT_3
674 #define EDIF_HW_SUPPORT BIT_10
678 * Start adapter firmware.
681 * ha = adapter block pointer.
682 * TARGET_QUEUE_LOCK must be released.
683 * ADAPTER_STATE_LOCK must be released.
686 * qla2x00 local function return status code.
692 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
695 struct qla_hw_data *ha = vha->hw;
697 mbx_cmd_t *mcp = &mc;
699 #define EXE_FW_FORCE_SEMAPHORE BIT_7
702 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
703 "Entered %s.\n", __func__);
706 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
709 if (IS_FWI2_CAPABLE(ha)) {
710 mcp->mb[1] = MSW(risc_addr);
711 mcp->mb[2] = LSW(risc_addr);
/* Long-range optics detected: pass the LR distance to firmware. */
717 if (ha->flags.lr_detected) {
719 if (IS_BPM_RANGE_CAPABLE(ha))
721 ha->lr_distance << LR_DIST_FW_POS;
724 if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
725 mcp->mb[4] |= NVME_ENABLE_FLAG;
727 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
728 struct nvram_81xx *nv = ha->nvram;
729 /* set minimum speed if specified in nvram */
730 if (nv->min_supported_speed >= 2 &&
731 nv->min_supported_speed <= 5) {
733 mcp->mb[11] |= nv->min_supported_speed & 0xF;
734 mcp->out_mb |= MBX_11;
736 vha->min_supported_speed =
737 nv->min_supported_speed;
741 mcp->mb[11] |= BIT_4;
744 if (ha->flags.exlogins_enabled)
745 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
747 if (ha->flags.exchoffld_enabled)
748 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
751 mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
753 mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
754 mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1;
756 mcp->mb[1] = LSW(risc_addr);
757 mcp->out_mb |= MBX_1;
758 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
760 mcp->out_mb |= MBX_2;
764 mcp->tov = MBX_TOV_SECONDS;
766 rval = qla2x00_mailbox_command(vha, mcp);
768 if (rval != QLA_SUCCESS) {
/* On ISP28xx a semaphore-stuck failure (mb[1]==0x27) is retried once
 * with EXE_FW_FORCE_SEMAPHORE set. */
769 if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
770 mcp->mb[1] == 0x27 && retry) {
773 ql_dbg(ql_dbg_async, vha, 0x1026,
774 "Exe FW: force semaphore.\n");
780 ql_dbg(ql_dbg_async, vha, 0x509d,
781 "Exe FW retry: mb[0]=%x retry[%d]\n", mcp->mb[0], retry);
784 ql_dbg(ql_dbg_mbx, vha, 0x1026,
785 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
790 if (!IS_FWI2_CAPABLE(ha))
793 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
794 ql_dbg(ql_dbg_mbx, vha, 0x119a,
795 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
796 ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
797 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
798 ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
799 ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
800 ha->max_supported_speed == 0 ? "16Gps" :
801 ha->max_supported_speed == 1 ? "32Gps" :
802 ha->max_supported_speed == 2 ? "64Gps" : "unknown");
803 if (vha->min_supported_speed) {
804 ha->min_supported_speed = mcp->mb[5] &
805 (BIT_0 | BIT_1 | BIT_2);
806 ql_dbg(ql_dbg_mbx, vha, 0x119c,
807 "min_supported_speed=%s.\n",
808 ha->min_supported_speed == 6 ? "64Gps" :
809 ha->min_supported_speed == 5 ? "32Gps" :
810 ha->min_supported_speed == 4 ? "16Gps" :
811 ha->min_supported_speed == 3 ? "8Gps" :
812 ha->min_supported_speed == 2 ? "4Gps" : "unknown");
816 if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) {
817 ha->flags.edif_hw = 1;
818 ql_log(ql_log_info, vha, 0xffff,
819 "%s: edif HW\n", __func__);
823 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
824 "Done %s.\n", __func__);
830 * qla_get_exlogin_status
831 * Get extended login status
832 * uses the memory offload control/status Mailbox
835 * vha: adapter state pointer.
836 * buf_sz: returned required buffer size (out, from mb[4])
837 * ex_logins_cnt: returned extended login count (out, from mb[10])
839 * qla2x00 local function status
844 #define FETCH_XLOGINS_STAT 0x8
846 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
847 uint16_t *ex_logins_cnt)
851 mbx_cmd_t *mcp = &mc;
853 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
854 "Entered %s\n", __func__);
856 memset(mcp->mb, 0 , sizeof(mcp->mb));
857 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
858 mcp->mb[1] = FETCH_XLOGINS_STAT;
859 mcp->out_mb = MBX_1|MBX_0;
860 mcp->in_mb = MBX_10|MBX_4|MBX_0;
861 mcp->tov = MBX_TOV_SECONDS;
864 rval = qla2x00_mailbox_command(vha, mcp);
865 if (rval != QLA_SUCCESS) {
866 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
868 *buf_sz = mcp->mb[4];
869 *ex_logins_cnt = mcp->mb[10];
871 ql_log(ql_log_info, vha, 0x1190,
872 "buffer size 0x%x, exchange login count=%d\n",
873 mcp->mb[4], mcp->mb[10]);
875 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
876 "Done %s.\n", __func__);
883 * qla_set_exlogin_mem_cfg
884 * set extended login memory configuration
885 * Mbx needs to be issued before init_cb is set
888 * vha: adapter state pointer.
890 * phys_addr: physical (DMA) address of the extended-login buffer
892 * TARGET_QUEUE_LOCK must be released
893 * ADAPTER_STATE_LOCK must be released
896 * qla2x00 local function status code.
901 #define CONFIG_XLOGINS_MEM 0x9
903 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
907 mbx_cmd_t *mcp = &mc;
908 struct qla_hw_data *ha = vha->hw;
910 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
911 "Entered %s.\n", __func__);
913 memset(mcp->mb, 0 , sizeof(mcp->mb));
914 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
915 mcp->mb[1] = CONFIG_XLOGINS_MEM;
/* 64-bit DMA address split across mb[2,3,6,7]; size in mb[8,9]. */
916 mcp->mb[2] = MSW(phys_addr);
917 mcp->mb[3] = LSW(phys_addr);
918 mcp->mb[6] = MSW(MSD(phys_addr));
919 mcp->mb[7] = LSW(MSD(phys_addr));
920 mcp->mb[8] = MSW(ha->exlogin_size);
921 mcp->mb[9] = LSW(ha->exlogin_size);
922 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
923 mcp->in_mb = MBX_11|MBX_0;
924 mcp->tov = MBX_TOV_SECONDS;
926 rval = qla2x00_mailbox_command(vha, mcp);
927 if (rval != QLA_SUCCESS) {
928 ql_dbg(ql_dbg_mbx, vha, 0x111b,
929 "EXlogin Failed=%x. MB0=%x MB11=%x\n",
930 rval, mcp->mb[0], mcp->mb[11]);
932 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
933 "Done %s.\n", __func__);
940 * qla_get_exchoffld_status
941 * Get exchange offload status
942 * uses the memory offload control/status Mailbox
945 * vha: adapter state pointer.
946 * buf_sz: returned required buffer size (out, from mb[4])
947 * ex_logins_cnt: returned exchange offload count (out, from mb[10])
949 * qla2x00 local function status
954 #define FETCH_XCHOFFLD_STAT 0x2
956 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
957 uint16_t *ex_logins_cnt)
961 mbx_cmd_t *mcp = &mc;
963 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
964 "Entered %s\n", __func__);
966 memset(mcp->mb, 0 , sizeof(mcp->mb));
967 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
968 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
969 mcp->out_mb = MBX_1|MBX_0;
970 mcp->in_mb = MBX_10|MBX_4|MBX_0;
971 mcp->tov = MBX_TOV_SECONDS;
974 rval = qla2x00_mailbox_command(vha, mcp);
975 if (rval != QLA_SUCCESS) {
976 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
978 *buf_sz = mcp->mb[4];
979 *ex_logins_cnt = mcp->mb[10];
981 ql_log(ql_log_info, vha, 0x118e,
982 "buffer size 0x%x, exchange offload count=%d\n",
983 mcp->mb[4], mcp->mb[10]);
985 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
986 "Done %s.\n", __func__);
993 * qla_set_exchoffld_mem_cfg
994 * Set exchange offload memory configuration
995 * Mbx needs to be issued before init_cb is set
998 * vha: adapter state pointer (buffer address/size come from vha->hw).
1002 * TARGET_QUEUE_LOCK must be released
1003 * ADAPTER_STATE_LOCK must be released
1006 * qla2x00 local function status code.
1011 #define CONFIG_XCHOFFLD_MEM 0x3
1013 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
1017 mbx_cmd_t *mcp = &mc;
1018 struct qla_hw_data *ha = vha->hw;
1020 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
1021 "Entered %s.\n", __func__);
1023 memset(mcp->mb, 0 , sizeof(mcp->mb));
1024 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
1025 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
/* 64-bit DMA address split across mb[2,3,6,7]; size in mb[8,9]. */
1026 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
1027 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
1028 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
1029 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
1030 mcp->mb[8] = MSW(ha->exchoffld_size);
1031 mcp->mb[9] = LSW(ha->exchoffld_size);
1032 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1033 mcp->in_mb = MBX_11|MBX_0;
1034 mcp->tov = MBX_TOV_SECONDS;
1036 rval = qla2x00_mailbox_command(vha, mcp);
1037 if (rval != QLA_SUCCESS) {
1039 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
1041 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
1042 "Done %s.\n", __func__);
1049 * qla2x00_get_fw_version
1050 * Get firmware version.
1053 * vha: adapter state pointer.
1054 * Results (version numbers, attributes, capability flags) are stored
1055 * into vha->hw rather than returned through out-parameters.
1059 * qla2x00 local function return status code.
1065 qla2x00_get_fw_version(scsi_qla_host_t *vha)
1069 mbx_cmd_t *mcp = &mc;
1070 struct qla_hw_data *ha = vha->hw;
1072 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1073 "Entered %s.\n", __func__);
1075 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1076 mcp->out_mb = MBX_0;
/* Newer chip families return progressively more mailbox registers. */
1077 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1078 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1079 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1080 if (IS_FWI2_CAPABLE(ha))
1081 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1082 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1084 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1085 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
1088 mcp->tov = MBX_TOV_SECONDS;
1089 rval = qla2x00_mailbox_command(vha, mcp);
1090 if (rval != QLA_SUCCESS)
1093 /* Return mailbox data. */
1094 ha->fw_major_version = mcp->mb[1];
1095 ha->fw_minor_version = mcp->mb[2];
1096 ha->fw_subminor_version = mcp->mb[3];
1097 ha->fw_attributes = mcp->mb[6];
1098 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1099 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
1101 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1103 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1104 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1105 ha->mpi_version[1] = mcp->mb[11] >> 8;
1106 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1107 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1108 ha->phy_version[0] = mcp->mb[8] & 0xff;
1109 ha->phy_version[1] = mcp->mb[9] >> 8;
1110 ha->phy_version[2] = mcp->mb[9] & 0xff;
1113 if (IS_FWI2_CAPABLE(ha)) {
1114 ha->fw_attributes_h = mcp->mb[15];
1115 ha->fw_attributes_ext[0] = mcp->mb[16];
1116 ha->fw_attributes_ext[1] = mcp->mb[17];
1117 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1118 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1119 __func__, mcp->mb[15], mcp->mb[6]);
1120 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1121 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1122 __func__, mcp->mb[17], mcp->mb[16]);
1124 if (ha->fw_attributes_h & 0x4)
1125 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1126 "%s: Firmware supports Extended Login 0x%x\n",
1127 __func__, ha->fw_attributes_h);
1129 if (ha->fw_attributes_h & 0x8)
1130 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1131 "%s: Firmware supports Exchange Offload 0x%x\n",
1132 __func__, ha->fw_attributes_h);
1135 * FW supports nvme and driver load parameter requested nvme.
1136 * BIT 26 of fw_attributes indicates NVMe support.
1138 if ((ha->fw_attributes_h &
1139 (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
1141 if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
1142 vha->flags.nvme_first_burst = 1;
1144 vha->flags.nvme_enabled = 1;
1145 ql_log(ql_log_info, vha, 0xd302,
1146 "%s: FC-NVMe is Enabled (0x%x)\n",
1147 __func__, ha->fw_attributes_h);
1150 /* BIT_13 of Extended FW Attributes informs about NVMe2 support */
1151 if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
1152 ql_log(ql_log_info, vha, 0xd302,
1153 "Firmware supports NVMe2 0x%x\n",
1154 ha->fw_attributes_ext[0]);
1155 vha->flags.nvme2_enabled = 1;
1158 if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable &&
1159 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) {
1160 ha->flags.edif_enabled = 1;
1161 ql_log(ql_log_info, vha, 0xffff,
1162 "%s: edif is enabled\n", __func__);
1166 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1167 ha->serdes_version[0] = mcp->mb[7] & 0xff;
1168 ha->serdes_version[1] = mcp->mb[8] >> 8;
1169 ha->serdes_version[2] = mcp->mb[8] & 0xff;
1170 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1171 ha->mpi_version[1] = mcp->mb[11] >> 8;
1172 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1173 ha->pep_version[0] = mcp->mb[13] & 0xff;
1174 ha->pep_version[1] = mcp->mb[14] >> 8;
1175 ha->pep_version[2] = mcp->mb[14] & 0xff;
1176 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1177 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1178 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1179 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1180 if (IS_QLA28XX(ha)) {
1181 if (mcp->mb[16] & BIT_10)
1182 ha->flags.secure_fw = 1;
1184 ql_log(ql_log_info, vha, 0xffff,
1185 "Secure Flash Update in FW: %s\n",
1186 (ha->flags.secure_fw) ? "Supported" :
1190 if (ha->flags.scm_supported_a &&
1191 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
1192 ha->flags.scm_supported_f = 1;
1193 ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
1195 ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
1196 (ha->flags.scm_supported_f) ? "Supported" :
1199 if (vha->flags.nvme2_enabled) {
1200 /* set BIT_15 of special feature control block for SLER */
1201 ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
1202 /* set BIT_14 of special feature control block for PI CTRL*/
1203 ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
1208 if (rval != QLA_SUCCESS) {
1210 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1213 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1214 "Done %s.\n", __func__);
1220 * qla2x00_get_fw_options
1221 * Get firmware options.
1224 * vha = adapter block pointer.
1225 * fwopts = out array; receives mailbox registers 0-3.
1228 * qla2x00 local function return status code.
1234 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1238 mbx_cmd_t *mcp = &mc;
1240 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1241 "Entered %s.\n", __func__);
1243 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1244 mcp->out_mb = MBX_0;
1245 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1246 mcp->tov = MBX_TOV_SECONDS;
1248 rval = qla2x00_mailbox_command(vha, mcp);
1250 if (rval != QLA_SUCCESS) {
1252 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1254 fwopts[0] = mcp->mb[0];
1255 fwopts[1] = mcp->mb[1];
1256 fwopts[2] = mcp->mb[2];
1257 fwopts[3] = mcp->mb[3];
1259 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1260 "Done %s.\n", __func__);
1268 * qla2x00_set_fw_options
1269 * Set firmware options.
1272 * vha = adapter block pointer.
1273 * fwopt = pointer for firmware options; fwopts[0] receives the
1274 * completion status (mb[0]) on return.
1276 * qla2x00 local function return status code.
1282 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1286 mbx_cmd_t *mcp = &mc;
1288 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1289 "Entered %s.\n", __func__);
1291 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1292 mcp->mb[1] = fwopts[1];
1293 mcp->mb[2] = fwopts[2];
1294 mcp->mb[3] = fwopts[3];
1295 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1297 if (IS_FWI2_CAPABLE(vha->hw)) {
1298 mcp->in_mb |= MBX_1;
1299 mcp->mb[10] = fwopts[10];
1300 mcp->out_mb |= MBX_10;
1302 mcp->mb[10] = fwopts[10];
1303 mcp->mb[11] = fwopts[11];
1304 mcp->mb[12] = 0; /* Undocumented, but used */
1305 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1307 mcp->tov = MBX_TOV_SECONDS;
1309 rval = qla2x00_mailbox_command(vha, mcp);
1311 fwopts[0] = mcp->mb[0];
1313 if (rval != QLA_SUCCESS) {
1315 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1316 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1319 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1320 "Done %s.\n", __func__);
1327 * qla2x00_mbx_reg_test
1328 * Mailbox register wrap test.
1331 * ha = adapter block pointer.
1332 * TARGET_QUEUE_LOCK must be released.
1333 * ADAPTER_STATE_LOCK must be released.
1336 * qla2x00 local function return status code.
1342 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1346 mbx_cmd_t *mcp = &mc;
1348 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1349 "Entered %s.\n", __func__);
/* Load fixed bit patterns into mb[1..7]; firmware must echo them back. */
1351 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1352 mcp->mb[1] = 0xAAAA;
1353 mcp->mb[2] = 0x5555;
1354 mcp->mb[3] = 0xAA55;
1355 mcp->mb[4] = 0x55AA;
1356 mcp->mb[5] = 0xA5A5;
1357 mcp->mb[6] = 0x5A5A;
1358 mcp->mb[7] = 0x2525;
1359 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1360 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1361 mcp->tov = MBX_TOV_SECONDS;
1363 rval = qla2x00_mailbox_command(vha, mcp);
/* Any register that does not match its pattern fails the wrap test. */
1365 if (rval == QLA_SUCCESS) {
1366 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1367 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1368 rval = QLA_FUNCTION_FAILED;
1369 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1370 mcp->mb[7] != 0x2525)
1371 rval = QLA_FUNCTION_FAILED;
1374 if (rval != QLA_SUCCESS) {
1376 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1380 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1381 "Done %s.\n", __func__);
1388 * qla2x00_verify_checksum
1389 * Verify firmware checksum.
1392 * ha = adapter block pointer.
1393 * TARGET_QUEUE_LOCK must be released.
1394 * ADAPTER_STATE_LOCK must be released.
1397 * qla2x00 local function return status code.
1403 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1407 mbx_cmd_t *mcp = &mc;
1409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1410 "Entered %s.\n", __func__);
1412 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1413 mcp->out_mb = MBX_0;
/* FWI2: 32-bit RISC address split into mb[1] (MSW) and mb[2] (LSW);
 * legacy ISPs take only the low word in mb[1]. */
1415 if (IS_FWI2_CAPABLE(vha->hw)) {
1416 mcp->mb[1] = MSW(risc_addr);
1417 mcp->mb[2] = LSW(risc_addr);
1418 mcp->out_mb |= MBX_2|MBX_1;
1419 mcp->in_mb |= MBX_2|MBX_1;
1421 mcp->mb[1] = LSW(risc_addr);
1422 mcp->out_mb |= MBX_1;
1423 mcp->in_mb |= MBX_1;
1426 mcp->tov = MBX_TOV_SECONDS;
1428 rval = qla2x00_mailbox_command(vha, mcp);
1430 if (rval != QLA_SUCCESS) {
1431 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1432 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1433 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1435 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1436 "Done %s.\n", __func__);
1443 * qla2x00_issue_iocb
1444 * Issue IOCB using mailbox command
1447 * ha = adapter state pointer.
1448 * buffer = buffer pointer.
1449 * phys_addr = physical address of buffer.
1450 * size = size of buffer.
1451 * TARGET_QUEUE_LOCK must be released.
1452 * ADAPTER_STATE_LOCK must be released.
1455 * qla2x00 local function return status code.
1461 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1462 dma_addr_t phys_addr, size_t size, uint32_t tov)
1466 mbx_cmd_t *mcp = &mc;
/* IOCB pass-through requires an initialized firmware. */
1468 if (!vha->hw->flags.fw_started)
1469 return QLA_INVALID_COMMAND;
1471 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1472 "Entered %s.\n", __func__);
/* Hand the prebuilt IOCB to firmware via the 64-bit address variant. */
1474 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1476 mcp->mb[2] = MSW(LSD(phys_addr));
1477 mcp->mb[3] = LSW(LSD(phys_addr));
1478 mcp->mb[6] = MSW(MSD(phys_addr));
1479 mcp->mb[7] = LSW(MSD(phys_addr));
1480 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1481 mcp->in_mb = MBX_1|MBX_0;
1484 rval = qla2x00_mailbox_command(vha, mcp);
1486 if (rval != QLA_SUCCESS) {
1488 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1490 sts_entry_t *sts_entry = buffer;
1492 /* Mask reserved bits. */
1493 sts_entry->entry_status &=
1494 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1495 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1496 "Done %s (status=%x).\n", __func__,
1497 sts_entry->entry_status);
/* Convenience wrapper around qla2x00_issue_iocb_timeout() using the
 * default mailbox timeout. */
1504 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1507 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1512 * qla2x00_abort_command
1513 * Abort command aborts a specified IOCB.
1516 * ha = adapter block pointer.
1517 * sp = SB structure pointer.
1520 * qla2x00 local function return status code.
1526 qla2x00_abort_command(srb_t *sp)
1528 unsigned long flags = 0;
1530 uint32_t handle = 0;
1532 mbx_cmd_t *mcp = &mc;
1533 fc_port_t *fcport = sp->fcport;
1534 scsi_qla_host_t *vha = fcport->vha;
1535 struct qla_hw_data *ha = vha->hw;
1536 struct req_que *req;
1537 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1539 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1540 "Entered %s.\n", __func__);
1543 req = sp->qpair->req;
/* Find the outstanding-command handle for this srb under the hw lock. */
1547 spin_lock_irqsave(&ha->hardware_lock, flags);
1548 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1549 if (req->outstanding_cmds[handle] == sp)
1552 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1554 if (handle == req->num_outstanding_cmds) {
1555 /* command not found */
1556 return QLA_FUNCTION_FAILED;
/* mb[1] carries the loop ID; placement depends on extended-ID support. */
1559 mcp->mb[0] = MBC_ABORT_COMMAND;
1560 if (HAS_EXTENDED_IDS(ha))
1561 mcp->mb[1] = fcport->loop_id;
1563 mcp->mb[1] = fcport->loop_id << 8;
1564 mcp->mb[2] = (uint16_t)handle;
1565 mcp->mb[3] = (uint16_t)(handle >> 16);
1566 mcp->mb[6] = (uint16_t)cmd->device->lun;
1567 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1569 mcp->tov = MBX_TOV_SECONDS;
1571 rval = qla2x00_mailbox_command(vha, mcp);
1573 if (rval != QLA_SUCCESS) {
1574 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1576 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1577 "Done %s.\n", __func__);
/* Issue an ABORT_TARGET mailbox command, then a marker IOCB to resync. */
1584 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1588 mbx_cmd_t *mcp = &mc;
/* NOTE(review): vha initialization is elided in this extract. */
1589 scsi_qla_host_t *vha;
1593 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1594 "Entered %s.\n", __func__);
1596 mcp->mb[0] = MBC_ABORT_TARGET;
1597 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1598 if (HAS_EXTENDED_IDS(vha->hw)) {
1599 mcp->mb[1] = fcport->loop_id;
1601 mcp->out_mb |= MBX_10;
1603 mcp->mb[1] = fcport->loop_id << 8;
1605 mcp->mb[2] = vha->hw->loop_reset_delay;
1606 mcp->mb[9] = vha->vp_idx;
1609 mcp->tov = MBX_TOV_SECONDS;
1611 rval = qla2x00_mailbox_command(vha, mcp);
1612 if (rval != QLA_SUCCESS) {
1613 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1614 "Failed=%x.\n", rval);
1617 /* Issue marker IOCB. */
1618 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
1620 if (rval2 != QLA_SUCCESS) {
1621 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1622 "Failed to issue marker IOCB (%x).\n", rval2);
1624 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1625 "Done %s.\n", __func__);
/* Issue a LUN_RESET mailbox command for LUN 'l', then a marker IOCB. */
1632 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1636 mbx_cmd_t *mcp = &mc;
/* NOTE(review): vha initialization is elided in this extract. */
1637 scsi_qla_host_t *vha;
1641 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1642 "Entered %s.\n", __func__);
1644 mcp->mb[0] = MBC_LUN_RESET;
1645 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1646 if (HAS_EXTENDED_IDS(vha->hw))
1647 mcp->mb[1] = fcport->loop_id;
1649 mcp->mb[1] = fcport->loop_id << 8;
1650 mcp->mb[2] = (u32)l;
1652 mcp->mb[9] = vha->vp_idx;
1655 mcp->tov = MBX_TOV_SECONDS;
1657 rval = qla2x00_mailbox_command(vha, mcp);
1658 if (rval != QLA_SUCCESS) {
1659 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1662 /* Issue marker IOCB. */
1663 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
1665 if (rval2 != QLA_SUCCESS) {
1666 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1667 "Failed to issue marker IOCB (%x).\n", rval2);
1669 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1670 "Done %s.\n", __func__);
1677 * qla2x00_get_adapter_id
1678 * Get adapter ID and topology.
1681 * ha = adapter block pointer.
1682 * id = pointer for loop ID.
1683 * al_pa = pointer for AL_PA.
1684 * area = pointer for area.
1685 * domain = pointer for domain.
1686 * top = pointer for topology.
1687 * TARGET_QUEUE_LOCK must be released.
1688 * ADAPTER_STATE_LOCK must be released.
1691 * qla2x00 local function return status code.
1697 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1698 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1702 mbx_cmd_t *mcp = &mc;
1704 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1705 "Entered %s.\n", __func__);
1707 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1708 mcp->mb[9] = vha->vp_idx;
1709 mcp->out_mb = MBX_9|MBX_0;
/* Which return registers are valid depends on the HBA generation. */
1710 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1711 if (IS_CNA_CAPABLE(vha->hw))
1712 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1713 if (IS_FWI2_CAPABLE(vha->hw))
1714 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1715 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
1716 mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23;
1718 mcp->tov = MBX_TOV_SECONDS;
1720 rval = qla2x00_mailbox_command(vha, mcp);
/* Map firmware completion codes to driver-level status. */
1721 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1722 rval = QLA_COMMAND_ERROR;
1723 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1724 rval = QLA_INVALID_COMMAND;
/* Unpack AL_PA/area/domain from mb[2]/mb[3]; sw_cap from mb[7]. */
1728 *al_pa = LSB(mcp->mb[2]);
1729 *area = MSB(mcp->mb[2]);
1730 *domain = LSB(mcp->mb[3]);
1732 *sw_cap = mcp->mb[7];
1734 if (rval != QLA_SUCCESS) {
1736 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1738 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1739 "Done %s.\n", __func__);
/* CNA: FCoE VLAN id, FCF index and VN_Port MAC — two MAC bytes per
 * mailbox register, high-order MAC byte in mb[13]'s low byte. */
1741 if (IS_CNA_CAPABLE(vha->hw)) {
1742 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1743 vha->fcoe_fcf_idx = mcp->mb[10];
1744 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1745 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1746 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1747 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1748 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1749 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1751 /* If FA-WWN supported */
1752 if (IS_FAWWN_CAPABLE(vha->hw)) {
1753 if (mcp->mb[7] & BIT_14) {
/* Fabric-assigned WWN arrives in mb[16..19]; adopt it as the port name. */
1754 vha->port_name[0] = MSB(mcp->mb[16]);
1755 vha->port_name[1] = LSB(mcp->mb[16]);
1756 vha->port_name[2] = MSB(mcp->mb[17]);
1757 vha->port_name[3] = LSB(mcp->mb[17]);
1758 vha->port_name[4] = MSB(mcp->mb[18]);
1759 vha->port_name[5] = LSB(mcp->mb[18]);
1760 vha->port_name[6] = MSB(mcp->mb[19]);
1761 vha->port_name[7] = LSB(mcp->mb[19]);
1762 fc_host_port_name(vha->host) =
1763 wwn_to_u64(vha->port_name);
1764 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1765 "FA-WWN acquired %016llx\n",
1766 wwn_to_u64(vha->port_name));
/* 27xx/28xx: buffer-to-buffer credit recovery and SCM (EDC/RDF) status. */
1770 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
1771 vha->bbcr = mcp->mb[15];
1772 if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) {
1773 ql_log(ql_log_info, vha, 0x11a4,
1774 "SCM: EDC ELS completed, flags 0x%x\n",
1777 if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) {
1778 vha->hw->flags.scm_enabled = 1;
1779 vha->scm_fabric_connection_flags |=
1780 SCM_FLAG_RDF_COMPLETED;
1781 ql_log(ql_log_info, vha, 0x11a5,
1782 "SCM: RDF ELS completed, flags 0x%x\n",
1792 * qla2x00_get_retry_cnt
1793 * Get current firmware login retry count and delay.
1796 * ha = adapter block pointer.
1797 * retry_cnt = pointer to login retry count.
1798 * tov = pointer to login timeout value.
1801 * qla2x00 local function return status code.
1807 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1813 mbx_cmd_t *mcp = &mc;
1815 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1816 "Entered %s.\n", __func__);
1818 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1819 mcp->out_mb = MBX_0;
1820 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1821 mcp->tov = MBX_TOV_SECONDS;
1823 rval = qla2x00_mailbox_command(vha, mcp);
1825 if (rval != QLA_SUCCESS) {
1827 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1828 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1830 /* Convert returned data and check our values. */
1831 *r_a_tov = mcp->mb[3] / 2;
1832 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
/* Only adopt the firmware values if they exceed the caller's current ones. */
1833 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1834 /* Update to the larger values */
1835 *retry_cnt = (uint8_t)mcp->mb[1];
1839 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1840 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1847 * qla2x00_init_firmware
1848 * Initialize adapter firmware.
1851 * ha = adapter block pointer.
1852 * dptr = Initialization control block pointer.
1853 * size = size of initialization control block.
1854 * TARGET_QUEUE_LOCK must be released.
1855 * ADAPTER_STATE_LOCK must be released.
1858 * qla2x00 local function return status code.
1864 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1868 mbx_cmd_t *mcp = &mc;
1869 struct qla_hw_data *ha = vha->hw;
1871 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1872 "Entered %s.\n", __func__);
/* P3P (82xx): doorbell register write before init — presumably required
 * by that part's firmware handshake; confirm against 82xx docs. */
1874 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1875 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1876 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1878 if (ha->flags.npiv_supported)
1879 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1881 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
/* DMA address of the init control block, split across mb[2,3,6,7]. */
1884 mcp->mb[2] = MSW(ha->init_cb_dma);
1885 mcp->mb[3] = LSW(ha->init_cb_dma);
1886 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1887 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1888 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
/* Optional extended init control block via mb[10..14]. */
1889 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1891 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1892 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1893 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1894 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1895 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1896 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
/* Special feature control block (SCM / NVMe-2) via mb[15..19]. */
1899 if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
1900 mcp->mb[1] |= BIT_1;
1901 mcp->mb[16] = MSW(ha->sf_init_cb_dma);
1902 mcp->mb[17] = LSW(ha->sf_init_cb_dma);
1903 mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
1904 mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
1905 mcp->mb[15] = sizeof(*ha->sf_init_cb);
1906 mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15;
1909 /* 1 and 2 should normally be captured. */
1910 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1911 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
1912 /* mb3 is additional info about the installed SFP. */
1913 mcp->in_mb |= MBX_3;
1914 mcp->buf_size = size;
1915 mcp->flags = MBX_DMA_OUT;
1916 mcp->tov = MBX_TOV_SECONDS;
1917 rval = qla2x00_mailbox_command(vha, mcp);
1919 if (rval != QLA_SUCCESS) {
1921 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1922 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
1923 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
/* On failure, dump the control blocks that were submitted for debugging. */
1925 ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
1926 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1927 0x0104d, ha->init_cb, sizeof(*ha->init_cb));
1929 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1930 ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
1931 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1932 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
1935 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1936 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1937 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1938 "Invalid SFP/Validation Failed\n");
1940 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1941 "Done %s.\n", __func__);
1949 * qla2x00_get_port_database
1950 * Issue normal/enhanced get port database mailbox command
1951 * and copy device name as necessary.
1954 * ha = adapter state pointer.
1955 * dev = structure pointer.
1956 * opt = enhanced cmd option byte.
1959 * qla2x00 local function return status code.
1965 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1969 mbx_cmd_t *mcp = &mc;
1970 port_database_t *pd;
1971 struct port_database_24xx *pd24;
1973 struct qla_hw_data *ha = vha->hw;
1975 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1976 "Entered %s.\n", __func__);
/* Scratch DMA buffer the firmware fills with the port database. */
1979 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1981 ql_log(ql_log_warn, vha, 0x1050,
1982 "Failed to allocate port database structure.\n");
1984 return QLA_MEMORY_ALLOC_FAILED;
1987 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1988 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1989 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1990 mcp->mb[2] = MSW(pd_dma);
1991 mcp->mb[3] = LSW(pd_dma);
1992 mcp->mb[6] = MSW(MSD(pd_dma));
1993 mcp->mb[7] = LSW(MSD(pd_dma));
1994 mcp->mb[9] = vha->vp_idx;
1995 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
/* Loop-ID/option encoding in mb[1] varies with the ISP generation. */
1997 if (IS_FWI2_CAPABLE(ha)) {
1998 mcp->mb[1] = fcport->loop_id;
2000 mcp->out_mb |= MBX_10|MBX_1;
2001 mcp->in_mb |= MBX_1;
2002 } else if (HAS_EXTENDED_IDS(ha)) {
2003 mcp->mb[1] = fcport->loop_id;
2005 mcp->out_mb |= MBX_10|MBX_1;
2007 mcp->mb[1] = fcport->loop_id << 8 | opt;
2008 mcp->out_mb |= MBX_1;
2010 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
2011 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
2012 mcp->flags = MBX_DMA_IN;
2013 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2014 rval = qla2x00_mailbox_command(vha, mcp);
2015 if (rval != QLA_SUCCESS)
/* FWI2: parse the 24xx-format port database. */
2018 if (IS_FWI2_CAPABLE(ha)) {
2020 u8 current_login_state, last_login_state;
2022 pd24 = (struct port_database_24xx *) pd;
2024 /* Check for logged in state. */
/* NVMe targets report login state in the high nibble, FCP in the low. */
2025 if (NVME_TARGET(ha, fcport)) {
2026 current_login_state = pd24->current_login_state >> 4;
2027 last_login_state = pd24->last_login_state >> 4;
2029 current_login_state = pd24->current_login_state & 0xf;
2030 last_login_state = pd24->last_login_state & 0xf;
2032 fcport->current_login_state = pd24->current_login_state;
2033 fcport->last_login_state = pd24->last_login_state;
2035 /* Check for logged in state. */
2036 if (current_login_state != PDS_PRLI_COMPLETE &&
2037 last_login_state != PDS_PRLI_COMPLETE) {
2038 ql_dbg(ql_dbg_mbx, vha, 0x119a,
2039 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
2040 current_login_state, last_login_state,
2042 rval = QLA_FUNCTION_FAILED;
2048 if (fcport->loop_id == FC_NO_LOOP_ID ||
2049 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2050 memcmp(fcport->port_name, pd24->port_name, 8))) {
2051 /* We lost the device mid way. */
2052 rval = QLA_NOT_LOGGED_IN;
2056 /* Names are little-endian. */
2057 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
2058 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
2060 /* Get port_id of device. */
2061 fcport->d_id.b.domain = pd24->port_id[0];
2062 fcport->d_id.b.area = pd24->port_id[1];
2063 fcport->d_id.b.al_pa = pd24->port_id[2];
2064 fcport->d_id.b.rsvd_1 = 0;
2066 /* If not target must be initiator or unknown type. */
2067 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
2068 fcport->port_type = FCT_INITIATOR;
2070 fcport->port_type = FCT_TARGET;
2072 /* Passback COS information. */
2073 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
2074 FC_COS_CLASS2 : FC_COS_CLASS3;
2076 if (pd24->prli_svc_param_word_3[0] & BIT_7)
2077 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
/* Legacy (non-FWI2) database layout. */
2081 /* Check for logged in state. */
2082 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
2083 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
2084 ql_dbg(ql_dbg_mbx, vha, 0x100a,
2085 "Unable to verify login-state (%x/%x) - "
2086 "portid=%02x%02x%02x.\n", pd->master_state,
2087 pd->slave_state, fcport->d_id.b.domain,
2088 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2089 rval = QLA_FUNCTION_FAILED;
2093 if (fcport->loop_id == FC_NO_LOOP_ID ||
2094 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2095 memcmp(fcport->port_name, pd->port_name, 8))) {
2096 /* We lost the device mid way. */
2097 rval = QLA_NOT_LOGGED_IN;
2101 /* Names are little-endian. */
2102 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
2103 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
2105 /* Get port_id of device. */
2106 fcport->d_id.b.domain = pd->port_id[0];
2107 fcport->d_id.b.area = pd->port_id[3];
2108 fcport->d_id.b.al_pa = pd->port_id[2];
2109 fcport->d_id.b.rsvd_1 = 0;
2111 /* If not target must be initiator or unknown type. */
2112 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2113 fcport->port_type = FCT_INITIATOR;
2115 fcport->port_type = FCT_TARGET;
2117 /* Passback COS information. */
2118 fcport->supported_classes = (pd->options & BIT_4) ?
2119 FC_COS_CLASS2 : FC_COS_CLASS3;
2123 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2126 if (rval != QLA_SUCCESS) {
2127 ql_dbg(ql_dbg_mbx, vha, 0x1052,
2128 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
2129 mcp->mb[0], mcp->mb[1]);
2131 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2132 "Done %s.\n", __func__);
/* Fetch the 24xx-format port database for a given N_Port handle directly
 * into the caller-supplied pdb buffer. */
2139 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
2140 struct port_database_24xx *pdb)
2143 mbx_cmd_t *mcp = &mc;
2147 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115,
2148 "Entered %s.\n", __func__);
2150 memset(pdb, 0, sizeof(*pdb));
/* Map the caller's buffer for DMA; firmware writes the pdb into it. */
2152 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
2153 sizeof(*pdb), DMA_FROM_DEVICE);
2155 ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
2156 return QLA_MEMORY_ALLOC_FAILED;
2159 mcp->mb[0] = MBC_GET_PORT_DATABASE;
2160 mcp->mb[1] = nport_handle;
2161 mcp->mb[2] = MSW(LSD(pdb_dma));
2162 mcp->mb[3] = LSW(LSD(pdb_dma));
2163 mcp->mb[6] = MSW(MSD(pdb_dma));
2164 mcp->mb[7] = LSW(MSD(pdb_dma));
2167 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2168 mcp->in_mb = MBX_1|MBX_0;
2169 mcp->buf_size = sizeof(*pdb);
2170 mcp->flags = MBX_DMA_IN;
2171 mcp->tov = vha->hw->login_timeout * 2;
2172 rval = qla2x00_mailbox_command(vha, mcp);
2174 if (rval != QLA_SUCCESS) {
2175 ql_dbg(ql_dbg_mbx, vha, 0x111a,
2176 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2177 rval, mcp->mb[0], mcp->mb[1]);
2179 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b,
2180 "Done %s.\n", __func__);
2183 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma,
2184 sizeof(*pdb), DMA_FROM_DEVICE);
2190 * qla2x00_get_firmware_state
2191 * Get adapter firmware state.
2194 * ha = adapter block pointer.
2195 * dptr = pointer for firmware state.
2196 * TARGET_QUEUE_LOCK must be released.
2197 * ADAPTER_STATE_LOCK must be released.
2200 * qla2x00 local function return status code.
2206 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2210 mbx_cmd_t *mcp = &mc;
2211 struct qla_hw_data *ha = vha->hw;
2213 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2214 "Entered %s.\n", __func__);
2216 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2217 mcp->out_mb = MBX_0;
/* FWI2 firmware reports additional state in mb[2..6]. */
2218 if (IS_FWI2_CAPABLE(vha->hw))
2219 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2221 mcp->in_mb = MBX_1|MBX_0;
2222 mcp->tov = MBX_TOV_SECONDS;
2224 rval = qla2x00_mailbox_command(vha, mcp);
2226 /* Return firmware states. */
2227 states[0] = mcp->mb[1];
2228 if (IS_FWI2_CAPABLE(vha->hw)) {
2229 states[1] = mcp->mb[2];
2230 states[2] = mcp->mb[3]; /* SFP info */
2231 states[3] = mcp->mb[4];
2232 states[4] = mcp->mb[5];
2233 states[5] = mcp->mb[6]; /* DPORT status */
2236 if (rval != QLA_SUCCESS) {
2238 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2240 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2241 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2242 ql_dbg(ql_dbg_mbx, vha, 0x119e,
2243 "Invalid SFP/Validation Failed\n");
2245 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2246 "Done %s.\n", __func__);
2253 * qla2x00_get_port_name
2254 * Issue get port name mailbox command.
2255 * Returned name is in big endian format.
2258 * ha = adapter block pointer.
2259 * loop_id = loop ID of device.
2260 * name = pointer for name.
2261 * TARGET_QUEUE_LOCK must be released.
2262 * ADAPTER_STATE_LOCK must be released.
2265 * qla2x00 local function return status code.
2271 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2276 mbx_cmd_t *mcp = &mc;
2278 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2279 "Entered %s.\n", __func__);
2281 mcp->mb[0] = MBC_GET_PORT_NAME;
2282 mcp->mb[9] = vha->vp_idx;
2283 mcp->out_mb = MBX_9|MBX_1|MBX_0;
/* Loop-ID/option encoding in mb[1] depends on extended-ID support. */
2284 if (HAS_EXTENDED_IDS(vha->hw)) {
2285 mcp->mb[1] = loop_id;
2287 mcp->out_mb |= MBX_10;
2289 mcp->mb[1] = loop_id << 8 | opt;
2292 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2293 mcp->tov = MBX_TOV_SECONDS;
2295 rval = qla2x00_mailbox_command(vha, mcp);
2297 if (rval != QLA_SUCCESS) {
2299 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2302 /* This function returns name in big endian. */
2303 name[0] = MSB(mcp->mb[2]);
2304 name[1] = LSB(mcp->mb[2]);
2305 name[2] = MSB(mcp->mb[3]);
2306 name[3] = LSB(mcp->mb[3]);
2307 name[4] = MSB(mcp->mb[6]);
2308 name[5] = LSB(mcp->mb[6]);
2309 name[6] = MSB(mcp->mb[7]);
2310 name[7] = LSB(mcp->mb[7]);
2313 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2314 "Done %s.\n", __func__);
2321 * qla24xx_link_initialize
2322 * Issue link initialization mailbox command.
2325 * ha = adapter block pointer.
2326 * TARGET_QUEUE_LOCK must be released.
2327 * ADAPTER_STATE_LOCK must be released.
2330 * qla2x00 local function return status code.
2336 qla24xx_link_initialize(scsi_qla_host_t *vha)
2340 mbx_cmd_t *mcp = &mc;
2342 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2343 "Entered %s.\n", __func__);
/* Only supported on FWI2 adapters that are not CNAs. */
2345 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2346 return QLA_FUNCTION_FAILED;
2348 mcp->mb[0] = MBC_LINK_INITIALIZATION;
/* BIT_6 selects loop mode, BIT_5 point-to-point. */
2350 if (vha->hw->operating_mode == LOOP)
2351 mcp->mb[1] |= BIT_6;
2353 mcp->mb[1] |= BIT_5;
2356 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2358 mcp->tov = MBX_TOV_SECONDS;
2360 rval = qla2x00_mailbox_command(vha, mcp);
2362 if (rval != QLA_SUCCESS) {
2363 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2365 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2366 "Done %s.\n", __func__);
2374 * Issue LIP reset mailbox command.
2377 * ha = adapter block pointer.
2378 * TARGET_QUEUE_LOCK must be released.
2379 * ADAPTER_STATE_LOCK must be released.
2382 * qla2x00 local function return status code.
2388 qla2x00_lip_reset(scsi_qla_host_t *vha)
2392 mbx_cmd_t *mcp = &mc;
2394 ql_dbg(ql_dbg_disc, vha, 0x105a,
2395 "Entered %s.\n", __func__);
/* The reset command and its parameters vary by adapter family. */
2397 if (IS_CNA_CAPABLE(vha->hw)) {
2398 /* Logout across all FCFs. */
2399 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2402 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2403 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2404 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2407 mcp->mb[3] = vha->hw->loop_reset_delay;
2408 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2410 mcp->mb[0] = MBC_LIP_RESET;
2411 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2412 if (HAS_EXTENDED_IDS(vha->hw)) {
2413 mcp->mb[1] = 0x00ff;
2415 mcp->out_mb |= MBX_10;
2417 mcp->mb[1] = 0xff00;
2419 mcp->mb[2] = vha->hw->loop_reset_delay;
2423 mcp->tov = MBX_TOV_SECONDS;
2425 rval = qla2x00_mailbox_command(vha, mcp);
2427 if (rval != QLA_SUCCESS) {
2429 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2432 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2433 "Done %s.\n", __func__);
2444 * ha = adapter block pointer.
2445 * sns = pointer for command.
2446 * cmd_size = command size.
2447 * buf_size = response/command size.
2448 * TARGET_QUEUE_LOCK must be released.
2449 * ADAPTER_STATE_LOCK must be released.
2452 * qla2x00 local function return status code.
2458 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2459 uint16_t cmd_size, size_t buf_size)
2463 mbx_cmd_t *mcp = &mc;
2465 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2466 "Entered %s.\n", __func__);
/* NOTE(review): mcp->tov is logged here but only assigned below. */
2468 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2469 "Retry cnt=%d ratov=%d total tov=%d.\n",
2470 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
/* The SNS command/response buffer is shared: DMA out then back in. */
2472 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2473 mcp->mb[1] = cmd_size;
2474 mcp->mb[2] = MSW(sns_phys_address);
2475 mcp->mb[3] = LSW(sns_phys_address);
2476 mcp->mb[6] = MSW(MSD(sns_phys_address));
2477 mcp->mb[7] = LSW(MSD(sns_phys_address));
2478 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2479 mcp->in_mb = MBX_0|MBX_1;
2480 mcp->buf_size = buf_size;
2481 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2482 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2483 rval = qla2x00_mailbox_command(vha, mcp);
2485 if (rval != QLA_SUCCESS) {
2487 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2488 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2489 rval, mcp->mb[0], mcp->mb[1]);
2492 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2493 "Done %s.\n", __func__);
/* Fabric login on FWI2 adapters: builds a LOGINOUT_PORT IOCB, issues it via
 * the mailbox IOCB pass-through, and maps the IOCB result onto the legacy
 * 2x00-style mb[] status codes expected by callers. */
2500 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2501 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2505 struct logio_entry_24xx *lg;
2508 struct qla_hw_data *ha = vha->hw;
2509 struct req_que *req;
2511 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2512 "Entered %s.\n", __func__);
2514 if (vha->vp_idx && vha->qpair)
2515 req = vha->qpair->req;
2517 req = ha->req_q_map[0];
2519 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2521 ql_log(ql_log_warn, vha, 0x1062,
2522 "Failed to allocate login IOCB.\n");
2523 return QLA_MEMORY_ALLOC_FAILED;
/* Populate the PLOGI IOCB; port_id is little-endian (al_pa first). */
2526 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2527 lg->entry_count = 1;
2528 lg->handle = make_handle(req->id, lg->handle);
2529 lg->nport_handle = cpu_to_le16(loop_id);
2530 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2532 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2534 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2535 lg->port_id[0] = al_pa;
2536 lg->port_id[1] = area;
2537 lg->port_id[2] = domain;
2538 lg->vp_index = vha->vp_idx;
2539 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2540 (ha->r_a_tov / 10 * 2) + 2);
2541 if (rval != QLA_SUCCESS) {
2542 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2543 "Failed to issue login IOCB (%x).\n", rval);
2544 } else if (lg->entry_status != 0) {
2545 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2546 "Failed to complete IOCB -- error status (%x).\n",
2548 rval = QLA_FUNCTION_FAILED;
2549 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2550 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2551 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2553 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2554 "Failed to complete IOCB -- completion status (%x) "
2555 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
/* Translate logio sub-codes to legacy mailbox status values. */
2559 case LSC_SCODE_PORTID_USED:
2560 mb[0] = MBS_PORT_ID_USED;
2561 mb[1] = LSW(iop[1]);
2563 case LSC_SCODE_NPORT_USED:
2564 mb[0] = MBS_LOOP_ID_USED;
2566 case LSC_SCODE_NOLINK:
2567 case LSC_SCODE_NOIOCB:
2568 case LSC_SCODE_NOXCB:
2569 case LSC_SCODE_CMD_FAILED:
2570 case LSC_SCODE_NOFABRIC:
2571 case LSC_SCODE_FW_NOT_READY:
2572 case LSC_SCODE_NOT_LOGGED_IN:
2573 case LSC_SCODE_NOPCB:
2574 case LSC_SCODE_ELS_REJECT:
2575 case LSC_SCODE_CMD_PARAM_ERR:
2576 case LSC_SCODE_NONPORT:
2577 case LSC_SCODE_LOGGED_IN:
2578 case LSC_SCODE_NOFLOGI_ACC:
2580 mb[0] = MBS_COMMAND_ERROR;
2584 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2585 "Done %s.\n", __func__);
2587 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2589 mb[0] = MBS_COMMAND_COMPLETE;
2591 if (iop[0] & BIT_4) {
2597 /* Passback COS information. */
2599 if (lg->io_parameter[7] || lg->io_parameter[8])
2600 mb[10] |= BIT_0; /* Class 2. */
2601 if (lg->io_parameter[9] || lg->io_parameter[10])
2602 mb[10] |= BIT_1; /* Class 3. */
2603 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2604 mb[10] |= BIT_7; /* Confirmed Completion
2609 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2615 * qla2x00_login_fabric
2616 * Issue login fabric port mailbox command.
2619 * ha = adapter block pointer.
2620 * loop_id = device loop ID.
2621 * domain = device domain.
2622 * area = device area.
2623 * al_pa = device AL_PA.
2624 * status = pointer for return status.
2625 * opt = command options.
2626 * TARGET_QUEUE_LOCK must be released.
2627 * ADAPTER_STATE_LOCK must be released.
2630 * qla2x00 local function return status code.
2636 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2637 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2641 mbx_cmd_t *mcp = &mc;
2642 struct qla_hw_data *ha = vha->hw;
2644 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2645 "Entered %s.\n", __func__);
2647 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2648 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2649 if (HAS_EXTENDED_IDS(ha)) {
2650 mcp->mb[1] = loop_id;
2652 mcp->out_mb |= MBX_10;
2654 mcp->mb[1] = (loop_id << 8) | opt;
/* Destination port ID packed as mb[2]=domain, mb[3]=area|al_pa. */
2656 mcp->mb[2] = domain;
2657 mcp->mb[3] = area << 8 | al_pa;
2659 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2660 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2662 rval = qla2x00_mailbox_command(vha, mcp);
2664 /* Return mailbox statuses. */
2671 /* COS retrieved from Get-Port-Database mailbox command. */
2675 if (rval != QLA_SUCCESS) {
2676 /* RLU tmp code: need to change main mailbox_command function to
2677 * return ok even when the mailbox completion value is not
2678 * SUCCESS. The caller needs to be responsible to interpret
2679 * the return values of this mailbox command if we're not
2680 * to change too much of the existing code.
/* Known login-failure completion codes are reported via mb[], not rval. */
2682 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2683 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2684 mcp->mb[0] == 0x4006)
2688 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2689 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2690 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2693 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2694 "Done %s.\n", __func__);
2701 * qla2x00_login_local_device
2702 * Issue login loop port mailbox command.
2705 * ha = adapter block pointer.
2706 * loop_id = device loop ID.
2707 * opt = command options.
2710 * Return status code.
/*
 * Log in a local-loop port. On FWI2-capable (24xx+) adapters this is
 * delegated to qla24xx_login_fabric(); otherwise the legacy
 * LOGIN LOOP PORT mailbox command is issued and the completion mailbox
 * registers are passed back through @mb_ret (which may be NULL).
 * NOTE(review): interior lines are elided in this chunk.
 */
2717 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2718 uint16_t *mb_ret, uint8_t opt)
2722 mbx_cmd_t *mcp = &mc;
2723 struct qla_hw_data *ha = vha->hw;
2725 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2726 "Entered %s.\n", __func__);
/* FWI2 firmware has no loop-port login mailbox; use the IOCB path. */
2728 if (IS_FWI2_CAPABLE(ha))
2729 return qla24xx_login_fabric(vha, fcport->loop_id,
2730 fcport->d_id.b.domain, fcport->d_id.b.area,
2731 fcport->d_id.b.al_pa, mb_ret, opt);
2733 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2734 if (HAS_EXTENDED_IDS(ha))
2735 mcp->mb[1] = fcport->loop_id;
2737 mcp->mb[1] = fcport->loop_id << 8;
2739 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2740 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2741 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2743 rval = qla2x00_mailbox_command(vha, mcp);
2745 /* Return mailbox statuses. */
2746 if (mb_ret != NULL) {
2747 mb_ret[0] = mcp->mb[0];
2748 mb_ret[1] = mcp->mb[1];
2749 mb_ret[6] = mcp->mb[6];
2750 mb_ret[7] = mcp->mb[7];
2753 if (rval != QLA_SUCCESS) {
2754 /* AV tmp code: need to change main mailbox_command function to
2755 * return ok even when the mailbox completion value is not
2756 * SUCCESS. The caller needs to be responsible to interpret
2757 * the return values of this mailbox command if we're not
2758 * to change too much of the existing code.
2760 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2763 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2764 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2765 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2768 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2769 "Done %s.\n", __func__);
/*
 * Log out a fabric port on FWI2-capable adapters by building a
 * LOGINOUT_PORT_IOCB (implicit LOGO) in a DMA-pool buffer and issuing it
 * with a timeout derived from R_A_TOV. Returns QLA_MEMORY_ALLOC_FAILED if
 * the IOCB buffer cannot be allocated.
 * NOTE(review): interior lines are elided in this chunk.
 */
2776 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2777 uint8_t area, uint8_t al_pa)
2780 struct logio_entry_24xx *lg;
2782 struct qla_hw_data *ha = vha->hw;
2783 struct req_que *req;
2785 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2786 "Entered %s.\n", __func__);
2788 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2790 ql_log(ql_log_warn, vha, 0x106e,
2791 "Failed to allocate logout IOCB.\n");
2792 return QLA_MEMORY_ALLOC_FAILED;
2796 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2797 lg->entry_count = 1;
2798 lg->handle = make_handle(req->id, lg->handle);
2799 lg->nport_handle = cpu_to_le16(loop_id);
2801 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
/* Port ID stored little-end first: AL_PA, area, domain. */
2803 lg->port_id[0] = al_pa;
2804 lg->port_id[1] = area;
2805 lg->port_id[2] = domain;
2806 lg->vp_index = vha->vp_idx;
/* Timeout: 2x R_A_TOV (r_a_tov is in 100ms units) plus 2s slack. */
2807 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2808 (ha->r_a_tov / 10 * 2) + 2);
2809 if (rval != QLA_SUCCESS) {
2810 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2811 "Failed to issue logout IOCB (%x).\n", rval);
2812 } else if (lg->entry_status != 0) {
2813 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2814 "Failed to complete IOCB -- error status (%x).\n",
2816 rval = QLA_FUNCTION_FAILED;
2817 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2818 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2819 "Failed to complete IOCB -- completion status (%x) "
2820 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2821 le32_to_cpu(lg->io_parameter[0]),
2822 le32_to_cpu(lg->io_parameter[1]));
2825 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2826 "Done %s.\n", __func__);
2829 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2835 * qla2x00_fabric_logout
2836 * Issue logout fabric port mailbox command.
2839 * ha = adapter block pointer.
2840 * loop_id = device loop ID.
2841 * TARGET_QUEUE_LOCK must be released.
2842 * ADAPTER_STATE_LOCK must be released.
2845 * qla2x00 local function return status code.
/*
 * Issue the legacy LOGOUT FABRIC PORT mailbox command for @loop_id.
 * @domain/@area/@al_pa are accepted but not visibly used by this legacy
 * mailbox variant in the code shown here.
 * NOTE(review): interior lines are elided in this chunk.
 */
2851 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2852 uint8_t area, uint8_t al_pa)
2856 mbx_cmd_t *mcp = &mc;
2858 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2859 "Entered %s.\n", __func__);
2861 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2862 mcp->out_mb = MBX_1|MBX_0;
/* Extended IDs: full loop ID in mb[1]; otherwise ID in the high byte. */
2863 if (HAS_EXTENDED_IDS(vha->hw)) {
2864 mcp->mb[1] = loop_id;
2866 mcp->out_mb |= MBX_10;
2868 mcp->mb[1] = loop_id << 8;
2871 mcp->in_mb = MBX_1|MBX_0;
2872 mcp->tov = MBX_TOV_SECONDS;
2874 rval = qla2x00_mailbox_command(vha, mcp);
2876 if (rval != QLA_SUCCESS) {
2878 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2879 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2882 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2883 "Done %s.\n", __func__);
2890 * qla2x00_full_login_lip
2891 * Issue full login LIP mailbox command.
2894 * ha = adapter block pointer.
2895 * TARGET_QUEUE_LOCK must be released.
2896 * ADAPTER_STATE_LOCK must be released.
2899 * qla2x00 local function return status code.
/*
 * Issue the LIP (loop initialization) full-login mailbox command.
 * On FWI2-capable parts mb[1] carries BIT_4; on older parts it is 0.
 * NOTE(review): interior lines are elided in this chunk.
 */
2905 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2909 mbx_cmd_t *mcp = &mc;
2911 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2912 "Entered %s.\n", __func__);
2914 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2915 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
2918 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2920 mcp->tov = MBX_TOV_SECONDS;
2922 rval = qla2x00_mailbox_command(vha, mcp);
2924 if (rval != QLA_SUCCESS) {
2926 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2929 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2930 "Done %s.\n", __func__);
2937 * qla2x00_get_id_list
2940 * ha = adapter block pointer.
2943 * qla2x00 local function return status code.
/*
 * Issue the GET ID LIST mailbox command, DMA-ing the port ID list into
 * @id_list (caller-provided DMA buffer). The FWI2 and legacy register
 * layouts differ; on success mb[1] (entry count) is stored in *entries.
 * Returns QLA_FUNCTION_FAILED if @id_list is NULL.
 * NOTE(review): interior lines are elided in this chunk.
 */
2949 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2954 mbx_cmd_t *mcp = &mc;
2956 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2957 "Entered %s.\n", __func__);
2959 if (id_list == NULL)
2960 return QLA_FUNCTION_FAILED;
2962 mcp->mb[0] = MBC_GET_ID_LIST;
2963 mcp->out_mb = MBX_0;
/* FWI2 layout: address in mb[2,3,6,7], VP index in mb[9]. */
2964 if (IS_FWI2_CAPABLE(vha->hw)) {
2965 mcp->mb[2] = MSW(id_list_dma);
2966 mcp->mb[3] = LSW(id_list_dma);
2967 mcp->mb[6] = MSW(MSD(id_list_dma));
2968 mcp->mb[7] = LSW(MSD(id_list_dma));
2970 mcp->mb[9] = vha->vp_idx;
2971 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
/* Legacy layout: address spread across mb[1,2,3,6]. */
2973 mcp->mb[1] = MSW(id_list_dma);
2974 mcp->mb[2] = LSW(id_list_dma);
2975 mcp->mb[3] = MSW(MSD(id_list_dma));
2976 mcp->mb[6] = LSW(MSD(id_list_dma));
2977 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2979 mcp->in_mb = MBX_1|MBX_0;
2980 mcp->tov = MBX_TOV_SECONDS;
2982 rval = qla2x00_mailbox_command(vha, mcp);
2984 if (rval != QLA_SUCCESS) {
2986 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2988 *entries = mcp->mb[1];
2989 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2990 "Done %s.\n", __func__);
2997 * qla2x00_get_resource_cnts
2998 * Get current firmware resource counts.
3001 * ha = adapter block pointer.
3004 * qla2x00 local function return status code.
/*
 * Query firmware resource counts (exchange and IOCB counts, NPIV vport
 * limit, FCF count) via MBC_GET_RESOURCE_COUNTS and cache them in the
 * qla_hw_data fields shown below.
 * NOTE(review): interior lines are elided in this chunk.
 */
3010 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
3012 struct qla_hw_data *ha = vha->hw;
3015 mbx_cmd_t *mcp = &mc;
3017 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
3018 "Entered %s.\n", __func__);
3020 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
3021 mcp->out_mb = MBX_0;
3022 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
/* mb[12] (FCF count) only returned on 81xx/83xx/27xx/28xx firmware. */
3023 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
3024 IS_QLA27XX(ha) || IS_QLA28XX(ha))
3025 mcp->in_mb |= MBX_12;
3026 mcp->tov = MBX_TOV_SECONDS;
3028 rval = qla2x00_mailbox_command(vha, mcp);
3030 if (rval != QLA_SUCCESS) {
3032 ql_dbg(ql_dbg_mbx, vha, 0x107d,
3033 "Failed mb[0]=%x.\n", mcp->mb[0]);
3035 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
3036 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
3037 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
3038 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
3039 mcp->mb[11], mcp->mb[12]);
/* Cache returned counts for later resource accounting. */
3041 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
3042 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
3043 ha->cur_fw_xcb_count = mcp->mb[3];
3044 ha->orig_fw_xcb_count = mcp->mb[6];
3045 ha->cur_fw_iocb_count = mcp->mb[7];
3046 ha->orig_fw_iocb_count = mcp->mb[10];
3047 if (ha->flags.npiv_supported)
3048 ha->max_npiv_vports = mcp->mb[11];
3049 if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3050 ha->fw_max_fcf_count = mcp->mb[12];
3057 * qla2x00_get_fcal_position_map
3058 * Get FCAL (LILP) position map using mailbox command
3061 * ha = adapter state pointer.
3062 * pos_map = buffer pointer (can be NULL).
3065 * qla2x00 local function return status code.
/*
 * Retrieve the FC-AL (LILP) position map into a temporary DMA-pool buffer
 * and, on success, copy it to @pos_map (if non-NULL) and report the entry
 * count. pmap[0] is the map length byte per the LILP format.
 * NOTE(review): interior lines are elided in this chunk.
 */
3071 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map,
3076 mbx_cmd_t *mcp = &mc;
3078 dma_addr_t pmap_dma;
3079 struct qla_hw_data *ha = vha->hw;
3081 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
3082 "Entered %s.\n", __func__);
3084 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
3086 ql_log(ql_log_warn, vha, 0x1080,
3087 "Memory alloc failed.\n");
3088 return QLA_MEMORY_ALLOC_FAILED;
3091 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
3092 mcp->mb[2] = MSW(pmap_dma);
3093 mcp->mb[3] = LSW(pmap_dma);
3094 mcp->mb[6] = MSW(MSD(pmap_dma));
3095 mcp->mb[7] = LSW(MSD(pmap_dma));
3096 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3097 mcp->in_mb = MBX_1|MBX_0;
3098 mcp->buf_size = FCAL_MAP_SIZE;
3099 mcp->flags = MBX_DMA_IN;
3100 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
3101 rval = qla2x00_mailbox_command(vha, mcp);
3103 if (rval == QLA_SUCCESS) {
3104 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
3105 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
3106 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
3107 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
3111 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
3113 *num_entries = pmap[0];
3115 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
3117 if (rval != QLA_SUCCESS) {
3118 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
3120 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
3121 "Done %s.\n", __func__);
3128 * qla2x00_get_link_status
3131 * ha = adapter block pointer.
3132 * loop_id = device loop ID.
3133 * ret_buf = pointer to link status return buffer.
3137 * BIT_0 = mem alloc error.
3138 * BIT_1 = mailbox error.
/*
 * Issue GET LINK STATUS, DMA-ing link statistics for @loop_id into
 * @stats. Only the dwords up to link_up_cnt are byte-swapped afterwards
 * (firmware returns le32 values).
 * NOTE(review): interior lines are elided in this chunk; the swap
 * expression inside the final for-loop is not visible here.
 */
3141 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
3142 struct link_statistics *stats, dma_addr_t stats_dma)
3146 mbx_cmd_t *mcp = &mc;
3147 uint32_t *iter = (uint32_t *)stats;
3148 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
3149 struct qla_hw_data *ha = vha->hw;
3151 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
3152 "Entered %s.\n", __func__);
3154 mcp->mb[0] = MBC_GET_LINK_STATUS;
3155 mcp->mb[2] = MSW(LSD(stats_dma));
3156 mcp->mb[3] = LSW(LSD(stats_dma));
3157 mcp->mb[6] = MSW(MSD(stats_dma));
3158 mcp->mb[7] = LSW(MSD(stats_dma));
3159 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
/* Per-family loop-ID placement: FWI2 uses mb[1] directly. */
3161 if (IS_FWI2_CAPABLE(ha)) {
3162 mcp->mb[1] = loop_id;
3165 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
3166 mcp->in_mb |= MBX_1;
3167 } else if (HAS_EXTENDED_IDS(ha)) {
3168 mcp->mb[1] = loop_id;
3170 mcp->out_mb |= MBX_10|MBX_1;
3172 mcp->mb[1] = loop_id << 8;
3173 mcp->out_mb |= MBX_1;
3175 mcp->tov = MBX_TOV_SECONDS;
3176 mcp->flags = IOCTL_CMD;
3177 rval = qla2x00_mailbox_command(vha, mcp);
3179 if (rval == QLA_SUCCESS) {
3180 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3181 ql_dbg(ql_dbg_mbx, vha, 0x1085,
3182 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3183 rval = QLA_FUNCTION_FAILED;
3185 /* Re-endianize - firmware data is le32. */
3186 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3187 "Done %s.\n", __func__);
3188 for ( ; dwords--; iter++)
3193 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
/*
 * Retrieve private link statistics on 24xx+ adapters via
 * qla24xx_send_mb_cmd() (which runs the mailbox through the IOCB path),
 * then byte-swap the whole stats structure from le32.
 * NOTE(review): mc is used for the send while results are read through
 * mcp (= &mc); interior lines are elided in this chunk.
 */
3200 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3201 dma_addr_t stats_dma, uint16_t options)
3205 mbx_cmd_t *mcp = &mc;
3206 uint32_t *iter = (uint32_t *)stats;
3207 ushort dwords = sizeof(*stats)/sizeof(*iter);
3209 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3210 "Entered %s.\n", __func__);
3212 memset(&mc, 0, sizeof(mc));
3213 mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3214 mc.mb[2] = MSW(LSD(stats_dma));
3215 mc.mb[3] = LSW(LSD(stats_dma));
3216 mc.mb[6] = MSW(MSD(stats_dma));
3217 mc.mb[7] = LSW(MSD(stats_dma));
3219 mc.mb[9] = vha->vp_idx;
3220 mc.mb[10] = options;
3222 rval = qla24xx_send_mb_cmd(vha, &mc);
3224 if (rval == QLA_SUCCESS) {
3225 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3226 ql_dbg(ql_dbg_mbx, vha, 0x1089,
3227 "Failed mb[0]=%x.\n", mcp->mb[0]);
3228 rval = QLA_FUNCTION_FAILED;
3230 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3231 "Done %s.\n", __func__);
3232 /* Re-endianize - firmware data is le32. */
3233 for ( ; dwords--; iter++)
3238 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
/*
 * Abort an outstanding command (srb) on 24xx+ adapters. Locates the
 * command's handle in the qpair's outstanding array, builds an
 * ABORT_IOCB in a DMA-pool buffer and issues it. Delegates to the async
 * abort path when ql2xasynctmfenable is set. On completion, firmware
 * writes the result into the IOCB's nport_handle field (0 == success).
 * NOTE(review): interior lines are elided in this chunk.
 */
3245 qla24xx_abort_command(srb_t *sp)
3248 unsigned long flags = 0;
3250 struct abort_entry_24xx *abt;
3253 fc_port_t *fcport = sp->fcport;
3254 struct scsi_qla_host *vha = fcport->vha;
3255 struct qla_hw_data *ha = vha->hw;
3256 struct req_que *req;
3257 struct qla_qpair *qpair = sp->qpair;
3259 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3260 "Entered %s.\n", __func__);
3263 req = sp->qpair->req;
3265 return QLA_ERR_NO_QPAIR;
3267 if (ql2xasynctmfenable)
3268 return qla24xx_async_abort_command(sp);
/* Find the outstanding-command handle for sp under the qpair lock. */
3270 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3271 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3272 if (req->outstanding_cmds[handle] == sp)
3275 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3276 if (handle == req->num_outstanding_cmds) {
3277 /* Command not found. */
3278 return QLA_ERR_NOT_FOUND;
3281 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3283 ql_log(ql_log_warn, vha, 0x108d,
3284 "Failed to allocate abort IOCB.\n");
3285 return QLA_MEMORY_ALLOC_FAILED;
3288 abt->entry_type = ABORT_IOCB_TYPE;
3289 abt->entry_count = 1;
3290 abt->handle = make_handle(req->id, abt->handle);
3291 abt->nport_handle = cpu_to_le16(fcport->loop_id);
3292 abt->handle_to_abort = make_handle(req->id, handle);
3293 abt->port_id[0] = fcport->d_id.b.al_pa;
3294 abt->port_id[1] = fcport->d_id.b.area;
3295 abt->port_id[2] = fcport->d_id.b.domain;
3296 abt->vp_index = fcport->vha->vp_idx;
3298 abt->req_que_no = cpu_to_le16(req->id);
3299 /* Need to pass original sp */
3300 qla_nvme_abort_set_option(abt, sp);
3302 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3303 if (rval != QLA_SUCCESS) {
3304 ql_dbg(ql_dbg_mbx, vha, 0x108e,
3305 "Failed to issue IOCB (%x).\n", rval);
3306 } else if (abt->entry_status != 0) {
3307 ql_dbg(ql_dbg_mbx, vha, 0x108f,
3308 "Failed to complete IOCB -- error status (%x).\n",
3310 rval = QLA_FUNCTION_FAILED;
/* Firmware reuses nport_handle as the completion status field. */
3311 } else if (abt->nport_handle != cpu_to_le16(0)) {
3312 ql_dbg(ql_dbg_mbx, vha, 0x1090,
3313 "Failed to complete IOCB -- completion status (%x).\n",
3314 le16_to_cpu(abt->nport_handle));
3315 if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
3316 rval = QLA_FUNCTION_PARAMETER_ERROR;
3318 rval = QLA_FUNCTION_FAILED;
3320 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3321 "Done %s.\n", __func__);
3323 if (rval == QLA_SUCCESS)
3324 qla_nvme_abort_process_comp_status(abt, sp);
3326 qla_wait_nvme_release_cmd_kref(sp);
3328 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
/*
 * One DMA buffer that serves as both the task-management request IOCB
 * and the status response written back by firmware.
 * NOTE(review): the member wrapping the two entries (accessed as ->p.tsk
 * and ->p.sts below, presumably a union named 'p') is elided here.
 */
3333 struct tsk_mgmt_cmd {
3335 struct tsk_mgmt_entry tsk;
3336 struct sts_entry_24xx sts;
/*
 * Issue a task-management function (target reset or LUN reset, selected
 * by @type) as a TSK_MGMT IOCB for @fcport, validate the returned status
 * entry (entry status, completion status, FCP response info), then issue
 * a marker IOCB to resynchronize. @l is the LUN (used for TCF_LUN_RESET).
 * NOTE(review): interior lines are elided in this chunk; 'sts' is set to
 * the response half of 'tsk' in code not visible here — TODO confirm.
 */
3341 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3342 uint64_t l, int tag)
3345 struct tsk_mgmt_cmd *tsk;
3346 struct sts_entry_24xx *sts;
3348 scsi_qla_host_t *vha;
3349 struct qla_hw_data *ha;
3350 struct req_que *req;
3351 struct qla_qpair *qpair;
3357 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3358 "Entered %s.\n", __func__);
3360 if (vha->vp_idx && vha->qpair) {
3366 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3368 ql_log(ql_log_warn, vha, 0x1093,
3369 "Failed to allocate task management IOCB.\n");
3370 return QLA_MEMORY_ALLOC_FAILED;
3373 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3374 tsk->p.tsk.entry_count = 1;
3375 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle);
3376 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
/* Timeout: 2x R_A_TOV (r_a_tov is in 100ms units). */
3377 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3378 tsk->p.tsk.control_flags = cpu_to_le32(type);
3379 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3380 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3381 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3382 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
/* LUN field only meaningful for LUN reset; stored in FCP byte order. */
3383 if (type == TCF_LUN_RESET) {
3384 int_to_scsilun(l, &tsk->p.tsk.lun);
3385 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3386 sizeof(tsk->p.tsk.lun));
3390 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3391 if (rval != QLA_SUCCESS) {
3392 ql_dbg(ql_dbg_mbx, vha, 0x1094,
3393 "Failed to issue %s reset IOCB (%x).\n", name, rval);
3394 } else if (sts->entry_status != 0) {
3395 ql_dbg(ql_dbg_mbx, vha, 0x1095,
3396 "Failed to complete IOCB -- error status (%x).\n",
3398 rval = QLA_FUNCTION_FAILED;
3399 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3400 ql_dbg(ql_dbg_mbx, vha, 0x1096,
3401 "Failed to complete IOCB -- completion status (%x).\n",
3402 le16_to_cpu(sts->comp_status));
3403 rval = QLA_FUNCTION_FAILED;
3404 } else if (le16_to_cpu(sts->scsi_status) &
3405 SS_RESPONSE_INFO_LEN_VALID) {
3406 if (le32_to_cpu(sts->rsp_data_len) < 4) {
3407 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3408 "Ignoring inconsistent data length -- not enough "
3409 "response info (%d).\n",
3410 le32_to_cpu(sts->rsp_data_len));
/* data[3] is the FCP response code; non-zero means TMF rejected. */
3411 } else if (sts->data[3]) {
3412 ql_dbg(ql_dbg_mbx, vha, 0x1098,
3413 "Failed to complete IOCB -- response (%x).\n",
3415 rval = QLA_FUNCTION_FAILED;
3419 /* Issue marker IOCB. */
3420 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
3421 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
3422 if (rval2 != QLA_SUCCESS) {
3423 ql_dbg(ql_dbg_mbx, vha, 0x1099,
3424 "Failed to issue marker IOCB (%x).\n", rval2);
3426 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3427 "Done %s.\n", __func__);
3430 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
/*
 * Target reset entry point: use the async TMF path when enabled on
 * FWI2-capable hardware, otherwise the synchronous IOCB path.
 */
3436 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3438 struct qla_hw_data *ha = fcport->vha->hw;
3440 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3441 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3443 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
/*
 * LUN reset entry point: mirrors qla24xx_abort_target() but issues
 * TCF_LUN_RESET for the given LUN @l.
 */
3447 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3449 struct qla_hw_data *ha = fcport->vha->hw;
3451 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3452 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3454 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
/*
 * Ask firmware to generate a system error (for testing/diagnostics).
 * Only supported on 23xx and FWI2-capable parts.
 * NOTE(review): interior lines are elided in this chunk.
 */
3458 qla2x00_system_error(scsi_qla_host_t *vha)
3462 mbx_cmd_t *mcp = &mc;
3463 struct qla_hw_data *ha = vha->hw;
3465 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3466 return QLA_FUNCTION_FAILED;
3468 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3469 "Entered %s.\n", __func__);
3471 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3472 mcp->out_mb = MBX_0;
3476 rval = qla2x00_mailbox_command(vha, mcp);
3478 if (rval != QLA_SUCCESS) {
3479 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3481 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3482 "Done %s.\n", __func__);
/*
 * Write one serdes register word at @addr. Supported on
 * 25xx/2031/27xx/28xx; 2031 takes only the low data byte in mb[2].
 * NOTE(review): interior lines are elided in this chunk (the non-2031
 * data-register setup is not visible).
 */
3489 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3493 mbx_cmd_t *mcp = &mc;
3495 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3496 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3497 return QLA_FUNCTION_FAILED;
3499 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3500 "Entered %s.\n", __func__);
3502 mcp->mb[0] = MBC_WRITE_SERDES;
3504 if (IS_QLA2031(vha->hw))
3505 mcp->mb[2] = data & 0xff;
3510 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3512 mcp->tov = MBX_TOV_SECONDS;
3514 rval = qla2x00_mailbox_command(vha, mcp);
3516 if (rval != QLA_SUCCESS) {
3517 ql_dbg(ql_dbg_mbx, vha, 0x1183,
3518 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3520 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3521 "Done %s.\n", __func__);
/*
 * Read one serdes register word at @addr into *data. Supported on
 * 25xx/2031/27xx/28xx; 2031 returns only the low byte of mb[1].
 * NOTE(review): *data is written unconditionally from the completion
 * mailbox, even if rval reports failure (as visible here).
 */
3528 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3532 mbx_cmd_t *mcp = &mc;
3534 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3535 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3536 return QLA_FUNCTION_FAILED;
3538 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3539 "Entered %s.\n", __func__);
3541 mcp->mb[0] = MBC_READ_SERDES;
3544 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3545 mcp->in_mb = MBX_1|MBX_0;
3546 mcp->tov = MBX_TOV_SECONDS;
3548 rval = qla2x00_mailbox_command(vha, mcp);
3550 if (IS_QLA2031(vha->hw))
3551 *data = mcp->mb[1] & 0xff;
3555 if (rval != QLA_SUCCESS) {
3556 ql_dbg(ql_dbg_mbx, vha, 0x1186,
3557 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3559 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3560 "Done %s.\n", __func__);
/*
 * Write a 32-bit serdes register on ISP8044 via the ethernet serdes
 * get/set mailbox command (HCS_WRITE_SERDES sub-op). Address and data
 * are split into LSW/MSW halves across mb[3..6].
 * NOTE(review): interior lines are elided in this chunk.
 */
3567 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3571 mbx_cmd_t *mcp = &mc;
3573 if (!IS_QLA8044(vha->hw))
3574 return QLA_FUNCTION_FAILED;
3576 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3577 "Entered %s.\n", __func__);
3579 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3580 mcp->mb[1] = HCS_WRITE_SERDES;
3581 mcp->mb[3] = LSW(addr);
3582 mcp->mb[4] = MSW(addr);
3583 mcp->mb[5] = LSW(data);
3584 mcp->mb[6] = MSW(data);
3585 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3587 mcp->tov = MBX_TOV_SECONDS;
3589 rval = qla2x00_mailbox_command(vha, mcp);
3591 if (rval != QLA_SUCCESS) {
3592 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3593 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3595 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3596 "Done %s.\n", __func__);
/*
 * Read a 32-bit serdes register on ISP8044 (HCS_READ_SERDES sub-op).
 * Result is assembled from mb[2] (high word) and mb[1] (low word).
 * NOTE(review): *data is written unconditionally from the completion
 * mailbox, even if rval reports failure (as visible here).
 */
3603 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3607 mbx_cmd_t *mcp = &mc;
3609 if (!IS_QLA8044(vha->hw))
3610 return QLA_FUNCTION_FAILED;
3612 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3613 "Entered %s.\n", __func__);
3615 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3616 mcp->mb[1] = HCS_READ_SERDES;
3617 mcp->mb[3] = LSW(addr);
3618 mcp->mb[4] = MSW(addr);
3619 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3620 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3621 mcp->tov = MBX_TOV_SECONDS;
3623 rval = qla2x00_mailbox_command(vha, mcp);
3625 *data = mcp->mb[2] << 16 | mcp->mb[1];
3627 if (rval != QLA_SUCCESS) {
3628 ql_dbg(ql_dbg_mbx, vha, 0x118a,
3629 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3631 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3632 "Done %s.\n", __func__);
3639 * qla2x00_set_serdes_params() - set firmware serial-link (serdes) options
3641 * @sw_em_1g: serial link options
3642 * @sw_em_2g: serial link options
3643 * @sw_em_4g: serial link options
/*
 * Program serial-link (serdes) parameters for 1G/2G/4G rates via
 * MBC_SERDES_PARAMS. BIT_15 in each word marks the value as valid.
 * NOTE(review): interior lines are elided in this chunk.
 */
3648 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3649 uint16_t sw_em_2g, uint16_t sw_em_4g)
3653 mbx_cmd_t *mcp = &mc;
3655 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3656 "Entered %s.\n", __func__);
3658 mcp->mb[0] = MBC_SERDES_PARAMS;
3660 mcp->mb[2] = sw_em_1g | BIT_15;
3661 mcp->mb[3] = sw_em_2g | BIT_15;
3662 mcp->mb[4] = sw_em_4g | BIT_15;
3663 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3665 mcp->tov = MBX_TOV_SECONDS;
3667 rval = qla2x00_mailbox_command(vha, mcp);
3669 if (rval != QLA_SUCCESS) {
3671 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3672 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3675 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3676 "Done %s.\n", __func__);
/*
 * Stop the firmware on FWI2-capable adapters. Maps MBS_INVALID_COMMAND
 * to QLA_INVALID_COMMAND so callers can detect unsupported firmware.
 * NOTE(review): interior lines are elided in this chunk.
 */
3683 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3687 mbx_cmd_t *mcp = &mc;
3689 if (!IS_FWI2_CAPABLE(vha->hw))
3690 return QLA_FUNCTION_FAILED;
3692 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3693 "Entered %s.\n", __func__);
3695 mcp->mb[0] = MBC_STOP_FIRMWARE;
3697 mcp->out_mb = MBX_1|MBX_0;
3701 rval = qla2x00_mailbox_command(vha, mcp);
3703 if (rval != QLA_SUCCESS) {
3704 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3705 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3706 rval = QLA_INVALID_COMMAND;
3708 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3709 "Done %s.\n", __func__);
/*
 * Enable the Extended Firmware Trace (EFT) into the @eft_dma buffer of
 * @buffers trace buffers, with AEN notification disabled. FWI2-capable
 * hardware only; bails out if the PCI channel is offline (EEH).
 * NOTE(review): interior lines are elided in this chunk.
 */
3716 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3721 mbx_cmd_t *mcp = &mc;
3723 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3724 "Entered %s.\n", __func__);
3726 if (!IS_FWI2_CAPABLE(vha->hw))
3727 return QLA_FUNCTION_FAILED;
3729 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3730 return QLA_FUNCTION_FAILED;
3732 mcp->mb[0] = MBC_TRACE_CONTROL;
3733 mcp->mb[1] = TC_EFT_ENABLE;
/* 64-bit buffer address split LSW/MSW across mb[2..5]. */
3734 mcp->mb[2] = LSW(eft_dma);
3735 mcp->mb[3] = MSW(eft_dma);
3736 mcp->mb[4] = LSW(MSD(eft_dma));
3737 mcp->mb[5] = MSW(MSD(eft_dma));
3738 mcp->mb[6] = buffers;
3739 mcp->mb[7] = TC_AEN_DISABLE;
3740 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3741 mcp->in_mb = MBX_1|MBX_0;
3742 mcp->tov = MBX_TOV_SECONDS;
3744 rval = qla2x00_mailbox_command(vha, mcp);
3745 if (rval != QLA_SUCCESS) {
3746 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3747 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3748 rval, mcp->mb[0], mcp->mb[1]);
3750 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3751 "Done %s.\n", __func__);
/*
 * Disable the Extended Firmware Trace. FWI2-capable hardware only;
 * bails out if the PCI channel is offline (EEH).
 * NOTE(review): interior lines are elided in this chunk.
 */
3758 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3762 mbx_cmd_t *mcp = &mc;
3764 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3765 "Entered %s.\n", __func__);
3767 if (!IS_FWI2_CAPABLE(vha->hw))
3768 return QLA_FUNCTION_FAILED;
3770 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3771 return QLA_FUNCTION_FAILED;
3773 mcp->mb[0] = MBC_TRACE_CONTROL;
3774 mcp->mb[1] = TC_EFT_DISABLE;
3775 mcp->out_mb = MBX_1|MBX_0;
3776 mcp->in_mb = MBX_1|MBX_0;
3777 mcp->tov = MBX_TOV_SECONDS;
3779 rval = qla2x00_mailbox_command(vha, mcp);
3780 if (rval != QLA_SUCCESS) {
3781 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3782 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3783 rval, mcp->mb[0], mcp->mb[1]);
3785 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3786 "Done %s.\n", __func__);
/*
 * Enable Fibre Channel Event (FCE) tracing into @fce_dma. On success,
 * the first 8 completion mailbox words are copied to @mb (and elided
 * code presumably fills *dwords — TODO confirm). Supported on
 * 25xx/81xx/83xx/27xx/28xx only.
 * NOTE(review): interior lines are elided in this chunk.
 */
3793 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3794 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3798 mbx_cmd_t *mcp = &mc;
3800 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3801 "Entered %s.\n", __func__);
3803 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3804 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
3805 !IS_QLA28XX(vha->hw))
3806 return QLA_FUNCTION_FAILED;
3808 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3809 return QLA_FUNCTION_FAILED;
3811 mcp->mb[0] = MBC_TRACE_CONTROL;
3812 mcp->mb[1] = TC_FCE_ENABLE;
3813 mcp->mb[2] = LSW(fce_dma);
3814 mcp->mb[3] = MSW(fce_dma);
3815 mcp->mb[4] = LSW(MSD(fce_dma));
3816 mcp->mb[5] = MSW(MSD(fce_dma));
3817 mcp->mb[6] = buffers;
3818 mcp->mb[7] = TC_AEN_DISABLE;
3820 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3821 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3822 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3824 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3825 mcp->tov = MBX_TOV_SECONDS;
3827 rval = qla2x00_mailbox_command(vha, mcp);
3828 if (rval != QLA_SUCCESS) {
3829 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3830 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3831 rval, mcp->mb[0], mcp->mb[1]);
3833 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3834 "Done %s.\n", __func__);
3837 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
/*
 * Disable FCE tracing and return the final 64-bit write (*wr) and read
 * (*rd) trace pointers assembled from the completion mailbox words.
 * FWI2-capable hardware only; bails out if the PCI channel is offline.
 * NOTE(review): interior lines are elided in this chunk.
 */
3846 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3850 mbx_cmd_t *mcp = &mc;
3852 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3853 "Entered %s.\n", __func__);
3855 if (!IS_FWI2_CAPABLE(vha->hw))
3856 return QLA_FUNCTION_FAILED;
3858 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3859 return QLA_FUNCTION_FAILED;
3861 mcp->mb[0] = MBC_TRACE_CONTROL;
3862 mcp->mb[1] = TC_FCE_DISABLE;
3863 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3864 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3865 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3867 mcp->tov = MBX_TOV_SECONDS;
3869 rval = qla2x00_mailbox_command(vha, mcp);
3870 if (rval != QLA_SUCCESS) {
3871 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3872 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3873 rval, mcp->mb[0], mcp->mb[1]);
3875 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3876 "Done %s.\n", __func__);
/* Write pointer from mb[2..5], read pointer from mb[6..9] (16-bit
 * words packed into 64-bit values). */
3879 *wr = (uint64_t) mcp->mb[5] << 48 |
3880 (uint64_t) mcp->mb[4] << 32 |
3881 (uint64_t) mcp->mb[3] << 16 |
3882 (uint64_t) mcp->mb[2];
3884 *rd = (uint64_t) mcp->mb[9] << 48 |
3885 (uint64_t) mcp->mb[8] << 32 |
3886 (uint64_t) mcp->mb[7] << 16 |
3887 (uint64_t) mcp->mb[6];
/*
 * Query the iIDMA port speed for @loop_id via MBC_PORT_PARAMS (query
 * mode: mb[2] = mb[3] = 0). On success *port_speed gets mb[3]; the
 * elided code presumably copies completion mailboxes into @mb.
 * NOTE(review): interior lines are elided in this chunk.
 */
3894 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3895 uint16_t *port_speed, uint16_t *mb)
3899 mbx_cmd_t *mcp = &mc;
3901 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3902 "Entered %s.\n", __func__);
3904 if (!IS_IIDMA_CAPABLE(vha->hw))
3905 return QLA_FUNCTION_FAILED;
3907 mcp->mb[0] = MBC_PORT_PARAMS;
3908 mcp->mb[1] = loop_id;
3909 mcp->mb[2] = mcp->mb[3] = 0;
3910 mcp->mb[9] = vha->vp_idx;
3911 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3912 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3913 mcp->tov = MBX_TOV_SECONDS;
3915 rval = qla2x00_mailbox_command(vha, mcp);
3917 /* Return mailbox statuses. */
3924 if (rval != QLA_SUCCESS) {
3925 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3927 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3928 "Done %s.\n", __func__);
3930 *port_speed = mcp->mb[3];
/*
 * Set the iIDMA port speed for @loop_id via MBC_PORT_PARAMS. Only the
 * low 6 bits of @port_speed are sent; the elided code presumably copies
 * completion mailboxes into @mb.
 * NOTE(review): interior lines are elided in this chunk.
 */
3937 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3938 uint16_t port_speed, uint16_t *mb)
3942 mbx_cmd_t *mcp = &mc;
3944 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3945 "Entered %s.\n", __func__);
3947 if (!IS_IIDMA_CAPABLE(vha->hw))
3948 return QLA_FUNCTION_FAILED;
3950 mcp->mb[0] = MBC_PORT_PARAMS;
3951 mcp->mb[1] = loop_id;
3953 mcp->mb[3] = port_speed & 0x3F;
3954 mcp->mb[9] = vha->vp_idx;
3955 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3956 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3957 mcp->tov = MBX_TOV_SECONDS;
3959 rval = qla2x00_mailbox_command(vha, mcp);
3961 /* Return mailbox statuses. */
3968 if (rval != QLA_SUCCESS) {
3969 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3970 "Failed=%x.\n", rval);
3972 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3973 "Done %s.\n", __func__);
/*
 * Async-event handler for the Report ID Acquisition IOCB. Runs in response
 * queue context; depending on rptid_entry->format it records the acquired
 * port id for loop (format 0), fabric/N2N vport (format 1) or RIDA format 2
 * N2N direct-connect topologies. Must not block or re-issue mailbox commands
 * here; deferred work is punted to the DPC thread via dpc_flags bits.
 */
3980 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3981 struct vp_rpt_id_entry_24xx *rptid_entry)
3983 struct qla_hw_data *ha = vha->hw;
3984 scsi_qla_host_t *vp = NULL;
3985 unsigned long flags;
3988 struct fc_port *fcport;
3990 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3991 "Entered %s.\n", __func__);
3993 if (rptid_entry->entry_status != 0)
/* Firmware reports the 24-bit port id LSB-first in port_id[] */
3996 id.b.domain = rptid_entry->port_id[2];
3997 id.b.area = rptid_entry->port_id[1];
3998 id.b.al_pa = rptid_entry->port_id[0];
4000 ha->flags.n2n_ae = 0;
4002 if (rptid_entry->format == 0) {
/* Format 0: loop topology */
4004 ql_dbg(ql_dbg_async, vha, 0x10b7,
4005 "Format 0 : Number of VPs setup %d, number of "
4006 "VPs acquired %d.\n", rptid_entry->vp_setup,
4007 rptid_entry->vp_acquired);
4008 ql_dbg(ql_dbg_async, vha, 0x10b8,
4009 "Primary port id %02x%02x%02x.\n",
4010 rptid_entry->port_id[2], rptid_entry->port_id[1],
4011 rptid_entry->port_id[0]);
4012 ha->current_topology = ISP_CFG_NL;
4013 qla_update_host_map(vha, id);
4015 } else if (rptid_entry->format == 1) {
/* Format 1: fabric / point-to-point; topology in u.f1.flags */
4017 ql_dbg(ql_dbg_async, vha, 0x10b9,
4018 "Format 1: VP[%d] enabled - status %d - with "
4019 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
4020 rptid_entry->vp_status,
4021 rptid_entry->port_id[2], rptid_entry->port_id[1],
4022 rptid_entry->port_id[0]);
4023 ql_dbg(ql_dbg_async, vha, 0x5075,
4024 "Format 1: Remote WWPN %8phC.\n",
4025 rptid_entry->u.f1.port_name);
4027 ql_dbg(ql_dbg_async, vha, 0x5075,
4028 "Format 1: WWPN %8phC.\n",
4031 switch (rptid_entry->u.f1.flags & TOPO_MASK) {
4033 ha->current_topology = ISP_CFG_N;
4034 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
/* invalidate all known sessions; the peer will be re-found below */
4035 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4036 fcport->scan_state = QLA_FCPORT_SCAN;
4037 fcport->n2n_flag = 0;
/* N2N tie-break: the higher WWPN initiates the login */
4040 if (wwn_to_u64(vha->port_name) >
4041 wwn_to_u64(rptid_entry->u.f1.port_name)) {
4043 vha->d_id.b.al_pa = 1;
4044 ha->flags.n2n_bigger = 1;
4047 ql_dbg(ql_dbg_async, vha, 0x5075,
4048 "Format 1: assign local id %x remote id %x\n",
4049 vha->d_id.b24, id.b24);
4051 ql_dbg(ql_dbg_async, vha, 0x5075,
4052 "Format 1: Remote login - Waiting for WWPN %8phC.\n",
4053 rptid_entry->u.f1.port_name);
4054 ha->flags.n2n_bigger = 0;
4057 fcport = qla2x00_find_fcport_by_wwpn(vha,
4058 rptid_entry->u.f1.port_name, 1);
4059 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
/* existing session for the peer: refresh its N2N login state */
4063 fcport->plogi_nack_done_deadline = jiffies + HZ;
4064 fcport->dm_login_expire = jiffies +
4065 QLA_N2N_WAIT_TIME * HZ;
4066 fcport->scan_state = QLA_FCPORT_FOUND;
4067 fcport->n2n_flag = 1;
4068 fcport->keep_nport_handle = 1;
4069 fcport->login_retry = vha->hw->login_retry_count;
4070 fcport->fc4_type = FS_FC4TYPE_FCP;
4071 if (vha->flags.nvme_enabled)
4072 fcport->fc4_type |= FS_FC4TYPE_NVME;
4074 if (wwn_to_u64(vha->port_name) >
4075 wwn_to_u64(fcport->port_name)) {
4079 switch (fcport->disc_state) {
4081 set_bit(RELOGIN_NEEDED,
4084 case DSC_DELETE_PEND:
4087 qlt_schedule_sess_for_deletion(fcport);
/* no session yet: queue creation of one in process context */
4091 qla24xx_post_newsess_work(vha, &id,
4092 rptid_entry->u.f1.port_name,
4093 rptid_entry->u.f1.node_name,
4098 /* if our portname is higher then initiate N2N login */
4100 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
4103 ha->current_topology = ISP_CFG_FL;
4106 ha->current_topology = ISP_CFG_F;
4112 ha->flags.gpsc_supported = 1;
4113 ha->current_topology = ISP_CFG_F;
4114 /* buffer to buffer credit flag */
4115 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
4117 if (rptid_entry->vp_idx == 0) {
4118 if (rptid_entry->vp_status == VP_STAT_COMPL) {
4119 /* FA-WWN is only for physical port */
4120 if (qla_ini_mode_enabled(vha) &&
4121 ha->flags.fawwpn_enabled &&
4122 (rptid_entry->u.f1.flags &
4124 memcpy(vha->port_name,
4125 rptid_entry->u.f1.port_name,
4129 qla_update_host_map(vha, id);
4132 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
4133 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4135 if (rptid_entry->vp_status != VP_STAT_COMPL &&
4136 rptid_entry->vp_status != VP_STAT_ID_CHG) {
4137 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
4138 "Could not acquire ID for VP[%d].\n",
4139 rptid_entry->vp_idx);
/* find the virtual port this entry belongs to */
4144 spin_lock_irqsave(&ha->vport_slock, flags);
4145 list_for_each_entry(vp, &ha->vp_list, list) {
4146 if (rptid_entry->vp_idx == vp->vp_idx) {
4151 spin_unlock_irqrestore(&ha->vport_slock, flags);
4156 qla_update_host_map(vp, id);
4159 * Cannot configure here as we are still sitting on the
4160 * response queue. Handle it in dpc context.
4162 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
4163 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
4164 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
4166 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
4167 qla2xxx_wake_dpc(vha);
4168 } else if (rptid_entry->format == 2) {
/* Format 2: RIDA for N2N direct connect; FW supplies both nport ids */
4169 ql_dbg(ql_dbg_async, vha, 0x505f,
4170 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
4171 rptid_entry->port_id[2], rptid_entry->port_id[1],
4172 rptid_entry->port_id[0]);
4174 ql_dbg(ql_dbg_async, vha, 0x5075,
4175 "N2N: Remote WWPN %8phC.\n",
4176 rptid_entry->u.f2.port_name);
4178 /* N2N. direct connect */
4179 ha->current_topology = ISP_CFG_N;
4180 ha->flags.rida_fmt2 = 1;
4181 vha->d_id.b.domain = rptid_entry->port_id[2];
4182 vha->d_id.b.area = rptid_entry->port_id[1];
4183 vha->d_id.b.al_pa = rptid_entry->port_id[0];
4185 ha->flags.n2n_ae = 1;
4186 spin_lock_irqsave(&ha->vport_slock, flags);
4187 qla_update_vp_map(vha, SET_AL_PA);
4188 spin_unlock_irqrestore(&ha->vport_slock, flags);
4190 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4191 fcport->scan_state = QLA_FCPORT_SCAN;
4192 fcport->n2n_flag = 0;
4195 fcport = qla2x00_find_fcport_by_wwpn(vha,
4196 rptid_entry->u.f2.port_name, 1);
4199 fcport->login_retry = vha->hw->login_retry_count;
4200 fcport->plogi_nack_done_deadline = jiffies + HZ;
4201 fcport->scan_state = QLA_FCPORT_FOUND;
4202 fcport->keep_nport_handle = 1;
4203 fcport->n2n_flag = 1;
4204 fcport->d_id.b.domain =
4205 rptid_entry->u.f2.remote_nport_id[2];
4206 fcport->d_id.b.area =
4207 rptid_entry->u.f2.remote_nport_id[1];
4208 fcport->d_id.b.al_pa =
4209 rptid_entry->u.f2.remote_nport_id[0];
4212 * For the case where remote port sending PRLO, FW
4213 * sends up RIDA Format 2 as an indication of session
4214 * loss. In other word, FW state change from PRLI
4215 * complete back to PLOGI complete. Delete the
4216 * session and let relogin drive the reconnect.
4218 if (atomic_read(&fcport->state) == FCS_ONLINE)
4219 qlt_schedule_sess_for_deletion(fcport);
4225 * qla24xx_modify_vp_config
4226 * Change VP configuration for vha
4229 * vha = adapter block pointer.
4232 * qla2xxx local function return status code.
4238 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4241 struct vp_config_entry_24xx *vpmod;
4242 dma_addr_t vpmod_dma;
4243 struct qla_hw_data *ha = vha->hw;
4244 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4246 /* This can be called by the parent */
4248 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4249 "Entered %s.\n", __func__);
4251 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4253 ql_log(ql_log_warn, vha, 0x10bc,
4254 "Failed to allocate modify VP IOCB.\n");
4255 return QLA_MEMORY_ALLOC_FAILED;
4258 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4259 vpmod->entry_count = 1;
4260 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4261 vpmod->vp_count = 1;
4262 vpmod->vp_index1 = vha->vp_idx;
4263 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
4265 qlt_modify_vp_config(vha, vpmod);
4267 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4268 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4269 vpmod->entry_count = 1;
4271 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4272 if (rval != QLA_SUCCESS) {
4273 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4274 "Failed to issue VP config IOCB (%x).\n", rval);
/*
 * BUG FIX: the middle branch previously tested comp_status != 0,
 * which (a) shadowed the CS_COMPLETE comparison below, making it
 * unreachable for any nonzero completion status, and (b) printed a
 * raw __le16. The IOCB-level error indicator is entry_status (u8);
 * comp_status is checked separately against CS_COMPLETE.
 */
4275 } else if (vpmod->entry_status != 0) {
4276 ql_dbg(ql_dbg_mbx, vha, 0x10be,
4277 "Failed to complete IOCB -- error status (%x).\n",
4278 vpmod->entry_status);
4279 rval = QLA_FUNCTION_FAILED;
4280 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4281 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4282 "Failed to complete IOCB -- completion status (%x).\n",
4283 le16_to_cpu(vpmod->comp_status));
4284 rval = QLA_FUNCTION_FAILED;
4287 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4288 "Done %s.\n", __func__);
4289 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
/* IOCB buffer is DMA-pool memory; always release it */
4291 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4297 * qla2x00_send_change_request
4298 * Receive or disable RSCN request from fabric controller
4301 * ha = adapter block pointer
4302 * format = registration format:
4304 * 1 - Fabric detected registration
4305 * 2 - N_port detected registration
4306 * 3 - Full registration
4307 * FF - clear registration
4308 * vp_idx = Virtual port index
4311 * qla2x00 local function return status code.
4318 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
/* Simple mailbox wrapper: MBC_SEND_CHANGE_REQUEST with format/vp_idx */
4323 mbx_cmd_t *mcp = &mc;
4325 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4326 "Entered %s.\n", __func__);
4328 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4329 mcp->mb[1] = format;
4330 mcp->mb[9] = vp_idx;
4331 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4332 mcp->in_mb = MBX_0|MBX_1;
4333 mcp->tov = MBX_TOV_SECONDS;
4335 rval = qla2x00_mailbox_command(vha, mcp);
4337 if (rval == QLA_SUCCESS) {
/* mailbox completed; mb[0] still needs to report command completion */
4338 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
/*
 * Dump RISC RAM starting at 'addr' into the DMA buffer at req_dma.
 * Uses the extended dump command when the address needs >16 bits or the
 * ISP is FWI2-capable; otherwise the legacy MBC_DUMP_RISC_RAM form.
 */
4348 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4353 mbx_cmd_t *mcp = &mc;
4355 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4356 "Entered %s.\n", __func__);
4358 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4359 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4360 mcp->mb[8] = MSW(addr);
4362 mcp->out_mb = MBX_10|MBX_8|MBX_0;
4364 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4365 mcp->out_mb = MBX_0;
/* 64-bit DMA address split across mb[2,3,6,7] */
4367 mcp->mb[1] = LSW(addr);
4368 mcp->mb[2] = MSW(req_dma);
4369 mcp->mb[3] = LSW(req_dma);
4370 mcp->mb[6] = MSW(MSD(req_dma));
4371 mcp->mb[7] = LSW(MSD(req_dma));
4372 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4373 if (IS_FWI2_CAPABLE(vha->hw)) {
4374 mcp->mb[4] = MSW(size);
4375 mcp->mb[5] = LSW(size);
4376 mcp->out_mb |= MBX_5|MBX_4;
4378 mcp->mb[4] = LSW(size);
4379 mcp->out_mb |= MBX_4;
4383 mcp->tov = MBX_TOV_SECONDS;
4385 rval = qla2x00_mailbox_command(vha, mcp);
4387 if (rval != QLA_SUCCESS) {
4388 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4389 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4391 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4392 "Done %s.\n", __func__);
4397 /* 84XX Support **************************************************************/
/* Request and response share one DMA buffer via this union. */
4399 struct cs84xx_mgmt_cmd {
4401 struct verify_chip_entry_84xx req;
4402 struct verify_chip_rsp_84xx rsp;
/*
 * Issue the ISP84xx Verify Chip IOCB, retrying once without a firmware
 * update if the update attempt fails. On success caches the operational
 * firmware version under cs84xx->access_lock. status[0]/status[1] return
 * completion status and failure code to the caller.
 */
4407 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4410 struct cs84xx_mgmt_cmd *mn;
4413 unsigned long flags;
4414 struct qla_hw_data *ha = vha->hw;
4416 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4417 "Entered %s.\n", __func__);
4419 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4421 return QLA_MEMORY_ALLOC_FAILED;
4425 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4426 /* Diagnostic firmware? */
4427 /* options |= MENLO_DIAG_FW; */
4428 /* We update the firmware with only one data sequence. */
4429 options |= VCO_END_OF_DATA;
4433 memset(mn, 0, sizeof(*mn));
4434 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4435 mn->p.req.entry_count = 1;
4436 mn->p.req.options = cpu_to_le16(options);
4438 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4439 "Dump of Verify Request.\n");
4440 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
/* long (120s) timeout: chip verify/update is slow */
4443 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4444 if (rval != QLA_SUCCESS) {
4445 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4446 "Failed to issue verify IOCB (%x).\n", rval);
4450 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4451 "Dump of Verify Response.\n");
4452 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4455 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4456 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4457 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4458 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4459 "cs=%x fc=%x.\n", status[0], status[1]);
4461 if (status[0] != CS_COMPLETE) {
4462 rval = QLA_FUNCTION_FAILED;
4463 if (!(options & VCO_DONT_UPDATE_FW)) {
4464 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4465 "Firmware update failed. Retrying "
4466 "without update firmware.\n");
4467 options |= VCO_DONT_UPDATE_FW;
4468 options &= ~VCO_FORCE_UPDATE;
4472 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4473 "Firmware updated to %x.\n",
4474 le32_to_cpu(mn->p.rsp.fw_ver));
4476 /* NOTE: we only update OP firmware. */
4477 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4478 ha->cs84xx->op_fw_version =
4479 le32_to_cpu(mn->p.rsp.fw_ver);
4480 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4486 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4488 if (rval != QLA_SUCCESS) {
4489 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4490 "Failed=%x.\n", rval);
4492 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4493 "Done %s.\n", __func__);
/*
 * Initialize a multiqueue request queue in firmware via
 * MBC_INITIALIZE_MULTIQ, programming the queue DMA base, length, ids and
 * resetting the in/out pointers. No-op unless firmware has been started.
 */
4500 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4503 unsigned long flags;
4505 mbx_cmd_t *mcp = &mc;
4506 struct qla_hw_data *ha = vha->hw;
4508 if (!ha->flags.fw_started)
4511 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4512 "Entered %s.\n", __func__);
4514 if (IS_SHADOW_REG_CAPABLE(ha))
/* BIT_13: enable shadow registers for this queue */
4515 req->options |= BIT_13;
4517 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4518 mcp->mb[1] = req->options;
4519 mcp->mb[2] = MSW(LSD(req->dma));
4520 mcp->mb[3] = LSW(LSD(req->dma));
4521 mcp->mb[6] = MSW(MSD(req->dma));
4522 mcp->mb[7] = LSW(MSD(req->dma));
4523 mcp->mb[5] = req->length;
4525 mcp->mb[10] = req->rsp->id;
4526 mcp->mb[12] = req->qos;
4527 mcp->mb[11] = req->vp_idx;
4528 mcp->mb[13] = req->rid;
4529 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4532 mcp->mb[4] = req->id;
4533 /* que in ptr index */
4535 /* que out ptr index */
4536 mcp->mb[9] = *req->out_ptr = 0;
4537 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4538 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4540 mcp->flags = MBX_DMA_OUT;
4541 mcp->tov = MBX_TOV_SECONDS * 2;
4543 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4545 mcp->in_mb |= MBX_1;
4546 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4547 mcp->out_mb |= MBX_15;
4548 /* debug q create issue in SR-IOV */
4549 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
/* zero the hardware queue pointers under hardware_lock before enabling */
4552 spin_lock_irqsave(&ha->hardware_lock, flags);
4553 if (!(req->options & BIT_0)) {
4554 wrt_reg_dword(req->req_q_in, 0);
4555 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4556 wrt_reg_dword(req->req_q_out, 0);
4558 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4560 rval = qla2x00_mailbox_command(vha, mcp);
4561 if (rval != QLA_SUCCESS) {
4562 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4563 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4565 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4566 "Done %s.\n", __func__);
/*
 * Initialize a multiqueue response queue in firmware; mirror image of
 * qla25xx_init_req_que() for the response side (MSI-X entry in mb[14],
 * in/out pointer registers reset under hardware_lock).
 */
4573 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4576 unsigned long flags;
4578 mbx_cmd_t *mcp = &mc;
4579 struct qla_hw_data *ha = vha->hw;
4581 if (!ha->flags.fw_started)
4584 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4585 "Entered %s.\n", __func__);
4587 if (IS_SHADOW_REG_CAPABLE(ha))
4588 rsp->options |= BIT_13;
4590 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4591 mcp->mb[1] = rsp->options;
4592 mcp->mb[2] = MSW(LSD(rsp->dma));
4593 mcp->mb[3] = LSW(LSD(rsp->dma));
4594 mcp->mb[6] = MSW(MSD(rsp->dma));
4595 mcp->mb[7] = LSW(MSD(rsp->dma));
4596 mcp->mb[5] = rsp->length;
4597 mcp->mb[14] = rsp->msix->entry;
4598 mcp->mb[13] = rsp->rid;
4599 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4602 mcp->mb[4] = rsp->id;
4603 /* que in ptr index */
4604 mcp->mb[8] = *rsp->in_ptr = 0;
4605 /* que out ptr index */
4607 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4608 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4610 mcp->flags = MBX_DMA_OUT;
4611 mcp->tov = MBX_TOV_SECONDS * 2;
4613 if (IS_QLA81XX(ha)) {
4614 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4615 mcp->in_mb |= MBX_1;
4616 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4617 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4618 mcp->in_mb |= MBX_1;
4619 /* debug q create issue in SR-IOV */
4620 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4623 spin_lock_irqsave(&ha->hardware_lock, flags);
4624 if (!(rsp->options & BIT_0)) {
4625 wrt_reg_dword(rsp->rsp_q_out, 0);
4626 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4627 wrt_reg_dword(rsp->rsp_q_in, 0);
4630 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4632 rval = qla2x00_mailbox_command(vha, mcp);
4633 if (rval != QLA_SUCCESS) {
4634 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4635 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4637 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4638 "Done %s.\n", __func__);
/*
 * Acknowledge an Inter-Driver Communication (IDC) request by echoing the
 * QLA_IDC_ACK_REGS mailbox registers back to firmware via MBC_IDC_ACK.
 */
4645 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4649 mbx_cmd_t *mcp = &mc;
4651 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4652 "Entered %s.\n", __func__);
4654 mcp->mb[0] = MBC_IDC_ACK;
4655 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4656 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4658 mcp->tov = MBX_TOV_SECONDS;
4660 rval = qla2x00_mailbox_command(vha, mcp);
4662 if (rval != QLA_SUCCESS) {
4663 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4664 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4666 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4667 "Done %s.\n", __func__);
/*
 * Query the flash sector size via Flash Access Control (FAC). Supported
 * only on ISP81xx/83xx/27xx/28xx; result is returned through *sector_size
 * (valid only on success).
 */
4674 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4678 mbx_cmd_t *mcp = &mc;
4680 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4681 "Entered %s.\n", __func__);
4683 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4684 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4685 return QLA_FUNCTION_FAILED;
4687 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4688 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4689 mcp->out_mb = MBX_1|MBX_0;
4690 mcp->in_mb = MBX_1|MBX_0;
4691 mcp->tov = MBX_TOV_SECONDS;
4693 rval = qla2x00_mailbox_command(vha, mcp);
4695 if (rval != QLA_SUCCESS) {
4696 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4697 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4698 rval, mcp->mb[0], mcp->mb[1]);
4700 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4701 "Done %s.\n", __func__);
4702 *sector_size = mcp->mb[1];
/*
 * Toggle flash write access through FAC: enable != 0 requests write
 * enable, 0 restores write protection. Limited to 81xx/83xx/27xx/28xx.
 */
4709 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4713 mbx_cmd_t *mcp = &mc;
4715 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4716 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4717 return QLA_FUNCTION_FAILED;
4719 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4720 "Entered %s.\n", __func__);
4722 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4723 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4724 FAC_OPT_CMD_WRITE_PROTECT;
4725 mcp->out_mb = MBX_1|MBX_0;
4726 mcp->in_mb = MBX_1|MBX_0;
4727 mcp->tov = MBX_TOV_SECONDS;
4729 rval = qla2x00_mailbox_command(vha, mcp);
4731 if (rval != QLA_SUCCESS) {
4732 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4733 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4734 rval, mcp->mb[0], mcp->mb[1]);
4736 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4737 "Done %s.\n", __func__);
/*
 * Erase a range of flash sectors [start, finish] through FAC.
 * Addresses are split LSW/MSW across mb[2..5]. 81xx/83xx/27xx/28xx only.
 */
4744 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4748 mbx_cmd_t *mcp = &mc;
4750 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4751 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4752 return QLA_FUNCTION_FAILED;
4754 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4755 "Entered %s.\n", __func__);
4757 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4758 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4759 mcp->mb[2] = LSW(start);
4760 mcp->mb[3] = MSW(start);
4761 mcp->mb[4] = LSW(finish);
4762 mcp->mb[5] = MSW(finish);
4763 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4764 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4765 mcp->tov = MBX_TOV_SECONDS;
4767 rval = qla2x00_mailbox_command(vha, mcp);
4769 if (rval != QLA_SUCCESS) {
4770 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4771 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4772 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4774 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4775 "Done %s.\n", __func__);
/*
 * Acquire (lock != 0) or release the FAC flash semaphore. Returns
 * QLA_SUCCESS silently on ISPs without FAC support.
 */
4782 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
4784 int rval = QLA_SUCCESS;
4786 mbx_cmd_t *mcp = &mc;
4787 struct qla_hw_data *ha = vha->hw;
4789 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4790 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4793 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4794 "Entered %s.\n", __func__);
4796 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4797 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
4798 FAC_OPT_CMD_UNLOCK_SEMAPHORE);
4799 mcp->out_mb = MBX_1|MBX_0;
4800 mcp->in_mb = MBX_1|MBX_0;
4801 mcp->tov = MBX_TOV_SECONDS;
4803 rval = qla2x00_mailbox_command(vha, mcp);
4805 if (rval != QLA_SUCCESS) {
4806 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4807 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4808 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4810 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4811 "Done %s.\n", __func__);
/* Restart the Management Processor Interface (MPI) firmware. */
4818 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4822 mbx_cmd_t *mcp = &mc;
4824 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4825 "Entered %s.\n", __func__);
4827 mcp->mb[0] = MBC_RESTART_MPI_FW;
4828 mcp->out_mb = MBX_0;
4829 mcp->in_mb = MBX_0|MBX_1;
4830 mcp->tov = MBX_TOV_SECONDS;
4832 rval = qla2x00_mailbox_command(vha, mcp);
4834 if (rval != QLA_SUCCESS) {
4835 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4836 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4837 rval, mcp->mb[0], mcp->mb[1]);
4839 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4840 "Done %s.\n", __func__);
/*
 * Report the driver version string to P3P (82xx) firmware. The string is
 * packed two bytes per mailbox register into mb[4..15]; unused registers
 * are still written (zero-padded) so the firmware sees a fixed layout.
 */
4847 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4851 mbx_cmd_t *mcp = &mc;
4855 struct qla_hw_data *ha = vha->hw;
4857 if (!IS_P3P_TYPE(ha))
4858 return QLA_FUNCTION_FAILED;
4860 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4861 "Entered %s.\n", __func__);
4863 str = (__force __le16 *)version;
4864 len = strlen(version);
4866 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4867 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4868 mcp->out_mb = MBX_1|MBX_0;
/* copy version string 16 bits at a time into mb[4..15] */
4869 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4870 mcp->mb[i] = le16_to_cpup(str);
4871 mcp->out_mb |= 1<<i;
/* pad the remaining registers so all of mb[4..15] are sent */
4873 for (; i < 16; i++) {
4875 mcp->out_mb |= 1<<i;
4877 mcp->in_mb = MBX_1|MBX_0;
4878 mcp->tov = MBX_TOV_SECONDS;
4880 rval = qla2x00_mailbox_command(vha, mcp);
4882 if (rval != QLA_SUCCESS) {
4883 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4884 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4886 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4887 "Done %s.\n", __func__);
/*
 * Report the driver version string to ISP25xx+ firmware via an
 * RNID SET_VERSION mailbox command using a DMA buffer from the small
 * DMA pool (4-byte header "\x7\x3\x11\x0" followed by the version text).
 */
4894 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4898 mbx_cmd_t *mcp = &mc;
4903 struct qla_hw_data *ha = vha->hw;
4905 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4907 return QLA_FUNCTION_FAILED;
4909 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4910 "Entered %s.\n", __func__);
4912 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4914 ql_log(ql_log_warn, vha, 0x117f,
4915 "Failed to allocate driver version param.\n");
4916 return QLA_MEMORY_ALLOC_FAILED;
/* fixed 4-byte RNID version-descriptor header */
4919 memcpy(str, "\x7\x3\x11\x0", 4);
4921 len = dwlen * 4 - 4;
4922 memset(str + 4, 0, len);
/* truncate to available space; buffer was pre-zeroed above */
4923 if (len > strlen(version))
4924 len = strlen(version);
4925 memcpy(str + 4, version, len);
4927 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4928 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4929 mcp->mb[2] = MSW(LSD(str_dma));
4930 mcp->mb[3] = LSW(LSD(str_dma));
4931 mcp->mb[6] = MSW(MSD(str_dma));
4932 mcp->mb[7] = LSW(MSD(str_dma));
4933 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4934 mcp->in_mb = MBX_1|MBX_0;
4935 mcp->tov = MBX_TOV_SECONDS;
4937 rval = qla2x00_mailbox_command(vha, mcp);
4939 if (rval != QLA_SUCCESS) {
4940 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4941 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4943 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4944 "Done %s.\n", __func__);
4947 dma_pool_free(ha->s_dma_pool, str, str_dma);
/*
 * Fetch the port-login (PLOGI) payload template from firmware into 'buf'
 * (bufsiz bytes, DMA-mapped at buf_dma), then byte-swap each 32-bit word
 * in place from little-endian to CPU order.
 */
4953 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4954 void *buf, uint16_t bufsiz)
4958 mbx_cmd_t *mcp = &mc;
4961 if (!IS_FWI2_CAPABLE(vha->hw))
4962 return QLA_FUNCTION_FAILED;
4964 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4965 "Entered %s.\n", __func__);
4967 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4968 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4969 mcp->mb[2] = MSW(buf_dma);
4970 mcp->mb[3] = LSW(buf_dma);
4971 mcp->mb[6] = MSW(MSD(buf_dma));
4972 mcp->mb[7] = LSW(MSD(buf_dma));
/* firmware takes the size in 32-bit words */
4973 mcp->mb[8] = bufsiz/4;
4974 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4975 mcp->in_mb = MBX_1|MBX_0;
4976 mcp->tov = MBX_TOV_SECONDS;
4978 rval = qla2x00_mailbox_command(vha, mcp);
4980 if (rval != QLA_SUCCESS) {
4981 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4982 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4984 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4985 "Done %s.\n", __func__);
/* convert template words to CPU endianness in place */
4986 bp = (uint32_t *) buf;
4987 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4988 *bp = le32_to_cpu((__force __le32)*bp);
/* Max number of ELS opcodes the driver registers for PUREX passthrough */
4994 #define PUREX_CMD_COUNT 4
/*
 * Tell firmware which unsolicited ELS commands (RDP, FPIN, AUTH) the
 * driver wants delivered via PUREX IOCBs. A 1-bit-per-opcode bitmap is
 * built in a coherent DMA buffer and handed to firmware through an RNID
 * SET ELS_CMD mailbox command.
 */
4996 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
5000 mbx_cmd_t *mcp = &mc;
5001 uint8_t *els_cmd_map;
5002 uint8_t active_cnt = 0;
5003 dma_addr_t els_cmd_map_dma;
5004 uint8_t cmd_opcode[PUREX_CMD_COUNT];
5005 uint8_t i, index, purex_bit;
5006 struct qla_hw_data *ha = vha->hw;
5008 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) &&
5009 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5012 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197,
5013 "Entered %s.\n", __func__);
5015 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
5016 &els_cmd_map_dma, GFP_KERNEL);
5018 ql_log(ql_log_warn, vha, 0x7101,
5019 "Failed to allocate RDP els command param.\n");
5020 return QLA_MEMORY_ALLOC_FAILED;
5023 /* List of Purex ELS */
5024 if (ql2xrdpenable) {
5025 cmd_opcode[active_cnt] = ELS_RDP;
5028 if (ha->flags.scm_supported_f) {
5029 cmd_opcode[active_cnt] = ELS_FPIN;
5032 if (ha->flags.edif_enabled) {
5033 cmd_opcode[active_cnt] = ELS_AUTH_ELS;
/* set one bit per enabled opcode in the bitmap */
5037 for (i = 0; i < active_cnt; i++) {
5038 index = cmd_opcode[i] / 8;
5039 purex_bit = cmd_opcode[i] % 8;
5040 els_cmd_map[index] |= 1 << purex_bit;
5043 mcp->mb[0] = MBC_SET_RNID_PARAMS;
5044 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8;
5045 mcp->mb[2] = MSW(LSD(els_cmd_map_dma));
5046 mcp->mb[3] = LSW(LSD(els_cmd_map_dma));
5047 mcp->mb[6] = MSW(MSD(els_cmd_map_dma));
5048 mcp->mb[7] = LSW(MSD(els_cmd_map_dma));
5049 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5050 mcp->in_mb = MBX_1|MBX_0;
5051 mcp->tov = MBX_TOV_SECONDS;
5052 mcp->flags = MBX_DMA_OUT;
5053 mcp->buf_size = ELS_CMD_MAP_SIZE;
5054 rval = qla2x00_mailbox_command(vha, mcp);
5056 if (rval != QLA_SUCCESS) {
5057 ql_dbg(ql_dbg_mbx, vha, 0x118d,
5058 "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]);
5060 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
5061 "Done %s.\n", __func__);
5064 dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
5065 els_cmd_map, els_cmd_map_dma);
/*
 * Read the ASIC temperature via an RNID GET ASIC_TEMP mailbox command.
 * The value is returned to the caller through *temp (set from mb[1]
 * outside the visible range of this excerpt).
 */
5071 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
5075 mbx_cmd_t *mcp = &mc;
5077 if (!IS_FWI2_CAPABLE(vha->hw))
5078 return QLA_FUNCTION_FAILED;
5080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
5081 "Entered %s.\n", __func__);
5083 mcp->mb[0] = MBC_GET_RNID_PARAMS;
5084 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
5085 mcp->out_mb = MBX_1|MBX_0;
5086 mcp->in_mb = MBX_1|MBX_0;
5087 mcp->tov = MBX_TOV_SECONDS;
5089 rval = qla2x00_mailbox_command(vha, mcp);
5092 if (rval != QLA_SUCCESS) {
5093 ql_dbg(ql_dbg_mbx, vha, 0x115a,
5094 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
5096 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
5097 "Done %s.\n", __func__);
/*
 * Read from the SFP transceiver EEPROM: 'len' bytes from device 'dev'
 * at offset 'off' into the DMA buffer at sfp_dma, with option bits 'opt'.
 * Maps MBS_COMMAND_ERROR/mb[1]==0x22 (no SFP present) to
 * QLA_INTERFACE_ERROR so callers can distinguish "absent" from failure.
 */
5104 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5105 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5109 mbx_cmd_t *mcp = &mc;
5110 struct qla_hw_data *ha = vha->hw;
5112 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
5113 "Entered %s.\n", __func__);
5115 if (!IS_FWI2_CAPABLE(ha))
5116 return QLA_FUNCTION_FAILED;
5121 mcp->mb[0] = MBC_READ_SFP;
5123 mcp->mb[2] = MSW(LSD(sfp_dma));
5124 mcp->mb[3] = LSW(LSD(sfp_dma));
5125 mcp->mb[6] = MSW(MSD(sfp_dma));
5126 mcp->mb[7] = LSW(MSD(sfp_dma));
5130 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5131 mcp->in_mb = MBX_1|MBX_0;
5132 mcp->tov = MBX_TOV_SECONDS;
5134 rval = qla2x00_mailbox_command(vha, mcp);
5139 if (rval != QLA_SUCCESS) {
5140 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
5141 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5142 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
5143 /* sfp is not there */
5144 rval = QLA_INTERFACE_ERROR;
5147 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
5148 "Done %s.\n", __func__);
/*
 * Write to the SFP transceiver EEPROM: mirror image of qla2x00_read_sfp()
 * using MBC_WRITE_SFP; data is taken from the DMA buffer at sfp_dma.
 */
5155 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5156 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5160 mbx_cmd_t *mcp = &mc;
5161 struct qla_hw_data *ha = vha->hw;
5163 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
5164 "Entered %s.\n", __func__);
5166 if (!IS_FWI2_CAPABLE(ha))
5167 return QLA_FUNCTION_FAILED;
5175 mcp->mb[0] = MBC_WRITE_SFP;
5177 mcp->mb[2] = MSW(LSD(sfp_dma));
5178 mcp->mb[3] = LSW(LSD(sfp_dma));
5179 mcp->mb[6] = MSW(MSD(sfp_dma));
5180 mcp->mb[7] = LSW(MSD(sfp_dma));
5184 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5185 mcp->in_mb = MBX_1|MBX_0;
5186 mcp->tov = MBX_TOV_SECONDS;
5188 rval = qla2x00_mailbox_command(vha, mcp);
5190 if (rval != QLA_SUCCESS) {
5191 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
5192 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5194 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
5195 "Done %s.\n", __func__);
/*
 * Fetch XGMAC (10G MAC) statistics into the DMA buffer at stats_dma.
 * Sizes are exchanged with firmware in 32-bit words (hence the >>2 / <<2);
 * *actual_size reports how many bytes firmware actually returned.
 * CNA-capable adapters only.
 */
5202 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
5203 uint16_t size_in_bytes, uint16_t *actual_size)
5207 mbx_cmd_t *mcp = &mc;
5209 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
5210 "Entered %s.\n", __func__);
5212 if (!IS_CNA_CAPABLE(vha->hw))
5213 return QLA_FUNCTION_FAILED;
5215 mcp->mb[0] = MBC_GET_XGMAC_STATS;
5216 mcp->mb[2] = MSW(stats_dma);
5217 mcp->mb[3] = LSW(stats_dma);
5218 mcp->mb[6] = MSW(MSD(stats_dma));
5219 mcp->mb[7] = LSW(MSD(stats_dma));
5220 mcp->mb[8] = size_in_bytes >> 2;
5221 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
5222 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5223 mcp->tov = MBX_TOV_SECONDS;
5225 rval = qla2x00_mailbox_command(vha, mcp);
5227 if (rval != QLA_SUCCESS) {
5228 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
5229 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5230 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5232 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
5233 "Done %s.\n", __func__);
/* mb[2] holds the returned length in words */
5236 *actual_size = mcp->mb[2] << 2;
/*
 * Retrieve the DCBX parameter TLVs into the DMA buffer at tlv_dma.
 * CNA-capable adapters only.
 */
5243 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
5248 mbx_cmd_t *mcp = &mc;
5250 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
5251 "Entered %s.\n", __func__);
5253 if (!IS_CNA_CAPABLE(vha->hw))
5254 return QLA_FUNCTION_FAILED;
5256 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
5258 mcp->mb[2] = MSW(tlv_dma);
5259 mcp->mb[3] = LSW(tlv_dma);
5260 mcp->mb[6] = MSW(MSD(tlv_dma));
5261 mcp->mb[7] = LSW(MSD(tlv_dma));
5263 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5264 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5265 mcp->tov = MBX_TOV_SECONDS;
5267 rval = qla2x00_mailbox_command(vha, mcp);
5269 if (rval != QLA_SUCCESS) {
5270 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
5271 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5272 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5274 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
5275 "Done %s.\n", __func__);
/*
 * Read a single 32-bit word of RISC RAM at risc_addr into *data
 * (high half from mb[3], low half from mb[2]). FWI2-capable ISPs only.
 */
5282 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
5286 mbx_cmd_t *mcp = &mc;
5288 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
5289 "Entered %s.\n", __func__);
5291 if (!IS_FWI2_CAPABLE(vha->hw))
5292 return QLA_FUNCTION_FAILED;
5294 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
5295 mcp->mb[1] = LSW(risc_addr);
5296 mcp->mb[8] = MSW(risc_addr);
5297 mcp->out_mb = MBX_8|MBX_1|MBX_0;
5298 mcp->in_mb = MBX_3|MBX_2|MBX_0;
5299 mcp->tov = MBX_TOV_SECONDS;
5301 rval = qla2x00_mailbox_command(vha, mcp);
5302 if (rval != QLA_SUCCESS) {
5303 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
5304 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5306 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
5307 "Done %s.\n", __func__);
5308 *data = mcp->mb[3] << 16 | mcp->mb[2];
/*
 * Run the diagnostic loopback test described by 'mreq' (send/receive DMA
 * buffers, transfer size, iteration count) and copy the raw mailbox
 * results back to 'mresp' for the caller (bsg diagnostics path).
 */
5315 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5320 mbx_cmd_t *mcp = &mc;
5322 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
5323 "Entered %s.\n", __func__);
5325 memset(mcp->mb, 0 , sizeof(mcp->mb));
5326 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
5327 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
5329 /* transfer count */
5330 mcp->mb[10] = LSW(mreq->transfer_size);
5331 mcp->mb[11] = MSW(mreq->transfer_size);
5333 /* send data address */
5334 mcp->mb[14] = LSW(mreq->send_dma);
5335 mcp->mb[15] = MSW(mreq->send_dma);
5336 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5337 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5339 /* receive data address */
5340 mcp->mb[16] = LSW(mreq->rcv_dma);
5341 mcp->mb[17] = MSW(mreq->rcv_dma);
5342 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5343 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5345 /* Iteration count */
5346 mcp->mb[18] = LSW(mreq->iteration_count);
5347 mcp->mb[19] = MSW(mreq->iteration_count);
5349 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5350 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5351 if (IS_CNA_CAPABLE(vha->hw))
5352 mcp->out_mb |= MBX_2;
5353 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5355 mcp->buf_size = mreq->transfer_size;
5356 mcp->tov = MBX_TOV_SECONDS;
5357 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5359 rval = qla2x00_mailbox_command(vha, mcp);
5361 if (rval != QLA_SUCCESS) {
5362 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5363 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5364 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5365 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5367 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5368 "Done %s.\n", __func__);
5371 /* Copy mailbox information */
5372 memcpy( mresp, mcp->mb, 64);
/*
 * Run the diagnostic ECHO test: transmit the buffer at mreq->send_dma and
 * receive into mreq->rcv_dma. CNA adapters additionally pass the FCoE FCF
 * index in mb[2]. Raw mailbox results are copied to 'mresp'.
 */
5377 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5382 mbx_cmd_t *mcp = &mc;
5383 struct qla_hw_data *ha = vha->hw;
5385 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5386 "Entered %s.\n", __func__);
5388 memset(mcp->mb, 0 , sizeof(mcp->mb));
5389 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5390 /* BIT_6 specifies 64bit address */
5391 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5392 if (IS_CNA_CAPABLE(ha)) {
5393 mcp->mb[2] = vha->fcoe_fcf_idx;
5395 mcp->mb[16] = LSW(mreq->rcv_dma);
5396 mcp->mb[17] = MSW(mreq->rcv_dma);
5397 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5398 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5400 mcp->mb[10] = LSW(mreq->transfer_size);
5402 mcp->mb[14] = LSW(mreq->send_dma);
5403 mcp->mb[15] = MSW(mreq->send_dma);
5404 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5405 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5407 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5408 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5409 if (IS_CNA_CAPABLE(ha))
5410 mcp->out_mb |= MBX_2;
/* extra status registers depending on ISP family */
5413 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5414 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5415 mcp->in_mb |= MBX_1;
5416 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
5418 mcp->in_mb |= MBX_3;
5420 mcp->tov = MBX_TOV_SECONDS;
5421 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5422 mcp->buf_size = mreq->transfer_size;
5424 rval = qla2x00_mailbox_command(vha, mcp);
5426 if (rval != QLA_SUCCESS) {
5427 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5428 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5429 rval, mcp->mb[0], mcp->mb[1]);
5431 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5432 "Done %s.\n", __func__);
5435 /* Copy mailbox information */
5436 memcpy(mresp, mcp->mb, 64);
/*
 * qla84xx_reset_chip() - issue MBC_ISP84XX_RESET, optionally leaving the
 * ISP84xx in diagnostic mode ('enable_diagnostic' is passed in mb[1]).
 */
5441 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5445 mbx_cmd_t *mcp = &mc;
5447 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5448 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5450 mcp->mb[0] = MBC_ISP84XX_RESET;
5451 mcp->mb[1] = enable_diagnostic;
5452 mcp->out_mb = MBX_1|MBX_0;
5453 mcp->in_mb = MBX_1|MBX_0;
5454 mcp->tov = MBX_TOV_SECONDS;
5455 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5456 rval = qla2x00_mailbox_command(vha, mcp);
5458 if (rval != QLA_SUCCESS)
5459 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5461 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5462 "Done %s.\n", __func__);
/*
 * qla2x00_write_ram_word() - write one 32-bit word ('data') to RISC RAM
 * at 'risc_addr' via MBC_WRITE_RAM_WORD_EXTENDED.  FWI2-capable adapters
 * only; returns QLA_FUNCTION_FAILED otherwise.
 */
5468 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5472 mbx_cmd_t *mcp = &mc;
5474 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5475 "Entered %s.\n", __func__);
5477 if (!IS_FWI2_CAPABLE(vha->hw))
5478 return QLA_FUNCTION_FAILED;
5480 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5481 mcp->mb[1] = LSW(risc_addr);
5482 mcp->mb[2] = LSW(data);
5483 mcp->mb[3] = MSW(data);
5484 mcp->mb[8] = MSW(risc_addr);
5485 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5486 mcp->in_mb = MBX_1|MBX_0;
5487 mcp->tov = MBX_TOV_SECONDS;
5489 rval = qla2x00_mailbox_command(vha, mcp);
5490 if (rval != QLA_SUCCESS) {
5491 ql_dbg(ql_dbg_mbx, vha, 0x1101,
5492 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5493 rval, mcp->mb[0], mcp->mb[1]);
5495 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5496 "Done %s.\n", __func__);
/*
 * qla81xx_write_mpi_register() - write MPI registers by driving the
 * MBC_WRITE_MPI_REGISTER mailbox command directly through the ISP24xx
 * register window (no qla2x00_mailbox_command()), then polling the host
 * status register for the mailbox-completion interrupt.
 * NOTE(review): the '®' tokens below look like a mis-encoded '&reg'
 * (address-of the register block) introduced by text extraction — verify
 * against the upstream source before relying on this excerpt.
 */
5503 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5506 uint32_t stat, timer;
5508 struct qla_hw_data *ha = vha->hw;
5509 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5513 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5514 "Entered %s.\n", __func__);
5516 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5518 /* Write the MBC data to the registers */
5519 wrt_reg_word(®->mailbox0, MBC_WRITE_MPI_REGISTER);
5520 wrt_reg_word(®->mailbox1, mb[0]);
5521 wrt_reg_word(®->mailbox2, mb[1]);
5522 wrt_reg_word(®->mailbox3, mb[2]);
5523 wrt_reg_word(®->mailbox4, mb[3]);
5525 wrt_reg_dword(®->hccr, HCCRX_SET_HOST_INT);
5527 /* Poll for MBC interrupt */
5528 for (timer = 6000000; timer; timer--) {
5529 /* Check for pending interrupts. */
5530 stat = rd_reg_dword(®->host_status);
5531 if (stat & HSRX_RISC_INT) {
5534 if (stat == 0x1 || stat == 0x2 ||
5535 stat == 0x10 || stat == 0x11) {
5536 set_bit(MBX_INTERRUPT,
5537 &ha->mbx_cmd_flags);
5538 mb0 = rd_reg_word(®->mailbox0);
5539 wrt_reg_dword(®->hccr,
5540 HCCRX_CLR_RISC_INT);
5541 rd_reg_dword(®->hccr);
5548 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5549 rval = mb0 & MBS_MASK;
5551 rval = QLA_FUNCTION_FAILED;
5553 if (rval != QLA_SUCCESS) {
5554 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5555 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5557 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5558 "Done %s.\n", __func__);
5564 /* Set the specified data rate */
/*
 * qla2x00_set_data_rate() - program the link speed via MBC_DATA_RATE.
 * Validates ha->set_data_rate against the supported speeds and falls back
 * to PORT_SPEED_AUTO (with a warning) for unrecognized values.
 */
5566 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
5570 mbx_cmd_t *mcp = &mc;
5571 struct qla_hw_data *ha = vha->hw;
5574 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5575 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5578 if (!IS_FWI2_CAPABLE(ha))
5579 return QLA_FUNCTION_FAILED;
5581 memset(mcp, 0, sizeof(*mcp));
5582 switch (ha->set_data_rate) {
5583 case PORT_SPEED_AUTO:
5584 case PORT_SPEED_4GB:
5585 case PORT_SPEED_8GB:
5586 case PORT_SPEED_16GB:
5587 case PORT_SPEED_32GB:
5588 val = ha->set_data_rate;
5591 ql_log(ql_log_warn, vha, 0x1199,
5592 "Unrecognized speed setting:%d. Setting Autoneg\n",
5594 val = ha->set_data_rate = PORT_SPEED_AUTO;
5598 mcp->mb[0] = MBC_DATA_RATE;
5602 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5603 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5604 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5605 mcp->in_mb |= MBX_4|MBX_3;
5606 mcp->tov = MBX_TOV_SECONDS;
5608 rval = qla2x00_mailbox_command(vha, mcp);
5609 if (rval != QLA_SUCCESS) {
5610 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5611 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5613 if (mcp->mb[1] != 0x7)
5614 ql_dbg(ql_dbg_mbx, vha, 0x1179,
5615 "Speed set:0x%x\n", mcp->mb[1]);
5617 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5618 "Done %s.\n", __func__);
/*
 * qla2x00_get_data_rate() - query the current link data rate via
 * MBC_DATA_RATE with the QLA_GET_DATA_RATE sub-op.  On success the rate
 * returned in mb[1] is cached in ha->link_data_rate; on QLA83xx/27xx/28xx
 * mb[4] bit 0 reports whether FEC is enabled.
 */
5625 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5629 mbx_cmd_t *mcp = &mc;
5630 struct qla_hw_data *ha = vha->hw;
5632 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5633 "Entered %s.\n", __func__);
5635 if (!IS_FWI2_CAPABLE(ha))
5636 return QLA_FUNCTION_FAILED;
5638 mcp->mb[0] = MBC_DATA_RATE;
5639 mcp->mb[1] = QLA_GET_DATA_RATE;
5640 mcp->out_mb = MBX_1|MBX_0;
5641 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5642 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5643 mcp->in_mb |= MBX_4|MBX_3;
5644 mcp->tov = MBX_TOV_SECONDS;
5646 rval = qla2x00_mailbox_command(vha, mcp);
5647 if (rval != QLA_SUCCESS) {
5648 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5649 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
/* 0x7 in mb[1] appears to be an invalid/unknown-rate sentinel — hence the skip. */
5651 if (mcp->mb[1] != 0x7)
5652 ha->link_data_rate = mcp->mb[1];
5654 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
5655 if (mcp->mb[4] & BIT_0)
5656 ql_log(ql_log_info, vha, 0x11a2,
5657 "FEC=enabled (data rate).\n");
5660 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5661 "Done %s.\n", __func__);
5662 if (mcp->mb[1] != 0x7)
5663 ha->link_data_rate = mcp->mb[1];
/*
 * qla81xx_get_port_config() - read the port configuration words via
 * MBC_GET_PORT_CONFIG.  On success, return mailboxes 1-4 are copied
 * verbatim into the caller-supplied 'mb' array (4 x uint16_t).
 */
5670 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5674 mbx_cmd_t *mcp = &mc;
5675 struct qla_hw_data *ha = vha->hw;
5677 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5678 "Entered %s.\n", __func__);
5680 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5681 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5682 return QLA_FUNCTION_FAILED;
5683 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5684 mcp->out_mb = MBX_0;
5685 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5686 mcp->tov = MBX_TOV_SECONDS;
5689 rval = qla2x00_mailbox_command(vha, mcp);
5691 if (rval != QLA_SUCCESS) {
5692 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5693 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5695 /* Copy all bits to preserve original value */
5696 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5698 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5699 "Done %s.\n", __func__);
/*
 * qla81xx_set_port_config() - write the port configuration words via
 * MBC_SET_PORT_CONFIG; the caller's 4 x uint16_t 'mb' array is copied
 * verbatim into outgoing mailboxes 1-4 (counterpart of the get routine).
 */
5705 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5709 mbx_cmd_t *mcp = &mc;
5711 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5712 "Entered %s.\n", __func__);
5714 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5715 /* Copy all bits to preserve original setting */
5716 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5717 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5719 mcp->tov = MBX_TOV_SECONDS;
5721 rval = qla2x00_mailbox_command(vha, mcp);
5723 if (rval != QLA_SUCCESS) {
5724 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5725 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5727 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5728 "Done %s.\n", __func__);
/*
 * qla24xx_set_fcp_prio() - set the FCP priority (low nibble of
 * 'priority') for the port identified by 'loop_id' via MBC_PORT_PARAMS.
 * Restricted to QLA24xx/25xx adapters.
 */
5735 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5740 mbx_cmd_t *mcp = &mc;
5741 struct qla_hw_data *ha = vha->hw;
5743 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5744 "Entered %s.\n", __func__);
5746 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5747 return QLA_FUNCTION_FAILED;
5749 mcp->mb[0] = MBC_PORT_PARAMS;
5750 mcp->mb[1] = loop_id;
5751 if (ha->flags.fcp_prio_enabled)
5755 mcp->mb[4] = priority & 0xf;
5756 mcp->mb[9] = vha->vp_idx;
5757 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5758 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5759 mcp->tov = MBX_TOV_SECONDS;
5761 rval = qla2x00_mailbox_command(vha, mcp);
5769 if (rval != QLA_SUCCESS) {
5770 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5772 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5773 "Done %s.\n", __func__);
/*
 * qla2x00_get_thermal_temp() - read the board temperature into *temp.
 * The source depends on the adapter: QLA25xx variants with specific
 * subsystem IDs read it from the SFP (qla2x00_read_sfp), P3P parts
 * (82xx/8044) read a dedicated temperature register, and everything else
 * falls through to qla2x00_read_asic_temperature().
 */
5780 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5782 int rval = QLA_FUNCTION_FAILED;
5783 struct qla_hw_data *ha = vha->hw;
5786 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5787 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5788 "Thermal not supported by this card.\n");
5792 if (IS_QLA25XX(ha)) {
/* Subsystem 0x0175 (QLogic-branded 25xx): temperature lives in SFP page 0x98. */
5793 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5794 ha->pdev->subsystem_device == 0x0175) {
5795 rval = qla2x00_read_sfp(vha, 0, &byte,
5796 0x98, 0x1, 1, BIT_13|BIT_0);
5800 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5801 ha->pdev->subsystem_device == 0x338e) {
5802 rval = qla2x00_read_sfp(vha, 0, &byte,
5803 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5807 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5808 "Thermal not supported by this card.\n");
5812 if (IS_QLA82XX(ha)) {
5813 *temp = qla82xx_read_temperature(vha);
5816 } else if (IS_QLA8044(ha)) {
5817 *temp = qla8044_read_temperature(vha);
5822 rval = qla2x00_read_asic_temperature(vha, temp);
/*
 * qla82xx_mbx_intr_enable() - enable firmware interrupts via the
 * MBC_TOGGLE_INTERRUPT mailbox command (FWI2-capable adapters only).
 */
5827 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5830 struct qla_hw_data *ha = vha->hw;
5832 mbx_cmd_t *mcp = &mc;
5834 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5835 "Entered %s.\n", __func__);
5837 if (!IS_FWI2_CAPABLE(ha))
5838 return QLA_FUNCTION_FAILED;
5840 memset(mcp, 0, sizeof(mbx_cmd_t));
5841 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5844 mcp->out_mb = MBX_1|MBX_0;
5846 mcp->tov = MBX_TOV_SECONDS;
5849 rval = qla2x00_mailbox_command(vha, mcp);
5850 if (rval != QLA_SUCCESS) {
5851 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5852 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5854 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5855 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_intr_disable() - disable firmware interrupts via
 * MBC_TOGGLE_INTERRUPT.  Note this path is gated on IS_P3P_TYPE()
 * (82xx family), unlike the enable routine's IS_FWI2_CAPABLE() check.
 */
5862 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5865 struct qla_hw_data *ha = vha->hw;
5867 mbx_cmd_t *mcp = &mc;
5869 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5870 "Entered %s.\n", __func__);
5872 if (!IS_P3P_TYPE(ha))
5873 return QLA_FUNCTION_FAILED;
5875 memset(mcp, 0, sizeof(mbx_cmd_t));
5876 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5879 mcp->out_mb = MBX_1|MBX_0;
5881 mcp->tov = MBX_TOV_SECONDS;
5884 rval = qla2x00_mailbox_command(vha, mcp);
5885 if (rval != QLA_SUCCESS) {
5886 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5887 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5889 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5890 "Done %s.\n", __func__);
/*
 * qla82xx_md_get_template_size() - query the minidump template size via
 * MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE / RQST_TMPLT_SIZE.  The 32-bit size
 * (mb[3]:mb[2]) is cached in ha->md_template_size; a zero size is
 * treated as failure.
 */
5897 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5899 struct qla_hw_data *ha = vha->hw;
5901 mbx_cmd_t *mcp = &mc;
5902 int rval = QLA_FUNCTION_FAILED;
5904 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5905 "Entered %s.\n", __func__);
5907 memset(mcp->mb, 0 , sizeof(mcp->mb));
5908 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5909 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5910 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5911 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5913 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5914 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5915 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5917 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5918 mcp->tov = MBX_TOV_SECONDS;
5919 rval = qla2x00_mailbox_command(vha, mcp);
5921 /* Always copy back return mailbox values. */
5922 if (rval != QLA_SUCCESS) {
5923 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5924 "mailbox command FAILED=0x%x, subcode=%x.\n",
5925 (mcp->mb[1] << 16) | mcp->mb[0],
5926 (mcp->mb[3] << 16) | mcp->mb[2]);
5928 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5929 "Done %s.\n", __func__);
5930 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5931 if (!ha->md_template_size) {
5932 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5933 "Null template size obtained.\n");
5934 rval = QLA_FUNCTION_FAILED;
/*
 * qla82xx_md_get_template() - fetch the whole minidump template in one
 * shot: allocates a coherent DMA buffer of ha->md_template_size bytes
 * (ha->md_tmplt_hdr / ha->md_tmplt_hdr_dma) and asks firmware to fill it
 * via MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE / RQST_TMPLT with the 64-bit
 * buffer address in mb[4..7] and the size in mb[8..9].
 */
5941 qla82xx_md_get_template(scsi_qla_host_t *vha)
5943 struct qla_hw_data *ha = vha->hw;
5945 mbx_cmd_t *mcp = &mc;
5946 int rval = QLA_FUNCTION_FAILED;
5948 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5949 "Entered %s.\n", __func__);
5951 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5952 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5953 if (!ha->md_tmplt_hdr) {
5954 ql_log(ql_log_warn, vha, 0x1124,
5955 "Unable to allocate memory for Minidump template.\n");
5959 memset(mcp->mb, 0 , sizeof(mcp->mb));
5960 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5961 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5962 mcp->mb[2] = LSW(RQST_TMPLT);
5963 mcp->mb[3] = MSW(RQST_TMPLT);
5964 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5965 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5966 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5967 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5968 mcp->mb[8] = LSW(ha->md_template_size);
5969 mcp->mb[9] = MSW(ha->md_template_size);
5971 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5972 mcp->tov = MBX_TOV_SECONDS;
5973 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5974 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5975 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5976 rval = qla2x00_mailbox_command(vha, mcp);
5978 if (rval != QLA_SUCCESS) {
5979 ql_dbg(ql_dbg_mbx, vha, 0x1125,
5980 "mailbox command FAILED=0x%x, subcode=%x.\n",
5981 ((mcp->mb[1] << 16) | mcp->mb[0]),
5982 ((mcp->mb[3] << 16) | mcp->mb[2]));
5984 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5985 "Done %s.\n", __func__);
/*
 * qla8044_md_get_template() - 8044 variant of the minidump-template
 * fetch.  Unlike the 82xx routine it retrieves the template in
 * MINIDUMP_SIZE_36K chunks, advancing 'offset' through the coherent
 * buffer and passing the chunk offset in mb[10..11].
 * NOTE(review): 'mcp->mb[11] = offset & 0xFFFF0000' stores a high-word
 * mask into what is presumably a 16-bit mailbox register (cf. MSW() used
 * elsewhere) — looks suspicious; confirm against firmware spec/upstream.
 */
5990 qla8044_md_get_template(scsi_qla_host_t *vha)
5992 struct qla_hw_data *ha = vha->hw;
5994 mbx_cmd_t *mcp = &mc;
5995 int rval = QLA_FUNCTION_FAILED;
5996 int offset = 0, size = MINIDUMP_SIZE_36K;
5998 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5999 "Entered %s.\n", __func__);
6001 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
6002 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
6003 if (!ha->md_tmplt_hdr) {
6004 ql_log(ql_log_warn, vha, 0xb11b,
6005 "Unable to allocate memory for Minidump template.\n");
6009 memset(mcp->mb, 0 , sizeof(mcp->mb));
6010 while (offset < ha->md_template_size) {
6011 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
6012 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
6013 mcp->mb[2] = LSW(RQST_TMPLT);
6014 mcp->mb[3] = MSW(RQST_TMPLT);
6015 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
6016 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
6017 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
6018 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
6019 mcp->mb[8] = LSW(size);
6020 mcp->mb[9] = MSW(size);
6021 mcp->mb[10] = offset & 0x0000FFFF;
6022 mcp->mb[11] = offset & 0xFFFF0000;
6023 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
6024 mcp->tov = MBX_TOV_SECONDS;
6025 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
6026 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6027 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6028 rval = qla2x00_mailbox_command(vha, mcp);
6030 if (rval != QLA_SUCCESS) {
6031 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
6032 "mailbox command FAILED=0x%x, subcode=%x.\n",
6033 ((mcp->mb[1] << 16) | mcp->mb[0]),
6034 ((mcp->mb[3] << 16) | mcp->mb[2]));
6037 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
6038 "Done %s.\n", __func__);
6039 offset = offset + size;
/*
 * qla81xx_set_led_config() - program the LED configuration registers via
 * MBC_SET_LED_CONFIG.  QLA81xx uses two config words (mb[1..2]); QLA8031
 * takes six (mb[1..6]).  Supported on QLA81xx/8031 only.
 */
6045 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6048 struct qla_hw_data *ha = vha->hw;
6050 mbx_cmd_t *mcp = &mc;
6052 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6053 return QLA_FUNCTION_FAILED;
6055 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
6056 "Entered %s.\n", __func__);
6058 memset(mcp, 0, sizeof(mbx_cmd_t));
6059 mcp->mb[0] = MBC_SET_LED_CONFIG;
6060 mcp->mb[1] = led_cfg[0];
6061 mcp->mb[2] = led_cfg[1];
6062 if (IS_QLA8031(ha)) {
6063 mcp->mb[3] = led_cfg[2];
6064 mcp->mb[4] = led_cfg[3];
6065 mcp->mb[5] = led_cfg[4];
6066 mcp->mb[6] = led_cfg[5];
6069 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6071 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6073 mcp->tov = MBX_TOV_SECONDS;
6076 rval = qla2x00_mailbox_command(vha, mcp);
6077 if (rval != QLA_SUCCESS) {
6078 ql_dbg(ql_dbg_mbx, vha, 0x1134,
6079 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6081 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
6082 "Done %s.\n", __func__);
/*
 * qla81xx_get_led_config() - read back the LED configuration via
 * MBC_GET_LED_CONFIG into 'led_cfg' (two words for QLA81xx, six for
 * QLA8031).  Mirror image of qla81xx_set_led_config().
 */
6089 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6092 struct qla_hw_data *ha = vha->hw;
6094 mbx_cmd_t *mcp = &mc;
6096 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6097 return QLA_FUNCTION_FAILED;
6099 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
6100 "Entered %s.\n", __func__);
6102 memset(mcp, 0, sizeof(mbx_cmd_t));
6103 mcp->mb[0] = MBC_GET_LED_CONFIG;
6105 mcp->out_mb = MBX_0;
6106 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6108 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6109 mcp->tov = MBX_TOV_SECONDS;
6112 rval = qla2x00_mailbox_command(vha, mcp);
6113 if (rval != QLA_SUCCESS) {
6114 ql_dbg(ql_dbg_mbx, vha, 0x1137,
6115 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6117 led_cfg[0] = mcp->mb[1];
6118 led_cfg[1] = mcp->mb[2];
6119 if (IS_QLA8031(ha)) {
6120 led_cfg[2] = mcp->mb[3];
6121 led_cfg[3] = mcp->mb[4];
6122 led_cfg[4] = mcp->mb[5];
6123 led_cfg[5] = mcp->mb[6];
6125 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
6126 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_beacon_ctl() - toggle the locator-beacon LED on P3P (82xx)
 * adapters via MBC_SET_LED_CONFIG; 'enable' selects on/off (the value
 * loaded into the mailbox is on a line not visible in this excerpt).
 */
6133 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
6136 struct qla_hw_data *ha = vha->hw;
6138 mbx_cmd_t *mcp = &mc;
6140 if (!IS_P3P_TYPE(ha))
6141 return QLA_FUNCTION_FAILED;
6143 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
6144 "Entered %s.\n", __func__);
6146 memset(mcp, 0, sizeof(mbx_cmd_t));
6147 mcp->mb[0] = MBC_SET_LED_CONFIG;
6153 mcp->out_mb = MBX_7|MBX_0;
6155 mcp->tov = MBX_TOV_SECONDS;
6158 rval = qla2x00_mailbox_command(vha, mcp);
6159 if (rval != QLA_SUCCESS) {
6160 ql_dbg(ql_dbg_mbx, vha, 0x1128,
6161 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6163 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
6164 "Done %s.\n", __func__);
/*
 * qla83xx_wr_reg() - write a 32-bit value ('data') to a remote register
 * address ('reg') via MBC_WRITE_REMOTE_REG.  QLA83xx/27xx/28xx only.
 */
6171 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
6174 struct qla_hw_data *ha = vha->hw;
6176 mbx_cmd_t *mcp = &mc;
6178 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6179 return QLA_FUNCTION_FAILED;
6181 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
6182 "Entered %s.\n", __func__);
6184 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6185 mcp->mb[1] = LSW(reg);
6186 mcp->mb[2] = MSW(reg);
6187 mcp->mb[3] = LSW(data);
6188 mcp->mb[4] = MSW(data);
6189 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6191 mcp->in_mb = MBX_1|MBX_0;
6192 mcp->tov = MBX_TOV_SECONDS;
6194 rval = qla2x00_mailbox_command(vha, mcp);
6196 if (rval != QLA_SUCCESS) {
6197 ql_dbg(ql_dbg_mbx, vha, 0x1131,
6198 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6200 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
6201 "Done %s.\n", __func__);
/*
 * qla2x00_port_logout() - perform an implicit LOGO of 'fcport' via
 * MBC_PORT_LOGOUT (mb[10] BIT_15 selects the implicit variant).  Not
 * supported on the old QLA2100/2200 parts.
 */
6208 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
6211 struct qla_hw_data *ha = vha->hw;
6213 mbx_cmd_t *mcp = &mc;
6215 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
6216 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
6217 "Implicit LOGO Unsupported.\n");
6218 return QLA_FUNCTION_FAILED;
6222 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
6223 "Entering %s.\n", __func__);
6225 /* Perform Implicit LOGO. */
6226 mcp->mb[0] = MBC_PORT_LOGOUT;
6227 mcp->mb[1] = fcport->loop_id;
6228 mcp->mb[10] = BIT_15;
6229 mcp->out_mb = MBX_10|MBX_1|MBX_0;
6231 mcp->tov = MBX_TOV_SECONDS;
6233 rval = qla2x00_mailbox_command(vha, mcp);
6234 if (rval != QLA_SUCCESS)
6235 ql_dbg(ql_dbg_mbx, vha, 0x113d,
6236 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6238 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
6239 "Done %s.\n", __func__);
/*
 * qla83xx_rd_reg() - read a 32-bit remote register into *data via
 * MBC_READ_REMOTE_REG (result assembled from mb[3] low / mb[4] high).
 * Reads returning the QLA8XXX_BAD_VALUE sentinel (0xbad0bad0) are
 * retried for up to 2 seconds because CAMRAM reads during soft-reset can
 * transiently return that value.  QLA83xx/27xx/28xx only.
 */
6245 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
6249 mbx_cmd_t *mcp = &mc;
6250 struct qla_hw_data *ha = vha->hw;
6251 unsigned long retry_max_time = jiffies + (2 * HZ);
6253 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6254 return QLA_FUNCTION_FAILED;
6256 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
6259 mcp->mb[0] = MBC_READ_REMOTE_REG;
6260 mcp->mb[1] = LSW(reg);
6261 mcp->mb[2] = MSW(reg);
6262 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6263 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
6264 mcp->tov = MBX_TOV_SECONDS;
6266 rval = qla2x00_mailbox_command(vha, mcp);
6268 if (rval != QLA_SUCCESS) {
6269 ql_dbg(ql_dbg_mbx, vha, 0x114c,
6270 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6271 rval, mcp->mb[0], mcp->mb[1]);
6273 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
6274 if (*data == QLA8XXX_BAD_VALUE) {
6276 * During soft-reset CAMRAM register reads might
6277 * return 0xbad0bad0. So retry for MAX of 2 sec
6278 * while reading camram registers.
6280 if (time_after(jiffies, retry_max_time)) {
6281 ql_dbg(ql_dbg_mbx, vha, 0x1141,
6282 "Failure to read CAMRAM register. "
6283 "data=0x%x.\n", *data);
6284 return QLA_FUNCTION_FAILED;
6289 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
/*
 * qla83xx_restart_nic_firmware() - restart the NIC-side firmware on
 * QLA83xx via MBC_RESTART_NIC_FIRMWARE; dumps firmware state on failure.
 */
6296 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
6300 mbx_cmd_t *mcp = &mc;
6301 struct qla_hw_data *ha = vha->hw;
6303 if (!IS_QLA83XX(ha))
6304 return QLA_FUNCTION_FAILED;
6306 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
6308 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
6309 mcp->out_mb = MBX_0;
6310 mcp->in_mb = MBX_1|MBX_0;
6311 mcp->tov = MBX_TOV_SECONDS;
6313 rval = qla2x00_mailbox_command(vha, mcp);
6315 if (rval != QLA_SUCCESS) {
6316 ql_dbg(ql_dbg_mbx, vha, 0x1144,
6317 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6318 rval, mcp->mb[0], mcp->mb[1]);
6319 qla2xxx_dump_fw(vha);
6321 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
/*
 * qla83xx_access_control() - issue MBC_SET_ACCESS_CONTROL (QLA8031).
 * The low byte of 'options' is a subcode whose bits select the behavior:
 * BIT_2 sends a start/end address range, BIT_5 returns the flash sector
 * size in *sector_size, BIT_6/BIT_7 and BIT_3/BIT_4 return driver-lock /
 * flash-lock ids (logged from mb[4]:mb[3]).
 */
6328 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
6329 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
6333 mbx_cmd_t *mcp = &mc;
6334 uint8_t subcode = (uint8_t)options;
6335 struct qla_hw_data *ha = vha->hw;
6337 if (!IS_QLA8031(ha))
6338 return QLA_FUNCTION_FAILED;
6340 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
6342 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
6343 mcp->mb[1] = options;
6344 mcp->out_mb = MBX_1|MBX_0;
6345 if (subcode & BIT_2) {
6346 mcp->mb[2] = LSW(start_addr);
6347 mcp->mb[3] = MSW(start_addr);
6348 mcp->mb[4] = LSW(end_addr);
6349 mcp->mb[5] = MSW(end_addr);
6350 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
6352 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6353 if (!(subcode & (BIT_2 | BIT_5)))
6354 mcp->in_mb |= MBX_4|MBX_3;
6355 mcp->tov = MBX_TOV_SECONDS;
6357 rval = qla2x00_mailbox_command(vha, mcp);
6359 if (rval != QLA_SUCCESS) {
6360 ql_dbg(ql_dbg_mbx, vha, 0x1147,
6361 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
6362 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
6364 qla2xxx_dump_fw(vha);
6366 if (subcode & BIT_5)
6367 *sector_size = mcp->mb[1];
6368 else if (subcode & (BIT_6 | BIT_7)) {
6369 ql_dbg(ql_dbg_mbx, vha, 0x1148,
6370 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6371 } else if (subcode & (BIT_3 | BIT_4)) {
6372 ql_dbg(ql_dbg_mbx, vha, 0x1149,
6373 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6375 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
/*
 * qla2x00_dump_mctp_data() - dump MCTP data from firmware address 'addr'
 * into the DMA buffer at 'req_dma' using MBC_DUMP_RISC_RAM_EXTENDED.
 * mb[10] = BIT_7|0x40 marks the RAM ID as valid and selects the MCTP
 * region (RAM ID 0x40).  MCTP-capable adapters only.
 */
6382 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6387 mbx_cmd_t *mcp = &mc;
6389 if (!IS_MCTP_CAPABLE(vha->hw))
6390 return QLA_FUNCTION_FAILED;
6392 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6393 "Entered %s.\n", __func__);
6395 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6396 mcp->mb[1] = LSW(addr);
6397 mcp->mb[2] = MSW(req_dma);
6398 mcp->mb[3] = LSW(req_dma);
6399 mcp->mb[4] = MSW(size);
6400 mcp->mb[5] = LSW(size);
6401 mcp->mb[6] = MSW(MSD(req_dma));
6402 mcp->mb[7] = LSW(MSD(req_dma));
6403 mcp->mb[8] = MSW(addr);
6404 /* Setting RAM ID to valid */
6405 /* For MCTP RAM ID is 0x40 */
6406 mcp->mb[10] = BIT_7 | 0x40;
6408 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6412 mcp->tov = MBX_TOV_SECONDS;
6414 rval = qla2x00_mailbox_command(vha, mcp);
6416 if (rval != QLA_SUCCESS) {
6417 ql_dbg(ql_dbg_mbx, vha, 0x114e,
6418 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6420 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6421 "Done %s.\n", __func__);
/*
 * qla26xx_dport_diagnostics() - run D-port diagnostics via
 * MBC_DPORT_DIAGNOSTICS.  Maps the caller's 'dd_buf' for streaming DMA
 * (DMA_FROM_DEVICE), zero-fills it, issues the command with the 64-bit
 * buffer address in mb[2..3]/mb[6..7], and always unmaps on exit.
 * Uses a 4x mailbox timeout since diagnostics can take longer.
 */
6428 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6429 void *dd_buf, uint size, uint options)
6433 mbx_cmd_t *mcp = &mc;
6436 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
6437 !IS_QLA28XX(vha->hw))
6438 return QLA_FUNCTION_FAILED;
6440 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6441 "Entered %s.\n", __func__);
6443 dd_dma = dma_map_single(&vha->hw->pdev->dev,
6444 dd_buf, size, DMA_FROM_DEVICE);
6445 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6446 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6447 return QLA_MEMORY_ALLOC_FAILED;
6450 memset(dd_buf, 0, size);
6452 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6453 mcp->mb[1] = options;
6454 mcp->mb[2] = MSW(LSD(dd_dma));
6455 mcp->mb[3] = LSW(LSD(dd_dma));
6456 mcp->mb[6] = MSW(MSD(dd_dma));
6457 mcp->mb[7] = LSW(MSD(dd_dma));
6459 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6460 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6461 mcp->buf_size = size;
6462 mcp->flags = MBX_DMA_IN;
6463 mcp->tov = MBX_TOV_SECONDS * 4;
6464 rval = qla2x00_mailbox_command(vha, mcp);
6466 if (rval != QLA_SUCCESS) {
6467 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6469 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6470 "Done %s.\n", __func__);
6473 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6474 size, DMA_FROM_DEVICE);
/*
 * qla26xx_dport_diagnostics_v2() - v2 of the D-port diagnostics entry
 * point: takes a struct qla_dport_diag_v2 (embedded buffer + options)
 * and a caller-supplied mbx_cmd_t.  Same DMA-map / command / unmap flow
 * as qla26xx_dport_diagnostics(), without the adapter-type gate.
 */
6480 qla26xx_dport_diagnostics_v2(scsi_qla_host_t *vha,
6481 struct qla_dport_diag_v2 *dd, mbx_cmd_t *mcp)
6485 uint size = sizeof(dd->buf);
6486 uint16_t options = dd->options;
6488 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6489 "Entered %s.\n", __func__);
6491 dd_dma = dma_map_single(&vha->hw->pdev->dev,
6492 dd->buf, size, DMA_FROM_DEVICE);
6493 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6494 ql_log(ql_log_warn, vha, 0x1194,
6495 "Failed to map dma buffer.\n");
6496 return QLA_MEMORY_ALLOC_FAILED;
6499 memset(dd->buf, 0, size);
6501 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6502 mcp->mb[1] = options;
6503 mcp->mb[2] = MSW(LSD(dd_dma));
6504 mcp->mb[3] = LSW(LSD(dd_dma));
6505 mcp->mb[6] = MSW(MSD(dd_dma));
6506 mcp->mb[7] = LSW(MSD(dd_dma));
6508 mcp->out_mb = MBX_8 | MBX_7 | MBX_6 | MBX_3 | MBX_2 | MBX_1 | MBX_0;
6509 mcp->in_mb = MBX_3 | MBX_2 | MBX_1 | MBX_0;
6510 mcp->buf_size = size;
6511 mcp->flags = MBX_DMA_IN;
6512 mcp->tov = MBX_TOV_SECONDS * 4;
6513 rval = qla2x00_mailbox_command(vha, mcp);
6515 if (rval != QLA_SUCCESS) {
6516 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6518 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6519 "Done %s.\n", __func__);
6522 dma_unmap_single(&vha->hw->pdev->dev, dd_dma, size, DMA_FROM_DEVICE);
/*
 * qla2x00_async_mb_sp_done() - completion callback for mailbox-over-IOCB
 * srbs: records the result and wakes the waiter in qla24xx_send_mb_cmd().
 */
6527 static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
6529 sp->u.iocb_cmd.u.mbx.rc = res;
6531 complete(&sp->u.iocb_cmd.u.mbx.comp);
6532 /* don't free sp here. Let the caller do the free */
6536 * This mailbox uses the iocb interface to send MB command.
6537 * This allows non-critial (non chip setup) command to go
/*
 * qla24xx_send_mb_cmd() - send a mailbox command through the IOCB path
 * instead of the direct mailbox registers.  Allocates an srb, copies the
 * outgoing mailboxes from 'mcp', submits it, blocks on a completion, and
 * copies the returned mailboxes back into mcp->mb.  Requires firmware to
 * be started; the srb reference is dropped on all exit paths.
 */
6540 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6542 int rval = QLA_FUNCTION_FAILED;
6546 if (!vha->hw->flags.fw_started)
6550 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6554 c = &sp->u.iocb_cmd;
6555 init_completion(&c->u.mbx.comp);
6557 sp->type = SRB_MB_IOCB;
6558 sp->name = mb_to_str(mcp->mb[0]);
6559 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
6560 qla2x00_async_mb_sp_done);
6562 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6564 rval = qla2x00_start_sp(sp);
6565 if (rval != QLA_SUCCESS) {
6566 ql_dbg(ql_dbg_mbx, vha, 0x1018,
6567 "%s: %s Failed submission. %x.\n",
6568 __func__, sp->name, rval);
6572 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6573 sp->name, sp->handle);
6575 wait_for_completion(&c->u.mbx.comp);
6576 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6580 case QLA_FUNCTION_TIMEOUT:
6581 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6582 __func__, sp->name, rval);
6585 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6586 __func__, sp->name);
6589 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6590 __func__, sp->name, rval);
6596 kref_put(&sp->cmd_kref, qla2x00_sp_release);
6603 * NOTE: Do not call this routine from DPC thread
/*
 * qla24xx_gpdb_wait() - synchronously fetch the port database for
 * 'fcport' via MBC_GET_PORT_DATABASE sent over the IOCB mailbox path
 * (qla24xx_send_mb_cmd), then parse it with __qla24xx_parse_gpdb().
 * Blocks on completion, hence the "not from DPC thread" warning above.
 */
6605 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6607 int rval = QLA_FUNCTION_FAILED;
6609 struct port_database_24xx *pd;
6610 struct qla_hw_data *ha = vha->hw;
6613 if (!vha->hw->flags.fw_started)
6616 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6618 ql_log(ql_log_warn, vha, 0xd047,
6619 "Failed to allocate port database structure.\n");
6623 memset(&mc, 0, sizeof(mc));
6624 mc.mb[0] = MBC_GET_PORT_DATABASE;
6625 mc.mb[1] = fcport->loop_id;
6626 mc.mb[2] = MSW(pd_dma);
6627 mc.mb[3] = LSW(pd_dma);
6628 mc.mb[6] = MSW(MSD(pd_dma));
6629 mc.mb[7] = LSW(MSD(pd_dma));
6630 mc.mb[9] = vha->vp_idx;
6633 rval = qla24xx_send_mb_cmd(vha, &mc);
6634 if (rval != QLA_SUCCESS) {
6635 ql_dbg(ql_dbg_mbx, vha, 0x1193,
6636 "%s: %8phC fail\n", __func__, fcport->port_name);
6640 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6642 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6643 __func__, fcport->port_name);
6647 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
/*
 * __qla24xx_parse_gpdb() - interpret a firmware port_database_24xx entry
 * and update 'fcport' accordingly: verifies the login state reached
 * PDS_PRLI_COMPLETE, confirms the WWPN still matches (else the device
 * was lost mid-way), then copies names/port_id and derives port_type,
 * supported classes-of-service and confirmed-completion support from the
 * PRLI service parameter word 3 bits.
 */
6652 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6653 struct port_database_24xx *pd)
6655 int rval = QLA_SUCCESS;
6657 u8 current_login_state, last_login_state;
/* NVMe targets report their login state in the high nibble, FCP in the low. */
6659 if (NVME_TARGET(vha->hw, fcport)) {
6660 current_login_state = pd->current_login_state >> 4;
6661 last_login_state = pd->last_login_state >> 4;
6663 current_login_state = pd->current_login_state & 0xf;
6664 last_login_state = pd->last_login_state & 0xf;
6667 /* Check for logged in state. */
6668 if (current_login_state != PDS_PRLI_COMPLETE) {
6669 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6670 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6671 current_login_state, last_login_state, fcport->loop_id);
6672 rval = QLA_FUNCTION_FAILED;
6676 if (fcport->loop_id == FC_NO_LOOP_ID ||
6677 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6678 memcmp(fcport->port_name, pd->port_name, 8))) {
6679 /* We lost the device mid way. */
6680 rval = QLA_NOT_LOGGED_IN;
6684 /* Names are little-endian. */
6685 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6686 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6688 /* Get port_id of device. */
6689 fcport->d_id.b.domain = pd->port_id[0];
6690 fcport->d_id.b.area = pd->port_id[1];
6691 fcport->d_id.b.al_pa = pd->port_id[2];
6692 fcport->d_id.b.rsvd_1 = 0;
6694 ql_dbg(ql_dbg_disc, vha, 0x2062,
6695 "%8phC SVC Param w3 %02x%02x",
6697 pd->prli_svc_param_word_3[1],
6698 pd->prli_svc_param_word_3[0]);
6700 if (NVME_TARGET(vha->hw, fcport)) {
6701 fcport->port_type = FCT_NVME;
6702 if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
6703 fcport->port_type |= FCT_NVME_INITIATOR;
6704 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6705 fcport->port_type |= FCT_NVME_TARGET;
6706 if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
6707 fcport->port_type |= FCT_NVME_DISCOVERY;
6709 /* If not target must be initiator or unknown type. */
6710 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6711 fcport->port_type = FCT_INITIATOR;
6713 fcport->port_type = FCT_TARGET;
6715 /* Passback COS information. */
6716 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6717 FC_COS_CLASS2 : FC_COS_CLASS3;
6719 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6720 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6721 fcport->conf_compl_supported = 1;
 * qla24xx_gidlist_wait
 * NOTE: don't call this routine from DPC thread.
6732 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6733 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6735 int rval = QLA_FUNCTION_FAILED;
6738 if (!vha->hw->flags.fw_started)
6741 memset(&mc, 0, sizeof(mc));
6742 mc.mb[0] = MBC_GET_ID_LIST;
6743 mc.mb[2] = MSW(id_list_dma);
6744 mc.mb[3] = LSW(id_list_dma);
6745 mc.mb[6] = MSW(MSD(id_list_dma));
6746 mc.mb[7] = LSW(MSD(id_list_dma));
6748 mc.mb[9] = vha->vp_idx;
6750 rval = qla24xx_send_mb_cmd(vha, &mc);
6751 if (rval != QLA_SUCCESS) {
6752 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6753 "%s: fail\n", __func__);
6755 *entries = mc.mb[1];
6756 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6757 "%s: done\n", __func__);
6763 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6767 mbx_cmd_t *mcp = &mc;
6769 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6770 "Entered %s\n", __func__);
6772 memset(mcp->mb, 0 , sizeof(mcp->mb));
6773 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6776 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6777 mcp->in_mb = MBX_2 | MBX_0;
6778 mcp->tov = MBX_TOV_SECONDS;
6781 rval = qla2x00_mailbox_command(vha, mcp);
6783 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6784 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6789 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6793 mbx_cmd_t *mcp = &mc;
6795 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6796 "Entered %s\n", __func__);
6798 memset(mcp->mb, 0, sizeof(mcp->mb));
6799 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6801 mcp->out_mb = MBX_1 | MBX_0;
6802 mcp->in_mb = MBX_2 | MBX_0;
6803 mcp->tov = MBX_TOV_SECONDS;
6806 rval = qla2x00_mailbox_command(vha, mcp);
6807 if (rval == QLA_SUCCESS)
6810 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6811 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6817 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6819 struct qla_hw_data *ha = vha->hw;
6820 uint16_t iter, addr, offset;
6821 dma_addr_t phys_addr;
6825 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6827 phys_addr = ha->sfp_data_dma;
6828 sfp_data = ha->sfp_data;
6831 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6833 /* Skip to next device address. */
6838 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6839 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6840 if (rval != QLA_SUCCESS) {
6841 ql_log(ql_log_warn, vha, 0x706d,
6842 "Unable to read SFP data (%x/%x/%x).\n", rval,
6848 if (buf && (c < count)) {
6851 if ((count - c) >= SFP_BLOCK_SIZE)
6852 sz = SFP_BLOCK_SIZE;
6856 memcpy(buf, sfp_data, sz);
6857 buf += SFP_BLOCK_SIZE;
6860 phys_addr += SFP_BLOCK_SIZE;
6861 sfp_data += SFP_BLOCK_SIZE;
6862 offset += SFP_BLOCK_SIZE;
6868 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6869 uint16_t *out_mb, int out_mb_sz)
6871 int rval = QLA_FUNCTION_FAILED;
6874 if (!vha->hw->flags.fw_started)
6877 memset(&mc, 0, sizeof(mc));
6878 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6880 rval = qla24xx_send_mb_cmd(vha, &mc);
6881 if (rval != QLA_SUCCESS) {
6882 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6883 "%s: fail\n", __func__);
6885 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6886 memcpy(out_mb, mc.mb, out_mb_sz);
6888 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6890 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6891 "%s: done\n", __func__);
6897 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
6898 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
6903 mbx_cmd_t *mcp = &mc;
6905 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
6907 mcp->mb[2] = region;
6908 mcp->mb[3] = MSW(len);
6909 mcp->mb[4] = LSW(len);
6910 mcp->mb[5] = MSW(sfub_dma_addr);
6911 mcp->mb[6] = LSW(sfub_dma_addr);
6912 mcp->mb[7] = MSW(MSD(sfub_dma_addr));
6913 mcp->mb[8] = LSW(MSD(sfub_dma_addr));
6914 mcp->mb[9] = sfub_len;
6916 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6917 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6918 mcp->tov = MBX_TOV_SECONDS;
6920 rval = qla2x00_mailbox_command(vha, mcp);
6922 if (rval != QLA_SUCCESS) {
6923 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
6924 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
6931 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6936 mbx_cmd_t *mcp = &mc;
6938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6939 "Entered %s.\n", __func__);
6941 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6942 mcp->mb[1] = LSW(addr);
6943 mcp->mb[2] = MSW(addr);
6944 mcp->mb[3] = LSW(data);
6945 mcp->mb[4] = MSW(data);
6946 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6947 mcp->in_mb = MBX_1|MBX_0;
6948 mcp->tov = MBX_TOV_SECONDS;
6950 rval = qla2x00_mailbox_command(vha, mcp);
6952 if (rval != QLA_SUCCESS) {
6953 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6954 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6956 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6957 "Done %s.\n", __func__);
6963 int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6968 mbx_cmd_t *mcp = &mc;
6970 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6971 "Entered %s.\n", __func__);
6973 mcp->mb[0] = MBC_READ_REMOTE_REG;
6974 mcp->mb[1] = LSW(addr);
6975 mcp->mb[2] = MSW(addr);
6976 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6977 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6978 mcp->tov = MBX_TOV_SECONDS;
6980 rval = qla2x00_mailbox_command(vha, mcp);
6982 *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
6984 if (rval != QLA_SUCCESS) {
6985 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6986 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6988 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6989 "Done %s.\n", __func__);
6996 ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
6998 struct qla_hw_data *ha = vha->hw;
7000 mbx_cmd_t *mcp = &mc;
7003 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
7004 return QLA_FUNCTION_FAILED;
7006 ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
7009 mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
7010 mcp->mb[1] = options;
7011 mcp->out_mb = MBX_1|MBX_0;
7012 mcp->in_mb = MBX_1|MBX_0;
7013 if (options & BIT_0) {
7014 if (options & BIT_1) {
7015 mcp->mb[2] = led[2];
7016 mcp->out_mb |= MBX_2;
7018 if (options & BIT_2) {
7019 mcp->mb[3] = led[0];
7020 mcp->out_mb |= MBX_3;
7022 if (options & BIT_3) {
7023 mcp->mb[4] = led[1];
7024 mcp->out_mb |= MBX_4;
7027 mcp->in_mb |= MBX_4|MBX_3|MBX_2;
7029 mcp->tov = MBX_TOV_SECONDS;
7031 rval = qla2x00_mailbox_command(vha, mcp);
7033 ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
7034 __func__, rval, mcp->mb[0], mcp->mb[1]);
7038 if (options & BIT_0) {
7039 ha->beacon_blink_led = 0;
7040 ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
7042 led[2] = mcp->mb[2];
7043 led[0] = mcp->mb[3];
7044 led[1] = mcp->mb[4];
7045 ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
7046 __func__, led[0], led[1], led[2]);
 * qla_no_op_mb(): This MB is used to check if FW is still alive and
 * able to generate an interrupt. Otherwise, a timeout will trigger
 * FW dump + reset
 * @vha: host adapter pointer
7059 void qla_no_op_mb(struct scsi_qla_host *vha)
7062 mbx_cmd_t *mcp = &mc;
7065 memset(&mc, 0, sizeof(mc));
7066 mcp->mb[0] = 0; // noop cmd= 0
7067 mcp->out_mb = MBX_0;
7071 rval = qla2x00_mailbox_command(vha, mcp);
7074 ql_dbg(ql_dbg_async, vha, 0x7071,
7075 "Failed %s %x\n", __func__, rval);
7079 int qla_mailbox_passthru(scsi_qla_host_t *vha,
7080 uint16_t *mbx_in, uint16_t *mbx_out)
7083 mbx_cmd_t *mcp = &mc;
7086 memset(&mc, 0, sizeof(mc));
7087 /* Receiving all 32 register's contents */
7088 memcpy(&mcp->mb, (char *)mbx_in, (32 * sizeof(uint16_t)));
7090 mcp->out_mb = 0xFFFFFFFF;
7091 mcp->in_mb = 0xFFFFFFFF;
7093 mcp->tov = MBX_TOV_SECONDS;
7097 rval = qla2x00_mailbox_command(vha, mcp);
7099 if (rval != QLA_SUCCESS) {
7100 ql_dbg(ql_dbg_mbx, vha, 0xf0a2,
7101 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
7103 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xf0a3, "Done %s.\n",
7105 /* passing all 32 register's contents */
7106 memcpy(mbx_out, &mcp->mb, 32 * sizeof(uint16_t));