1 // SPDX-License-Identifier: GPL-2.0-only
3 * QLogic qlcnic NIC Driver
4 * Copyright (c) 2009-2013 QLogic Corporation
10 #include "qlcnic_hdr.h"
11 #include "qlcnic_83xx_hw.h"
12 #include "qlcnic_hw.h"
14 #define QLC_83XX_MINIDUMP_FLASH 0x520000
15 #define QLC_83XX_OCM_INDEX 3
16 #define QLC_83XX_PCI_INDEX 0
17 #define QLC_83XX_DMA_ENGINE_INDEX 8
19 static const u32 qlcnic_ms_read_data[] = {
20 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
23 #define QLCNIC_DUMP_WCRB BIT_0
24 #define QLCNIC_DUMP_RWCRB BIT_1
25 #define QLCNIC_DUMP_ANDCRB BIT_2
26 #define QLCNIC_DUMP_ORCRB BIT_3
27 #define QLCNIC_DUMP_POLLCRB BIT_4
28 #define QLCNIC_DUMP_RD_SAVE BIT_5
29 #define QLCNIC_DUMP_WRT_SAVED BIT_6
30 #define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
31 #define QLCNIC_DUMP_SKIP BIT_7
33 #define QLCNIC_DUMP_MASK_MAX 0xff
35 struct qlcnic_pex_dma_descriptor {
41 u32 dma_bus_addr_high;
/* Per-entry header and per-opcode region layouts for minidump template
 * entries. These definitions are endian-sensitive bitfield structs; only
 * fragments are visible here. NOTE(review): interior fields for each
 * struct (__crb, __cache, __ocm, __mem, __mux, __queue, __ctrl, __pollrd,
 * __mux2, __pollrdmwr) are not shown — reconstruct from the original
 * driver source before editing; the #if branches select bitfield order
 * for little-endian builds, with a mirrored big-endian layout elided.
 */
45 struct qlcnic_common_entry_hdr {
49 #if defined(__LITTLE_ENDIAN)
62 #if defined(__LITTLE_ENDIAN)
76 #if defined(__LITTLE_ENDIAN)
87 #if defined(__LITTLE_ENDIAN)
105 #if defined(__LITTLE_ENDIAN)
117 #if defined(__LITTLE_ENDIAN)
/* read_addr_stride: step between consecutive reads — presumably part of
 * struct __queue given usage in qlcnic_dump_que(); confirm.
 */
134 u32 read_addr_stride;
159 #if defined(__LITTLE_ENDIAN)
170 #if defined(__LITTLE_ENDIAN)
185 #if defined(__LITTLE_ENDIAN)
206 #if defined(__LITTLE_ENDIAN)
228 struct qlcnic_dump_entry {
229 struct qlcnic_common_entry_hdr hdr;
232 struct __cache cache;
238 struct __pollrdmwr pollrdmwr;
240 struct __pollrd pollrd;
/* Minidump template entry opcodes dispatched by qlcnic_dump_fw().
 * QLCNIC_DUMP_NOP restored: it is referenced by both ops tables below.
 */
enum qlcnic_minidump_opcode {
	QLCNIC_DUMP_NOP		= 0,
	QLCNIC_DUMP_READ_CRB	= 1,
	QLCNIC_DUMP_READ_MUX	= 2,
	QLCNIC_DUMP_QUEUE	= 3,
	QLCNIC_DUMP_BRD_CONFIG	= 4,
	QLCNIC_DUMP_READ_OCM	= 6,
	QLCNIC_DUMP_PEG_REG	= 7,
	QLCNIC_DUMP_L1_DTAG	= 8,
	QLCNIC_DUMP_L1_ITAG	= 9,
	QLCNIC_DUMP_L1_DATA	= 11,
	QLCNIC_DUMP_L1_INST	= 12,
	QLCNIC_DUMP_L2_DTAG	= 21,
	QLCNIC_DUMP_L2_ITAG	= 22,
	QLCNIC_DUMP_L2_DATA	= 23,
	QLCNIC_DUMP_L2_INST	= 24,
	QLCNIC_DUMP_POLL_RD	= 35,
	QLCNIC_READ_MUX2	= 36,
	QLCNIC_READ_POLLRDMWR	= 37,
	QLCNIC_DUMP_READ_ROM	= 71,
	QLCNIC_DUMP_READ_MEM	= 72,
	QLCNIC_DUMP_READ_CTRL	= 98,
	QLCNIC_DUMP_TLHDR	= 99,
	QLCNIC_DUMP_RDEND	= 255
};
270 inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
272 struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
274 return hdr->saved_state[index];
277 inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
280 struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
282 hdr->saved_state[index] = value;
285 void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
287 struct qlcnic_82xx_dump_template_hdr *hdr;
289 hdr = fw_dump->tmpl_hdr;
290 fw_dump->tmpl_hdr_size = hdr->size;
291 fw_dump->version = hdr->version;
292 fw_dump->num_entries = hdr->num_entries;
293 fw_dump->offset = hdr->offset;
295 hdr->drv_cap_mask = hdr->cap_mask;
296 fw_dump->cap_mask = hdr->cap_mask;
298 fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
301 inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
303 struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
305 return hdr->cap_sizes[index];
308 void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
310 struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
312 hdr->sys_info[idx] = value;
315 void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
317 struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr;
319 hdr->drv_cap_mask = mask;
322 inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
324 struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
326 return hdr->saved_state[index];
329 inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
332 struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
334 hdr->saved_state[index] = value;
337 #define QLCNIC_TEMPLATE_VERSION (0x20001)
339 void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
341 struct qlcnic_83xx_dump_template_hdr *hdr;
343 hdr = fw_dump->tmpl_hdr;
344 fw_dump->tmpl_hdr_size = hdr->size;
345 fw_dump->version = hdr->version;
346 fw_dump->num_entries = hdr->num_entries;
347 fw_dump->offset = hdr->offset;
349 hdr->drv_cap_mask = hdr->cap_mask;
350 fw_dump->cap_mask = hdr->cap_mask;
352 fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
353 QLCNIC_TEMPLATE_VERSION;
356 inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
358 struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
360 return hdr->cap_sizes[index];
363 void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
365 struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
367 hdr->sys_info[idx] = value;
370 void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
372 struct qlcnic_83xx_dump_template_hdr *hdr;
375 hdr->drv_cap_mask = mask;
378 struct qlcnic_dump_operations {
379 enum qlcnic_minidump_opcode opcode;
380 u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
384 static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
385 struct qlcnic_dump_entry *entry, __le32 *buffer)
389 struct __crb *crb = &entry->region.crb;
393 for (i = 0; i < crb->no_ops; i++) {
394 data = qlcnic_ind_rd(adapter, addr);
395 *buffer++ = cpu_to_le32(addr);
396 *buffer++ = cpu_to_le32(data);
399 return crb->no_ops * 2 * sizeof(u32);
402 static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
403 struct qlcnic_dump_entry *entry, __le32 *buffer)
405 void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
406 struct __ctrl *ctr = &entry->region.ctrl;
407 int i, k, timeout = 0;
408 u32 addr, data, temp;
412 no_ops = ctr->no_ops;
414 for (i = 0; i < no_ops; i++) {
416 for (k = 0; k < 8; k++) {
417 if (!(ctr->opcode & (1 << k)))
420 case QLCNIC_DUMP_WCRB:
421 qlcnic_ind_wr(adapter, addr, ctr->val1);
423 case QLCNIC_DUMP_RWCRB:
424 data = qlcnic_ind_rd(adapter, addr);
425 qlcnic_ind_wr(adapter, addr, data);
427 case QLCNIC_DUMP_ANDCRB:
428 data = qlcnic_ind_rd(adapter, addr);
429 qlcnic_ind_wr(adapter, addr,
432 case QLCNIC_DUMP_ORCRB:
433 data = qlcnic_ind_rd(adapter, addr);
434 qlcnic_ind_wr(adapter, addr,
437 case QLCNIC_DUMP_POLLCRB:
438 while (timeout <= ctr->timeout) {
439 data = qlcnic_ind_rd(adapter, addr);
440 if ((data & ctr->val2) == ctr->val1)
442 usleep_range(1000, 2000);
445 if (timeout > ctr->timeout) {
446 dev_info(&adapter->pdev->dev,
447 "Timed out, aborting poll CRB\n");
451 case QLCNIC_DUMP_RD_SAVE:
454 addr = qlcnic_get_saved_state(adapter,
457 data = qlcnic_ind_rd(adapter, addr);
458 qlcnic_set_saved_state(adapter, hdr,
461 case QLCNIC_DUMP_WRT_SAVED:
464 data = qlcnic_get_saved_state(adapter,
472 addr = qlcnic_get_saved_state(adapter,
475 qlcnic_ind_wr(adapter, addr, data);
477 case QLCNIC_DUMP_MOD_SAVE_ST:
478 data = qlcnic_get_saved_state(adapter, hdr,
480 data <<= ctr->shl_val;
481 data >>= ctr->shr_val;
486 qlcnic_set_saved_state(adapter, hdr,
490 dev_info(&adapter->pdev->dev,
500 static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
501 struct qlcnic_dump_entry *entry, __le32 *buffer)
505 struct __mux *mux = &entry->region.mux;
508 for (loop = 0; loop < mux->no_ops; loop++) {
509 qlcnic_ind_wr(adapter, mux->addr, val);
510 data = qlcnic_ind_rd(adapter, mux->read_addr);
511 *buffer++ = cpu_to_le32(val);
512 *buffer++ = cpu_to_le32(data);
513 val += mux->val_stride;
515 return 2 * mux->no_ops * sizeof(u32);
518 static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
519 struct qlcnic_dump_entry *entry, __le32 *buffer)
522 u32 cnt, addr, data, que_id = 0;
523 struct __queue *que = &entry->region.que;
525 addr = que->read_addr;
526 cnt = que->read_addr_cnt;
528 for (loop = 0; loop < que->no_ops; loop++) {
529 qlcnic_ind_wr(adapter, que->sel_addr, que_id);
530 addr = que->read_addr;
531 for (i = 0; i < cnt; i++) {
532 data = qlcnic_ind_rd(adapter, addr);
533 *buffer++ = cpu_to_le32(data);
534 addr += que->read_addr_stride;
536 que_id += que->stride;
538 return que->no_ops * cnt * sizeof(u32);
541 static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
542 struct qlcnic_dump_entry *entry, __le32 *buffer)
547 struct __ocm *ocm = &entry->region.ocm;
549 addr = adapter->ahw->pci_base0 + ocm->read_addr;
550 for (i = 0; i < ocm->no_ops; i++) {
552 *buffer++ = cpu_to_le32(data);
553 addr += ocm->read_addr_stride;
555 return ocm->no_ops * sizeof(u32);
558 static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
559 struct qlcnic_dump_entry *entry, __le32 *buffer)
562 u32 fl_addr, size, val, lck_val, addr;
563 struct __mem *rom = &entry->region.mem;
566 size = rom->size / 4;
568 lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
569 if (!lck_val && count < MAX_CTL_CHECK) {
570 usleep_range(10000, 11000);
574 QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
575 adapter->ahw->pci_func);
576 for (i = 0; i < size; i++) {
577 addr = fl_addr & 0xFFFF0000;
578 qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
579 addr = LSW(fl_addr) + FLASH_ROM_DATA;
580 val = qlcnic_ind_rd(adapter, addr);
582 *buffer++ = cpu_to_le32(val);
584 QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
588 static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
589 struct qlcnic_dump_entry *entry, __le32 *buffer)
592 u32 cnt, val, data, addr;
593 struct __cache *l1 = &entry->region.cache;
595 val = l1->init_tag_val;
597 for (i = 0; i < l1->no_ops; i++) {
598 qlcnic_ind_wr(adapter, l1->addr, val);
599 qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
600 addr = l1->read_addr;
601 cnt = l1->read_addr_num;
603 data = qlcnic_ind_rd(adapter, addr);
604 *buffer++ = cpu_to_le32(data);
605 addr += l1->read_addr_stride;
610 return l1->no_ops * l1->read_addr_num * sizeof(u32);
613 static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
614 struct qlcnic_dump_entry *entry, __le32 *buffer)
617 u32 cnt, val, data, addr;
618 u8 poll_mask, poll_to, time_out = 0;
619 struct __cache *l2 = &entry->region.cache;
621 val = l2->init_tag_val;
622 poll_mask = LSB(MSW(l2->ctrl_val));
623 poll_to = MSB(MSW(l2->ctrl_val));
625 for (i = 0; i < l2->no_ops; i++) {
626 qlcnic_ind_wr(adapter, l2->addr, val);
627 if (LSW(l2->ctrl_val))
628 qlcnic_ind_wr(adapter, l2->ctrl_addr,
633 data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
634 if (!(data & poll_mask))
636 usleep_range(1000, 2000);
638 } while (time_out <= poll_to);
640 if (time_out > poll_to) {
641 dev_err(&adapter->pdev->dev,
642 "Timeout exceeded in %s, aborting dump\n",
647 addr = l2->read_addr;
648 cnt = l2->read_addr_num;
650 data = qlcnic_ind_rd(adapter, addr);
651 *buffer++ = cpu_to_le32(data);
652 addr += l2->read_addr_stride;
657 return l2->no_ops * l2->read_addr_num * sizeof(u32);
660 static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
661 struct __mem *mem, __le32 *buffer,
664 u32 addr, data, test;
667 reg_read = mem->size;
669 /* check for data size of multiple of 16 and 16 byte alignment */
670 if ((addr & 0xf) || (reg_read%16)) {
671 dev_info(&adapter->pdev->dev,
672 "Unaligned memory addr:0x%x size:0x%x\n",
678 mutex_lock(&adapter->ahw->mem_lock);
680 while (reg_read != 0) {
681 qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
682 qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
683 qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
685 for (i = 0; i < MAX_CTL_CHECK; i++) {
686 test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
687 if (!(test & TA_CTL_BUSY))
690 if (i == MAX_CTL_CHECK) {
691 if (printk_ratelimit()) {
692 dev_err(&adapter->pdev->dev,
693 "failed to read through agent\n");
698 for (i = 0; i < 4; i++) {
699 data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
700 *buffer++ = cpu_to_le32(data);
708 mutex_unlock(&adapter->ahw->mem_lock);
712 /* DMA register base address */
713 #define QLC_DMA_REG_BASE_ADDR(dma_no) (0x77320000 + (dma_no * 0x10000))
715 /* DMA register offsets w.r.t base address */
716 #define QLC_DMA_CMD_BUFF_ADDR_LOW 0
717 #define QLC_DMA_CMD_BUFF_ADDR_HI 4
718 #define QLC_DMA_CMD_STATUS_CTRL 8
720 static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
723 struct device *dev = &adapter->pdev->dev;
724 u32 dma_no, dma_base_addr, temp_addr;
728 tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
729 dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
730 QLC_83XX_DMA_ENGINE_INDEX);
731 dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
733 temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
734 ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
738 temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
739 ret = qlcnic_ind_wr(adapter, temp_addr, 0);
743 temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
744 ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
748 /* Wait for DMA to complete */
749 temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
750 for (i = 0; i < 400; i++) {
751 dma_sts = qlcnic_ind_rd(adapter, temp_addr);
754 usleep_range(250, 500);
760 dev_info(dev, "PEX DMA operation timed out");
767 static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
769 __le32 *buffer, int *ret)
771 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
772 u32 temp, dma_base_addr, size = 0, read_size = 0;
773 struct qlcnic_pex_dma_descriptor *dma_descr;
774 struct device *dev = &adapter->pdev->dev;
775 dma_addr_t dma_phys_addr;
779 tmpl_hdr = fw_dump->tmpl_hdr;
781 /* Check if DMA engine is available */
782 temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
783 QLC_83XX_DMA_ENGINE_INDEX);
784 dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
785 temp = qlcnic_ind_rd(adapter,
786 dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
788 if (!(temp & BIT_31)) {
789 dev_info(dev, "%s: DMA engine is not available\n", __func__);
794 /* Create DMA descriptor */
795 dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor),
802 /* dma_desc_cmd 0:15 = 0
803 * dma_desc_cmd 16:19 = mem->dma_desc_cmd 0:3
804 * dma_desc_cmd 20:23 = pci function number
805 * dma_desc_cmd 24:31 = mem->dma_desc_cmd 8:15
807 dma_phys_addr = fw_dump->phys_addr;
808 dma_buffer = fw_dump->dma_buffer;
810 temp = mem->dma_desc_cmd & 0xff0f;
811 temp |= (adapter->ahw->pci_func & 0xf) << 4;
812 dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000;
813 dma_descr->dma_bus_addr_low = LSD(dma_phys_addr);
814 dma_descr->dma_bus_addr_high = MSD(dma_phys_addr);
815 dma_descr->src_addr_high = 0;
817 /* Collect memory dump using multiple DMA operations if required */
818 while (read_size < mem->size) {
819 if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE)
820 size = QLC_PEX_DMA_READ_SIZE;
822 size = mem->size - read_size;
824 dma_descr->src_addr_low = mem->addr + read_size;
825 dma_descr->read_data_size = size;
827 /* Write DMA descriptor to MS memory*/
828 temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
829 *ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
830 (u32 *)dma_descr, temp);
832 dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
833 mem->desc_card_addr);
837 *ret = qlcnic_start_pex_dma(adapter, mem);
839 dev_info(dev, "Failed to start PEX DMA operation\n");
843 memcpy(buffer, dma_buffer, size);
854 static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
855 struct qlcnic_dump_entry *entry, __le32 *buffer)
857 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
858 struct device *dev = &adapter->pdev->dev;
859 struct __mem *mem = &entry->region.mem;
863 if (fw_dump->use_pex_dma) {
864 data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer,
868 "Failed to read memory dump using PEX DMA: mask[0x%x]\n",
874 data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret);
877 "Failed to read memory dump using test agent method: mask[0x%x]\n",
885 static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
886 struct qlcnic_dump_entry *entry, __le32 *buffer)
888 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
892 static int qlcnic_valid_dump_entry(struct device *dev,
893 struct qlcnic_dump_entry *entry, u32 size)
896 if (size != entry->hdr.cap_size) {
898 "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
899 entry->hdr.type, entry->hdr.mask, size,
900 entry->hdr.cap_size);
906 static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
907 struct qlcnic_dump_entry *entry,
910 struct __pollrdmwr *poll = &entry->region.pollrdmwr;
911 u32 data, wait_count, poll_wait, temp;
913 poll_wait = poll->poll_wait;
915 qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
918 while (wait_count < poll_wait) {
919 data = qlcnic_ind_rd(adapter, poll->addr1);
920 if ((data & poll->poll_mask) != 0)
925 if (wait_count == poll_wait) {
926 dev_err(&adapter->pdev->dev,
927 "Timeout exceeded in %s, aborting dump\n",
932 data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
933 qlcnic_ind_wr(adapter, poll->addr2, data);
934 qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
937 while (wait_count < poll_wait) {
938 temp = qlcnic_ind_rd(adapter, poll->addr1);
939 if ((temp & poll->poll_mask) != 0)
944 *buffer++ = cpu_to_le32(poll->addr2);
945 *buffer++ = cpu_to_le32(data);
947 return 2 * sizeof(u32);
951 static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
952 struct qlcnic_dump_entry *entry, __le32 *buffer)
954 struct __pollrd *pollrd = &entry->region.pollrd;
955 u32 data, wait_count, poll_wait, sel_val;
958 poll_wait = pollrd->poll_wait;
959 sel_val = pollrd->sel_val;
961 for (i = 0; i < pollrd->no_ops; i++) {
962 qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
964 while (wait_count < poll_wait) {
965 data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
966 if ((data & pollrd->poll_mask) != 0)
971 if (wait_count == poll_wait) {
972 dev_err(&adapter->pdev->dev,
973 "Timeout exceeded in %s, aborting dump\n",
978 data = qlcnic_ind_rd(adapter, pollrd->read_addr);
979 *buffer++ = cpu_to_le32(sel_val);
980 *buffer++ = cpu_to_le32(data);
981 sel_val += pollrd->sel_val_stride;
983 return pollrd->no_ops * (2 * sizeof(u32));
986 static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
987 struct qlcnic_dump_entry *entry, __le32 *buffer)
989 struct __mux2 *mux2 = &entry->region.mux2;
991 u32 t_sel_val, sel_val1, sel_val2;
994 sel_val1 = mux2->sel_val1;
995 sel_val2 = mux2->sel_val2;
997 for (i = 0; i < mux2->no_ops; i++) {
998 qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
999 t_sel_val = sel_val1 & mux2->sel_val_mask;
1000 qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
1001 data = qlcnic_ind_rd(adapter, mux2->read_addr);
1002 *buffer++ = cpu_to_le32(t_sel_val);
1003 *buffer++ = cpu_to_le32(data);
1004 qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
1005 t_sel_val = sel_val2 & mux2->sel_val_mask;
1006 qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
1007 data = qlcnic_ind_rd(adapter, mux2->read_addr);
1008 *buffer++ = cpu_to_le32(t_sel_val);
1009 *buffer++ = cpu_to_le32(data);
1010 sel_val1 += mux2->sel_val_stride;
1011 sel_val2 += mux2->sel_val_stride;
1014 return mux2->no_ops * (4 * sizeof(u32));
1017 static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
1018 struct qlcnic_dump_entry *entry, __le32 *buffer)
1021 struct __mem *rom = &entry->region.mem;
1023 fl_addr = rom->addr;
1024 size = rom->size / 4;
1026 if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
1027 (u8 *)buffer, size))
1033 static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
1034 {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
1035 {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
1036 {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
1037 {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
1038 {QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
1039 {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
1040 {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
1041 {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
1042 {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
1043 {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
1044 {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
1045 {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
1046 {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
1047 {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
1048 {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
1049 {QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
1050 {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
1051 {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
1052 {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
1053 {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
1056 static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
1057 {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
1058 {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
1059 {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
1060 {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
1061 {QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
1062 {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
1063 {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
1064 {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
1065 {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
1066 {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
1067 {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
1068 {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
1069 {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
1070 {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
1071 {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
1072 {QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
1073 {QLCNIC_READ_MUX2, qlcnic_read_mux2},
1074 {QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
1075 {QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
1076 {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
1077 {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
1078 {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
1079 {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
1082 static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
1085 int count = temp_size / sizeof(uint32_t);
1087 sum += *temp_buffer++;
1089 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
1093 static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
1094 u8 *buffer, u32 size)
1098 if (qlcnic_82xx_check(adapter))
1101 if (qlcnic_83xx_lock_flash(adapter))
1104 ret = qlcnic_83xx_lockless_flash_read32(adapter,
1105 QLC_83XX_MINIDUMP_FLASH,
1106 buffer, size / sizeof(u32));
1108 qlcnic_83xx_unlock_flash(adapter);
1114 qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
1115 struct qlcnic_cmd_args *cmd)
1117 struct qlcnic_83xx_dump_template_hdr tmp_hdr;
1118 u32 size = sizeof(tmp_hdr) / sizeof(u32);
1121 if (qlcnic_82xx_check(adapter))
1124 if (qlcnic_83xx_lock_flash(adapter))
1127 ret = qlcnic_83xx_lockless_flash_read32(adapter,
1128 QLC_83XX_MINIDUMP_FLASH,
1129 (u8 *)&tmp_hdr, size);
1131 qlcnic_83xx_unlock_flash(adapter);
1133 cmd->rsp.arg[2] = tmp_hdr.size;
1134 cmd->rsp.arg[3] = tmp_hdr.version;
1139 static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
1140 u32 *version, u32 *temp_size,
1144 struct qlcnic_cmd_args cmd;
1146 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
1149 err = qlcnic_issue_cmd(adapter, &cmd);
1150 if (err != QLCNIC_RCODE_SUCCESS) {
1151 if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
1152 qlcnic_free_mbx_args(&cmd);
1155 *use_flash_temp = 1;
1158 *temp_size = cmd.rsp.arg[2];
1159 *version = cmd.rsp.arg[3];
1160 qlcnic_free_mbx_args(&cmd);
1168 static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
1169 u32 *buffer, u32 temp_size)
1174 struct qlcnic_cmd_args cmd;
1175 dma_addr_t tmp_addr_t = 0;
1177 tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
1178 &tmp_addr_t, GFP_KERNEL);
1182 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
1187 cmd.req.arg[1] = LSD(tmp_addr_t);
1188 cmd.req.arg[2] = MSD(tmp_addr_t);
1189 cmd.req.arg[3] = temp_size;
1190 err = qlcnic_issue_cmd(adapter, &cmd);
1193 if (err == QLCNIC_RCODE_SUCCESS) {
1194 for (i = 0; i < temp_size / sizeof(u32); i++)
1195 *buffer++ = __le32_to_cpu(*tmp_buf++);
1198 qlcnic_free_mbx_args(&cmd);
1201 dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
1206 int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
1208 struct qlcnic_hardware_context *ahw;
1209 struct qlcnic_fw_dump *fw_dump;
1210 u32 version, csum, *tmp_buf;
1211 u8 use_flash_temp = 0;
1217 fw_dump = &ahw->fw_dump;
1218 err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
1221 dev_err(&adapter->pdev->dev,
1222 "Can't get template size %d\n", err);
1226 fw_dump->tmpl_hdr = vzalloc(temp_size);
1227 if (!fw_dump->tmpl_hdr)
1230 tmp_buf = (u32 *)fw_dump->tmpl_hdr;
1234 err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);
1238 err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
1242 dev_err(&adapter->pdev->dev,
1243 "Failed to get minidump template header %d\n",
1245 vfree(fw_dump->tmpl_hdr);
1246 fw_dump->tmpl_hdr = NULL;
1251 csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);
1254 dev_err(&adapter->pdev->dev,
1255 "Template header checksum validation failed\n");
1256 vfree(fw_dump->tmpl_hdr);
1257 fw_dump->tmpl_hdr = NULL;
1261 qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
1263 if (fw_dump->use_pex_dma) {
1264 fw_dump->dma_buffer = NULL;
1265 temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
1266 QLC_PEX_DMA_READ_SIZE,
1267 &fw_dump->phys_addr,
1270 fw_dump->use_pex_dma = false;
1272 fw_dump->dma_buffer = temp_buffer;
1276 dev_info(&adapter->pdev->dev,
1277 "Default minidump capture mask 0x%x\n",
1280 qlcnic_enable_fw_dump_state(adapter);
1285 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1287 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1288 const struct qlcnic_dump_operations *fw_dump_ops;
1289 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
1290 u32 entry_offset, dump, no_entries, buf_offset = 0;
1291 int i, k, ops_cnt, ops_index, dump_size = 0;
1292 struct device *dev = &adapter->pdev->dev;
1293 struct qlcnic_hardware_context *ahw;
1294 struct qlcnic_dump_entry *entry;
1299 char *msg[] = {mesg, NULL};
1302 tmpl_hdr = fw_dump->tmpl_hdr;
1304 /* Return if we don't have firmware dump template header */
1308 if (!qlcnic_check_fw_dump_state(adapter)) {
1309 dev_info(&adapter->pdev->dev, "Dump not enabled\n");
1314 dev_info(&adapter->pdev->dev,
1315 "Previous dump not cleared, not capturing dump\n");
1319 netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
1320 /* Calculate the size for dump data area only */
1321 for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
1322 if (i & fw_dump->cap_mask)
1323 dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);
1328 fw_dump->data = vzalloc(dump_size);
1332 buffer = fw_dump->data;
1333 fw_dump->size = dump_size;
1334 no_entries = fw_dump->num_entries;
1335 entry_offset = fw_dump->offset;
1336 qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
1337 qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
1339 if (qlcnic_82xx_check(adapter)) {
1340 ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
1341 fw_dump_ops = qlcnic_fw_dump_ops;
1343 hdr_83xx = tmpl_hdr;
1344 ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
1345 fw_dump_ops = qlcnic_83xx_fw_dump_ops;
1346 ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
1347 hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
1348 hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
1351 for (i = 0; i < no_entries; i++) {
1352 entry = tmpl_hdr + entry_offset;
1353 if (!(entry->hdr.mask & fw_dump->cap_mask)) {
1354 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1355 entry_offset += entry->hdr.offset;
1359 /* Find the handler for this entry */
1361 while (ops_index < ops_cnt) {
1362 if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
1367 if (ops_index == ops_cnt) {
1368 dev_info(dev, "Skipping unknown entry opcode %d\n",
1370 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1371 entry_offset += entry->hdr.offset;
1375 /* Collect dump for this entry */
1376 dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
1377 if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
1378 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1379 entry_offset += entry->hdr.offset;
1383 buf_offset += entry->hdr.cap_size;
1384 entry_offset += entry->hdr.offset;
1385 buffer = fw_dump->data + buf_offset;
1390 snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
1391 netdev_info(adapter->netdev,
1392 "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n",
1393 fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size,
1395 /* Send a udev event to notify availability of FW dump */
1396 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
1402 qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter)
1404 /* For special adapters (with 0x8830 device ID), where iSCSI firmware
1405 * dump needs to be captured as part of regular firmware dump
1406 * collection process, firmware exports it's capability through
1407 * capability registers
1409 return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) &&
1410 (adapter->ahw->extra_capability[0] &
1411 QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP));
/* Refresh the minidump template after a firmware (re)load: if the
 * firmware version changed (or no template is cached), drop the old
 * template, optionally enable the extended iSCSI dump in firmware, and
 * re-fetch the template. When extended dump is active, force the capture
 * mask to 0x1f per firmware requirement. NOTE(review): this function's
 * tail (and several interior lines, e.g. the `ret` declaration and the
 * branch around the extended-mask update) lies beyond the visible
 * fragment — do not edit without the full source.
 */
1414 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
1416 u32 prev_version, current_version;
1417 struct qlcnic_hardware_context *ahw = adapter->ahw;
1418 struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
1419 struct pci_dev *pdev = adapter->pdev;
1420 bool extended = false;
1423 prev_version = adapter->fw_version;
1424 current_version = qlcnic_83xx_get_fw_version(adapter);
/* Re-fetch only when there is no cached template or firmware is newer */
1426 if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
1427 vfree(fw_dump->tmpl_hdr);
1428 fw_dump->tmpl_hdr = NULL;
1430 if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
1431 extended = !qlcnic_83xx_extend_md_capab(adapter);
1433 ret = qlcnic_fw_cmd_get_minidump_temp(adapter);
1437 dev_info(&pdev->dev, "Supports FW dump capability\n");
1439 /* Once we have minidump template with extended iSCSI dump
1440 * capability, update the minidump capture mask to 0x1f as
1441 * per FW requirement
1444 struct qlcnic_83xx_dump_template_hdr *hdr;
1446 hdr = fw_dump->tmpl_hdr;
1449 hdr->drv_cap_mask = 0x1f;
1450 fw_dump->cap_mask = 0x1f;
1451 dev_info(&pdev->dev,
1452 "Extended iSCSI dump capability and updated capture mask to 0x1f\n"