1 /* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
9 #include <linux/module.h>
10 #include <linux/vmalloc.h>
11 #include <linux/crc32.h>
16 #include "qed_reg_addr.h"
18 /* Memory groups enum */
30 MEM_GROUP_CONN_CFC_MEM,
31 MEM_GROUP_TASK_CFC_MEM,
52 /* Memory groups names */
53 static const char * const s_mem_group_names[] = {
85 /* Idle check conditions */
/* Idle check condition: fires when the masked value of each of the two
 * registers differs from its expected immediate.
 */
static u32 cond5(const u32 *r, const u32 *imm)
{
	u32 reg0_differs = (r[0] & imm[0]) != imm[1];
	u32 reg1_differs = (r[1] & imm[2]) != imm[3];

	return reg0_differs && reg1_differs;
}
/* Idle check condition: fires when the bit field extracted from the
 * register (shift, then mask) differs from the expected immediate.
 */
static u32 cond7(const u32 *r, const u32 *imm)
{
	u32 field = (r[0] >> imm[0]) & imm[1];

	return field != imm[2];
}
/* Idle check condition: fires when the masked register value differs
 * from the expected immediate.
 */
static u32 cond6(const u32 *r, const u32 *imm)
{
	u32 masked = r[0] & imm[0];

	return masked != imm[1];
}
/* Idle check condition: fires when a field of the first register does
 * not match a value assembled from a low part of the first register and
 * a high part of the second register.
 */
static u32 cond9(const u32 *r, const u32 *imm)
{
	u32 lhs = (r[0] & imm[0]) >> imm[1];
	u32 low_part = (r[0] & imm[2]) >> imm[3];
	u32 high_part = (r[1] & imm[4]) << imm[5];

	return lhs != (low_part | high_part);
}
/* Idle check condition: fires when a shifted field of the register does
 * not equal another masked field of the same register.
 */
static u32 cond10(const u32 *r, const u32 *imm)
{
	u32 shifted_field = (r[0] & imm[0]) >> imm[1];
	u32 masked_field = r[0] & imm[2];

	return shifted_field != masked_field;
}
/* Idle check condition: fires when the register, with the immediate's
 * bits cleared, differs from the expected value.
 */
static u32 cond4(const u32 *r, const u32 *imm)
{
	u32 cleared = r[0] & ~imm[0];

	return cleared != imm[1];
}
/* Idle check condition: fires when the first register, with the second
 * register's bits cleared, differs from the expected immediate.
 */
static u32 cond0(const u32 *r, const u32 *imm)
{
	u32 remaining = r[0] & ~r[1];

	return remaining != imm[0];
}
/* Idle check condition: fires when the register differs from the
 * expected immediate.
 */
static u32 cond1(const u32 *r, const u32 *imm)
{
	return (r[0] == imm[0]) ? 0 : 1;
}
/* Idle check condition: fires when the first two registers differ and
 * the third register equals the expected immediate.
 */
static u32 cond11(const u32 *r, const u32 *imm)
{
	if (r[0] == r[1])
		return 0;

	return r[2] == imm[0];
}
/* Idle check condition: fires when the first two registers differ and
 * the third register exceeds the immediate threshold.
 */
static u32 cond12(const u32 *r, const u32 *imm)
{
	if (r[0] == r[1])
		return 0;

	return r[2] > imm[0];
}
138 static u32 cond3(const u32 *r, const u32 *imm)
/* Idle check condition: fires when the register and immediate share any
 * bits. Note: returns the masked value itself, not a 0/1 flag.
 */
static u32 cond13(const u32 *r, const u32 *imm)
{
	u32 common_bits = r[0] & imm[0];

	return common_bits;
}
/* Idle check condition: fires when the first register is below the
 * second register reduced by the immediate. Unsigned arithmetic: if
 * imm[0] > r[1] the subtraction wraps, matching the original expression.
 */
static u32 cond8(const u32 *r, const u32 *imm)
{
	u32 threshold = r[1] - imm[0];

	return r[0] < threshold;
}
/* Idle check condition: fires when the register exceeds the immediate
 * threshold.
 */
static u32 cond2(const u32 *r, const u32 *imm)
{
	return (r[0] > imm[0]) ? 1 : 0;
}
158 /* Array of Idle Check conditions */
159 static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
176 /******************************* Data Types **********************************/
186 struct chip_platform_defs {
192 /* Chip constant definitions */
195 struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
198 /* Platform constant definitions */
199 struct platform_defs {
206 /* Storm constant definitions.
207 * Addresses are in bytes, sizes are in quad-regs.
211 enum block_id block_id;
212 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
214 u32 sem_fast_mem_addr;
215 u32 sem_frame_mode_addr;
216 u32 sem_slow_enable_addr;
217 u32 sem_slow_mode_addr;
218 u32 sem_slow_mode1_conf_addr;
219 u32 sem_sync_dbg_empty_addr;
220 u32 sem_slow_dbg_empty_addr;
222 u32 cm_conn_ag_ctx_lid_size;
223 u32 cm_conn_ag_ctx_rd_addr;
224 u32 cm_conn_st_ctx_lid_size;
225 u32 cm_conn_st_ctx_rd_addr;
226 u32 cm_task_ag_ctx_lid_size;
227 u32 cm_task_ag_ctx_rd_addr;
228 u32 cm_task_st_ctx_lid_size;
229 u32 cm_task_st_ctx_rd_addr;
232 /* Block constant definitions */
235 bool exists[MAX_CHIP_IDS];
236 bool associated_to_storm;
238 /* Valid only if associated_to_storm is true */
240 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
244 u32 dbg_force_valid_addr;
245 u32 dbg_force_frame_addr;
248 /* If true, block is taken out of reset before dump */
250 enum dbg_reset_regs reset_reg;
252 /* Bit offset in reset register */
256 /* Reset register definitions */
257 struct reset_reg_defs {
259 bool exists[MAX_CHIP_IDS];
260 u32 unreset_val[MAX_CHIP_IDS];
263 struct grc_param_defs {
264 u32 default_val[MAX_CHIP_IDS];
269 u32 exclude_all_preset_val;
270 u32 crash_preset_val;
273 /* Address is in 128b units. Width is in bits. */
274 struct rss_mem_defs {
275 const char *mem_name;
276 const char *type_name;
279 u32 num_entries[MAX_CHIP_IDS];
282 struct vfc_ram_defs {
283 const char *mem_name;
284 const char *type_name;
289 struct big_ram_defs {
290 const char *instance_name;
291 enum mem_groups mem_group_id;
292 enum mem_groups ram_mem_group_id;
293 enum dbg_grc_params grc_param;
296 u32 is_256b_reg_addr;
297 u32 is_256b_bit_offset[MAX_CHIP_IDS];
298 u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
302 const char *phy_name;
304 /* PHY base GRC address */
307 /* Relative address of indirect TBUS address register (bits 0..7) */
308 u32 tbus_addr_lo_addr;
310 /* Relative address of indirect TBUS address register (bits 8..10) */
311 u32 tbus_addr_hi_addr;
313 /* Relative address of indirect TBUS data register (bits 0..7) */
314 u32 tbus_data_lo_addr;
316 /* Relative address of indirect TBUS data register (bits 8..11) */
317 u32 tbus_data_hi_addr;
320 /******************************** Constants **********************************/
322 #define MAX_LCIDS 320
323 #define MAX_LTIDS 320
325 #define NUM_IOR_SETS 2
326 #define IORS_PER_SET 176
327 #define IOR_SET_OFFSET(set_id) ((set_id) * 256)
329 #define BYTES_IN_DWORD sizeof(u32)
331 /* In the macros below, size and offset are specified in bits */
332 #define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
333 #define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
334 #define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
335 #define FIELD_DWORD_OFFSET(type, field) \
336 (int)(FIELD_BIT_OFFSET(type, field) / 32)
337 #define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
338 #define FIELD_BIT_MASK(type, field) \
339 (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
340 FIELD_DWORD_SHIFT(type, field))
342 #define SET_VAR_FIELD(var, type, field, val) \
344 var[FIELD_DWORD_OFFSET(type, field)] &= \
345 (~FIELD_BIT_MASK(type, field)); \
346 var[FIELD_DWORD_OFFSET(type, field)] |= \
347 (val) << FIELD_DWORD_SHIFT(type, field); \
350 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
352 for (i = 0; i < (arr_size); i++) \
353 qed_wr(dev, ptt, addr, (arr)[i]); \
356 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
358 for (i = 0; i < (arr_size); i++) \
359 (arr)[i] = qed_rd(dev, ptt, addr); \
362 #define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
363 #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
365 /* Extra lines include a signature line + optional latency events line */
366 #define NUM_EXTRA_DBG_LINES(block_desc) \
367 (1 + ((block_desc)->has_latency_events ? 1 : 0))
368 #define NUM_DBG_LINES(block_desc) \
369 ((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
371 #define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
372 #define RAM_LINES_TO_BYTES(lines) \
373 DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
375 #define REG_DUMP_LEN_SHIFT 24
376 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
377 BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
379 #define IDLE_CHK_RULE_SIZE_DWORDS \
380 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
382 #define IDLE_CHK_RESULT_HDR_DWORDS \
383 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
385 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
386 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
388 #define IDLE_CHK_MAX_ENTRIES_SIZE 32
390 /* The sizes and offsets below are specified in bits */
391 #define VFC_CAM_CMD_STRUCT_SIZE 64
392 #define VFC_CAM_CMD_ROW_OFFSET 48
393 #define VFC_CAM_CMD_ROW_SIZE 9
394 #define VFC_CAM_ADDR_STRUCT_SIZE 16
395 #define VFC_CAM_ADDR_OP_OFFSET 0
396 #define VFC_CAM_ADDR_OP_SIZE 4
397 #define VFC_CAM_RESP_STRUCT_SIZE 256
398 #define VFC_RAM_ADDR_STRUCT_SIZE 16
399 #define VFC_RAM_ADDR_OP_OFFSET 0
400 #define VFC_RAM_ADDR_OP_SIZE 2
401 #define VFC_RAM_ADDR_ROW_OFFSET 2
402 #define VFC_RAM_ADDR_ROW_SIZE 10
403 #define VFC_RAM_RESP_STRUCT_SIZE 256
405 #define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
406 #define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
407 #define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
408 #define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
409 #define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
410 #define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
412 #define NUM_VFC_RAM_TYPES 4
414 #define VFC_CAM_NUM_ROWS 512
416 #define VFC_OPCODE_CAM_RD 14
417 #define VFC_OPCODE_RAM_RD 0
419 #define NUM_RSS_MEM_TYPES 5
421 #define NUM_BIG_RAM_TYPES 3
422 #define BIG_RAM_NAME_LEN 3
424 #define NUM_PHY_TBUS_ADDRESSES 2048
425 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
427 #define RESET_REG_UNRESET_OFFSET 4
429 #define STALL_DELAY_MS 500
431 #define STATIC_DEBUG_LINE_DWORDS 9
433 #define NUM_COMMON_GLOBAL_PARAMS 8
435 #define FW_IMG_MAIN 1
437 #define REG_FIFO_ELEMENT_DWORDS 2
438 #define REG_FIFO_DEPTH_ELEMENTS 32
439 #define REG_FIFO_DEPTH_DWORDS \
440 (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
442 #define IGU_FIFO_ELEMENT_DWORDS 4
443 #define IGU_FIFO_DEPTH_ELEMENTS 64
444 #define IGU_FIFO_DEPTH_DWORDS \
445 (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
447 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
448 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
449 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
450 (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
451 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
453 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
455 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
457 #define EMPTY_FW_VERSION_STR "???_???_???_???"
458 #define EMPTY_FW_IMAGE_STR "???????????????"
460 /***************************** Constant Arrays *******************************/
468 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
470 /* Chip constant definitions array */
471 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
473 {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB},
478 {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
489 /* Storm constant definitions array */
490 static struct storm_defs s_storm_defs[] = {
493 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
494 DBG_BUS_CLIENT_RBCT}, true,
495 TSEM_REG_FAST_MEMORY,
496 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
497 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
498 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
499 TCM_REG_CTX_RBC_ACCS,
500 4, TCM_REG_AGG_CON_CTX,
501 16, TCM_REG_SM_CON_CTX,
502 2, TCM_REG_AGG_TASK_CTX,
503 4, TCM_REG_SM_TASK_CTX},
507 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
508 DBG_BUS_CLIENT_RBCM}, false,
509 MSEM_REG_FAST_MEMORY,
510 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
511 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
512 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
513 MCM_REG_CTX_RBC_ACCS,
514 1, MCM_REG_AGG_CON_CTX,
515 10, MCM_REG_SM_CON_CTX,
516 2, MCM_REG_AGG_TASK_CTX,
517 7, MCM_REG_SM_TASK_CTX},
521 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
522 DBG_BUS_CLIENT_RBCU}, false,
523 USEM_REG_FAST_MEMORY,
524 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
525 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
526 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
527 UCM_REG_CTX_RBC_ACCS,
528 2, UCM_REG_AGG_CON_CTX,
529 13, UCM_REG_SM_CON_CTX,
530 3, UCM_REG_AGG_TASK_CTX,
531 3, UCM_REG_SM_TASK_CTX},
535 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
536 DBG_BUS_CLIENT_RBCX}, false,
537 XSEM_REG_FAST_MEMORY,
538 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
539 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
540 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
541 XCM_REG_CTX_RBC_ACCS,
542 9, XCM_REG_AGG_CON_CTX,
543 15, XCM_REG_SM_CON_CTX,
549 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
550 DBG_BUS_CLIENT_RBCY}, false,
551 YSEM_REG_FAST_MEMORY,
552 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
553 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
554 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
555 YCM_REG_CTX_RBC_ACCS,
556 2, YCM_REG_AGG_CON_CTX,
557 3, YCM_REG_SM_CON_CTX,
558 2, YCM_REG_AGG_TASK_CTX,
559 12, YCM_REG_SM_TASK_CTX},
563 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
564 DBG_BUS_CLIENT_RBCS}, true,
565 PSEM_REG_FAST_MEMORY,
566 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
567 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
568 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
569 PCM_REG_CTX_RBC_ACCS,
571 10, PCM_REG_SM_CON_CTX,
576 /* Block definitions array */
578 static struct block_defs block_grc_defs = {
580 {true, true, true}, false, 0,
581 {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
582 GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
583 GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
584 GRC_REG_DBG_FORCE_FRAME,
585 true, false, DBG_RESET_REG_MISC_PL_UA, 1
588 static struct block_defs block_miscs_defs = {
589 "miscs", {true, true, true}, false, 0,
590 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
592 false, false, MAX_DBG_RESET_REGS, 0
595 static struct block_defs block_misc_defs = {
596 "misc", {true, true, true}, false, 0,
597 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
599 false, false, MAX_DBG_RESET_REGS, 0
602 static struct block_defs block_dbu_defs = {
603 "dbu", {true, true, true}, false, 0,
604 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
606 false, false, MAX_DBG_RESET_REGS, 0
609 static struct block_defs block_pglue_b_defs = {
611 {true, true, true}, false, 0,
612 {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
613 PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
614 PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
615 PGLUE_B_REG_DBG_FORCE_FRAME,
616 true, false, DBG_RESET_REG_MISCS_PL_HV, 1
619 static struct block_defs block_cnig_defs = {
621 {true, true, true}, false, 0,
622 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
623 DBG_BUS_CLIENT_RBCW},
624 CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
625 CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
626 CNIG_REG_DBG_FORCE_FRAME_K2_E5,
627 true, false, DBG_RESET_REG_MISCS_PL_HV, 0
630 static struct block_defs block_cpmu_defs = {
631 "cpmu", {true, true, true}, false, 0,
632 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
634 true, false, DBG_RESET_REG_MISCS_PL_HV, 8
637 static struct block_defs block_ncsi_defs = {
639 {true, true, true}, false, 0,
640 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
641 NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
642 NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
643 NCSI_REG_DBG_FORCE_FRAME,
644 true, false, DBG_RESET_REG_MISCS_PL_HV, 5
647 static struct block_defs block_opte_defs = {
648 "opte", {true, true, false}, false, 0,
649 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
651 true, false, DBG_RESET_REG_MISCS_PL_HV, 4
654 static struct block_defs block_bmb_defs = {
656 {true, true, true}, false, 0,
657 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
658 BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
659 BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
660 BMB_REG_DBG_FORCE_FRAME,
661 true, false, DBG_RESET_REG_MISCS_PL_UA, 7
664 static struct block_defs block_pcie_defs = {
666 {true, true, true}, false, 0,
667 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
668 DBG_BUS_CLIENT_RBCH},
669 PCIE_REG_DBG_COMMON_SELECT_K2_E5,
670 PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
671 PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
672 PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
673 PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
674 false, false, MAX_DBG_RESET_REGS, 0
677 static struct block_defs block_mcp_defs = {
678 "mcp", {true, true, true}, false, 0,
679 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
681 false, false, MAX_DBG_RESET_REGS, 0
684 static struct block_defs block_mcp2_defs = {
686 {true, true, true}, false, 0,
687 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
688 MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
689 MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
690 MCP2_REG_DBG_FORCE_FRAME,
691 false, false, MAX_DBG_RESET_REGS, 0
694 static struct block_defs block_pswhst_defs = {
696 {true, true, true}, false, 0,
697 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
698 PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
699 PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
700 PSWHST_REG_DBG_FORCE_FRAME,
701 true, false, DBG_RESET_REG_MISC_PL_HV, 0
704 static struct block_defs block_pswhst2_defs = {
706 {true, true, true}, false, 0,
707 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
708 PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
709 PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
710 PSWHST2_REG_DBG_FORCE_FRAME,
711 true, false, DBG_RESET_REG_MISC_PL_HV, 0
714 static struct block_defs block_pswrd_defs = {
716 {true, true, true}, false, 0,
717 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
718 PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
719 PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
720 PSWRD_REG_DBG_FORCE_FRAME,
721 true, false, DBG_RESET_REG_MISC_PL_HV, 2
724 static struct block_defs block_pswrd2_defs = {
726 {true, true, true}, false, 0,
727 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
728 PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
729 PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
730 PSWRD2_REG_DBG_FORCE_FRAME,
731 true, false, DBG_RESET_REG_MISC_PL_HV, 2
734 static struct block_defs block_pswwr_defs = {
736 {true, true, true}, false, 0,
737 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
738 PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
739 PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
740 PSWWR_REG_DBG_FORCE_FRAME,
741 true, false, DBG_RESET_REG_MISC_PL_HV, 3
744 static struct block_defs block_pswwr2_defs = {
745 "pswwr2", {true, true, true}, false, 0,
746 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
748 true, false, DBG_RESET_REG_MISC_PL_HV, 3
751 static struct block_defs block_pswrq_defs = {
753 {true, true, true}, false, 0,
754 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
755 PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
756 PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
757 PSWRQ_REG_DBG_FORCE_FRAME,
758 true, false, DBG_RESET_REG_MISC_PL_HV, 1
761 static struct block_defs block_pswrq2_defs = {
763 {true, true, true}, false, 0,
764 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
765 PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
766 PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
767 PSWRQ2_REG_DBG_FORCE_FRAME,
768 true, false, DBG_RESET_REG_MISC_PL_HV, 1
771 static struct block_defs block_pglcs_defs = {
773 {true, true, true}, false, 0,
774 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
775 DBG_BUS_CLIENT_RBCH},
776 PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
777 PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
778 PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
779 true, false, DBG_RESET_REG_MISCS_PL_HV, 2
782 static struct block_defs block_ptu_defs = {
784 {true, true, true}, false, 0,
785 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
786 PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
787 PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
788 PTU_REG_DBG_FORCE_FRAME,
789 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
792 static struct block_defs block_dmae_defs = {
794 {true, true, true}, false, 0,
795 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
796 DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
797 DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
798 DMAE_REG_DBG_FORCE_FRAME,
799 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
802 static struct block_defs block_tcm_defs = {
804 {true, true, true}, true, DBG_TSTORM_ID,
805 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
806 TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
807 TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
808 TCM_REG_DBG_FORCE_FRAME,
809 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
812 static struct block_defs block_mcm_defs = {
814 {true, true, true}, true, DBG_MSTORM_ID,
815 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
816 MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
817 MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
818 MCM_REG_DBG_FORCE_FRAME,
819 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
822 static struct block_defs block_ucm_defs = {
824 {true, true, true}, true, DBG_USTORM_ID,
825 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
826 UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
827 UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
828 UCM_REG_DBG_FORCE_FRAME,
829 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
832 static struct block_defs block_xcm_defs = {
834 {true, true, true}, true, DBG_XSTORM_ID,
835 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
836 XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
837 XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
838 XCM_REG_DBG_FORCE_FRAME,
839 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
842 static struct block_defs block_ycm_defs = {
844 {true, true, true}, true, DBG_YSTORM_ID,
845 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
846 YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
847 YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
848 YCM_REG_DBG_FORCE_FRAME,
849 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
852 static struct block_defs block_pcm_defs = {
854 {true, true, true}, true, DBG_PSTORM_ID,
855 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
856 PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
857 PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
858 PCM_REG_DBG_FORCE_FRAME,
859 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
862 static struct block_defs block_qm_defs = {
864 {true, true, true}, false, 0,
865 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
866 QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
867 QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
868 QM_REG_DBG_FORCE_FRAME,
869 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
872 static struct block_defs block_tm_defs = {
874 {true, true, true}, false, 0,
875 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
876 TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
877 TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
878 TM_REG_DBG_FORCE_FRAME,
879 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
882 static struct block_defs block_dorq_defs = {
884 {true, true, true}, false, 0,
885 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
886 DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
887 DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
888 DORQ_REG_DBG_FORCE_FRAME,
889 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
892 static struct block_defs block_brb_defs = {
894 {true, true, true}, false, 0,
895 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
896 BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
897 BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
898 BRB_REG_DBG_FORCE_FRAME,
899 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
902 static struct block_defs block_src_defs = {
904 {true, true, true}, false, 0,
905 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
906 SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
907 SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
908 SRC_REG_DBG_FORCE_FRAME,
909 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
912 static struct block_defs block_prs_defs = {
914 {true, true, true}, false, 0,
915 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
916 PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
917 PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
918 PRS_REG_DBG_FORCE_FRAME,
919 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
922 static struct block_defs block_tsdm_defs = {
924 {true, true, true}, true, DBG_TSTORM_ID,
925 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
926 TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
927 TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
928 TSDM_REG_DBG_FORCE_FRAME,
929 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
932 static struct block_defs block_msdm_defs = {
934 {true, true, true}, true, DBG_MSTORM_ID,
935 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
936 MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
937 MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
938 MSDM_REG_DBG_FORCE_FRAME,
939 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
942 static struct block_defs block_usdm_defs = {
944 {true, true, true}, true, DBG_USTORM_ID,
945 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
946 USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
947 USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
948 USDM_REG_DBG_FORCE_FRAME,
949 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
952 static struct block_defs block_xsdm_defs = {
954 {true, true, true}, true, DBG_XSTORM_ID,
955 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
956 XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
957 XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
958 XSDM_REG_DBG_FORCE_FRAME,
959 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
962 static struct block_defs block_ysdm_defs = {
964 {true, true, true}, true, DBG_YSTORM_ID,
965 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
966 YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
967 YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
968 YSDM_REG_DBG_FORCE_FRAME,
969 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
972 static struct block_defs block_psdm_defs = {
974 {true, true, true}, true, DBG_PSTORM_ID,
975 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
976 PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
977 PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
978 PSDM_REG_DBG_FORCE_FRAME,
979 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
982 static struct block_defs block_tsem_defs = {
984 {true, true, true}, true, DBG_TSTORM_ID,
985 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
986 TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
987 TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
988 TSEM_REG_DBG_FORCE_FRAME,
989 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
992 static struct block_defs block_msem_defs = {
994 {true, true, true}, true, DBG_MSTORM_ID,
995 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
996 MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
997 MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
998 MSEM_REG_DBG_FORCE_FRAME,
999 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
1002 static struct block_defs block_usem_defs = {
1004 {true, true, true}, true, DBG_USTORM_ID,
1005 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1006 USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
1007 USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
1008 USEM_REG_DBG_FORCE_FRAME,
1009 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
1012 static struct block_defs block_xsem_defs = {
1014 {true, true, true}, true, DBG_XSTORM_ID,
1015 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1016 XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
1017 XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
1018 XSEM_REG_DBG_FORCE_FRAME,
1019 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
1022 static struct block_defs block_ysem_defs = {
1024 {true, true, true}, true, DBG_YSTORM_ID,
1025 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
1026 YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
1027 YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
1028 YSEM_REG_DBG_FORCE_FRAME,
1029 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
1032 static struct block_defs block_psem_defs = {
1034 {true, true, true}, true, DBG_PSTORM_ID,
1035 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1036 PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
1037 PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
1038 PSEM_REG_DBG_FORCE_FRAME,
1039 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
1042 static struct block_defs block_rss_defs = {
1044 {true, true, true}, false, 0,
1045 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
1046 RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
1047 RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
1048 RSS_REG_DBG_FORCE_FRAME,
1049 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
1052 static struct block_defs block_tmld_defs = {
1054 {true, true, true}, false, 0,
1055 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1056 TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
1057 TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
1058 TMLD_REG_DBG_FORCE_FRAME,
1059 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
1062 static struct block_defs block_muld_defs = {
1064 {true, true, true}, false, 0,
1065 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1066 MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
1067 MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
1068 MULD_REG_DBG_FORCE_FRAME,
1069 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
1072 static struct block_defs block_yuld_defs = {
1074 {true, true, false}, false, 0,
1075 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
1076 MAX_DBG_BUS_CLIENTS},
1077 YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
1078 YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
1079 YULD_REG_DBG_FORCE_FRAME_BB_K2,
1080 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1084 static struct block_defs block_xyld_defs = {
1086 {true, true, true}, false, 0,
1087 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1088 XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1089 XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1090 XYLD_REG_DBG_FORCE_FRAME,
1091 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
1094 static struct block_defs block_ptld_defs = {
1096 {false, false, true}, false, 0,
1097 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
1098 PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
1099 PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
1100 PTLD_REG_DBG_FORCE_FRAME_E5,
1101 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1105 static struct block_defs block_ypld_defs = {
1107 {false, false, true}, false, 0,
1108 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
1109 YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
1110 YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
1111 YPLD_REG_DBG_FORCE_FRAME_E5,
1112 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
/* Per-HW-block debug definitions. Each entry supplies (in field order, per
 * struct block_defs): block name string, per-chip existence flags,
 * storm-association flag + storm id, per-chip debug bus client ids, the
 * block's DBG SELECT/DWORD_ENABLE/SHIFT/FORCE_VALID/FORCE_FRAME register
 * addresses, and finally the reset info (has_reset_bit, unreset, reset
 * register id, reset bit offset).
 * NOTE(review): several initializer lines (name strings, closing braces,
 * some register/bit values) appear to have been dropped from this copy of
 * the file — verify each entry against upstream qed_debug.c before use.
 */
1116 static struct block_defs block_prm_defs = {
1118 {true, true, true}, false, 0,
1119 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1120 PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1121 PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1122 PRM_REG_DBG_FORCE_FRAME,
1123 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
1126 static struct block_defs block_pbf_pb1_defs = {
1128 {true, true, true}, false, 0,
1129 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1130 PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1131 PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1132 PBF_PB1_REG_DBG_FORCE_FRAME,
1133 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1137 static struct block_defs block_pbf_pb2_defs = {
1139 {true, true, true}, false, 0,
1140 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1141 PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1142 PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1143 PBF_PB2_REG_DBG_FORCE_FRAME,
1144 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1148 static struct block_defs block_rpb_defs = {
1150 {true, true, true}, false, 0,
1151 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1152 RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1153 RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1154 RPB_REG_DBG_FORCE_FRAME,
1155 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
1158 static struct block_defs block_btb_defs = {
1160 {true, true, true}, false, 0,
1161 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1162 BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1163 BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1164 BTB_REG_DBG_FORCE_FRAME,
1165 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
1168 static struct block_defs block_pbf_defs = {
1170 {true, true, true}, false, 0,
1171 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1172 PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1173 PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1174 PBF_REG_DBG_FORCE_FRAME,
1175 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
1178 static struct block_defs block_rdif_defs = {
1180 {true, true, true}, false, 0,
1181 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1182 RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1183 RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1184 RDIF_REG_DBG_FORCE_FRAME,
1185 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
1188 static struct block_defs block_tdif_defs = {
1190 {true, true, true}, false, 0,
1191 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1192 TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1193 TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1194 TDIF_REG_DBG_FORCE_FRAME,
1195 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
1198 static struct block_defs block_cdu_defs = {
1200 {true, true, true}, false, 0,
1201 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1202 CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1203 CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1204 CDU_REG_DBG_FORCE_FRAME,
1205 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
1208 static struct block_defs block_ccfc_defs = {
1210 {true, true, true}, false, 0,
1211 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1212 CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1213 CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1214 CCFC_REG_DBG_FORCE_FRAME,
1215 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
1218 static struct block_defs block_tcfc_defs = {
1220 {true, true, true}, false, 0,
1221 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1222 TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1223 TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1224 TCFC_REG_DBG_FORCE_FRAME,
1225 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
1228 static struct block_defs block_igu_defs = {
1230 {true, true, true}, false, 0,
1231 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1232 IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1233 IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1234 IGU_REG_DBG_FORCE_FRAME,
1235 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
1238 static struct block_defs block_cau_defs = {
1240 {true, true, true}, false, 0,
1241 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1242 CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1243 CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1244 CAU_REG_DBG_FORCE_FRAME,
1245 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
/* The following blocks exist only on some chip generations (E5/K2);
 * entries with MAX_DBG_BUS_CLIENTS have no debug bus client on that chip.
 */
1248 static struct block_defs block_rgfs_defs = {
1249 "rgfs", {false, false, true}, false, 0,
1250 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1252 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
1255 static struct block_defs block_rgsrc_defs = {
1257 {false, false, true}, false, 0,
1258 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1259 RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
1260 RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
1261 RGSRC_REG_DBG_FORCE_FRAME_E5,
1262 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1266 static struct block_defs block_tgfs_defs = {
1267 "tgfs", {false, false, true}, false, 0,
1268 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1270 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
1273 static struct block_defs block_tgsrc_defs = {
1275 {false, false, true}, false, 0,
1276 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
1277 TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
1278 TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
1279 TGSRC_REG_DBG_FORCE_FRAME_E5,
1280 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1284 static struct block_defs block_umac_defs = {
1286 {true, true, true}, false, 0,
1287 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
1288 DBG_BUS_CLIENT_RBCZ},
1289 UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
1290 UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
1291 UMAC_REG_DBG_FORCE_FRAME_K2_E5,
1292 true, false, DBG_RESET_REG_MISCS_PL_HV, 6
1295 static struct block_defs block_xmac_defs = {
1296 "xmac", {true, false, false}, false, 0,
1297 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1299 false, false, MAX_DBG_RESET_REGS, 0
1302 static struct block_defs block_dbg_defs = {
1303 "dbg", {true, true, true}, false, 0,
1304 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1306 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1309 static struct block_defs block_nig_defs = {
1311 {true, true, true}, false, 0,
1312 {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1313 NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1314 NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1315 NIG_REG_DBG_FORCE_FRAME,
1316 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
1319 static struct block_defs block_wol_defs = {
1321 {false, true, true}, false, 0,
1322 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1323 WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
1324 WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
1325 WOL_REG_DBG_FORCE_FRAME_K2_E5,
1326 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
1329 static struct block_defs block_bmbn_defs = {
1331 {false, true, true}, false, 0,
1332 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
1333 DBG_BUS_CLIENT_RBCB},
1334 BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
1335 BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
1336 BMBN_REG_DBG_FORCE_FRAME_K2_E5,
1337 false, false, MAX_DBG_RESET_REGS, 0
1340 static struct block_defs block_ipc_defs = {
1341 "ipc", {true, true, true}, false, 0,
1342 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1344 true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1347 static struct block_defs block_nwm_defs = {
1349 {false, true, true}, false, 0,
1350 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1351 NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
1352 NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
1353 NWM_REG_DBG_FORCE_FRAME_K2_E5,
1354 true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
1357 static struct block_defs block_nws_defs = {
1359 {false, true, true}, false, 0,
1360 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1361 NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
1362 NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
1363 NWS_REG_DBG_FORCE_FRAME_K2_E5,
1364 true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1367 static struct block_defs block_ms_defs = {
1369 {false, true, true}, false, 0,
1370 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1371 MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
1372 MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
1373 MS_REG_DBG_FORCE_FRAME_K2_E5,
1374 true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1377 static struct block_defs block_phy_pcie_defs = {
1379 {false, true, true}, false, 0,
1380 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
1381 DBG_BUS_CLIENT_RBCH},
1382 PCIE_REG_DBG_COMMON_SELECT_K2_E5,
1383 PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
1384 PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
1385 PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
1386 PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
1387 false, false, MAX_DBG_RESET_REGS, 0
1390 static struct block_defs block_led_defs = {
1391 "led", {false, true, true}, false, 0,
1392 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1394 true, false, DBG_RESET_REG_MISCS_PL_HV, 14
1397 static struct block_defs block_avs_wrap_defs = {
1398 "avs_wrap", {false, true, false}, false, 0,
1399 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1401 true, false, DBG_RESET_REG_MISCS_PL_UA, 11
1404 static struct block_defs block_pxpreqbus_defs = {
1405 "pxpreqbus", {false, false, false}, false, 0,
1406 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1408 false, false, MAX_DBG_RESET_REGS, 0
1411 static struct block_defs block_misc_aeu_defs = {
1412 "misc_aeu", {true, true, true}, false, 0,
1413 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1415 false, false, MAX_DBG_RESET_REGS, 0
1418 static struct block_defs block_bar0_map_defs = {
1419 "bar0_map", {true, true, true}, false, 0,
1420 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1422 false, false, MAX_DBG_RESET_REGS, 0
/* Master lookup table: maps each enum block_id to its block_defs entry;
 * indexed by block_id, so entry order must match the enum.
 * NOTE(review): most array entries are missing from this copy of the file
 * (only a few pointers are visible) — restore from upstream qed_debug.c.
 */
1425 static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1430 &block_pglue_b_defs,
1440 &block_pswhst2_defs,
1482 &block_pbf_pb1_defs,
1483 &block_pbf_pb2_defs,
1508 &block_phy_pcie_defs,
1510 &block_avs_wrap_defs,
1511 &block_pxpreqbus_defs,
1512 &block_misc_aeu_defs,
1513 &block_bar0_map_defs,
/* Supported platforms. Only "asic" is real; the reserved entries keep the
 * array aligned with the platform id enum.
 * NOTE(review): the closing "};" appears to be missing in this copy.
 */
1516 static struct platform_defs s_platform_defs[] = {
1517 {"asic", 1, 256, 32768},
1518 {"reserved", 0, 0, 0},
1519 {"reserved2", 0, 0, 0},
1520 {"reserved3", 0, 0, 0}
/* Per-GRC-parameter defaults and limits, indexed by enum dbg_grc_params.
 * Each entry: {per-chip default values}, min, max, then flags and preset
 * values — presumably is_preset plus the exclude-all / crash preset
 * values; confirm field meaning against struct grc_param_defs upstream.
 * NOTE(review): the closing "};" appears to be missing in this copy.
 */
1523 static struct grc_param_defs s_grc_param_defs[] = {
1524 /* DBG_GRC_PARAM_DUMP_TSTORM */
1525 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1527 /* DBG_GRC_PARAM_DUMP_MSTORM */
1528 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1530 /* DBG_GRC_PARAM_DUMP_USTORM */
1531 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1533 /* DBG_GRC_PARAM_DUMP_XSTORM */
1534 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1536 /* DBG_GRC_PARAM_DUMP_YSTORM */
1537 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1539 /* DBG_GRC_PARAM_DUMP_PSTORM */
1540 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1542 /* DBG_GRC_PARAM_DUMP_REGS */
1543 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1545 /* DBG_GRC_PARAM_DUMP_RAM */
1546 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1548 /* DBG_GRC_PARAM_DUMP_PBUF */
1549 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1551 /* DBG_GRC_PARAM_DUMP_IOR */
1552 {{0, 0, 0}, 0, 1, false, false, 0, 1},
1554 /* DBG_GRC_PARAM_DUMP_VFC */
1555 {{0, 0, 0}, 0, 1, false, false, 0, 1},
1557 /* DBG_GRC_PARAM_DUMP_CM_CTX */
1558 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1560 /* DBG_GRC_PARAM_DUMP_ILT */
1561 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1563 /* DBG_GRC_PARAM_DUMP_RSS */
1564 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1566 /* DBG_GRC_PARAM_DUMP_CAU */
1567 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1569 /* DBG_GRC_PARAM_DUMP_QM */
1570 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1572 /* DBG_GRC_PARAM_DUMP_MCP */
1573 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1575 /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
1576 {{1, 1, 1}, 1, 0xffffffff, false, true, 0, 1},
1578 /* DBG_GRC_PARAM_DUMP_CFC */
1579 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1581 /* DBG_GRC_PARAM_DUMP_IGU */
1582 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1584 /* DBG_GRC_PARAM_DUMP_BRB */
1585 {{0, 0, 0}, 0, 1, false, false, 0, 1},
1587 /* DBG_GRC_PARAM_DUMP_BTB */
1588 {{0, 0, 0}, 0, 1, false, false, 0, 1},
1590 /* DBG_GRC_PARAM_DUMP_BMB */
1591 {{0, 0, 0}, 0, 1, false, false, 0, 1},
1593 /* DBG_GRC_PARAM_DUMP_NIG */
1594 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1596 /* DBG_GRC_PARAM_DUMP_MULD */
1597 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1599 /* DBG_GRC_PARAM_DUMP_PRS */
1600 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1602 /* DBG_GRC_PARAM_DUMP_DMAE */
1603 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1605 /* DBG_GRC_PARAM_DUMP_TM */
1606 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1608 /* DBG_GRC_PARAM_DUMP_SDM */
1609 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1611 /* DBG_GRC_PARAM_DUMP_DIF */
1612 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1614 /* DBG_GRC_PARAM_DUMP_STATIC */
1615 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1617 /* DBG_GRC_PARAM_UNSTALL */
1618 {{0, 0, 0}, 0, 1, false, false, 0, 0},
1620 /* DBG_GRC_PARAM_NUM_LCIDS */
1621 {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, false,
1622 MAX_LCIDS, MAX_LCIDS},
1624 /* DBG_GRC_PARAM_NUM_LTIDS */
1625 {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, false,
1626 MAX_LTIDS, MAX_LTIDS},
1628 /* DBG_GRC_PARAM_EXCLUDE_ALL */
1629 {{0, 0, 0}, 0, 1, true, false, 0, 0},
1631 /* DBG_GRC_PARAM_CRASH */
1632 {{0, 0, 0}, 0, 1, true, false, 0, 0},
1634 /* DBG_GRC_PARAM_PARITY_SAFE */
1635 {{0, 0, 0}, 0, 1, false, false, 1, 0},
1637 /* DBG_GRC_PARAM_DUMP_CM */
1638 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1640 /* DBG_GRC_PARAM_DUMP_PHY */
1641 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1643 /* DBG_GRC_PARAM_NO_MCP */
1644 {{0, 0, 0}, 0, 1, false, false, 0, 0},
1646 /* DBG_GRC_PARAM_NO_FW_VER */
1647 {{0, 0, 0}, 0, 1, false, false, 0, 0}
/* RSS memory regions: mem name, type name, start address, entry width in
 * bits, and (on the last visible entry) per-chip entry counts.
 * NOTE(review): per-chip size lines for most entries and the closing "};"
 * appear to be missing in this copy of the file.
 */
1650 static struct rss_mem_defs s_rss_mem_defs[] = {
1651 { "rss_mem_cid", "rss_cid", 0, 32,
1654 { "rss_mem_key_msb", "rss_key", 1024, 256,
1657 { "rss_mem_key_lsb", "rss_key", 2048, 64,
1660 { "rss_mem_info", "rss_info", 3072, 16,
1663 { "rss_mem_ind", "rss_ind", 4096, 16,
1664 {16384, 26624, 32768} }
/* VFC RAM regions: mem name, type name, base row, row count.
 * NOTE(review): the closing "};" appears to be missing in this copy.
 */
1667 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1668 {"vfc_ram_tt1", "vfc_ram", 0, 512},
1669 {"vfc_ram_mtt2", "vfc_ram", 512, 128},
1670 {"vfc_ram_stt2", "vfc_ram", 640, 32},
1671 {"vfc_ram_ro_vect", "vfc_ram", 672, 32}
/* Big RAM blocks (BRB/BTB/BMB): memory group ids, controlling GRC dump
 * param, address/data window registers, 256B-block enable register and
 * per-chip flags, then per-chip RAM sizes in dwords.
 * NOTE(review): the closing "};" appears to be missing in this copy.
 */
1674 static struct big_ram_defs s_big_ram_defs[] = {
1675 { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1676 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1677 MISC_REG_BLOCK_256B_EN, {0, 0, 0},
1678 {153600, 180224, 282624} },
1680 { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1681 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1682 MISC_REG_BLOCK_256B_EN, {0, 1, 1},
1683 {92160, 117760, 168960} },
1685 { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1686 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1687 MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
1688 {36864, 36864, 36864} }
/* Reset registers, indexed by enum dbg_reset_regs. Each entry: register
 * address, per-chip existence flags, per-chip unreset values.
 * NOTE(review): the closing "};" appears to be missing in this copy.
 */
1691 static struct reset_reg_defs s_reset_regs_defs[] = {
1692 /* DBG_RESET_REG_MISCS_PL_UA */
1693 { MISCS_REG_RESET_PL_UA,
1694 {true, true, true}, {0x0, 0x0, 0x0} },
1696 /* DBG_RESET_REG_MISCS_PL_HV */
1697 { MISCS_REG_RESET_PL_HV,
1698 {true, true, true}, {0x0, 0x400, 0x600} },
1700 /* DBG_RESET_REG_MISCS_PL_HV_2 */
1701 { MISCS_REG_RESET_PL_HV_2_K2_E5,
1702 {false, true, true}, {0x0, 0x0, 0x0} },
1704 /* DBG_RESET_REG_MISC_PL_UA */
1705 { MISC_REG_RESET_PL_UA,
1706 {true, true, true}, {0x0, 0x0, 0x0} },
1708 /* DBG_RESET_REG_MISC_PL_HV */
1709 { MISC_REG_RESET_PL_HV,
1710 {true, true, true}, {0x0, 0x0, 0x0} },
1712 /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1713 { MISC_REG_RESET_PL_PDA_VMAIN_1,
1714 {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
1716 /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1717 { MISC_REG_RESET_PL_PDA_VMAIN_2,
1718 {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
1720 /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1721 { MISC_REG_RESET_PL_PDA_VAUX,
1722 {true, true, true}, {0x2, 0x2, 0x2} },
/* PHY tbus access definitions: PHY name, base address, then the tbus
 * address-low/address-high/data-low/data-high register offsets used to
 * read PHY internals.
 * NOTE(review): the closing "};" appears to be missing in this copy.
 */
1725 static struct phy_defs s_phy_defs[] = {
1726 {"nw_phy", NWS_REG_NWS_CMU_K2,
1727 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
1728 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
1729 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
1730 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
1731 {"sgmii_phy", MS_REG_MS_CMU_K2_E5,
1732 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1733 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1734 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1735 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1736 {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
1737 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1738 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1739 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1740 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1741 {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
1742 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1743 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1744 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1745 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1748 /**************************** Private Functions ******************************/
1750 /* Reads and returns a single dword from the specified unaligned buffer */
1751 static u32 qed_read_unaligned_dword(u8 *buf)
1755 memcpy((u8 *)&dword, buf, sizeof(dword));
1759 /* Returns the value of the specified GRC param */
1760 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1761 enum dbg_grc_params grc_param)
1763 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1765 return dev_data->grc.param_val[grc_param];
1768 /* Initializes the GRC parameters */
1769 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
1771 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1773 if (!dev_data->grc.params_initialized) {
1774 qed_dbg_grc_set_params_default(p_hwfn);
1775 dev_data->grc.params_initialized = 1;
1779 /* Initializes debug data for the specified device */
1780 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1781 struct qed_ptt *p_ptt)
1783 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1785 if (dev_data->initialized)
1786 return DBG_STATUS_OK;
1788 if (QED_IS_K2(p_hwfn->cdev)) {
1789 dev_data->chip_id = CHIP_K2;
1790 dev_data->mode_enable[MODE_K2] = 1;
1791 } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1792 dev_data->chip_id = CHIP_BB;
1793 dev_data->mode_enable[MODE_BB] = 1;
1795 return DBG_STATUS_UNKNOWN_CHIP;
1798 dev_data->platform_id = PLATFORM_ASIC;
1799 dev_data->mode_enable[MODE_ASIC] = 1;
1801 /* Initializes the GRC parameters */
1802 qed_dbg_grc_init_params(p_hwfn);
1804 dev_data->use_dmae = true;
1805 dev_data->num_regs_read = 0;
1806 dev_data->initialized = 1;
1808 return DBG_STATUS_OK;
1811 static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
1812 enum block_id block_id)
1814 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1816 return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
1821 /* Reads the FW info structure for the specified Storm from the chip,
1822 * and writes it to the specified fw_info pointer.
1824 static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
1825 struct qed_ptt *p_ptt,
1826 u8 storm_id, struct fw_info *fw_info)
1828 struct storm_defs *storm = &s_storm_defs[storm_id];
1829 struct fw_info_location fw_info_location;
1832 memset(&fw_info_location, 0, sizeof(fw_info_location));
1833 memset(fw_info, 0, sizeof(*fw_info));
1835 /* Read first the address that points to fw_info location.
1836 * The address is located in the last line of the Storm RAM.
1838 addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1839 DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
1840 sizeof(fw_info_location);
1841 dest = (u32 *)&fw_info_location;
1843 for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
1844 i++, addr += BYTES_IN_DWORD)
1845 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1847 /* Read FW version info from Storm RAM */
1848 if (fw_info_location.size > 0 && fw_info_location.size <=
1850 addr = fw_info_location.grc_addr;
1851 dest = (u32 *)fw_info;
1852 for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
1853 i++, addr += BYTES_IN_DWORD)
1854 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1858 /* Dumps the specified string to the specified buffer.
1859 * Returns the dumped size in bytes.
1861 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1864 strcpy(dump_buf, str);
1866 return (u32)strlen(str) + 1;
1869 /* Dumps zeros to align the specified buffer to dwords.
1870 * Returns the dumped size in bytes.
1872 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1874 u8 offset_in_dword, align_size;
1876 offset_in_dword = (u8)(byte_offset & 0x3);
1877 align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1879 if (dump && align_size)
1880 memset(dump_buf, 0, align_size);
1885 /* Writes the specified string param to the specified buffer.
1886 * Returns the dumped size in dwords.
1888 static u32 qed_dump_str_param(u32 *dump_buf,
1890 const char *param_name, const char *param_val)
1892 char *char_buf = (char *)dump_buf;
1895 /* Dump param name */
1896 offset += qed_dump_str(char_buf + offset, dump, param_name);
1898 /* Indicate a string param value */
1900 *(char_buf + offset) = 1;
1903 /* Dump param value */
1904 offset += qed_dump_str(char_buf + offset, dump, param_val);
1906 /* Align buffer to next dword */
1907 offset += qed_dump_align(char_buf + offset, dump, offset);
1909 return BYTES_TO_DWORDS(offset);
1912 /* Writes the specified numeric param to the specified buffer.
1913 * Returns the dumped size in dwords.
1915 static u32 qed_dump_num_param(u32 *dump_buf,
1916 bool dump, const char *param_name, u32 param_val)
1918 char *char_buf = (char *)dump_buf;
1921 /* Dump param name */
1922 offset += qed_dump_str(char_buf + offset, dump, param_name);
1924 /* Indicate a numeric param value */
1926 *(char_buf + offset) = 0;
1929 /* Align buffer to next dword */
1930 offset += qed_dump_align(char_buf + offset, dump, offset);
1932 /* Dump param value (and change offset from bytes to dwords) */
1933 offset = BYTES_TO_DWORDS(offset);
1935 *(dump_buf + offset) = param_val;
1941 /* Reads the FW version and writes it as a param to the specified buffer.
1942 * Returns the dumped size in dwords.
1944 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1945 struct qed_ptt *p_ptt,
1946 u32 *dump_buf, bool dump)
1948 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1949 char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1950 char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1951 struct fw_info fw_info = { {0}, {0} };
1954 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1955 /* Read FW image/version from PRAM in a non-reset SEMI */
1959 for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
1961 struct storm_defs *storm = &s_storm_defs[storm_id];
1963 /* Read FW version/image */
1964 if (dev_data->block_in_reset[storm->block_id])
1967 /* Read FW info for the current Storm */
1968 qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
1970 /* Create FW version/image strings */
1971 if (snprintf(fw_ver_str, sizeof(fw_ver_str),
1972 "%d_%d_%d_%d", fw_info.ver.num.major,
1973 fw_info.ver.num.minor, fw_info.ver.num.rev,
1974 fw_info.ver.num.eng) < 0)
1976 "Unexpected debug error: invalid FW version string\n");
1977 switch (fw_info.ver.image_id) {
1979 strcpy(fw_img_str, "main");
1982 strcpy(fw_img_str, "unknown");
1990 /* Dump FW version, image and timestamp */
1991 offset += qed_dump_str_param(dump_buf + offset,
1992 dump, "fw-version", fw_ver_str);
1993 offset += qed_dump_str_param(dump_buf + offset,
1994 dump, "fw-image", fw_img_str);
1995 offset += qed_dump_num_param(dump_buf + offset,
1997 "fw-timestamp", fw_info.ver.timestamp);
2002 /* Reads the MFW version and writes it as a param to the specified buffer.
2003 * Returns the dumped size in dwords.
2005 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
2006 struct qed_ptt *p_ptt,
2007 u32 *dump_buf, bool dump)
2009 char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
2012 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2013 u32 global_section_offsize, global_section_addr, mfw_ver;
2014 u32 public_data_addr, global_section_offsize_addr;
2016 /* Find MCP public data GRC address. Needs to be ORed with
2017 * MCP_REG_SCRATCH due to a HW bug.
2019 public_data_addr = qed_rd(p_hwfn,
2021 MISC_REG_SHARED_MEM_ADDR) |
2024 /* Find MCP public global section offset */
2025 global_section_offsize_addr = public_data_addr +
2026 offsetof(struct mcp_public_data,
2028 sizeof(offsize_t) * PUBLIC_GLOBAL;
2029 global_section_offsize = qed_rd(p_hwfn, p_ptt,
2030 global_section_offsize_addr);
2031 global_section_addr =
2033 (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2035 /* Read MFW version from MCP public global section */
2036 mfw_ver = qed_rd(p_hwfn, p_ptt,
2037 global_section_addr +
2038 offsetof(struct public_global, mfw_ver));
2040 /* Dump MFW version param */
2041 if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
2042 (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
2043 (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2045 "Unexpected debug error: invalid MFW version string\n");
2048 return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2051 /* Writes a section header to the specified buffer.
2052 * Returns the dumped size in dwords.
2054 static u32 qed_dump_section_hdr(u32 *dump_buf,
2055 bool dump, const char *name, u32 num_params)
2057 return qed_dump_num_param(dump_buf, dump, name, num_params);
2060 /* Writes the common global params to the specified buffer.
2061 * Returns the dumped size in dwords.
2063 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
2064 struct qed_ptt *p_ptt,
2067 u8 num_specific_global_params)
2069 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2073 /* Dump global params section header */
2074 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2075 offset += qed_dump_section_hdr(dump_buf + offset,
2076 dump, "global_params", num_params);
2079 offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2080 offset += qed_dump_mfw_ver_param(p_hwfn,
2081 p_ptt, dump_buf + offset, dump);
2082 offset += qed_dump_num_param(dump_buf + offset,
2083 dump, "tools-version", TOOLS_VERSION);
2084 offset += qed_dump_str_param(dump_buf + offset,
2087 s_chip_defs[dev_data->chip_id].name);
2088 offset += qed_dump_str_param(dump_buf + offset,
2091 s_platform_defs[dev_data->platform_id].
2094 qed_dump_num_param(dump_buf + offset, dump, "pci-func",
2100 /* Writes the "last" section (including CRC) to the specified buffer at the
2101 * given offset. Returns the dumped size in dwords.
2103 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
2105 u32 start_offset = offset;
2107 /* Dump CRC section header */
2108 offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2110 /* Calculate CRC32 and add it to the dword after the "last" section */
2112 *(dump_buf + offset) = ~crc32(0xffffffff,
2114 DWORDS_TO_BYTES(offset));
2118 return offset - start_offset;
2121 /* Update blocks reset state */
2122 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
2123 struct qed_ptt *p_ptt)
2125 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2126 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2129 /* Read reset registers */
2130 for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2131 if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2132 reg_val[i] = qed_rd(p_hwfn,
2133 p_ptt, s_reset_regs_defs[i].addr);
2135 /* Check if blocks are in reset */
2136 for (i = 0; i < MAX_BLOCK_ID; i++) {
2137 struct block_defs *block = s_block_defs[i];
2139 dev_data->block_in_reset[i] = block->has_reset_bit &&
2140 !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
2144 /* Enable / disable the Debug block */
2145 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
2146 struct qed_ptt *p_ptt, bool enable)
2148 qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2151 /* Resets the Debug block */
2152 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
2153 struct qed_ptt *p_ptt)
2155 u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2156 struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2158 dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2159 old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2161 old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
2163 qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2164 qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2167 static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
2168 struct qed_ptt *p_ptt,
2169 enum dbg_bus_frame_modes mode)
2171 qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2174 /* Enable / disable Debug Bus clients according to the specified mask
2175 * (1 = enable, 0 = disable).
2177 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
2178 struct qed_ptt *p_ptt, u32 client_mask)
2180 qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2183 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
2185 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2190 /* Get next element from modes tree buffer */
2191 ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
2192 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
2195 case INIT_MODE_OP_NOT:
2196 return !qed_is_mode_match(p_hwfn, modes_buf_offset);
2197 case INIT_MODE_OP_OR:
2198 case INIT_MODE_OP_AND:
2199 arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2200 arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2201 return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
2202 arg2) : (arg1 && arg2);
2204 return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2208 /* Returns true if the specified entity (indicated by GRC param) should be
2209 * included in the dump, false otherwise.
2211 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
2212 enum dbg_grc_params grc_param)
2214 return qed_grc_get_param(p_hwfn, grc_param) > 0;
2217 /* Returns true of the specified Storm should be included in the dump, false
2220 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
2221 enum dbg_storms storm)
2223 return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2226 /* Returns true if the specified memory should be included in the dump, false
2229 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
2230 enum block_id block_id, u8 mem_group_id)
2232 struct block_defs *block = s_block_defs[block_id];
2235 /* Check Storm match */
2236 if (block->associated_to_storm &&
2237 !qed_grc_is_storm_included(p_hwfn,
2238 (enum dbg_storms)block->storm_id))
2241 for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2242 struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2244 if (mem_group_id == big_ram->mem_group_id ||
2245 mem_group_id == big_ram->ram_mem_group_id)
2246 return qed_grc_is_included(p_hwfn, big_ram->grc_param);
2249 switch (mem_group_id) {
2250 case MEM_GROUP_PXP_ILT:
2251 case MEM_GROUP_PXP_MEM:
2252 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2254 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2255 case MEM_GROUP_PBUF:
2256 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2257 case MEM_GROUP_CAU_MEM:
2258 case MEM_GROUP_CAU_SB:
2259 case MEM_GROUP_CAU_PI:
2260 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2261 case MEM_GROUP_QM_MEM:
2262 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2263 case MEM_GROUP_CFC_MEM:
2264 case MEM_GROUP_CONN_CFC_MEM:
2265 case MEM_GROUP_TASK_CFC_MEM:
2266 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
2267 qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2268 case MEM_GROUP_IGU_MEM:
2269 case MEM_GROUP_IGU_MSIX:
2270 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2271 case MEM_GROUP_MULD_MEM:
2272 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2273 case MEM_GROUP_PRS_MEM:
2274 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2275 case MEM_GROUP_DMAE_MEM:
2276 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2277 case MEM_GROUP_TM_MEM:
2278 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2279 case MEM_GROUP_SDM_MEM:
2280 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2281 case MEM_GROUP_TDIF_CTX:
2282 case MEM_GROUP_RDIF_CTX:
2283 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2284 case MEM_GROUP_CM_MEM:
2285 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2287 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2293 /* Stalls all Storms */
2294 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
2295 struct qed_ptt *p_ptt, bool stall)
2300 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2301 if (!qed_grc_is_storm_included(p_hwfn,
2302 (enum dbg_storms)storm_id))
2305 reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
2306 SEM_FAST_REG_STALL_0_BB_K2;
2307 qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2310 msleep(STALL_DELAY_MS);
2313 /* Takes all blocks out of reset */
/* Builds, per reset register, the mask of reset bits belonging to blocks
 * that exist on this chip and have a reset bit, OR-ing in the per-chip
 * unreset value, then writes each mask to the register's UNRESET offset.
 */
2314 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
2315 struct qed_ptt *p_ptt)
2317 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2318 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2321 /* Fill reset regs values */
2322 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2323 struct block_defs *block = s_block_defs[block_id];
2325 if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
2327 reg_val[block->reset_reg] |=
2328 BIT(block->reset_bit_offset);
2331 /* Write reset registers */
2332 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
/* Reset registers that don't exist on this chip are skipped */
2333 if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2337 s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2342 s_reset_regs_defs[i].addr +
2343 RESET_REG_UNRESET_OFFSET, reg_val[i]);
2347 /* Returns the attention block data of the specified block */
/* Indexes the binary debug-data attention-blocks array by block_id and
 * returns the per-attention-type descriptor. Assumes the
 * BIN_BUF_DBG_ATTN_BLOCKS array was loaded — TODO confirm callers check
 * s_dbg_arrays validity first.
 */
2348 static const struct dbg_attn_block_type_data *
2349 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
2351 const struct dbg_attn_block *base_attn_block_arr =
2352 (const struct dbg_attn_block *)
2353 s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2355 return &base_attn_block_arr[block_id].per_type_data[attn_type];
2358 /* Returns the attention registers of the specified block */
/* Looks up the block's attention descriptor, stores the register count in
 * *num_attn_regs (out-parameter), and returns a pointer into the
 * BIN_BUF_DBG_ATTN_REGS array at the block's first register entry.
 */
2359 static const struct dbg_attn_reg *
2360 qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
2363 const struct dbg_attn_block_type_data *block_type_data =
2364 qed_get_block_attn_data(block_id, attn_type);
2366 *num_attn_regs = block_type_data->num_regs;
2368 return &((const struct dbg_attn_reg *)
2369 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
2373 /* For each block, clear the status of all parities */
/* Iterates all blocks not in reset; for each of the block's attention
 * registers whose mode matches the current configuration, reads the
 * parity status register (read-to-clear semantics — TODO confirm) so
 * that stale parity indications are cleared before the dump.
 */
2374 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2375 struct qed_ptt *p_ptt)
2377 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2378 const struct dbg_attn_reg *attn_reg_arr;
2379 u8 reg_idx, num_attn_regs;
2382 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
/* Blocks held in reset cannot be accessed */
2383 if (dev_data->block_in_reset[block_id])
2386 attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2390 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2391 const struct dbg_attn_reg *reg_data =
2392 &attn_reg_arr[reg_idx];
2393 u16 modes_buf_offset;
/* A register is conditional on a mode expression only when
 * its EVAL_MODE field is set.
 */
2397 eval_mode = GET_FIELD(reg_data->mode.data,
2398 DBG_MODE_HDR_EVAL_MODE) > 0;
2400 GET_FIELD(reg_data->mode.data,
2401 DBG_MODE_HDR_MODES_BUF_OFFSET);
2403 /* If Mode match: clear parity status */
2405 qed_is_mode_match(p_hwfn, &modes_buf_offset))
2406 qed_rd(p_hwfn, p_ptt,
2407 DWORDS_TO_BYTES(reg_data->
2413 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2414 * The following parameters are dumped:
2415 * - count: no. of dumped entries
2416 * - split: split type
2417 * - id: split ID (dumped only if split_id >= 0)
2418 * - param_name: user parameter value (dumped only if param_name != NULL
2419 * and param_val != NULL).
 *
 * When dump is false only the size is computed, nothing is written.
2421 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2423 u32 num_reg_entries,
2424 const char *split_type,
2426 const char *param_name, const char *param_val)
/* 2 mandatory params (count, split) + optional id + optional user param */
2428 u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2431 offset += qed_dump_section_hdr(dump_buf + offset,
2432 dump, "grc_regs", num_params);
2433 offset += qed_dump_num_param(dump_buf + offset,
2434 dump, "count", num_reg_entries);
2435 offset += qed_dump_str_param(dump_buf + offset,
2436 dump, "split", split_type);
2438 offset += qed_dump_num_param(dump_buf + offset,
2439 dump, "id", split_id);
2440 if (param_name && param_val)
2441 offset += qed_dump_str_param(dump_buf + offset,
2442 dump, param_name, param_val);
2447 /* Reads the specified registers into the specified buffer.
2448 * The addr and len arguments are specified in dwords.
 * Simple GRC read loop — one qed_rd() per dword (slow path; the DMAE
 * path in qed_grc_dump_addr_range() is used when available).
2450 void qed_read_regs(struct qed_hwfn *p_hwfn,
2451 struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
2455 for (i = 0; i < len; i++)
2456 buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
2459 /* Dumps the GRC registers in the specified address range.
2460 * Returns the dumped size in dwords.
2461 * The addr and len arguments are specified in dwords.
 * Prefers DMAE for large/wide-bus reads; on DMAE failure it permanently
 * disables DMAE for this dump (use_dmae = 0) and falls back to direct
 * GRC reads via qed_read_regs().
2463 static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
2464 struct qed_ptt *p_ptt,
2466 bool dump, u32 addr, u32 len, bool wide_bus)
2468 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2473 /* Print log if needed */
/* Progress log is throttled by the platform's log threshold */
2474 dev_data->num_regs_read += len;
2475 if (dev_data->num_regs_read >=
2476 s_platform_defs[dev_data->platform_id].log_thresh) {
2479 "Dumping %d registers...\n",
2480 dev_data->num_regs_read);
2481 dev_data->num_regs_read = 0;
2484 /* Try reading using DMAE */
2485 if (dev_data->use_dmae &&
2486 (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
2488 if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
2489 (u64)(uintptr_t)(dump_buf), len, 0))
/* DMAE failed once — don't retry it for the rest of this dump */
2491 dev_data->use_dmae = 0;
2494 "Failed reading from chip using DMAE, using GRC instead\n");
2497 /* Read registers */
2498 qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
2503 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2504 * The addr and len arguments are specified in dwords.
 * The header is a single dword packing the address (low bits) and the
 * length (shifted by REG_DUMP_LEN_SHIFT).
2506 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2507 bool dump, u32 addr, u32 len)
2510 *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2515 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2516 * The addr and len arguments are specified in dwords.
 * Emits the packed addr/len header dword followed by the register data
 * read via qed_grc_dump_addr_range().
2518 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2519 struct qed_ptt *p_ptt,
2521 bool dump, u32 addr, u32 len, bool wide_bus)
2525 offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2526 offset += qed_grc_dump_addr_range(p_hwfn,
2529 dump, addr, len, wide_bus);
2534 /* Dumps GRC registers sequence with skip cycle.
2535 * Returns the dumped size in dwords.
2536 * - addr: start GRC address in dwords
2537 * - total_len: total no. of dwords to dump
2538 * - read_len: no. consecutive dwords to read
2539 * - skip_len: no. of dwords to skip (and fill with zeros)
 * Alternates read_len dwords of real data with skip_len dwords of zeros
 * until total_len dwords have been produced.
2541 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2542 struct qed_ptt *p_ptt,
2547 u32 read_len, u32 skip_len)
2549 u32 offset = 0, reg_offset = 0;
2551 offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
/* Size-only pass: account for the data without reading it */
2554 return offset + total_len;
2556 while (reg_offset < total_len) {
2557 u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2559 offset += qed_grc_dump_addr_range(p_hwfn,
2562 dump, addr, curr_len, false);
2563 reg_offset += curr_len;
2566 if (reg_offset < total_len) {
/* NOTE(review): the bound here is total_len - skip_len;
 * total_len - reg_offset looks like the intended clamp —
 * verify against the upstream driver before changing.
 */
2567 curr_len = min_t(u32, skip_len, total_len - skip_len);
2568 memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2570 reg_offset += curr_len;
2578 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
/* Walks the input registers array: each conditional header gates a run of
 * dbg_dump_reg entries on a mode expression and a block-enable flag;
 * matching entries are dumped via qed_grc_dump_reg_entry() and counted in
 * *num_dumped_reg_entries.
 */
2579 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2580 struct qed_ptt *p_ptt,
2581 struct dbg_array input_regs_arr,
2584 bool block_enable[MAX_BLOCK_ID],
2585 u32 *num_dumped_reg_entries)
2587 u32 i, offset = 0, input_offset = 0;
2588 bool mode_match = true;
2590 *num_dumped_reg_entries = 0;
2592 while (input_offset < input_regs_arr.size_in_dwords) {
2593 const struct dbg_dump_cond_hdr *cond_hdr =
2594 (const struct dbg_dump_cond_hdr *)
2595 &input_regs_arr.ptr[input_offset++];
2596 u16 modes_buf_offset;
2599 /* Check mode/block */
2600 eval_mode = GET_FIELD(cond_hdr->mode.data,
2601 DBG_MODE_HDR_EVAL_MODE) > 0;
2604 GET_FIELD(cond_hdr->mode.data,
2605 DBG_MODE_HDR_MODES_BUF_OFFSET);
2606 mode_match = qed_is_mode_match(p_hwfn,
/* Skip the whole run when the mode or block doesn't match */
2610 if (!mode_match || !block_enable[cond_hdr->block_id]) {
2611 input_offset += cond_hdr->data_size;
2615 for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2616 const struct dbg_dump_reg *reg =
2617 (const struct dbg_dump_reg *)
2618 &input_regs_arr.ptr[input_offset];
/* Each entry packs address, length and wide-bus flag */
2622 addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2623 len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2624 wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2625 offset += qed_grc_dump_reg_entry(p_hwfn,
2632 (*num_dumped_reg_entries)++;
2639 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
/* Dumps one split's register entries. The header is sized first (size-only
 * pass), the entries are dumped after it, and the header is rewritten with
 * the real entry count; if nothing was dumped the section is dropped
 * entirely (returns 0).
 */
2640 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2641 struct qed_ptt *p_ptt,
2642 struct dbg_array input_regs_arr,
2645 bool block_enable[MAX_BLOCK_ID],
2646 const char *split_type_name,
2648 const char *param_name,
2649 const char *param_val)
2651 u32 num_dumped_reg_entries, offset;
2653 /* Calculate register dump header size (and skip it for now) */
2654 offset = qed_grc_dump_regs_hdr(dump_buf,
2658 split_id, param_name, param_val);
2660 /* Dump registers */
2661 offset += qed_grc_dump_regs_entries(p_hwfn,
2667 &num_dumped_reg_entries);
2669 /* Write register dump header */
2670 if (dump && num_dumped_reg_entries > 0)
2671 qed_grc_dump_regs_hdr(dump_buf,
2673 num_dumped_reg_entries,
2675 split_id, param_name, param_val);
2677 return num_dumped_reg_entries > 0 ? offset : 0;
2680 /* Dumps registers according to the input registers array. Returns the dumped
/* Walks the BIN_BUF_DBG_DUMP_REG array split by split: SPLIT_TYPE_NONE is
 * dumped directly; per-port splits pretend to each port, per-PF/VF splits
 * pretend to each function via concrete FIDs. The original PF pretend is
 * restored before returning.
 */
2683 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2684 struct qed_ptt *p_ptt,
2687 bool block_enable[MAX_BLOCK_ID],
2688 const char *param_name, const char *param_val)
2690 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2691 struct chip_platform_defs *chip_platform;
2692 u32 offset = 0, input_offset = 0;
2693 struct chip_defs *chip;
2694 u8 port_id, pf_id, vf_id;
2697 chip = &s_chip_defs[dev_data->chip_id];
2698 chip_platform = &chip->per_platform[dev_data->platform_id];
2700 while (input_offset <
2701 s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
2702 const struct dbg_dump_split_hdr *split_hdr;
2703 struct dbg_array curr_input_regs_arr;
2704 u32 split_data_size;
2708 (const struct dbg_dump_split_hdr *)
2709 &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
2711 GET_FIELD(split_hdr->hdr,
2712 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2714 GET_FIELD(split_hdr->hdr,
2715 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2716 curr_input_regs_arr.ptr =
2717 &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
2718 curr_input_regs_arr.size_in_dwords = split_data_size;
2720 switch (split_type_id) {
2721 case SPLIT_TYPE_NONE:
/* Engine-wide registers — no pretend needed */
2722 offset += qed_grc_dump_split_data(p_hwfn,
2724 curr_input_regs_arr,
2734 case SPLIT_TYPE_PORT:
/* Pretend to each port in turn before dumping */
2735 for (port_id = 0; port_id < chip_platform->num_ports;
2738 qed_port_pretend(p_hwfn, p_ptt,
2741 qed_grc_dump_split_data(p_hwfn, p_ptt,
2742 curr_input_regs_arr,
2752 case SPLIT_TYPE_PORT_PF:
/* Pretend to each PF via a concrete FID */
2753 for (pf_id = 0; pf_id < chip_platform->num_pfs;
2756 PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2759 fid = pf_id << pfid_shift;
2760 qed_fid_pretend(p_hwfn, p_ptt, fid);
2764 qed_grc_dump_split_data(p_hwfn,
2766 curr_input_regs_arr,
/* VF split: FID carries the VF-valid bit plus the VF ID */
2778 for (vf_id = 0; vf_id < chip_platform->num_vfs;
2781 PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
2783 PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
2786 fid = BIT(vfvalid_shift) |
2787 (vf_id << vfid_shift);
2788 qed_fid_pretend(p_hwfn, p_ptt, fid);
2792 qed_grc_dump_split_data(p_hwfn, p_ptt,
2793 curr_input_regs_arr,
2806 input_offset += split_data_size;
2809 /* Pretend to original PF */
2811 fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2812 qed_fid_pretend(p_hwfn, p_ptt, fid);
2818 /* Dump reset registers. Returns the dumped size in dwords. */
/* Emits one single-dword register entry per reset register that exists on
 * this chip. The header is sized first with a count of 0 and rewritten with
 * the real count afterwards.
 */
2819 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2820 struct qed_ptt *p_ptt,
2821 u32 *dump_buf, bool dump)
2823 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2824 u32 i, offset = 0, num_regs = 0;
2826 /* Calculate header size */
2827 offset += qed_grc_dump_regs_hdr(dump_buf,
2828 false, 0, "eng", -1, NULL, NULL);
2830 /* Write reset registers */
2831 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2832 if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2835 offset += qed_grc_dump_reg_entry(p_hwfn,
2840 (s_reset_regs_defs[i].addr), 1,
/* Rewrite the header with the actual number of dumped registers */
2847 qed_grc_dump_regs_hdr(dump_buf,
2848 true, num_regs, "eng", -1, NULL, NULL);
2853 /* Dump registers that are modified during GRC Dump and therefore must be
2854 * dumped first. Returns the dumped size in dwords.
 * Covers attention mask/status register pairs for every block not in reset
 * (mode permitting) and each Storm's SEM_FAST stalled-status register.
2856 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2857 struct qed_ptt *p_ptt,
2858 u32 *dump_buf, bool dump)
2860 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2861 u32 block_id, offset = 0, num_reg_entries = 0;
2862 const struct dbg_attn_reg *attn_reg_arr;
2863 u8 storm_id, reg_idx, num_attn_regs;
2865 /* Calculate header size */
2866 offset += qed_grc_dump_regs_hdr(dump_buf,
2867 false, 0, "eng", -1, NULL, NULL);
2869 /* Write parity registers */
2870 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
/* Only skip in-reset blocks on a real dump pass */
2871 if (dev_data->block_in_reset[block_id] && dump)
2874 attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2878 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2879 const struct dbg_attn_reg *reg_data =
2880 &attn_reg_arr[reg_idx];
2881 u16 modes_buf_offset;
2886 eval_mode = GET_FIELD(reg_data->mode.data,
2887 DBG_MODE_HDR_EVAL_MODE) > 0;
2889 GET_FIELD(reg_data->mode.data,
2890 DBG_MODE_HDR_MODES_BUF_OFFSET);
2892 !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2895 /* Mode match: read & dump registers */
2896 addr = reg_data->mask_address;
2897 offset += qed_grc_dump_reg_entry(p_hwfn,
2903 addr = GET_FIELD(reg_data->data,
2904 DBG_ATTN_REG_STS_ADDRESS);
2905 offset += qed_grc_dump_reg_entry(p_hwfn,
/* Two entries per attention register: mask + status */
2911 num_reg_entries += 2;
2915 /* Write Storm stall status registers */
2916 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2917 struct storm_defs *storm = &s_storm_defs[storm_id];
2920 if (dev_data->block_in_reset[storm->block_id] && dump)
2924 BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
2925 SEM_FAST_REG_STALLED);
2926 offset += qed_grc_dump_reg_entry(p_hwfn,
/* Rewrite the header with the real entry count */
2938 qed_grc_dump_regs_hdr(dump_buf,
2940 num_reg_entries, "eng", -1, NULL, NULL);
2945 /* Dumps registers that can't be represented in the debug arrays */
/* Currently the RDIF/TDIF DEBUG_ERROR_INFO areas, dumped with a skip
 * pattern (the header claims exactly 2 entries).
 */
2946 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2947 struct qed_ptt *p_ptt,
2948 u32 *dump_buf, bool dump)
2950 u32 offset = 0, addr;
2952 offset += qed_grc_dump_regs_hdr(dump_buf,
2953 dump, 2, "eng", -1, NULL, NULL);
2955 /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
2958 addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2959 offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2964 RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2967 addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2969 qed_grc_dump_reg_entry_skip(p_hwfn,
2974 TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2981 /* Dumps a GRC memory header (section and params). Returns the dumped size in
2982 * dwords. The following parameters are dumped:
2983 * - name: dumped only if it's not NULL.
2984 * - addr: in dwords, dumped only if name is NULL.
2985 * - len: in dwords, always dumped.
2986 * - width: dumped if it's not zero.
2987 * - packed: dumped only if it's not false.
2988 * - mem_group: always dumped.
2989 * - is_storm: true only if the memory is related to a Storm.
2990 * - storm_letter: valid only if is_storm is true.
 * Storm memories get a "<letter>STORM_" prefix on both name and type.
2993 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
3001 const char *mem_group,
3002 bool is_storm, char storm_letter)
/* A zero-length dump is a caller bug — flag it loudly */
3010 "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3017 /* Dump section header */
3018 offset += qed_dump_section_hdr(dump_buf + offset,
3019 dump, "grc_mem", num_params);
/* Build "<storm_letter>STORM_<name>" — the '?' placeholder is
 * overwritten with the actual Storm letter.
 */
3024 strcpy(buf, "?STORM_");
3025 buf[0] = storm_letter;
3026 strcpy(buf + strlen(buf), name);
3031 offset += qed_dump_str_param(dump_buf + offset,
/* No name supplied: identify the memory by its byte address */
3035 u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3037 offset += qed_dump_num_param(dump_buf + offset,
3038 dump, "addr", addr_in_bytes);
3042 offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
3044 /* Dump bit width */
3046 offset += qed_dump_num_param(dump_buf + offset,
3047 dump, "width", bit_width);
3051 offset += qed_dump_num_param(dump_buf + offset,
/* Same Storm-prefix treatment for the type (mem_group) string */
3056 strcpy(buf, "?STORM_");
3057 buf[0] = storm_letter;
3058 strcpy(buf + strlen(buf), mem_group);
3060 strcpy(buf, mem_group);
3063 offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
3068 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
3069 * Returns the dumped size in dwords.
3070 * The addr and len arguments are specified in dwords.
 * Thin composition: memory header followed by the raw data range.
3072 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
3073 struct qed_ptt *p_ptt,
3082 const char *mem_group,
3083 bool is_storm, char storm_letter)
3087 offset += qed_grc_dump_mem_hdr(p_hwfn,
3095 mem_group, is_storm, storm_letter);
3096 offset += qed_grc_dump_addr_range(p_hwfn,
3099 dump, addr, len, wide_bus);
3104 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
/* Walks the input memories array: each conditional header gates a run of
 * dbg_dump_mem entries on a mode expression; included entries are dumped
 * via qed_grc_dump_mem(). CCFC/TCFC memory lengths are rescaled by the
 * configured number of LCIDs/LTIDs, and Storm-owned memories are tagged
 * with their Storm letter.
 */
3105 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
3106 struct qed_ptt *p_ptt,
3107 struct dbg_array input_mems_arr,
3108 u32 *dump_buf, bool dump)
3110 u32 i, offset = 0, input_offset = 0;
3111 bool mode_match = true;
3113 while (input_offset < input_mems_arr.size_in_dwords) {
3114 const struct dbg_dump_cond_hdr *cond_hdr;
3115 u16 modes_buf_offset;
3119 cond_hdr = (const struct dbg_dump_cond_hdr *)
3120 &input_mems_arr.ptr[input_offset++];
3121 num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3123 /* Check required mode */
3124 eval_mode = GET_FIELD(cond_hdr->mode.data,
3125 DBG_MODE_HDR_EVAL_MODE) > 0;
3128 GET_FIELD(cond_hdr->mode.data,
3129 DBG_MODE_HDR_MODES_BUF_OFFSET);
3130 mode_match = qed_is_mode_match(p_hwfn,
/* Mode mismatch: skip the whole run of entries */
3135 input_offset += cond_hdr->data_size;
3139 for (i = 0; i < num_entries;
3140 i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3141 const struct dbg_dump_mem *mem =
3142 (const struct dbg_dump_mem *)
3143 &input_mems_arr.ptr[input_offset];
3144 u8 mem_group_id = GET_FIELD(mem->dword0,
3145 DBG_DUMP_MEM_MEM_GROUP_ID);
3146 bool is_storm = false, mem_wide_bus;
3147 enum dbg_grc_params grc_param;
3148 char storm_letter = 'a';
3149 enum block_id block_id;
3150 u32 mem_addr, mem_len;
3152 if (mem_group_id >= MEM_GROUPS_NUM) {
3153 DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
3157 block_id = (enum block_id)cond_hdr->block_id;
3158 if (!qed_grc_is_mem_included(p_hwfn,
3163 mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3164 mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3165 mem_wide_bus = GET_FIELD(mem->dword1,
3166 DBG_DUMP_MEM_WIDE_BUS);
3168 /* Update memory length for CCFC/TCFC memories
3169 * according to number of LCIDs/LTIDs.
3171 if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
/* Length must divide evenly into per-LCID chunks */
3172 if (mem_len % MAX_LCIDS) {
3174 "Invalid CCFC connection memory size\n");
3178 grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3179 mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3180 (mem_len / MAX_LCIDS);
3181 } else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3182 if (mem_len % MAX_LTIDS) {
3184 "Invalid TCFC task memory size\n");
3188 grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3189 mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3190 (mem_len / MAX_LTIDS);
3193 /* If memory is associated with Storm, update Storm
3197 [cond_hdr->block_id]->associated_to_storm) {
3200 s_storm_defs[s_block_defs
3201 [cond_hdr->block_id]->
3206 offset += qed_grc_dump_mem(p_hwfn,
3216 s_mem_group_names[mem_group_id],
3225 /* Dumps GRC memories according to the input array dump_mem.
3226 * Returns the dumped size in dwords.
 * Only SPLIT_TYPE_NONE memory splits are supported; other split types
 * produce a notice and are skipped.
3228 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
3229 struct qed_ptt *p_ptt,
3230 u32 *dump_buf, bool dump)
3232 u32 offset = 0, input_offset = 0;
3234 while (input_offset <
3235 s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3236 const struct dbg_dump_split_hdr *split_hdr;
3237 struct dbg_array curr_input_mems_arr;
3238 u32 split_data_size;
3241 split_hdr = (const struct dbg_dump_split_hdr *)
3242 &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3244 GET_FIELD(split_hdr->hdr,
3245 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3247 GET_FIELD(split_hdr->hdr,
3248 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3249 curr_input_mems_arr.ptr =
3250 &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3251 curr_input_mems_arr.size_in_dwords = split_data_size;
3253 switch (split_type_id) {
3254 case SPLIT_TYPE_NONE:
3255 offset += qed_grc_dump_mem_entries(p_hwfn,
3257 curr_input_mems_arr,
3264 "Dumping split memories is currently not supported\n");
3268 input_offset += split_data_size;
3274 /* Dumps GRC context data for the specified Storm.
3275 * Returns the dumped size in dwords.
3276 * The lid_size argument is specified in quad-regs.
 * Reads num_lids * lid_size dwords through the Storm's CM context
 * write-address/read-data register pair.
3278 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
3279 struct qed_ptt *p_ptt,
3288 struct storm_defs *storm = &s_storm_defs[storm_id];
3289 u32 i, lid, total_size, offset = 0;
/* Convert lid_size from quad-regs to dwords */
3294 lid_size *= BYTES_IN_DWORD;
3295 total_size = num_lids * lid_size;
3297 offset += qed_grc_dump_mem_hdr(p_hwfn,
3304 false, name, true, storm->letter);
/* Size-only pass: no reads, just account for the data */
3307 return offset + total_size;
3309 /* Dump context data */
3310 for (lid = 0; lid < num_lids; lid++) {
3311 for (i = 0; i < lid_size; i++, offset++) {
/* Select (dword index, lid) then read the value back */
3313 p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3314 *(dump_buf + offset) = qed_rd(p_hwfn,
3315 p_ptt, rd_reg_addr);
3322 /* Dumps GRC contexts. Returns the dumped size in dwords. */
/* For each included Storm, dumps four context regions: Conn AG, Conn ST
 * (sized by NUM_LCIDS) and Task AG, Task ST (sized by NUM_LTIDS).
 */
3323 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
3324 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3326 enum dbg_grc_params grc_param;
3330 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3331 struct storm_defs *storm = &s_storm_defs[storm_id];
3333 if (!qed_grc_is_storm_included(p_hwfn,
3334 (enum dbg_storms)storm_id))
3337 /* Dump Conn AG context size */
3338 grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3340 qed_grc_dump_ctx_data(p_hwfn,
3345 qed_grc_get_param(p_hwfn,
3347 storm->cm_conn_ag_ctx_lid_size,
3348 storm->cm_conn_ag_ctx_rd_addr,
3351 /* Dump Conn ST context size */
3352 grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3354 qed_grc_dump_ctx_data(p_hwfn,
3359 qed_grc_get_param(p_hwfn,
3361 storm->cm_conn_st_ctx_lid_size,
3362 storm->cm_conn_st_ctx_rd_addr,
3365 /* Dump Task AG context size */
3366 grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3368 qed_grc_dump_ctx_data(p_hwfn,
3373 qed_grc_get_param(p_hwfn,
3375 storm->cm_task_ag_ctx_lid_size,
3376 storm->cm_task_ag_ctx_rd_addr,
3379 /* Dump Task ST context size */
3380 grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3382 qed_grc_dump_ctx_data(p_hwfn,
3387 qed_grc_get_param(p_hwfn,
3389 storm->cm_task_st_ctx_lid_size,
3390 storm->cm_task_st_ctx_rd_addr,
3397 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
/* For each included Storm, dumps every IOR set from the Storm register
 * file; the "IOR_SET_?" name template gets its trailing '?' replaced by
 * the set digit ('0'..).
 */
3398 static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
3399 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3401 char buf[10] = "IOR_SET_?";
3402 u32 addr, offset = 0;
3403 u8 storm_id, set_id;
3405 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3406 struct storm_defs *storm = &s_storm_defs[storm_id];
3408 if (!qed_grc_is_storm_included(p_hwfn,
3409 (enum dbg_storms)storm_id))
3412 for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3413 addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
3414 SEM_FAST_REG_STORM_REG_FILE) +
3415 IOR_SET_OFFSET(set_id);
/* Patch the set digit into the name template */
3416 buf[strlen(buf) - 1] = '0' + set_id;
3417 offset += qed_grc_dump_mem(p_hwfn,
3436 /* Dump VFC CAM. Returns the dumped size in dwords. */
/* Reads the Storm's VFC CAM row by row: write the CAM-read command and
 * address through SEM_FAST VFC registers, then read the response dwords
 * directly into the dump buffer.
 */
3437 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3438 struct qed_ptt *p_ptt,
3439 u32 *dump_buf, bool dump, u8 storm_id)
3441 u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3442 struct storm_defs *storm = &s_storm_defs[storm_id];
3443 u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3444 u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3445 u32 row, i, offset = 0;
3447 offset += qed_grc_dump_mem_hdr(p_hwfn,
3454 false, "vfc_cam", true, storm->letter);
/* Size-only pass */
3457 return offset + total_size;
3459 /* Prepare CAM address */
3460 SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3462 for (row = 0; row < VFC_CAM_NUM_ROWS;
3463 row++, offset += VFC_CAM_RESP_DWORDS) {
3464 /* Write VFC CAM command */
3465 SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3468 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3469 cam_cmd, VFC_CAM_CMD_DWORDS);
3471 /* Write VFC CAM address */
3474 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3475 cam_addr, VFC_CAM_ADDR_DWORDS);
3477 /* Read VFC CAM read response */
3480 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3481 dump_buf + offset, VFC_CAM_RESP_DWORDS);
3487 /* Dump VFC RAM. Returns the dumped size in dwords. */
/* Mirrors qed_grc_dump_vfc_cam() but for a VFC RAM region described by
 * ram_defs: rows [base_row, base_row + num_rows) are read via the
 * SEM_FAST VFC command/address/data registers.
 */
3488 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3489 struct qed_ptt *p_ptt,
3492 u8 storm_id, struct vfc_ram_defs *ram_defs)
3494 u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3495 struct storm_defs *storm = &s_storm_defs[storm_id];
3496 u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3497 u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3498 u32 row, i, offset = 0;
3500 offset += qed_grc_dump_mem_hdr(p_hwfn,
3508 ram_defs->type_name,
3509 true, storm->letter);
3511 /* Prepare RAM address */
3512 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
/* Size-only pass */
3515 return offset + total_size;
3517 for (row = ram_defs->base_row;
3518 row < ram_defs->base_row + ram_defs->num_rows;
3519 row++, offset += VFC_RAM_RESP_DWORDS) {
3520 /* Write VFC RAM command */
3523 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3524 ram_cmd, VFC_RAM_CMD_DWORDS);
3526 /* Write VFC RAM address */
3527 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3530 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3531 ram_addr, VFC_RAM_ADDR_DWORDS);
3533 /* Read VFC RAM read response */
3536 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3537 dump_buf + offset, VFC_RAM_RESP_DWORDS);
3543 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
/* For each included Storm that has a VFC (with a platform-dependent
 * exception for PSTORM), dumps the VFC CAM plus every VFC RAM type.
 */
3544 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3545 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3547 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3551 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3552 if (!qed_grc_is_storm_included(p_hwfn,
3553 (enum dbg_storms)storm_id) ||
3554 !s_storm_defs[storm_id].has_vfc ||
3555 (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
3560 offset += qed_grc_dump_vfc_cam(p_hwfn,
3566 for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3567 offset += qed_grc_dump_vfc_ram(p_hwfn,
3572 &s_vfc_ram_defs[i]);
3578 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
/* For each RSS memory type, dumps its entries in chunks of up to
 * RSS_REG_RSS_RAM_DATA_SIZE dwords: write the RAM address register, then
 * read the data window. Entries of width 16 are marked "packed".
 */
3579 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3580 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3582 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3586 for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3587 u32 rss_addr, num_entries, total_dwords;
3588 struct rss_mem_defs *rss_defs;
3589 u32 addr, num_dwords_to_read;
3592 rss_defs = &s_rss_mem_defs[rss_mem_id];
3593 rss_addr = rss_defs->addr;
3594 num_entries = rss_defs->num_entries[dev_data->chip_id];
/* entry_width is in bits; 32 bits per dword */
3595 total_dwords = (num_entries * rss_defs->entry_width) / 32;
3596 packed = (rss_defs->entry_width == 16);
3598 offset += qed_grc_dump_mem_hdr(p_hwfn,
3604 rss_defs->entry_width,
3606 rss_defs->type_name, false, 0);
/* Size-only pass */
3610 offset += total_dwords;
3614 addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
3615 while (total_dwords) {
3616 num_dwords_to_read = min_t(u32,
3617 RSS_REG_RSS_RAM_DATA_SIZE,
/* Select the RAM address, then read the data window */
3619 qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3620 offset += qed_grc_dump_addr_range(p_hwfn,
3627 total_dwords -= num_dwords_to_read;
3635 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
/* Dumps one Big RAM instance (BRB/BTB/BMB): determines the block size
 * (256B vs. default) from the is_256b register, fills in the instance
 * name, then reads the RAM through its address/data register pair in
 * BRB_REG_BIG_RAM_DATA_SIZE chunks.
 */
3636 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3637 struct qed_ptt *p_ptt,
3638 u32 *dump_buf, bool dump, u8 big_ram_id)
3640 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3641 u32 block_size, ram_size, offset = 0, reg_val, i;
3642 char mem_name[12] = "???_BIG_RAM"
3643 char type_name[8] = "???_RAM";
3644 struct big_ram_defs *big_ram;
3646 big_ram = &s_big_ram_defs[big_ram_id];
3647 ram_size = big_ram->ram_size[dev_data->chip_id];
3649 reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3650 block_size = reg_val &
3651 BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
/* NOTE(review): strncpy overwrites only the "???" prefix here and
 * relies on the array initializers for NUL-termination; newer kernels
 * prefer strscpy for string copies — verify intent before changing.
 */
3654 strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3655 strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3657 /* Dump memory header */
3658 offset += qed_grc_dump_mem_hdr(p_hwfn,
3665 false, type_name, false, 0);
3667 /* Read and dump Big RAM data */
/* Size-only pass */
3669 return offset + ram_size;
3672 for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
/* Write the chunk index, then read the data window */
3676 qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3677 addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3678 len = BRB_REG_BIG_RAM_DATA_SIZE;
3679 offset += qed_grc_dump_addr_range(p_hwfn,
/* Dumps MCP data: scratchpad, cpu_reg_file, MCP-block registers, and a few
 * required non-MCP registers. Unless DBG_GRC_PARAM_NO_MCP is set, the MCP
 * is halted for the duration of the dump and resumed afterwards; failures
 * to halt/resume are reported but do not abort the dump.
 */
3691 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3692 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3694 bool block_enable[MAX_BLOCK_ID] = { 0 };
3695 u32 offset = 0, addr;
3696 bool halted = false;
3699 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
/* qed_mcp_halt() returns 0 on success */
3700 halted = !qed_mcp_halt(p_hwfn, p_ptt);
3702 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3705 /* Dump MCP scratchpad */
3706 offset += qed_grc_dump_mem(p_hwfn,
3711 BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3712 MCP_REG_SCRATCH_SIZE_BB_K2,
3713 false, 0, false, "MCP", false, 0);
3715 /* Dump MCP cpu_reg_file */
3716 offset += qed_grc_dump_mem(p_hwfn,
3721 BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3722 MCP_REG_CPU_REG_FILE_SIZE,
3723 false, 0, false, "MCP", false, 0);
3725 /* Dump MCP registers */
3726 block_enable[BLOCK_MCP] = true;
3727 offset += qed_grc_dump_registers(p_hwfn,
3730 dump, block_enable, "block", "MCP");
3732 /* Dump required non-MCP registers */
3733 offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3734 dump, 1, "eng", -1, "block", "MCP");
3735 addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3736 offset += qed_grc_dump_reg_entry(p_hwfn,
/* Only resume if we actually halted it above */
3745 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3746 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3751 /* Dumps the tbus indirect memory for all PHYs. */
/* For each PHY in s_phy_defs: select each (hi, lo) tbus address through
 * the PHY's address registers, then read two bytes per lo-address from
 * the data-lo/data-hi registers into the dump buffer.
 */
3752 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3753 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3755 u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3759 for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3760 u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3761 struct phy_defs *phy_defs;
3764 phy_defs = &s_phy_defs[phy_id];
3765 addr_lo_addr = phy_defs->base_addr +
3766 phy_defs->tbus_addr_lo_addr;
3767 addr_hi_addr = phy_defs->base_addr +
3768 phy_defs->tbus_addr_hi_addr;
3769 data_lo_addr = phy_defs->base_addr +
3770 phy_defs->tbus_data_lo_addr;
3771 data_hi_addr = phy_defs->base_addr +
3772 phy_defs->tbus_data_hi_addr;
3774 if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3775 phy_defs->phy_name) < 0)
3777 "Unexpected debug error: invalid PHY memory name\n");
3779 offset += qed_grc_dump_mem_hdr(p_hwfn,
3784 PHY_DUMP_SIZE_DWORDS,
3785 16, true, mem_name, false, 0);
/* Size-only pass for this PHY */
3788 offset += PHY_DUMP_SIZE_DWORDS;
/* Data is gathered byte-wise, so walk the buffer as u8 */
3792 bytes_buf = (u8 *)(dump_buf + offset);
3793 for (tbus_hi_offset = 0;
3794 tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3796 qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3797 for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3800 p_ptt, addr_lo_addr, tbus_lo_offset);
3801 *(bytes_buf++) = (u8)qed_rd(p_hwfn,
3804 *(bytes_buf++) = (u8)qed_rd(p_hwfn,
3810 offset += PHY_DUMP_SIZE_DWORDS;
/* Configures one block's debug-bus line: writes the line select, enable
 * mask, right shift, and force-valid/force-frame masks to the block's
 * debug registers.
 */
3816 static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
3817 struct qed_ptt *p_ptt,
3818 enum block_id block_id,
3822 u8 force_valid_mask, u8 force_frame_mask)
3824 struct block_defs *block = s_block_defs[block_id];
3826 qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3827 qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3828 qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3829 qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3830 qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3833 /* Dumps Static Debug data. Returns the dumped size in dwords. */
/* Aborted (returns early) if a debug-bus recording is already running.
 * Otherwise: disables all blocks' debug output, resets and configures the
 * debug block for internal-buffer capture, then for every block with a
 * valid debug client dumps all of its static debug lines (zeros if the
 * block is in reset). Debug output is disabled again on exit.
 */
3834 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3835 struct qed_ptt *p_ptt,
3836 u32 *dump_buf, bool dump)
3838 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3839 u32 block_id, line_id, offset = 0;
3841 /* Don't dump static debug if a debug bus recording is in progress */
3842 if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3846 /* Disable all blocks debug output */
3847 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3848 struct block_defs *block = s_block_defs[block_id];
3850 if (block->dbg_client_id[dev_data->chip_id] !=
3851 MAX_DBG_BUS_CLIENTS)
3852 qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
/* Set up the debug block to capture into the internal buffer */
3856 qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3857 qed_bus_set_framing_mode(p_hwfn,
3858 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3860 p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3861 qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3862 qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3865 /* Dump all static debug lines for each relevant block */
3866 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3867 struct block_defs *block = s_block_defs[block_id];
3868 struct dbg_bus_block *block_desc;
3869 u32 block_dwords, addr, len;
/* Blocks with no debug client on this chip are skipped */
3872 if (block->dbg_client_id[dev_data->chip_id] ==
3873 MAX_DBG_BUS_CLIENTS)
3876 block_desc = get_dbg_bus_block_desc(p_hwfn,
3877 (enum block_id)block_id);
3878 block_dwords = NUM_DBG_LINES(block_desc) *
3879 STATIC_DEBUG_LINE_DWORDS;
3881 /* Dump static section params */
3882 offset += qed_grc_dump_mem_hdr(p_hwfn,
3888 32, false, "STATIC", false, 0);
/* Size-only pass for this block */
3891 offset += block_dwords;
3895 /* If all lines are invalid - dump zeros */
3896 if (dev_data->block_in_reset[block_id]) {
3897 memset(dump_buf + offset, 0,
3898 DWORDS_TO_BYTES(block_dwords));
3899 offset += block_dwords;
3903 /* Enable block's client */
3904 dbg_client_id = block->dbg_client_id[dev_data->chip_id];
3905 qed_bus_enable_clients(p_hwfn,
3907 BIT(dbg_client_id));
3909 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3910 len = STATIC_DEBUG_LINE_DWORDS;
3911 for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
3913 /* Configure debug line ID */
3914 qed_config_dbg_line(p_hwfn,
3916 (enum block_id)block_id,
3917 (u8)line_id, 0xf, 0, 0, 0);
3919 /* Read debug line info */
3920 offset += qed_grc_dump_addr_range(p_hwfn,
3929 /* Disable block's client and debug output */
3930 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3931 qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
/* Leave the debug bus fully disabled on exit */
3935 qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3936 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3942 /* Performs GRC Dump to the specified buffer.
3943 * Returns the dumped size in dwords.
/* Top-level GRC dump: emits global params, reset registers, then takes
 * blocks out of reset, masks parities via MFW, and dumps each enabled
 * section (registers, memories, MCP, context, RSS, big RAM, IORs, VFC,
 * PHY, static debug). Parities are re-enabled and storms unstalled at
 * the end. When @dump is false only sizes are computed (dry run).
 * Returns DBG_STATUS_OK on success; *num_dumped_dwords receives the size.
 * NOTE(review): extraction gaps - switch cases of the port-mode read and
 * several argument lists are elided from this view.
 */
3945 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3946 struct qed_ptt *p_ptt,
3948 bool dump, u32 *num_dumped_dwords)
3950 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3951 bool parities_masked = false;
3952 u8 i, port_mode = 0;
3955 *num_dumped_dwords = 0;
3958 /* Find port mode */
3959 switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
3971 /* Update reset state */
3972 qed_update_blocks_reset_state(p_hwfn, p_ptt);
3975 /* Dump global params */
3976 offset += qed_dump_common_global_params(p_hwfn,
3978 dump_buf + offset, dump, 4);
3979 offset += qed_dump_str_param(dump_buf + offset,
3980 dump, "dump-type", "grc-dump");
3981 offset += qed_dump_num_param(dump_buf + offset,
3984 qed_grc_get_param(p_hwfn,
3985 DBG_GRC_PARAM_NUM_LCIDS));
3986 offset += qed_dump_num_param(dump_buf + offset,
3989 qed_grc_get_param(p_hwfn,
3990 DBG_GRC_PARAM_NUM_LTIDS));
3991 offset += qed_dump_num_param(dump_buf + offset,
3992 dump, "num-ports", port_mode);
3994 /* Dump reset registers (dumped before taking blocks out of reset ) */
3995 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3996 offset += qed_grc_dump_reset_regs(p_hwfn,
3998 dump_buf + offset, dump);
4000 /* Take all blocks out of reset (using reset registers) */
4002 qed_grc_unreset_blocks(p_hwfn, p_ptt);
4003 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4006 /* Disable all parities using MFW command */
4008 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4009 parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
4010 if (!parities_masked) {
/* Failing to mask is fatal only in parity-safe mode */
4012 "Failed to mask parities using MFW\n");
4013 if (qed_grc_get_param
4014 (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
4015 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4019 /* Dump modified registers (dumped before modifying them) */
4020 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4021 offset += qed_grc_dump_modified_regs(p_hwfn,
4023 dump_buf + offset, dump);
/* Storms must be stalled before reading IORs/VFC */
4027 (qed_grc_is_included(p_hwfn,
4028 DBG_GRC_PARAM_DUMP_IOR) ||
4029 qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4030 qed_grc_stall_storms(p_hwfn, p_ptt, true);
4033 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4034 bool block_enable[MAX_BLOCK_ID];
4036 /* Dump all blocks except MCP */
4037 for (i = 0; i < MAX_BLOCK_ID; i++)
4038 block_enable[i] = true;
4039 block_enable[BLOCK_MCP] = false;
4040 offset += qed_grc_dump_registers(p_hwfn,
4045 block_enable, NULL, NULL);
4047 /* Dump special registers */
4048 offset += qed_grc_dump_special_regs(p_hwfn,
4050 dump_buf + offset, dump);
4054 offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4057 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4058 offset += qed_grc_dump_mcp(p_hwfn,
4059 p_ptt, dump_buf + offset, dump);
4062 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4063 offset += qed_grc_dump_ctx(p_hwfn,
4064 p_ptt, dump_buf + offset, dump);
4066 /* Dump RSS memories */
4067 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4068 offset += qed_grc_dump_rss(p_hwfn,
4069 p_ptt, dump_buf + offset, dump);
4072 for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4073 if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4074 offset += qed_grc_dump_big_ram(p_hwfn,
4080 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4081 offset += qed_grc_dump_iors(p_hwfn,
4082 p_ptt, dump_buf + offset, dump);
4085 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4086 offset += qed_grc_dump_vfc(p_hwfn,
4087 p_ptt, dump_buf + offset, dump);
/* PHY dump is only supported on K2 ASIC */
4090 if (qed_grc_is_included(p_hwfn,
4091 DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
4092 CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4093 offset += qed_grc_dump_phy(p_hwfn,
4094 p_ptt, dump_buf + offset, dump);
4096 /* Dump static debug data */
4097 if (qed_grc_is_included(p_hwfn,
4098 DBG_GRC_PARAM_DUMP_STATIC) &&
4099 dev_data->bus.state == DBG_BUS_STATE_IDLE)
4100 offset += qed_grc_dump_static_debug(p_hwfn,
4102 dump_buf + offset, dump);
4104 /* Dump last section */
4105 offset += qed_dump_last_section(dump_buf, offset, dump);
4108 /* Unstall storms */
4109 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4110 qed_grc_stall_storms(p_hwfn, p_ptt, false);
4112 /* Clear parity status */
4113 qed_grc_clear_all_prty(p_hwfn, p_ptt);
4115 /* Enable all parities using MFW command */
4116 if (parities_masked)
4117 qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
4120 *num_dumped_dwords = offset;
4122 return DBG_STATUS_OK;
4125 /* Writes the specified failing Idle Check rule to the specified buffer.
4126 * Returns the dumped size in dwords.
/* Emits a result header (rule id, failing entry id, severity), then the
 * already-read condition register values, then reads and appends the
 * rule's info registers (skipping those whose block is in reset or whose
 * mode doesn't match).
 * NOTE(review): "®s" below is mojibake for "&regs" - the HTML entity
 * "&reg;" was rendered as the registered-trademark sign during
 * extraction; the source encoding should be repaired.
 */
4128 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
4129 struct qed_ptt *p_ptt,
4134 const struct dbg_idle_chk_rule *rule,
4135 u16 fail_entry_id, u32 *cond_reg_values)
4137 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4138 const struct dbg_idle_chk_cond_reg *cond_regs;
4139 const struct dbg_idle_chk_info_reg *info_regs;
4140 u32 i, next_reg_offset = 0, offset = 0;
4141 struct dbg_idle_chk_result_hdr *hdr;
4142 const union dbg_idle_chk_reg *regs;
4145 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
/* Rule's registers: condition regs first, then info regs */
4146 regs = &((const union dbg_idle_chk_reg *)
4147 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4148 cond_regs = ®s[0].cond_reg;
4149 info_regs = ®s[rule->num_cond_regs].info_reg;
4151 /* Dump rule data */
4153 memset(hdr, 0, sizeof(*hdr));
4154 hdr->rule_id = rule_id;
4155 hdr->mem_entry_id = fail_entry_id;
4156 hdr->severity = rule->severity;
4157 hdr->num_dumped_cond_regs = rule->num_cond_regs;
4160 offset += IDLE_CHK_RESULT_HDR_DWORDS;
4162 /* Dump condition register values */
4163 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4164 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4165 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4167 reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4168 (dump_buf + offset);
4170 /* Write register header */
4172 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
4177 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4178 memset(reg_hdr, 0, sizeof(*reg_hdr));
4179 reg_hdr->start_entry = reg->start_entry;
4180 reg_hdr->size = reg->entry_size;
/* IS_MEM flag marks registers that are really memory entries */
4181 SET_FIELD(reg_hdr->data,
4182 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
4183 reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4184 SET_FIELD(reg_hdr->data,
4185 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4187 /* Write register values */
4188 for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4189 dump_buf[offset] = cond_reg_values[next_reg_offset];
4192 /* Dump info register values */
4193 for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4194 const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4197 /* Check if register's block is in reset */
4199 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4203 block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4204 if (block_id >= MAX_BLOCK_ID) {
4205 DP_NOTICE(p_hwfn, "Invalid block_id\n");
4209 if (!dev_data->block_in_reset[block_id]) {
4210 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4211 bool wide_bus, eval_mode, mode_match = true;
4212 u16 modes_buf_offset;
4215 reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4216 (dump_buf + offset);
/* Only dump if the register's mode (if any) matches */
4219 eval_mode = GET_FIELD(reg->mode.data,
4220 DBG_MODE_HDR_EVAL_MODE) > 0;
4223 GET_FIELD(reg->mode.data,
4224 DBG_MODE_HDR_MODES_BUF_OFFSET);
4226 qed_is_mode_match(p_hwfn,
4233 addr = GET_FIELD(reg->data,
4234 DBG_IDLE_CHK_INFO_REG_ADDRESS);
4235 wide_bus = GET_FIELD(reg->data,
4236 DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4238 /* Write register header */
4239 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4240 hdr->num_dumped_info_regs++;
4241 memset(reg_hdr, 0, sizeof(*reg_hdr));
4242 reg_hdr->size = reg->size;
/* Info regs get IDs above the condition-register range */
4243 SET_FIELD(reg_hdr->data,
4244 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
4245 rule->num_cond_regs + reg_id);
4247 /* Write register values */
4248 offset += qed_grc_dump_addr_range(p_hwfn,
4253 reg->size, wide_bus);
4260 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
/* For each input rule: verifies all condition-register blocks are out of
 * reset, reads every entry of each condition register, evaluates the
 * rule's condition function from cond_arr[], and dumps a failure record
 * via qed_idle_chk_dump_failure() when the condition fires.
 * *num_failing_rules counts failing entries across all rules.
 * NOTE(review): "®s" below is mojibake for "&regs" (HTML entity
 * "&reg;") - the encoding should be repaired.
 */
4262 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4263 u32 *dump_buf, bool dump,
4264 const struct dbg_idle_chk_rule *input_rules,
4265 u32 num_input_rules, u32 *num_failing_rules)
4267 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4268 u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4273 *num_failing_rules = 0;
4275 for (i = 0; i < num_input_rules; i++) {
4276 const struct dbg_idle_chk_cond_reg *cond_regs;
4277 const struct dbg_idle_chk_rule *rule;
4278 const union dbg_idle_chk_reg *regs;
4279 u16 num_reg_entries = 1;
4280 bool check_rule = true;
4281 const u32 *imm_values;
4283 rule = &input_rules[i];
4284 regs = &((const union dbg_idle_chk_reg *)
4285 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
4287 cond_regs = ®s[0].cond_reg;
4288 imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
4291 /* Check if all condition register blocks are out of reset, and
4292 * find maximal number of entries (all condition registers that
4293 * are memories must have the same size, which is > 1).
4295 for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
4298 GET_FIELD(cond_regs[reg_id].data,
4299 DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4301 if (block_id >= MAX_BLOCK_ID) {
4302 DP_NOTICE(p_hwfn, "Invalid block_id\n");
4306 check_rule = !dev_data->block_in_reset[block_id];
4307 if (cond_regs[reg_id].num_entries > num_reg_entries)
4308 num_reg_entries = cond_regs[reg_id].num_entries;
/* In dry-run mode (dump == false) an unverifiable rule is sized as if
 * every entry failed, to guarantee a big-enough buffer.
 */
4311 if (!check_rule && dump)
4315 u32 entry_dump_size =
4316 qed_idle_chk_dump_failure(p_hwfn,
4325 offset += num_reg_entries * entry_dump_size;
4326 (*num_failing_rules) += num_reg_entries;
4330 /* Go over all register entries (number of entries is the same
4331 * for all condition registers).
4333 for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4334 u32 next_reg_offset = 0;
4336 /* Read current entry of all condition registers */
4337 for (reg_id = 0; reg_id < rule->num_cond_regs;
4339 const struct dbg_idle_chk_cond_reg *reg =
4341 u32 padded_entry_size, addr;
4344 /* Find GRC address (if it's a memory, the
4345 * address of the specific entry is calculated).
4347 addr = GET_FIELD(reg->data,
4348 DBG_IDLE_CHK_COND_REG_ADDRESS);
4350 GET_FIELD(reg->data,
4351 DBG_IDLE_CHK_COND_REG_WIDE_BUS);
/* Memory entries are padded to a power-of-two stride */
4352 if (reg->num_entries > 1 ||
4353 reg->start_entry > 0) {
4355 reg->entry_size > 1 ?
4356 roundup_pow_of_two(reg->entry_size) :
4358 addr += (reg->start_entry + entry_id) *
4362 /* Read registers */
4363 if (next_reg_offset + reg->entry_size >=
4364 IDLE_CHK_MAX_ENTRIES_SIZE) {
4366 "idle check registers entry is too large\n");
4371 qed_grc_dump_addr_range(p_hwfn, p_ptt,
4379 /* Call rule condition function.
4380 * If returns true, it's a failure.
4382 if ((*cond_arr[rule->cond_id]) (cond_reg_values,
4384 offset += qed_idle_chk_dump_failure(p_hwfn,
4392 (*num_failing_rules)++;
4400 /* Performs Idle Check Dump to the specified buffer.
4401 * Returns the dumped size in dwords.
/* Walks the binary idle-check rules array (BIN_BUF_DBG_IDLE_CHK_RULES):
 * each chunk has a condition header with an optional mode; matching
 * chunks are evaluated via qed_idle_chk_dump_rule_entries(). The
 * "num_rules" parameter is written as 0 first and overwritten at the end
 * with the accumulated failing-rule count.
 */
4403 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
4404 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4406 u32 num_failing_rules_offset, offset = 0, input_offset = 0;
4407 u32 num_failing_rules = 0;
4409 /* Dump global params */
4410 offset += qed_dump_common_global_params(p_hwfn,
4412 dump_buf + offset, dump, 1);
4413 offset += qed_dump_str_param(dump_buf + offset,
4414 dump, "dump-type", "idle-chk");
4416 /* Dump idle check section header with a single parameter */
4417 offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
/* Remember where "num_rules" lives so it can be patched later */
4418 num_failing_rules_offset = offset;
4419 offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4421 while (input_offset <
4422 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4423 const struct dbg_idle_chk_cond_hdr *cond_hdr =
4424 (const struct dbg_idle_chk_cond_hdr *)
4425 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
4427 bool eval_mode, mode_match = true;
4428 u32 curr_failing_rules;
4429 u16 modes_buf_offset;
/* Evaluate the chunk's mode expression, if it has one */
4432 eval_mode = GET_FIELD(cond_hdr->mode.data,
4433 DBG_MODE_HDR_EVAL_MODE) > 0;
4436 GET_FIELD(cond_hdr->mode.data,
4437 DBG_MODE_HDR_MODES_BUF_OFFSET);
4438 mode_match = qed_is_mode_match(p_hwfn,
4444 qed_idle_chk_dump_rule_entries(p_hwfn,
4448 (const struct dbg_idle_chk_rule *)
4449 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
4451 cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
4452 &curr_failing_rules);
4453 num_failing_rules += curr_failing_rules;
4456 input_offset += cond_hdr->data_size;
4459 /* Overwrite num_rules parameter */
4461 qed_dump_num_param(dump_buf + num_failing_rules_offset,
4462 dump, "num_rules", num_failing_rules);
4464 /* Dump last section */
4465 offset += qed_dump_last_section(dump_buf, offset, dump);
4470 /* Finds the meta data image in NVRAM */
/* Issues DRV_MSG_CODE_NVM_GET_FILE_ATT via the MCP mailbox to locate the
 * requested image. On success fills *nvram_offset_bytes/*nvram_size_bytes
 * from the returned file attributes. Fails if the MCP response is not
 * FW_MSG_CODE_NVM_OK or if the image size is not dword-aligned.
 */
4471 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
4472 struct qed_ptt *p_ptt,
4474 u32 *nvram_offset_bytes,
4475 u32 *nvram_size_bytes)
4477 u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4478 struct mcp_file_att file_att;
4481 /* Call NVRAM get file command */
4482 nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
4484 DRV_MSG_CODE_NVM_GET_FILE_ATT,
4488 &ret_txn_size, (u32 *)&file_att);
4490 /* Check response */
4492 (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4493 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4495 /* Update return values */
4496 *nvram_offset_bytes = file_att.nvm_start_addr;
4497 *nvram_size_bytes = file_att.len;
4501 "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
4502 image_type, *nvram_offset_bytes, *nvram_size_bytes);
4504 /* Check alignment */
4505 if (*nvram_size_bytes & 0x3)
4506 return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4508 return DBG_STATUS_OK;
4511 /* Reads data from NVRAM */
/* Reads nvram_size_bytes starting at nvram_offset_bytes into ret_buf,
 * in chunks of at most MCP_DRV_NVM_BUF_LEN bytes per
 * DRV_MSG_CODE_NVM_READ_NVRAM mailbox command. Any command failure or
 * non-NVM_OK response aborts with DBG_STATUS_NVRAM_READ_FAILED.
 */
4512 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
4513 struct qed_ptt *p_ptt,
4514 u32 nvram_offset_bytes,
4515 u32 nvram_size_bytes, u32 *ret_buf)
4517 u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4518 s32 bytes_left = nvram_size_bytes;
4519 u32 read_offset = 0;
4523 "nvram_read: reading image of size %d bytes from NVRAM\n",
/* Clamp each transaction to the mailbox buffer size */
4529 MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4531 /* Call NVRAM read command */
4532 if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4533 DRV_MSG_CODE_NVM_READ_NVRAM,
4534 (nvram_offset_bytes +
4537 DRV_MB_PARAM_NVM_LEN_OFFSET),
4538 &ret_mcp_resp, &ret_mcp_param,
4540 (u32 *)((u8 *)ret_buf + read_offset)))
4541 return DBG_STATUS_NVRAM_READ_FAILED;
4543 /* Check response */
4544 if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4545 return DBG_STATUS_NVRAM_READ_FAILED;
4547 /* Update read offset */
4548 read_offset += ret_read_size;
4549 bytes_left -= ret_read_size;
4550 } while (bytes_left > 0);
4552 return DBG_STATUS_OK;
4555 /* Get info on the MCP Trace data in the scratchpad:
4556 * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4557 * - trace_data_size (OUT): trace data size in bytes (without the header)
/* Validates the trace section by its signature (MFW_TRACE_SIGNATURE)
 * before trusting the size field; returns
 * DBG_STATUS_INVALID_TRACE_SIGNATURE on mismatch.
 */
4559 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4560 struct qed_ptt *p_ptt,
4561 u32 *trace_data_grc_addr,
4562 u32 *trace_data_size)
4564 u32 spad_trace_offsize, signature;
4566 /* Read trace section offsize structure from MCP scratchpad */
4567 spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4569 /* Extract trace section address from offsize (in scratchpad) */
4570 *trace_data_grc_addr =
4571 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4573 /* Read signature from MCP trace section */
4574 signature = qed_rd(p_hwfn, p_ptt,
4575 *trace_data_grc_addr +
4576 offsetof(struct mcp_trace, signature));
4578 if (signature != MFW_TRACE_SIGNATURE)
4579 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4581 /* Read trace size from MCP trace section */
4582 *trace_data_size = qed_rd(p_hwfn,
4584 *trace_data_grc_addr +
4585 offsetof(struct mcp_trace, size));
4587 return DBG_STATUS_OK;
4590 /* Reads MCP trace meta data image from NVRAM
4591 * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4592 * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4593 * loaded from file).
4594 * - trace_meta_size (OUT): size in bytes of the trace meta data.
/* The running bundle ID is read from scratchpad just past the trace data;
 * it selects between the two possible NVRAM meta images
 * (NVM_TYPE_MFW_TRACE1/2), which is then located via
 * qed_find_nvram_image().
 */
4596 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4597 struct qed_ptt *p_ptt,
4598 u32 trace_data_size_bytes,
4599 u32 *running_bundle_id,
4600 u32 *trace_meta_offset,
4601 u32 *trace_meta_size)
4603 u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4605 /* Read MCP trace section offsize structure from MCP scratchpad */
4606 spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4608 /* Find running bundle ID */
4610 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4611 QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4612 *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
/* Only bundle IDs 0 and 1 are valid */
4613 if (*running_bundle_id > 1)
4614 return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4616 /* Find image in NVRAM */
4618 (*running_bundle_id ==
4619 DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4620 return qed_find_nvram_image(p_hwfn,
4623 trace_meta_offset, trace_meta_size);
4626 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
/* After reading the raw image via qed_nvram_read(), walks the meta layout
 * byte-wise to validate it: a leading NVM_MAGIC_VALUE signature, a module
 * count, per-module length-prefixed entries, and a trailing signature.
 * Either bad signature yields DBG_STATUS_INVALID_TRACE_SIGNATURE.
 */
4627 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4628 struct qed_ptt *p_ptt,
4629 u32 nvram_offset_in_bytes,
4630 u32 size_in_bytes, u32 *buf)
4632 u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4633 enum dbg_status status;
4636 /* Read meta data from NVRAM */
4637 status = qed_nvram_read(p_hwfn,
4639 nvram_offset_in_bytes, size_in_bytes, buf);
4640 if (status != DBG_STATUS_OK)
4643 /* Extract and check first signature */
4644 signature = qed_read_unaligned_dword(byte_buf);
4645 byte_buf += sizeof(signature);
4646 if (signature != NVM_MAGIC_VALUE)
4647 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4649 /* Extract number of modules */
4650 modules_num = *(byte_buf++);
4652 /* Skip all modules */
4653 for (i = 0; i < modules_num; i++) {
/* Each module record is a 1-byte length followed by the name */
4654 module_len = *(byte_buf++);
4655 byte_buf += module_len;
4658 /* Extract and check second signature */
4659 signature = qed_read_unaligned_dword(byte_buf);
4660 byte_buf += sizeof(signature);
4661 if (signature != NVM_MAGIC_VALUE)
4662 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4664 return DBG_STATUS_OK;
4667 /* Dump MCP Trace */
/* Dumps the MCP trace: global params, the trace data copied from the MCP
 * scratchpad (with the MCP halted for consistency when possible), and the
 * trace meta image from NVRAM. When MCP access is blocked
 * (DBG_GRC_PARAM_NO_MCP), the dump proceeds without the meta section and
 * the return status reflects that the meta is missing.
 */
4668 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4669 struct qed_ptt *p_ptt,
4671 bool dump, u32 *num_dumped_dwords)
4673 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4674 u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4675 u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4676 enum dbg_status status;
4680 *num_dumped_dwords = 0;
4682 mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4684 /* Get trace data info */
4685 status = qed_mcp_trace_get_data_info(p_hwfn,
4687 &trace_data_grc_addr,
4688 &trace_data_size_bytes);
4689 if (status != DBG_STATUS_OK)
4692 /* Dump global params */
4693 offset += qed_dump_common_global_params(p_hwfn,
4695 dump_buf + offset, dump, 1);
4696 offset += qed_dump_str_param(dump_buf + offset,
4697 dump, "dump-type", "mcp-trace");
4699 /* Halt MCP while reading from scratchpad so the read data will be
4700 * consistent. if halt fails, MCP trace is taken anyway, with a small
4701 * risk that it may be corrupt.
4703 if (dump && mcp_access) {
4704 halted = !qed_mcp_halt(p_hwfn, p_ptt);
4706 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4709 /* Find trace data size */
4710 trace_data_size_dwords =
4711 DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4714 /* Dump trace data section header and param */
4715 offset += qed_dump_section_hdr(dump_buf + offset,
4716 dump, "mcp_trace_data", 1);
4717 offset += qed_dump_num_param(dump_buf + offset,
4718 dump, "size", trace_data_size_dwords);
4720 /* Read trace data from scratchpad into dump buffer */
4721 offset += qed_grc_dump_addr_range(p_hwfn,
4725 BYTES_TO_DWORDS(trace_data_grc_addr),
4726 trace_data_size_dwords, false);
4728 /* Resume MCP (only if halt succeeded) */
4729 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4730 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4732 /* Dump trace meta section header */
4733 offset += qed_dump_section_hdr(dump_buf + offset,
4734 dump, "mcp_trace_meta", 1);
4736 /* If MCP Trace meta size parameter was set, use it.
4737 * Otherwise, read trace meta.
4738 * trace_meta_size_bytes is dword-aligned.
4740 trace_meta_size_bytes =
4741 qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
4742 if ((!trace_meta_size_bytes || dump) && mcp_access) {
4743 status = qed_mcp_trace_get_meta_info(p_hwfn,
4745 trace_data_size_bytes,
4747 &trace_meta_offset_bytes,
4748 &trace_meta_size_bytes);
4749 if (status == DBG_STATUS_OK)
4750 trace_meta_size_dwords =
4751 BYTES_TO_DWORDS(trace_meta_size_bytes);
4754 /* Dump trace meta size param */
4755 offset += qed_dump_num_param(dump_buf + offset,
4756 dump, "size", trace_meta_size_dwords);
4758 /* Read trace meta image into dump buffer */
4759 if (dump && trace_meta_size_dwords)
4760 status = qed_mcp_trace_read_meta(p_hwfn,
4762 trace_meta_offset_bytes,
4763 trace_meta_size_bytes,
4765 if (status == DBG_STATUS_OK)
4766 offset += trace_meta_size_dwords;
4768 /* Dump last section */
4769 offset += qed_dump_last_section(dump_buf, offset, dump);
4771 *num_dumped_dwords = offset;
4773 /* If no mcp access, indicate that the dump doesn't contain the meta
/* Meta section missing -> report NVRAM_GET_IMAGE_FAILED to the caller */
4776 return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
/* Dumps the GRC trace (reg) FIFO. The FIFO depth cannot be queried, so in
 * size-only mode the full REG_FIFO_DEPTH_DWORDS is reserved; in dump mode
 * elements are drained one at a time while the FIFO reports valid data,
 * and the "size" param is patched afterwards with the actual count.
 */
4780 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4781 struct qed_ptt *p_ptt,
4783 bool dump, u32 *num_dumped_dwords)
4785 u32 dwords_read, size_param_offset, offset = 0, addr, len;
4788 *num_dumped_dwords = 0;
4790 /* Dump global params */
4791 offset += qed_dump_common_global_params(p_hwfn,
4793 dump_buf + offset, dump, 1);
4794 offset += qed_dump_str_param(dump_buf + offset,
4795 dump, "dump-type", "reg-fifo");
4797 /* Dump fifo data section header and param. The size param is 0 for
4798 * now, and is overwritten after reading the FIFO.
4800 offset += qed_dump_section_hdr(dump_buf + offset,
4801 dump, "reg_fifo_data", 1);
4802 size_param_offset = offset;
4803 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4806 /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4807 * test how much data is available, except for reading it.
4809 offset += REG_FIFO_DEPTH_DWORDS;
4813 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4814 GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4816 /* Pull available data from fifo. Use DMAE since this is widebus memory
4817 * and must be accessed atomically. Test for dwords_read not passing
4818 * buffer size since more entries could be added to the buffer as we are
4821 addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4822 len = REG_FIFO_ELEMENT_DWORDS;
4823 for (dwords_read = 0;
4824 fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4825 dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4826 offset += qed_grc_dump_addr_range(p_hwfn,
/* Re-check the valid-data flag after each element */
4833 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4834 GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
/* Patch the "size" param with the number of dwords actually read */
4837 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4840 /* Dump last section */
4841 offset += qed_dump_last_section(dump_buf, offset, dump);
4843 *num_dumped_dwords = offset;
4845 return DBG_STATUS_OK;
/* Dumps the IGU error-handling FIFO. Mirrors qed_reg_fifo_dump(): reserve
 * the full depth in size-only mode, otherwise drain elements while
 * IGU_REG_ERROR_HANDLING_DATA_VALID reports data, then patch "size".
 */
4849 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4850 struct qed_ptt *p_ptt,
4852 bool dump, u32 *num_dumped_dwords)
4854 u32 dwords_read, size_param_offset, offset = 0, addr, len;
4857 *num_dumped_dwords = 0;
4859 /* Dump global params */
4860 offset += qed_dump_common_global_params(p_hwfn,
4862 dump_buf + offset, dump, 1);
4863 offset += qed_dump_str_param(dump_buf + offset,
4864 dump, "dump-type", "igu-fifo");
4866 /* Dump fifo data section header and param. The size param is 0 for
4867 * now, and is overwritten after reading the FIFO.
4869 offset += qed_dump_section_hdr(dump_buf + offset,
4870 dump, "igu_fifo_data", 1);
4871 size_param_offset = offset;
4872 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4875 /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4876 * test how much data is available, except for reading it.
4878 offset += IGU_FIFO_DEPTH_DWORDS;
4882 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4883 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4885 /* Pull available data from fifo. Use DMAE since this is widebus memory
4886 * and must be accessed atomically. Test for dwords_read not passing
4887 * buffer size since more entries could be added to the buffer as we are
4890 addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4891 len = IGU_FIFO_ELEMENT_DWORDS;
4892 for (dwords_read = 0;
4893 fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4894 dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4895 offset += qed_grc_dump_addr_range(p_hwfn,
/* Re-check the valid-data flag after each element */
4902 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4903 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
/* Patch the "size" param with the number of dwords actually read */
4906 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4909 /* Dump last section */
4910 offset += qed_dump_last_section(dump_buf, offset, dump);
4912 *num_dumped_dwords = offset;
4914 return DBG_STATUS_OK;
4917 /* Protection Override dump */
/* Dumps the GRC protection-override window: reads the number of valid
 * override entries, copies the window contents into dump_buf, then
 * patches the "size" param with the actual dword count. In size-only
 * mode the full PROTECTION_OVERRIDE_DEPTH_DWORDS is reserved.
 */
4918 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4919 struct qed_ptt *p_ptt,
4922 u32 *num_dumped_dwords)
4924 u32 size_param_offset, override_window_dwords, offset = 0, addr;
4926 *num_dumped_dwords = 0;
4928 /* Dump global params */
4929 offset += qed_dump_common_global_params(p_hwfn,
4931 dump_buf + offset, dump, 1);
4932 offset += qed_dump_str_param(dump_buf + offset,
4933 dump, "dump-type", "protection-override");
4935 /* Dump data section header and param. The size param is 0 for now,
4936 * and is overwritten after reading the data.
4938 offset += qed_dump_section_hdr(dump_buf + offset,
4939 dump, "protection_override_data", 1);
4940 size_param_offset = offset;
4941 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4944 offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4948 /* Add override window info to buffer */
4949 override_window_dwords =
4950 qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4951 PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4952 addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4953 offset += qed_grc_dump_addr_range(p_hwfn,
4958 override_window_dwords,
4960 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4961 override_window_dwords);
4963 /* Dump last section */
4964 offset += qed_dump_last_section(dump_buf, offset, dump);
4966 *num_dumped_dwords = offset;
4968 return DBG_STATUS_OK;
4971 /* Performs FW Asserts Dump to the specified buffer.
4972 * Returns the dumped size in dwords.
/* For each storm whose block is out of reset: reads the storm's fw_info,
 * locates the FW-asserts list in the storm's fast-memory INT RAM, and
 * dumps the most recent list element. Each storm gets its own
 * "fw_asserts" section tagged with the storm letter.
 */
4974 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4975 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4977 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4978 struct fw_asserts_ram_section *asserts;
4979 char storm_letter_str[2] = "?";
4980 struct fw_info fw_info;
4984 /* Dump global params */
4985 offset += qed_dump_common_global_params(p_hwfn,
4987 dump_buf + offset, dump, 1);
4988 offset += qed_dump_str_param(dump_buf + offset,
4989 dump, "dump-type", "fw-asserts");
4991 /* Find Storm dump size */
4992 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4993 u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
4994 struct storm_defs *storm = &s_storm_defs[storm_id];
4995 u32 last_list_idx, addr;
/* Skip storms whose block is held in reset */
4997 if (dev_data->block_in_reset[storm->block_id])
5000 /* Read FW info for the current Storm */
5001 qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
5003 asserts = &fw_info.fw_asserts_section;
5005 /* Dump FW Asserts section header and params */
5006 storm_letter_str[0] = storm->letter;
5007 offset += qed_dump_section_hdr(dump_buf + offset,
5008 dump, "fw_asserts", 2);
5009 offset += qed_dump_str_param(dump_buf + offset,
5010 dump, "storm", storm_letter_str);
5011 offset += qed_dump_num_param(dump_buf + offset,
5014 asserts->list_element_dword_size);
5016 /* Read and dump FW Asserts data */
5018 offset += asserts->list_element_dword_size;
/* Assert list lives in the storm's fast-memory INT RAM */
5022 fw_asserts_section_addr = storm->sem_fast_mem_addr +
5023 SEM_FAST_REG_INT_RAM +
5024 RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
5025 next_list_idx_addr = fw_asserts_section_addr +
5026 DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
5027 next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
/* next_list_idx points one past the newest entry; wrap to the list
 * end when it is zero.
 */
5028 last_list_idx = (next_list_idx > 0 ?
5030 asserts->list_num_elements) - 1;
5031 addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
5032 asserts->list_dword_offset +
5033 last_list_idx * asserts->list_element_dword_size;
5035 qed_grc_dump_addr_range(p_hwfn, p_ptt,
5038 asserts->list_element_dword_size,
5042 /* Dump last section */
5043 offset += qed_dump_last_section(dump_buf, offset, dump);
5048 /***************************** Public Functions *******************************/
/* Registers the firmware debug binary: for each buffer type, records a
 * pointer into the binary (header offset) and its size in dwords in the
 * global s_dbg_arrays table used by all dump routines.
 */
5050 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
5052 struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
5055 /* convert binary data to debug arrays */
5056 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
5057 s_dbg_arrays[buf_id].ptr =
5058 (u32 *)(bin_ptr + buf_array[buf_id].offset);
5059 s_dbg_arrays[buf_id].size_in_dwords =
5060 BYTES_TO_DWORDS(buf_array[buf_id].length);
5063 return DBG_STATUS_OK;
5066 /* Assign default GRC param values */
/* Resets every non-persistent GRC dump parameter to its per-chip default
 * from s_grc_param_defs; persistent params keep their current value.
 */
5067 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
5069 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5072 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5073 if (!s_grc_param_defs[i].is_persistent)
5074 dev_data->grc.param_val[i] =
5075 s_grc_param_defs[i].default_val[dev_data->chip_id];
/* Computes the buffer size (in dwords) needed for a GRC dump by running
 * qed_grc_dump() in dry-run mode (dump == false). Requires the debug
 * binary arrays to have been registered via qed_dbg_set_bin_ptr().
 */
5078 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5079 struct qed_ptt *p_ptt,
5082 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5086 if (status != DBG_STATUS_OK)
/* All arrays used by the GRC dump must be present */
5089 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5090 !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5091 !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5092 !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5093 !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5094 return DBG_STATUS_DBG_ARRAY_NOT_SET;
5096 return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Public GRC dump entry point: sizes the dump first, rejects a too-small
 * caller buffer, performs the real dump, and finally restores the GRC
 * parameters to their defaults regardless of the dump's outcome.
 */
5099 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5100 struct qed_ptt *p_ptt,
5102 u32 buf_size_in_dwords,
5103 u32 *num_dumped_dwords)
5105 u32 needed_buf_size_in_dwords;
5106 enum dbg_status status;
5108 *num_dumped_dwords = 0;
5110 status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5112 &needed_buf_size_in_dwords);
5113 if (status != DBG_STATUS_OK)
5116 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5117 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
/* Real dump (dump == true) into the caller's buffer */
5120 status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5122 /* Revert GRC params to their default */
5123 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes (and caches in dev_data->idle_chk) the buffer size in dwords
 * needed for an idle-check dump. The sizing pass (dump == false) is run only
 * once; subsequent calls return the cached value.
 */
5128 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5129 struct qed_ptt *p_ptt,
5132 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5133 struct idle_chk_data *idle_chk;
5134 enum dbg_status status;
5136 idle_chk = &dev_data->idle_chk;
5139 status = qed_dbg_dev_init(p_hwfn, p_ptt);
5140 if (status != DBG_STATUS_OK)
5143 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5144 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5145 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5146 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5147 return DBG_STATUS_DBG_ARRAY_NOT_SET;
/* Size is computed once and cached - the rule set does not change */
5149 if (!idle_chk->buf_size_set) {
5150 idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5151 p_ptt, NULL, false);
5152 idle_chk->buf_size_set = true;
5155 *buf_size = idle_chk->buf_size;
5157 return DBG_STATUS_OK;
/* Performs an idle-check dump into the caller-supplied buffer: sizes the
 * dump, validates the buffer, refreshes the blocks' reset state, runs the
 * idle check, and reverts GRC params to defaults.
 */
5160 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5161 struct qed_ptt *p_ptt,
5163 u32 buf_size_in_dwords,
5164 u32 *num_dumped_dwords)
5166 u32 needed_buf_size_in_dwords;
5167 enum dbg_status status;
5169 *num_dumped_dwords = 0;
5171 status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5173 &needed_buf_size_in_dwords);
5174 if (status != DBG_STATUS_OK)
5177 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5178 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5180 /* Update reset state */
5181 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5183 /* Idle Check Dump */
5184 *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5186 /* Revert GRC params to their default */
5187 qed_dbg_grc_set_params_default(p_hwfn);
5189 return DBG_STATUS_OK;
/* Computes the buffer size (in dwords) needed for an MCP trace dump via a
 * measuring pass of qed_mcp_trace_dump() (NULL buffer, dump == false).
 */
5192 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5193 struct qed_ptt *p_ptt,
5196 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5200 if (status != DBG_STATUS_OK)
5203 return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs an MCP trace dump into the caller-supplied buffer.
 * Unlike the other dump entry points, a size-query failure of
 * DBG_STATUS_NVRAM_GET_IMAGE_FAILED is tolerated: the trace meta data may be
 * unavailable in NVRAM and supplied later from an image file instead.
 */
5206 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5207 struct qed_ptt *p_ptt,
5209 u32 buf_size_in_dwords,
5210 u32 *num_dumped_dwords)
5212 u32 needed_buf_size_in_dwords;
5213 enum dbg_status status;
/* NOTE(review): the "status =" assignment line is elided in this extract */
5216 qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5218 &needed_buf_size_in_dwords);
5219 if (status != DBG_STATUS_OK && status !=
5220 DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5223 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5224 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5226 /* Update reset state */
5227 qed_update_blocks_reset_state(p_hwfn, p_ptt);
/* Perform the actual trace dump */
5230 status = qed_mcp_trace_dump(p_hwfn,
5231 p_ptt, dump_buf, true, num_dumped_dwords);
5233 /* Revert GRC params to their default */
5234 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (in dwords) needed for a GRC REG FIFO dump via a
 * measuring pass of qed_reg_fifo_dump() (NULL buffer, dump == false).
 */
5239 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5240 struct qed_ptt *p_ptt,
5243 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5247 if (status != DBG_STATUS_OK)
5250 return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs a GRC REG FIFO dump into the caller-supplied buffer: sizes the
 * dump, validates the buffer, refreshes reset state, dumps, and reverts GRC
 * params to defaults.
 */
5253 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5254 struct qed_ptt *p_ptt,
5256 u32 buf_size_in_dwords,
5257 u32 *num_dumped_dwords)
5259 u32 needed_buf_size_in_dwords;
5260 enum dbg_status status;
5262 *num_dumped_dwords = 0;
5264 status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5266 &needed_buf_size_in_dwords);
5267 if (status != DBG_STATUS_OK)
5270 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5271 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5273 /* Update reset state */
5274 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5276 status = qed_reg_fifo_dump(p_hwfn,
5277 p_ptt, dump_buf, true, num_dumped_dwords);
5279 /* Revert GRC params to their default */
5280 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (in dwords) needed for an IGU FIFO dump via a
 * measuring pass of qed_igu_fifo_dump() (NULL buffer, dump == false).
 */
5285 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5286 struct qed_ptt *p_ptt,
5289 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5293 if (status != DBG_STATUS_OK)
5296 return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs an IGU FIFO dump into the caller-supplied buffer: sizes the dump,
 * validates the buffer, refreshes reset state, dumps, and reverts GRC params
 * to defaults.
 */
5299 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5300 struct qed_ptt *p_ptt,
5302 u32 buf_size_in_dwords,
5303 u32 *num_dumped_dwords)
5305 u32 needed_buf_size_in_dwords;
5306 enum dbg_status status;
5308 *num_dumped_dwords = 0;
5310 status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5312 &needed_buf_size_in_dwords);
5313 if (status != DBG_STATUS_OK)
5316 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5317 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5319 /* Update reset state */
5320 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5322 status = qed_igu_fifo_dump(p_hwfn,
5323 p_ptt, dump_buf, true, num_dumped_dwords);
5324 /* Revert GRC params to their default */
5325 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (in dwords) needed for a protection-override dump
 * via a measuring pass of qed_protection_override_dump().
 * NOTE(review): the "enum dbg_status" return-type line is elided in this
 * extract (function signature spans two original lines).
 */
5331 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5332 struct qed_ptt *p_ptt,
5335 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5339 if (status != DBG_STATUS_OK)
5342 return qed_protection_override_dump(p_hwfn,
5343 p_ptt, NULL, false, buf_size);
/* Performs a protection-override dump into the caller-supplied buffer: sizes
 * the dump, validates the buffer, refreshes reset state, dumps, and reverts
 * GRC params to defaults.
 */
5346 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5347 struct qed_ptt *p_ptt,
5349 u32 buf_size_in_dwords,
5350 u32 *num_dumped_dwords)
5352 u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5353 enum dbg_status status;
5355 *num_dumped_dwords = 0;
/* NOTE(review): the "status =" assignment line is elided in this extract */
5358 qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5361 if (status != DBG_STATUS_OK)
5364 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5365 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5367 /* Update reset state */
5368 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5370 status = qed_protection_override_dump(p_hwfn,
5373 true, num_dumped_dwords);
5375 /* Revert GRC params to their default */
5376 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (in dwords) needed for an FW-asserts dump via a
 * measuring pass of qed_fw_asserts_dump(). Also refreshes the blocks' reset
 * state first (the asserts dump reads Storm memories).
 */
5381 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5382 struct qed_ptt *p_ptt,
5385 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5389 if (status != DBG_STATUS_OK)
5392 /* Update reset state */
5393 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5395 *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5397 return DBG_STATUS_OK;
/* Performs an FW-asserts dump into the caller-supplied buffer: sizes the
 * dump, validates the buffer, dumps, and reverts GRC params to defaults.
 */
5400 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5401 struct qed_ptt *p_ptt,
5403 u32 buf_size_in_dwords,
5404 u32 *num_dumped_dwords)
5406 u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5407 enum dbg_status status;
5409 *num_dumped_dwords = 0;
/* NOTE(review): the "status =" assignment line is elided in this extract */
5412 qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5415 if (status != DBG_STATUS_OK)
5418 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5419 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5421 *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5423 /* Revert GRC params to their default */
5424 qed_dbg_grc_set_params_default(p_hwfn);
5426 return DBG_STATUS_OK;
/* Reads the attention (or parity) status registers of the given block into
 * *results. For every attention register of the block whose mode matches the
 * current chip mode, the status register is read (optionally via its
 * read-and-clear address when clear_status is set); non-zero statuses are
 * appended to results->reg_results along with the register's mask value.
 * NOTE(review): several lines are elided in this extract (loop body braces,
 * the sts_val != 0 filter, and some locals such as eval_mode/clear_status).
 */
5429 enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5430 struct qed_ptt *p_ptt,
5431 enum block_id block_id,
5432 enum dbg_attn_type attn_type,
5434 struct dbg_attn_block_result *results)
5436 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5437 u8 reg_idx, num_attn_regs, num_result_regs = 0;
5438 const struct dbg_attn_reg *attn_reg_arr;
5440 if (status != DBG_STATUS_OK)
5443 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5444 !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5445 !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5446 return DBG_STATUS_DBG_ARRAY_NOT_SET;
5448 attn_reg_arr = qed_get_block_attn_regs(block_id,
5449 attn_type, &num_attn_regs);
5451 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5452 const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5453 struct dbg_attn_reg_result *reg_result;
5454 u32 sts_addr, sts_val;
5455 u16 modes_buf_offset;
/* Skip registers whose mode expression doesn't match the chip mode */
5459 eval_mode = GET_FIELD(reg_data->mode.data,
5460 DBG_MODE_HDR_EVAL_MODE) > 0;
5461 modes_buf_offset = GET_FIELD(reg_data->mode.data,
5462 DBG_MODE_HDR_MODES_BUF_OFFSET);
5463 if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5466 /* Mode match - read attention status register */
5467 sts_addr = DWORDS_TO_BYTES(clear_status ?
5468 reg_data->sts_clr_address :
5469 GET_FIELD(reg_data->data,
5470 DBG_ATTN_REG_STS_ADDRESS));
5471 sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5475 /* Non-zero attention status - add to results */
5476 reg_result = &results->reg_results[num_result_regs];
5477 SET_FIELD(reg_result->data,
5478 DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5479 SET_FIELD(reg_result->data,
5480 DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5481 GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5482 reg_result->block_attn_offset = reg_data->block_attn_offset;
5483 reg_result->sts_val = sts_val;
5484 reg_result->mask_val = qed_rd(p_hwfn,
5487 (reg_data->mask_address));
/* Fill the block-level result header */
5491 results->block_id = (u8)block_id;
5492 results->names_offset =
5493 qed_get_block_attn_data(block_id, attn_type)->names_offset;
5494 SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5495 SET_FIELD(results->data,
5496 DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5498 return DBG_STATUS_OK;
5501 /******************************* Data Types **********************************/
/* MCP trace format descriptor. The MASK/SHIFT pairs below decode a packed
 * u32 "data" field (elided in this extract): module id, trace level, the
 * sizes of up to three parameters, and the format-string length.
 */
5508 struct mcp_trace_format {
5510 #define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
5511 #define MCP_TRACE_FORMAT_MODULE_SHIFT 0
5512 #define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
5513 #define MCP_TRACE_FORMAT_LEVEL_SHIFT 16
5514 #define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
5515 #define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
5516 #define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
5517 #define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
5518 #define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
5519 #define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
5520 #define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
5521 #define MCP_TRACE_FORMAT_LEN_SHIFT 24
5526 /* Meta data structure, generated by a perl script during MFW build. therefore,
5527 * the structs mcp_trace_meta and mcp_trace_format are duplicated in the perl
5530 struct mcp_trace_meta {
5534 struct mcp_trace_format *formats;
5537 /* REG fifo element */
/* The MASK/SHIFT pairs decode a packed element (presumably a u64 "data"
 * field - the member itself is elided in this extract; PORT starts at
 * bit 36, past 32 bits).
 */
5538 struct reg_fifo_element {
5540 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
5541 #define REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff
5542 #define REG_FIFO_ELEMENT_ACCESS_SHIFT 23
5543 #define REG_FIFO_ELEMENT_ACCESS_MASK 0x1
5544 #define REG_FIFO_ELEMENT_PF_SHIFT 24
5545 #define REG_FIFO_ELEMENT_PF_MASK 0xf
5546 #define REG_FIFO_ELEMENT_VF_SHIFT 28
5547 #define REG_FIFO_ELEMENT_VF_MASK 0xff
5548 #define REG_FIFO_ELEMENT_PORT_SHIFT 36
5549 #define REG_FIFO_ELEMENT_PORT_MASK 0x3
5550 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38
5551 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3
5552 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40
5553 #define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7
5554 #define REG_FIFO_ELEMENT_MASTER_SHIFT 43
5555 #define REG_FIFO_ELEMENT_MASTER_MASK 0xf
5556 #define REG_FIFO_ELEMENT_ERROR_SHIFT 47
5557 #define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
5560 /* IGU fifo element */
5561 struct igu_fifo_element {
5563 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0
5564 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff
5565 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8
5566 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1
5567 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9
5568 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf
5569 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13
5570 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf
5571 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17
5572 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff
5575 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0
5576 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1
5577 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1
5578 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff
/* Decoded form of the WR_DATA field of a write command */
5582 struct igu_fifo_wr_data {
5584 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0
5585 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff
5586 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24
5587 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 0x1
5588 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25
5589 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3
5590 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27
5591 #define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1
5592 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28
5593 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1
5594 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31
5595 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1
/* Decoded form of the WR_DATA field of a cleanup write command */
5598 struct igu_fifo_cleanup_wr_data {
5600 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0
5601 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff
5602 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27
5603 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1
5604 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28
5605 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7
5606 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31
5607 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1
5610 /* Protection override element */
5611 struct protection_override_element {
5613 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0
5614 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff
5615 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23
5616 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff
5617 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47
5618 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1
5619 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48
5620 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1
5621 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49
5622 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7
5623 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52
5624 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7
/* NOTE(review): enumerators of igu_fifo_sources are elided in this extract */
5627 enum igu_fifo_sources {
/* Classification of IGU command addresses, used by s_igu_fifo_addr_data */
5641 enum igu_fifo_addr_types {
5642 IGU_ADDR_TYPE_MSIX_MEM,
5643 IGU_ADDR_TYPE_WRITE_PBA,
5644 IGU_ADDR_TYPE_WRITE_INT_ACK,
5645 IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5646 IGU_ADDR_TYPE_READ_INT,
5647 IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5648 IGU_ADDR_TYPE_RESERVED
/* One row of the IGU address-range table (range bounds and description
 * members are elided in this extract).
 */
5651 struct igu_fifo_addr_data {
5656 enum igu_fifo_addr_types type;
5659 /******************************** Constants **********************************/
/* Upper bound for a single parsed output line; sizes s_temp_buf below */
5661 #define MAX_MSG_LEN 1024
5663 #define MCP_TRACE_MAX_MODULE_LEN 8
5664 #define MCP_TRACE_FORMAT_MAX_PARAMS 3
/* Bit width of one MCP trace param-size field (P1/P2/P3 are equally wide) */
5665 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5666 (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
/* REG FIFO addresses are recorded in dwords; multiply by 4 to get bytes */
5668 #define REG_FIFO_ELEMENT_ADDR_FACTOR 4
5669 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
5671 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
5673 /***************************** Constant Arrays *******************************/
/* ptr/size pair for user-side (parsing) debug arrays; members elided here */
5675 struct user_dbg_array {
5681 static struct user_dbg_array
5682 s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
5684 /* Block names array */
/* Maps each HW block id to its lowercase name used in parsed dump output.
 * NOTE(review): many entries are elided in this extract; the array is
 * ordered by enum block_id.
 */
5685 static struct block_info s_block_info_arr[] = {
5687 {"miscs", BLOCK_MISCS},
5688 {"misc", BLOCK_MISC},
5690 {"pglue_b", BLOCK_PGLUE_B},
5691 {"cnig", BLOCK_CNIG},
5692 {"cpmu", BLOCK_CPMU},
5693 {"ncsi", BLOCK_NCSI},
5694 {"opte", BLOCK_OPTE},
5696 {"pcie", BLOCK_PCIE},
5698 {"mcp2", BLOCK_MCP2},
5699 {"pswhst", BLOCK_PSWHST},
5700 {"pswhst2", BLOCK_PSWHST2},
5701 {"pswrd", BLOCK_PSWRD},
5702 {"pswrd2", BLOCK_PSWRD2},
5703 {"pswwr", BLOCK_PSWWR},
5704 {"pswwr2", BLOCK_PSWWR2},
5705 {"pswrq", BLOCK_PSWRQ},
5706 {"pswrq2", BLOCK_PSWRQ2},
5707 {"pglcs", BLOCK_PGLCS},
5709 {"dmae", BLOCK_DMAE},
5718 {"dorq", BLOCK_DORQ},
5722 {"tsdm", BLOCK_TSDM},
5723 {"msdm", BLOCK_MSDM},
5724 {"usdm", BLOCK_USDM},
5725 {"xsdm", BLOCK_XSDM},
5726 {"ysdm", BLOCK_YSDM},
5727 {"psdm", BLOCK_PSDM},
5728 {"tsem", BLOCK_TSEM},
5729 {"msem", BLOCK_MSEM},
5730 {"usem", BLOCK_USEM},
5731 {"xsem", BLOCK_XSEM},
5732 {"ysem", BLOCK_YSEM},
5733 {"psem", BLOCK_PSEM},
5735 {"tmld", BLOCK_TMLD},
5736 {"muld", BLOCK_MULD},
5737 {"yuld", BLOCK_YULD},
5738 {"xyld", BLOCK_XYLD},
5739 {"ptld", BLOCK_PTLD},
5740 {"ypld", BLOCK_YPLD},
5742 {"pbf_pb1", BLOCK_PBF_PB1},
5743 {"pbf_pb2", BLOCK_PBF_PB2},
5747 {"rdif", BLOCK_RDIF},
5748 {"tdif", BLOCK_TDIF},
5750 {"ccfc", BLOCK_CCFC},
5751 {"tcfc", BLOCK_TCFC},
5754 {"rgfs", BLOCK_RGFS},
5755 {"rgsrc", BLOCK_RGSRC},
5756 {"tgfs", BLOCK_TGFS},
5757 {"tgsrc", BLOCK_TGSRC},
5758 {"umac", BLOCK_UMAC},
5759 {"xmac", BLOCK_XMAC},
5763 {"bmbn", BLOCK_BMBN},
5768 {"phy_pcie", BLOCK_PHY_PCIE},
5770 {"avs_wrap", BLOCK_AVS_WRAP},
5771 {"pxpreqbus", BLOCK_PXPREQBUS},
5772 {"misc_aeu", BLOCK_MISC_AEU},
5773 {"bar0_map", BLOCK_BAR0_MAP}
5776 /* Status string array */
/* Human-readable message per enum dbg_status value; indexed by status code,
 * so the order here must match the enum exactly.
 */
5777 static const char * const s_status_str[] = {
5779 "Operation completed successfully",
5781 /* DBG_STATUS_APP_VERSION_NOT_SET */
5782 "Debug application version wasn't set",
5784 /* DBG_STATUS_UNSUPPORTED_APP_VERSION */
5785 "Unsupported debug application version",
5787 /* DBG_STATUS_DBG_BLOCK_NOT_RESET */
5788 "The debug block wasn't reset since the last recording",
5790 /* DBG_STATUS_INVALID_ARGS */
5791 "Invalid arguments",
5793 /* DBG_STATUS_OUTPUT_ALREADY_SET */
5794 "The debug output was already set",
5796 /* DBG_STATUS_INVALID_PCI_BUF_SIZE */
5797 "Invalid PCI buffer size",
5799 /* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
5800 "PCI buffer allocation failed",
5802 /* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
5803 "A PCI buffer wasn't allocated",
5805 /* DBG_STATUS_TOO_MANY_INPUTS */
5806 "Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
5808 /* DBG_STATUS_INPUT_OVERLAP */
5809 "Overlapping debug bus inputs",
5811 /* DBG_STATUS_HW_ONLY_RECORDING */
5812 "Cannot record Storm data since the entire recording cycle is used by HW",
5814 /* DBG_STATUS_STORM_ALREADY_ENABLED */
5815 "The Storm was already enabled",
5817 /* DBG_STATUS_STORM_NOT_ENABLED */
5818 "The specified Storm wasn't enabled",
5820 /* DBG_STATUS_BLOCK_ALREADY_ENABLED */
5821 "The block was already enabled",
5823 /* DBG_STATUS_BLOCK_NOT_ENABLED */
5824 "The specified block wasn't enabled",
5826 /* DBG_STATUS_NO_INPUT_ENABLED */
5827 "No input was enabled for recording",
5829 /* DBG_STATUS_NO_FILTER_TRIGGER_64B */
5830 "Filters and triggers are not allowed when recording in 64b units",
5832 /* DBG_STATUS_FILTER_ALREADY_ENABLED */
5833 "The filter was already enabled",
5835 /* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
5836 "The trigger was already enabled",
5838 /* DBG_STATUS_TRIGGER_NOT_ENABLED */
5839 "The trigger wasn't enabled",
5841 /* DBG_STATUS_CANT_ADD_CONSTRAINT */
5842 "A constraint can be added only after a filter was enabled or a trigger state was added",
5844 /* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
5845 "Cannot add more than 3 trigger states",
5847 /* DBG_STATUS_TOO_MANY_CONSTRAINTS */
5848 "Cannot add more than 4 constraints per filter or trigger state",
5850 /* DBG_STATUS_RECORDING_NOT_STARTED */
5851 "The recording wasn't started",
5853 /* DBG_STATUS_DATA_DIDNT_TRIGGER */
5854 "A trigger was configured, but it didn't trigger",
5856 /* DBG_STATUS_NO_DATA_RECORDED */
5857 "No data was recorded",
5859 /* DBG_STATUS_DUMP_BUF_TOO_SMALL */
5860 "Dump buffer is too small",
5862 /* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
5863 "Dumped data is not aligned to chunks",
5865 /* DBG_STATUS_UNKNOWN_CHIP */
5868 /* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
5869 "Failed allocating virtual memory",
5871 /* DBG_STATUS_BLOCK_IN_RESET */
5872 "The input block is in reset",
5874 /* DBG_STATUS_INVALID_TRACE_SIGNATURE */
5875 "Invalid MCP trace signature found in NVRAM",
5877 /* DBG_STATUS_INVALID_NVRAM_BUNDLE */
5878 "Invalid bundle ID found in NVRAM",
5880 /* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
5881 "Failed getting NVRAM image",
5883 /* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
5884 "NVRAM image is not dword-aligned",
5886 /* DBG_STATUS_NVRAM_READ_FAILED */
5887 "Failed reading from NVRAM",
5889 /* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
5890 "Idle check parsing failed",
5892 /* DBG_STATUS_MCP_TRACE_BAD_DATA */
5893 "MCP Trace data is corrupt",
5895 /* DBG_STATUS_MCP_TRACE_NO_META */
5896 "Dump doesn't contain meta data - it must be provided in image file",
5898 /* DBG_STATUS_MCP_COULD_NOT_HALT */
5899 "Failed to halt MCP",
5901 /* DBG_STATUS_MCP_COULD_NOT_RESUME */
5902 "Failed to resume MCP after halt",
5904 /* DBG_STATUS_RESERVED2 */
5905 "Reserved debug status - shouldn't be returned",
5907 /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
5908 "Failed to empty SEMI sync FIFO",
5910 /* DBG_STATUS_IGU_FIFO_BAD_DATA */
5911 "IGU FIFO data is corrupt",
5913 /* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
5914 "MCP failed to mask parities",
5916 /* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
5917 "FW Asserts parsing failed",
5919 /* DBG_STATUS_REG_FIFO_BAD_DATA */
5920 "GRC FIFO data is corrupt",
5922 /* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
5923 "Protection Override data is corrupt",
5925 /* DBG_STATUS_DBG_ARRAY_NOT_SET */
5926 "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
5928 /* DBG_STATUS_FILTER_BUG */
5929 "Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
5931 /* DBG_STATUS_NON_MATCHING_LINES */
5932 "Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
5934 /* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
5935 "The selected trigger dword offset wasn't enabled in the recorded HW block",
5937 /* DBG_STATUS_DBG_BUS_IN_USE */
5938 "The debug bus is in use"
5941 /* Idle check severity names array */
/* Indexed by enum dbg_idle_chk_severity_types.
 * NOTE(review): several array entries and the closing braces of the arrays
 * below are elided in this extract.
 */
5942 static const char * const s_idle_chk_severity_str[] = {
5944 "Error if no traffic",
5948 /* MCP Trace level names array */
5949 static const char * const s_mcp_trace_level_str[] = {
5955 /* Access type names array */
5956 static const char * const s_access_strs[] = {
5961 /* Privilege type names array */
5962 static const char * const s_privilege_strs[] = {
5969 /* Protection type names array */
5970 static const char * const s_protection_strs[] = {
5981 /* Master type names array */
5982 static const char * const s_master_strs[] = {
6001 /* REG FIFO error messages array */
/* Indexed by the ERROR field of a reg_fifo_element */
6002 static const char * const s_reg_fifo_error_strs[] = {
6004 "address doesn't belong to any block",
6005 "reserved address in block or write to read-only address",
6006 "privilege/protection mismatch",
6007 "path isolation error"
6010 /* IGU FIFO sources array */
6011 static const char * const s_igu_fifo_source_strs[] = {
6025 /* IGU FIFO error messages */
/* IGU FIFO error messages, indexed by the ERR_TYPE field of an IGU FIFO
 * element; the order must match the hardware error codes.
 * Fixed: typo in the user-visible message "attnetion" -> "attention".
 */
static const char * const s_igu_fifo_error_strs[] = {
	"no error",
	"length error",
	"function disabled",
	"VF sent command to attention address",
	"host sent prod update command",
	"read of during interrupt register while in MIMD mode",
	"access to PXP BAR reserved address",
	"producer update command to attention index",
	"unknown error",
	"SB index not valid",
	"SB relative index and FID not found",
	"FID not match",
	"command with error flag asserted (PCI error or CAU discard)",
	"VF sent cleanup and RF cleanup is disabled",
	"cleanup command on type bigger than 4"
};
6044 /* IGU FIFO address data */
/* IGU command-address map: each row gives an inclusive address range, a
 * description (plus an alternate description for some rows), and the address
 * class used when parsing IGU FIFO elements. Rows are sorted by address.
 */
6045 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
6046 {0x0, 0x101, "MSI-X Memory", NULL,
6047 IGU_ADDR_TYPE_MSIX_MEM},
6048 {0x102, 0x1ff, "reserved", NULL,
6049 IGU_ADDR_TYPE_RESERVED},
6050 {0x200, 0x200, "Write PBA[0:63]", NULL,
6051 IGU_ADDR_TYPE_WRITE_PBA},
6052 {0x201, 0x201, "Write PBA[64:127]", "reserved",
6053 IGU_ADDR_TYPE_WRITE_PBA},
6054 {0x202, 0x202, "Write PBA[128]", "reserved",
6055 IGU_ADDR_TYPE_WRITE_PBA},
6056 {0x203, 0x3ff, "reserved", NULL,
6057 IGU_ADDR_TYPE_RESERVED},
6058 {0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
6059 IGU_ADDR_TYPE_WRITE_INT_ACK},
6060 {0x5f0, 0x5f0, "Attention bits update", NULL,
6061 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6062 {0x5f1, 0x5f1, "Attention bits set", NULL,
6063 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6064 {0x5f2, 0x5f2, "Attention bits clear", NULL,
6065 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6066 {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
6067 IGU_ADDR_TYPE_READ_INT},
6068 {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
6069 IGU_ADDR_TYPE_READ_INT},
6070 {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
6071 IGU_ADDR_TYPE_READ_INT},
6072 {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
6073 IGU_ADDR_TYPE_READ_INT},
6074 {0x5f7, 0x5ff, "reserved", NULL,
6075 IGU_ADDR_TYPE_RESERVED},
6076 {0x600, 0x7ff, "Producer update", NULL,
6077 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
6080 /******************************** Variables **********************************/
6082 /* MCP Trace meta data array - used in case the dump doesn't contain the
6083 * meta data (e.g. due to no NVRAM access).
/* Raw (unparsed) meta-data image supplied by the user */
6085 static struct user_dbg_array s_mcp_trace_meta_arr = { NULL, 0 };
6087 /* Parsed MCP Trace meta data info, based on MCP trace meta array */
6088 static struct mcp_trace_meta s_mcp_trace_meta;
/* Set once s_mcp_trace_meta has been successfully parsed */
6089 static bool s_mcp_trace_meta_valid;
6091 /* Temporary buffer, used for print size calculations */
/* sprintf target for "measure only" passes (see qed_get_buf_ptr) */
6092 static char s_temp_buf[MAX_MSG_LEN];
6094 /**************************** Private Functions ******************************/
/* Advances offset "a" by "b" positions in a cyclic buffer of the given size,
 * wrapping around at the end.
 */
static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
{
	u32 sum = a + b;

	return sum % size;
}
/* Moves offset "a" back by "b" positions in a cyclic buffer of the given
 * size; "size" is added first so the intermediate value cannot underflow
 * for b <= size.
 */
static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
{
	u32 unwrapped = size + a - b;

	return unwrapped % size;
}
/* Reads up to 4 bytes from the specified cyclic buffer, starting at *offset,
 * and packs them into a dword in memory order (byte i of the result is the
 * i-th byte read). *offset is advanced cyclically past the consumed bytes.
 * Assumes a LITTLE ENDIAN host and network-order (BIG ENDIAN) buffer
 * contents, so high-order bytes sit at lower addresses.
 */
static u32 qed_read_from_cyclic_buf(void *buf,
				    u32 *offset,
				    u32 buf_size, u8 num_bytes_to_read)
{
	u8 *bytes_buf = (u8 *)buf;
	u32 val = 0;
	u8 *val_ptr = (u8 *)&val;
	u8 i;

	for (i = 0; i < num_bytes_to_read; i++) {
		val_ptr[i] = bytes_buf[*offset];
		*offset = (*offset + 1) % buf_size;
	}

	return val;
}
/* Returns the byte at *offset in the specified buffer and advances *offset
 * by one.
 */
static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
{
	u8 *bytes = (u8 *)buf;
	u8 ret = bytes[*offset];

	*offset += 1;

	return ret;
}
/* Returns the dword at byte offset *offset in the specified buffer and
 * advances *offset by 4.
 *
 * Fixed: the original loaded the value with a pointer cast
 * (*(u32 *)&((u8 *)buf)[*offset]), which is a misaligned, strict-aliasing
 * violating access whenever *offset is not a multiple of 4. memcpy expresses
 * the same host-endian load portably and compiles to a plain load on
 * architectures that permit unaligned access.
 */
static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
{
	u32 dword_val;

	memcpy(&dword_val, (u8 *)buf + *offset, sizeof(dword_val));
	*offset += 4;

	return dword_val;
}
/* Copies the string at *offset in the specified buffer into dest (at most
 * "size" bytes; strncpy zero-pads shorter strings and dest is always forced
 * to be NUL-terminated), then advances *offset by size.
 */
static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
{
	const char *src = &((const char *)buf)[*offset];

	strncpy(dest, src, size);
	dest[size - 1] = '\0';
	*offset += size;
}
6162 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
6163 * If the specified buffer in NULL, a temporary buffer pointer is returned.
6165 static char *qed_get_buf_ptr(void *buf, u32 offset)
6167 return buf ? (char *)buf + offset : s_temp_buf;
/* Parses one dump parameter at dump_buf and returns the number of dwords
 * consumed.
 *
 * Layout: a NUL-terminated param name, a single type byte (non-zero means
 * string param), then - padded to the next dword boundary - either a
 * NUL-terminated string value or a dword numeric value.
 *
 * On return, *param_name points at the name. For a string param,
 * *param_str_val points at the value; for a numeric param, *param_str_val is
 * set to NULL and the value is stored in *param_num_val.
 */
static u32 qed_read_param(u32 *dump_buf,
			  const char **param_name,
			  const char **param_str_val, u32 *param_num_val)
{
	char *char_buf = (char *)dump_buf;
	size_t offset = 0;

	/* The NUL-terminated param name comes first */
	*param_name = char_buf;
	offset += strlen(*param_name) + 1;

	/* A single type byte follows the name */
	if (char_buf[offset++]) {
		/* String param - NUL-terminated, padded to a dword */
		*param_str_val = char_buf + offset;
		offset += strlen(*param_str_val) + 1;
		if (offset & 0x3)
			offset += (4 - (offset & 0x3));
	} else {
		/* Numeric param - a dword at the next dword boundary */
		*param_str_val = NULL;
		if (offset & 0x3)
			offset += (4 - (offset & 0x3));
		*param_num_val = *(u32 *)(char_buf + offset);
		offset += 4;
	}

	return (u32)offset / 4;
}
/* Reads a section header from the specified buffer: a param whose name is
 * the section name and whose numeric value is the section's param count.
 * Returns the number of dwords consumed.
 */
static u32 qed_read_section_hdr(u32 *dump_buf,
				const char **section_name,
				u32 *num_section_params)
{
	const char *param_str_val;

	return qed_read_param(dump_buf,
			      section_name, &param_str_val, num_section_params);
}
6219 /* Reads section params from the specified buffer and prints them to the results
6220 * buffer. Returns the number of dwords read.
/* String params print as "name: value"; numeric params likewise, except the
 * "fw-timestamp" param which is deliberately suppressed (keeps parsed dumps
 * byte-comparable across runs).
 * NOTE(review): the "¶m_..." tokens below are extraction mojibake for
 * "&param_..." (an HTML "&para;" entity); some lines are elided here.
 */
6222 static u32 qed_print_section_params(u32 *dump_buf,
6223 u32 num_section_params,
6224 char *results_buf, u32 *num_chars_printed)
6226 u32 i, dump_offset = 0, results_offset = 0;
6228 for (i = 0; i < num_section_params; i++) {
6229 const char *param_name, *param_str_val;
6230 u32 param_num_val = 0;
6232 dump_offset += qed_read_param(dump_buf + dump_offset,
6234 ¶m_str_val, ¶m_num_val);
6238 sprintf(qed_get_buf_ptr(results_buf,
6240 "%s: %s\n", param_name, param_str_val);
6241 else if (strcmp(param_name, "fw-timestamp"))
6243 sprintf(qed_get_buf_ptr(results_buf,
6245 "%s: %d\n", param_name, param_num_val);
6248 results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6251 *num_chars_printed = results_offset;
6256 /* Parses the idle check rules and returns the number of characters printed.
6257 * In case of parsing error, returns 0.
/* For each dumped rule: looks up its parsing string, prints severity and
 * message, updates *num_errors / *num_warnings, then prints the dumped
 * condition/info register values (with memory-entry indices where relevant).
 * NOTE(review): many lines are elided in this extract - loop increments,
 * rule_id handling, the results_offset += prefixes of several sprintf calls,
 * and closing braces.
 */
6259 static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
6262 bool print_fw_idle_chk,
6264 u32 *num_errors, u32 *num_warnings)
6266 /* Offset in results_buf in bytes */
6267 u32 results_offset = 0;
6275 /* Go over dumped results */
6276 for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6278 const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6279 struct dbg_idle_chk_result_hdr *hdr;
6280 const char *parsing_str, *lsi_msg;
6281 u32 parsing_str_offset;
6285 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6287 (const struct dbg_idle_chk_rule_parsing_data *)
6288 &s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
6290 parsing_str_offset =
6291 GET_FIELD(rule_parsing_data->data,
6292 DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6294 GET_FIELD(rule_parsing_data->data,
6295 DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6298 s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
6299 [parsing_str_offset];
6300 lsi_msg = parsing_str;
/* Reject out-of-range severities before indexing the severity table */
6303 if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6306 /* Skip rule header */
6307 dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6309 /* Update errors/warnings count */
6310 if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6311 hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6316 /* Print rule severity */
6318 sprintf(qed_get_buf_ptr(results_buf,
6319 results_offset), "%s: ",
6320 s_idle_chk_severity_str[hdr->severity]);
6322 /* Print rule message */
6324 parsing_str += strlen(parsing_str) + 1;
6326 sprintf(qed_get_buf_ptr(results_buf,
6327 results_offset), "%s.",
6329 print_fw_idle_chk ? parsing_str : lsi_msg);
6330 parsing_str += strlen(parsing_str) + 1;
6332 /* Print register values */
6334 sprintf(qed_get_buf_ptr(results_buf,
6335 results_offset), " Registers:");
6337 i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6339 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6344 (struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6345 is_mem = GET_FIELD(reg_hdr->data,
6346 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6347 reg_id = GET_FIELD(reg_hdr->data,
6348 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6350 /* Skip reg header */
6351 dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6353 /* Skip register names until the required reg_id is
6356 for (; reg_id > curr_reg_id;
6358 parsing_str += strlen(parsing_str) + 1);
6361 sprintf(qed_get_buf_ptr(results_buf,
6362 results_offset), " %s",
/* Memory-type registers also print the dumped entry index */
6364 if (i < hdr->num_dumped_cond_regs && is_mem)
6366 sprintf(qed_get_buf_ptr(results_buf,
6368 "[%d]", hdr->mem_entry_id +
6369 reg_hdr->start_entry);
6371 sprintf(qed_get_buf_ptr(results_buf,
6372 results_offset), "=");
6373 for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6375 sprintf(qed_get_buf_ptr(results_buf,
6378 if (j < reg_hdr->size - 1)
6380 sprintf(qed_get_buf_ptr
6382 results_offset), ",");
6387 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6390 /* Check if end of dump buffer was exceeded */
6391 if (dump_buf > dump_buf_end)
6394 return results_offset;
/* qed_parse_idle_chk_dump() - parses an idle-check dump buffer.
 *
 * Flow: validate parsing arrays are loaded, read the "global_params"
 * section, then the "idle_chk" section (whose single param is the rule
 * count), then print FW and LSI rule results and a final summary line.
 * On success *parsed_results_bytes holds the required results buffer
 * size including the NUL terminator.
 *
 * Fix: restored "&section_name"/"&param_name"/"&param_str_val" which had
 * been corrupted by HTML-entity mangling into "§ion_name"/"¶m_...".
 *
 * NOTE(review): this extract has elided lines (non-contiguous embedded
 * numbering); the visible logic is otherwise unchanged.
 */
6397 /* Parses an idle check dump buffer.
6398 * If result_buf is not NULL, the idle check results are printed to it.
6399 * In any case, the required results buffer size is assigned to
6400 * parsed_results_bytes.
6401 * The parsing status is returned.
6403 static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
6404 u32 num_dumped_dwords,
6406 u32 *parsed_results_bytes,
6410 const char *section_name, *param_name, *param_str_val;
6411 u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6412 u32 num_section_params = 0, num_rules;
6414 /* Offset in results_buf in bytes */
6415 u32 results_offset = 0;
6417 *parsed_results_bytes = 0;
6421 if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6422 !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6423 return DBG_STATUS_DBG_ARRAY_NOT_SET;
6425 /* Read global_params section */
6426 dump_buf += qed_read_section_hdr(dump_buf,
6427 &section_name, &num_section_params);
6428 if (strcmp(section_name, "global_params"))
6429 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6431 /* Print global params */
6432 dump_buf += qed_print_section_params(dump_buf,
6434 results_buf, &results_offset);
6436 /* Read idle_chk section */
6437 dump_buf += qed_read_section_hdr(dump_buf,
6438 &section_name, &num_section_params);
6439 if (strcmp(section_name, "idle_chk") || num_section_params != 1)
6440 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6441 dump_buf += qed_read_param(dump_buf,
6442 &param_name, &param_str_val, &num_rules);
6443 if (strcmp(param_name, "num_rules"))
6444 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6447 u32 rules_print_size;
6449 /* Print FW output */
6451 sprintf(qed_get_buf_ptr(results_buf,
6453 "FW_IDLE_CHECK:\n");
6455 qed_parse_idle_chk_dump_rules(dump_buf,
6465 results_offset += rules_print_size;
6466 if (!rules_print_size)
6467 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6469 /* Print LSI output */
6471 sprintf(qed_get_buf_ptr(results_buf,
6473 "\nLSI_IDLE_CHECK:\n");
6475 qed_parse_idle_chk_dump_rules(dump_buf,
6485 results_offset += rules_print_size;
6486 if (!rules_print_size)
6487 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6490 /* Print errors/warnings count */
6493 sprintf(qed_get_buf_ptr(results_buf,
6495 "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6496 *num_errors, *num_warnings);
6497 else if (*num_warnings)
6499 sprintf(qed_get_buf_ptr(results_buf,
6501 "\nIdle Check completed successfully (with %d warnings)\n",
6505 sprintf(qed_get_buf_ptr(results_buf,
6507 "\nIdle Check completed successfully\n");
6509 /* Add 1 for string NULL termination */
6510 *parsed_results_bytes = results_offset + 1;
6512 return DBG_STATUS_OK;
6515 /* Frees the specified MCP Trace meta data */
/* Releases each per-module name string and each per-format format string,
 * then the two top-level arrays, and marks the cached meta invalid so the
 * next allocation does not attempt to free it again.
 */
6516 static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
6517 struct mcp_trace_meta *meta)
/* Invalidate the global "meta loaded" flag before releasing storage. */
6521 s_mcp_trace_meta_valid = false;
6523 /* Release modules */
6524 if (meta->modules) {
6525 for (i = 0; i < meta->modules_num; i++)
6526 kfree(meta->modules[i]);
6527 kfree(meta->modules);
6530 /* Release formats */
6531 if (meta->formats) {
6532 for (i = 0; i < meta->formats_num; i++)
6533 kfree(meta->formats[i].format_str);
6534 kfree(meta->formats);
/* qed_mcp_trace_alloc_meta() - allocates and parses MCP Trace meta data
 * from the raw meta buffer: signature, module name strings, second
 * signature, then the format table with per-format strings.
 *
 * Fix: on a mid-loop allocation failure the entries already allocated are
 * indices 0..i-1, i.e. exactly i of them. The old code set the count to
 * "i ? i - 1 : 0", which made the later qed_mcp_trace_free_meta() skip
 * (and leak) the last successfully allocated string. Both the modules
 * and the formats failure paths now record "i".
 */
6538 /* Allocates and fills MCP Trace meta data based on the specified meta data
6540 * Returns debug status code.
6542 static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
6543 const u32 *meta_buf,
6544 struct mcp_trace_meta *meta)
6546 u8 *meta_buf_bytes = (u8 *)meta_buf;
6547 u32 offset = 0, signature, i;
6549 /* Free the previous meta before loading a new one. */
6550 if (s_mcp_trace_meta_valid)
6551 qed_mcp_trace_free_meta(p_hwfn, meta);
6553 memset(meta, 0, sizeof(*meta));
6555 /* Read first signature */
6556 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6557 if (signature != NVM_MAGIC_VALUE)
6558 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6560 /* Read no. of modules and allocate memory for their pointers */
6561 meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6562 meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
6564 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6566 /* Allocate and read all module strings */
6567 for (i = 0; i < meta->modules_num; i++) {
6568 u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6570 *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6571 if (!(*(meta->modules + i))) {
/* Exactly i module strings were allocated; record that count so
 * the free path releases all of them (was i - 1: leaked one).
 */
6572 /* Update number of modules to be released */
6573 meta->modules_num = i;
6574 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6577 qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6578 *(meta->modules + i));
/* Truncate over-long module names; the write stays in bounds since
 * module_len > MCP_TRACE_MAX_MODULE_LEN implies the index is valid.
 */
6579 if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6580 (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6583 /* Read second signature */
6584 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6585 if (signature != NVM_MAGIC_VALUE)
6586 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6588 /* Read number of formats and allocate memory for all formats */
6589 meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6590 meta->formats = kzalloc(meta->formats_num *
6591 sizeof(struct mcp_trace_format),
6594 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6596 /* Allocate and read all strings */
6597 for (i = 0; i < meta->formats_num; i++) {
6598 struct mcp_trace_format *format_ptr = &meta->formats[i];
6601 format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6605 MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
6606 format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6607 if (!format_ptr->format_str) {
/* Same off-by-one fix as above, for format strings. */
6608 /* Update number of formats to be released */
6609 meta->formats_num = i;
6610 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6613 qed_read_str_from_buf(meta_buf_bytes,
6615 format_len, format_ptr->format_str);
6618 s_mcp_trace_meta_valid = true;
6619 return DBG_STATUS_OK;
/* qed_parse_mcp_trace_buf() - decodes entries from the MCP trace cyclic
 * buffer using the cached meta data (format table + module names) and
 * prints each message into parsed_buf via sprintf.
 *
 * Fix: the "skip unknown format" check used "format_idx >
 * s_mcp_trace_meta.formats_num", which let format_idx == formats_num
 * through to an out-of-bounds read of the formats array (valid indices
 * are 0..formats_num-1). Changed to ">=".
 */
6622 /* Parses an MCP trace buffer. If result_buf is not NULL, the MCP Trace results
6623 * are printed to it. The parsing status is returned.
6625 * trace_buf - MCP trace cyclic buffer
6626 * trace_buf_size - MCP trace cyclic buffer size in bytes
6627 * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6629 * data_size - size in bytes of data to parse.
6630 * parsed_buf - destination buffer for parsed data.
6631 * parsed_bytes - size of parsed data in bytes.
6633 static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
6640 u32 param_mask, param_shift;
6641 enum dbg_status status;
6645 if (!s_mcp_trace_meta_valid)
6646 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6648 status = DBG_STATUS_OK;
6651 struct mcp_trace_format *format_ptr;
6652 u8 format_level, format_module;
6653 u32 params[3] = { 0, 0, 0 };
6654 u32 header, format_idx, i;
6656 if (data_size < MFW_TRACE_ENTRY_SIZE)
6657 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6659 header = qed_read_from_cyclic_buf(trace_buf,
6662 MFW_TRACE_ENTRY_SIZE);
6663 data_size -= MFW_TRACE_ENTRY_SIZE;
6664 format_idx = header & MFW_TRACE_EVENTID_MASK;
6666 /* Skip message if its index doesn't exist in the meta data */
/* ">=" guards against indexing formats[formats_num] (OOB). */
6667 if (format_idx >= s_mcp_trace_meta.formats_num) {
6669 (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
6670 MFW_TRACE_PRM_SIZE_SHIFT);
6672 if (data_size < format_size)
6673 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6675 data_offset = qed_cyclic_add(data_offset,
6678 data_size -= format_size;
6682 format_ptr = &s_mcp_trace_meta.formats[format_idx];
6685 param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK,
6686 param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
6687 i < MCP_TRACE_FORMAT_MAX_PARAMS;
6689 param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6690 param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6691 /* Extract param size (0..3) */
6692 u8 param_size = (u8)((format_ptr->data & param_mask) >>
6695 /* If the param size is zero, there are no other
6701 /* Size is encoded using 2 bits, where 3 is used to
6704 if (param_size == 3)
6707 if (data_size < param_size)
6708 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6710 params[i] = qed_read_from_cyclic_buf(trace_buf,
6714 data_size -= param_size;
6717 format_level = (u8)((format_ptr->data &
6718 MCP_TRACE_FORMAT_LEVEL_MASK) >>
6719 MCP_TRACE_FORMAT_LEVEL_SHIFT);
6720 format_module = (u8)((format_ptr->data &
6721 MCP_TRACE_FORMAT_MODULE_MASK) >>
6722 MCP_TRACE_FORMAT_MODULE_SHIFT);
6723 if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
6724 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6726 /* Print current message to results buffer */
6728 sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes),
6730 s_mcp_trace_level_str[format_level],
6731 s_mcp_trace_meta.modules[format_module]);
/* NOTE(review): format_ptr->format_str originates from NVM meta data;
 * it is used here as a printf format by design.
 */
6733 sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes),
6734 format_ptr->format_str,
6735 params[0], params[1], params[2]);
6738 /* Add string NULL terminator */
/* qed_parse_mcp_trace_dump() - parses an MCP Trace dump: global params,
 * the raw cyclic trace data ("mcp_trace_data"), and the meta data section
 * ("mcp_trace_meta"), which may instead come from the externally supplied
 * s_mcp_trace_meta_arr when the dump carries none. Finally decodes the
 * trace via qed_parse_mcp_trace_buf().
 *
 * Fix: restored "&section_name"/"&param_name"/"&param_str_val"/
 * "&param_num_val" which had been corrupted by HTML-entity mangling
 * into "§ion_name"/"¶m_...".
 */
6744 /* Parses an MCP Trace dump buffer.
6745 * If result_buf is not NULL, the MCP Trace results are printed to it.
6746 * In any case, the required results buffer size is assigned to
6748 * The parsing status is returned.
6750 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6755 const char *section_name, *param_name, *param_str_val;
6756 u32 data_size, trace_data_dwords, trace_meta_dwords;
6757 u32 offset, results_offset, parsed_buf_bytes;
6758 u32 param_num_val, num_section_params;
6759 struct mcp_trace *trace;
6760 enum dbg_status status;
6761 const u32 *meta_buf;
6766 /* Read global_params section */
6767 dump_buf += qed_read_section_hdr(dump_buf,
6768 &section_name, &num_section_params);
6769 if (strcmp(section_name, "global_params"))
6770 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6772 /* Print global params */
6773 dump_buf += qed_print_section_params(dump_buf,
6775 parsed_buf, &results_offset);
6777 /* Read trace_data section */
6778 dump_buf += qed_read_section_hdr(dump_buf,
6779 &section_name, &num_section_params);
6780 if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
6781 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6782 dump_buf += qed_read_param(dump_buf,
6783 &param_name, &param_str_val, &param_num_val);
6784 if (strcmp(param_name, "size"))
6785 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6786 trace_data_dwords = param_num_val;
6788 /* Prepare trace info */
6789 trace = (struct mcp_trace *)dump_buf;
6790 trace_buf = (u8 *)dump_buf + sizeof(*trace);
6791 offset = trace->trace_oldest;
/* Amount of valid cyclic data = producer - oldest, modulo buffer size. */
6792 data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
6793 dump_buf += trace_data_dwords;
6795 /* Read meta_data section */
6796 dump_buf += qed_read_section_hdr(dump_buf,
6797 &section_name, &num_section_params);
6798 if (strcmp(section_name, "mcp_trace_meta"))
6799 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6800 dump_buf += qed_read_param(dump_buf,
6801 &param_name, &param_str_val, &param_num_val);
6802 if (strcmp(param_name, "size"))
6803 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6804 trace_meta_dwords = param_num_val;
6806 /* Choose meta data buffer */
6807 if (!trace_meta_dwords) {
6808 /* Dump doesn't include meta data */
6809 if (!s_mcp_trace_meta_arr.ptr)
6810 return DBG_STATUS_MCP_TRACE_NO_META;
6811 meta_buf = s_mcp_trace_meta_arr.ptr;
6813 /* Dump includes meta data */
6814 meta_buf = dump_buf;
6817 /* Allocate meta data memory */
6818 status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &s_mcp_trace_meta);
6819 if (status != DBG_STATUS_OK)
6822 status = qed_parse_mcp_trace_buf(trace_buf,
6827 parsed_buf + results_offset :
6830 if (status != DBG_STATUS_OK)
6833 *parsed_bytes = results_offset + parsed_buf_bytes;
6835 return DBG_STATUS_OK;
/* qed_parse_reg_fifo_dump() - parses a Reg FIFO dump: global params, then
 * the "reg_fifo_data" section whose size must be a whole number of
 * REG_FIFO_ELEMENT_DWORDS elements. Each element is decoded into a
 * one-line textual description including any error flags.
 *
 * Fix: restored "&section_name"/"&param_name"/"&param_str_val"/
 * "&param_num_val" which had been corrupted by HTML-entity mangling
 * into "§ion_name"/"¶m_...".
 */
6838 /* Parses a Reg FIFO dump buffer.
6839 * If result_buf is not NULL, the Reg FIFO results are printed to it.
6840 * In any case, the required results buffer size is assigned to
6841 * parsed_results_bytes.
6842 * The parsing status is returned.
6844 static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
6846 u32 *parsed_results_bytes)
6848 const char *section_name, *param_name, *param_str_val;
6849 u32 param_num_val, num_section_params, num_elements;
6850 struct reg_fifo_element *elements;
6851 u8 i, j, err_val, vf_val;
6852 u32 results_offset = 0;
6855 /* Read global_params section */
6856 dump_buf += qed_read_section_hdr(dump_buf,
6857 &section_name, &num_section_params);
6858 if (strcmp(section_name, "global_params"))
6859 return DBG_STATUS_REG_FIFO_BAD_DATA;
6861 /* Print global params */
6862 dump_buf += qed_print_section_params(dump_buf,
6864 results_buf, &results_offset);
6866 /* Read reg_fifo_data section */
6867 dump_buf += qed_read_section_hdr(dump_buf,
6868 &section_name, &num_section_params);
6869 if (strcmp(section_name, "reg_fifo_data"))
6870 return DBG_STATUS_REG_FIFO_BAD_DATA;
6871 dump_buf += qed_read_param(dump_buf,
6872 &param_name, &param_str_val, &param_num_val);
6873 if (strcmp(param_name, "size"))
6874 return DBG_STATUS_REG_FIFO_BAD_DATA;
6875 if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6876 return DBG_STATUS_REG_FIFO_BAD_DATA;
6877 num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6878 elements = (struct reg_fifo_element *)dump_buf;
6880 /* Decode elements */
6881 for (i = 0; i < num_elements; i++) {
6882 bool err_printed = false;
6884 /* Discover if element belongs to a VF or a PF */
6885 vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6886 if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6887 sprintf(vf_str, "%s", "N/A");
6889 sprintf(vf_str, "%d", vf_val);
6891 /* Add parsed element to parsed buffer */
6893 sprintf(qed_get_buf_ptr(results_buf,
6895 "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
6897 (u32)GET_FIELD(elements[i].data,
6898 REG_FIFO_ELEMENT_ADDRESS) *
6899 REG_FIFO_ELEMENT_ADDR_FACTOR,
6900 s_access_strs[GET_FIELD(elements[i].data,
6901 REG_FIFO_ELEMENT_ACCESS)],
6902 (u32)GET_FIELD(elements[i].data,
6903 REG_FIFO_ELEMENT_PF),
6905 (u32)GET_FIELD(elements[i].data,
6906 REG_FIFO_ELEMENT_PORT),
6907 s_privilege_strs[GET_FIELD(elements[i].data,
6908 REG_FIFO_ELEMENT_PRIVILEGE)],
6909 s_protection_strs[GET_FIELD(elements[i].data,
6910 REG_FIFO_ELEMENT_PROTECTION)],
6911 s_master_strs[GET_FIELD(elements[i].data,
6912 REG_FIFO_ELEMENT_MASTER)]);
/* Walk the error bit-mask, printing the name of each set bit. */
6916 err_val = GET_FIELD(elements[i].data,
6917 REG_FIFO_ELEMENT_ERROR);
6918 j < ARRAY_SIZE(s_reg_fifo_error_strs);
6919 j++, err_val >>= 1) {
6920 if (err_val & 0x1) {
6923 sprintf(qed_get_buf_ptr
6925 results_offset), ", ");
6927 sprintf(qed_get_buf_ptr
6928 (results_buf, results_offset), "%s",
6929 s_reg_fifo_error_strs[j]);
6935 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6938 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6940 "fifo contained %d elements", num_elements);
6942 /* Add 1 for string NULL termination */
6943 *parsed_results_bytes = results_offset + 1;
6945 return DBG_STATUS_OK;
/* Decodes a single IGU FIFO element into a one-line textual description.
 * Bits 32..95 of the 96-bit element (dword1/dword2) carry the command;
 * dword0 carries addressing/source/error metadata. Returns
 * DBG_STATUS_IGU_FIFO_BAD_DATA when source/error type/command address are
 * outside the known tables.
 */
6948 static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
6951 u32 *results_offset)
6953 const struct igu_fifo_addr_data *found_addr = NULL;
6954 u8 source, err_type, i, is_cleanup;
6955 char parsed_addr_data[32];
6956 char parsed_wr_data[256];
6957 u32 wr_data, prod_cons;
6958 bool is_wr_cmd, is_pf;
6962 /* Dword12 (dword index 1 and 2) contains bits 32..95 of the
6965 dword12 = ((u64)element->dword2 << 32) | element->dword1;
6966 is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
6967 is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
6968 cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
6969 source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
6970 err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
/* Reject indices outside the known source/error string tables. */
6972 if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
6973 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6974 if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
6975 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6977 /* Find address data */
6978 for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
6979 const struct igu_fifo_addr_data *curr_addr =
6980 &s_igu_fifo_addr_data[i];
6982 if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
6983 curr_addr->end_addr)
6984 found_addr = curr_addr;
6988 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6990 /* Prepare parsed address data */
6991 switch (found_addr->type) {
6992 case IGU_ADDR_TYPE_MSIX_MEM:
/* MSI-X memory is addressed in 2-dword steps, hence cmd_addr / 2. */
6993 sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
6995 case IGU_ADDR_TYPE_WRITE_INT_ACK:
6996 case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
6997 sprintf(parsed_addr_data,
6998 " SB = 0x%x", cmd_addr - found_addr->start_addr);
7001 parsed_addr_data[0] = '\0';
7005 parsed_wr_data[0] = '\0';
7009 /* Prepare parsed write data */
7010 wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
7011 prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
7012 is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
7014 if (source == IGU_SRC_ATTN) {
7015 sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
7018 u8 cleanup_val, cleanup_type;
7022 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
7025 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
7027 sprintf(parsed_wr_data,
7028 "cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
7029 cleanup_val ? "set" : "clear",
7032 u8 update_flag, en_dis_int_for_sb, segment;
7035 update_flag = GET_FIELD(wr_data,
7036 IGU_FIFO_WR_DATA_UPDATE_FLAG);
7039 IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
7040 segment = GET_FIELD(wr_data,
7041 IGU_FIFO_WR_DATA_SEGMENT);
7042 timer_mask = GET_FIELD(wr_data,
7043 IGU_FIFO_WR_DATA_TIMER_MASK);
7045 sprintf(parsed_wr_data,
7046 "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
7048 update_flag ? "update" : "nop",
7050 (en_dis_int_for_sb == 1 ? "disable" : "nop") :
7052 segment ? "attn" : "regular",
7057 /* Add parsed element to parsed buffer */
7058 *results_offset += sprintf(qed_get_buf_ptr(results_buf,
7060 "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
7061 element->dword2, element->dword1,
7063 is_pf ? "pf" : "vf",
7064 GET_FIELD(element->dword0,
7065 IGU_FIFO_ELEMENT_DWORD0_FID),
7066 s_igu_fifo_source_strs[source],
7067 is_wr_cmd ? "wr" : "rd",
7069 (!is_pf && found_addr->vf_desc)
7070 ? found_addr->vf_desc
7074 s_igu_fifo_error_strs[err_type]);
7076 return DBG_STATUS_OK;
/* qed_parse_igu_fifo_dump() - parses an IGU FIFO dump: global params,
 * then the "igu_fifo_data" section (size must be a whole number of
 * IGU_FIFO_ELEMENT_DWORDS elements); each element is decoded via
 * qed_parse_igu_fifo_element().
 *
 * Fix: restored "&section_name"/"&param_name"/"&param_str_val"/
 * "&param_num_val" which had been corrupted by HTML-entity mangling
 * into "§ion_name"/"¶m_...".
 */
7079 /* Parses an IGU FIFO dump buffer.
7080 * If result_buf is not NULL, the IGU FIFO results are printed to it.
7081 * In any case, the required results buffer size is assigned to
7082 * parsed_results_bytes.
7083 * The parsing status is returned.
7085 static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
7087 u32 *parsed_results_bytes)
7089 const char *section_name, *param_name, *param_str_val;
7090 u32 param_num_val, num_section_params, num_elements;
7091 struct igu_fifo_element *elements;
7092 enum dbg_status status;
7093 u32 results_offset = 0;
7096 /* Read global_params section */
7097 dump_buf += qed_read_section_hdr(dump_buf,
7098 &section_name, &num_section_params);
7099 if (strcmp(section_name, "global_params"))
7100 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7102 /* Print global params */
7103 dump_buf += qed_print_section_params(dump_buf,
7105 results_buf, &results_offset);
7107 /* Read igu_fifo_data section */
7108 dump_buf += qed_read_section_hdr(dump_buf,
7109 &section_name, &num_section_params);
7110 if (strcmp(section_name, "igu_fifo_data"))
7111 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7112 dump_buf += qed_read_param(dump_buf,
7113 &param_name, &param_str_val, &param_num_val);
7114 if (strcmp(param_name, "size"))
7115 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7116 if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7117 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7118 num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7119 elements = (struct igu_fifo_element *)dump_buf;
7121 /* Decode elements */
7122 for (i = 0; i < num_elements; i++) {
7123 status = qed_parse_igu_fifo_element(&elements[i],
7126 if (status != DBG_STATUS_OK)
7130 results_offset += sprintf(qed_get_buf_ptr(results_buf,
7132 "fifo contained %d elements", num_elements);
7134 /* Add 1 for string NULL termination */
7135 *parsed_results_bytes = results_offset + 1;
7137 return DBG_STATUS_OK;
/* qed_parse_protection_override_dump() - parses a protection-override
 * dump: global params, then "protection_override_data" (size must be a
 * whole number of PROTECTION_OVERRIDE_ELEMENT_DWORDS elements); each
 * element is printed as one window-description line.
 *
 * Fix: restored "&section_name"/"&param_name"/"&param_str_val"/
 * "&param_num_val" which had been corrupted by HTML-entity mangling
 * into "§ion_name"/"¶m_...".
 */
7140 static enum dbg_status
7141 qed_parse_protection_override_dump(u32 *dump_buf,
7143 u32 *parsed_results_bytes)
7145 const char *section_name, *param_name, *param_str_val;
7146 u32 param_num_val, num_section_params, num_elements;
7147 struct protection_override_element *elements;
7148 u32 results_offset = 0;
7151 /* Read global_params section */
7152 dump_buf += qed_read_section_hdr(dump_buf,
7153 &section_name, &num_section_params);
7154 if (strcmp(section_name, "global_params"))
7155 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7157 /* Print global params */
7158 dump_buf += qed_print_section_params(dump_buf,
7160 results_buf, &results_offset);
7162 /* Read protection_override_data section */
7163 dump_buf += qed_read_section_hdr(dump_buf,
7164 &section_name, &num_section_params);
7165 if (strcmp(section_name, "protection_override_data"))
7166 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7167 dump_buf += qed_read_param(dump_buf,
7168 &param_name, &param_str_val, &param_num_val);
7169 if (strcmp(param_name, "size"))
7170 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7171 if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7172 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7173 num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7174 elements = (struct protection_override_element *)dump_buf;
7176 /* Decode elements */
7177 for (i = 0; i < num_elements; i++) {
7178 u32 address = GET_FIELD(elements[i].data,
7179 PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7180 PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7183 sprintf(qed_get_buf_ptr(results_buf,
7185 "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7187 (u32)GET_FIELD(elements[i].data,
7188 PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7189 (u32)GET_FIELD(elements[i].data,
7190 PROTECTION_OVERRIDE_ELEMENT_READ),
7191 (u32)GET_FIELD(elements[i].data,
7192 PROTECTION_OVERRIDE_ELEMENT_WRITE),
7193 s_protection_strs[GET_FIELD(elements[i].data,
7194 PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7195 s_protection_strs[GET_FIELD(elements[i].data,
7196 PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7199 results_offset += sprintf(qed_get_buf_ptr(results_buf,
7201 "protection override contained %d elements",
7204 /* Add 1 for string NULL termination */
7205 *parsed_results_bytes = results_offset + 1;
7207 return DBG_STATUS_OK;
/* qed_parse_fw_asserts_dump() - parses a FW Asserts dump: global params,
 * then a sequence of "fw_asserts" sections (each tagged with a storm
 * letter and a dword size, dumped as raw %08x lines) terminated by a
 * "last" section.
 *
 * Fix: restored "&section_name" which had been corrupted by HTML-entity
 * mangling into "§ion_name".
 */
7210 /* Parses a FW Asserts dump buffer.
7211 * If result_buf is not NULL, the FW Asserts results are printed to it.
7212 * In any case, the required results buffer size is assigned to
7213 * parsed_results_bytes.
7214 * The parsing status is returned.
7216 static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
7218 u32 *parsed_results_bytes)
7220 u32 num_section_params, param_num_val, i, results_offset = 0;
7221 const char *param_name, *param_str_val, *section_name;
7222 bool last_section_found = false;
7224 *parsed_results_bytes = 0;
7226 /* Read global_params section */
7227 dump_buf += qed_read_section_hdr(dump_buf,
7228 &section_name, &num_section_params);
7229 if (strcmp(section_name, "global_params"))
7230 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7232 /* Print global params */
7233 dump_buf += qed_print_section_params(dump_buf,
7235 results_buf, &results_offset);
7237 while (!last_section_found) {
7238 dump_buf += qed_read_section_hdr(dump_buf,
7240 &num_section_params);
7241 if (!strcmp(section_name, "fw_asserts")) {
7242 /* Extract params */
7243 const char *storm_letter = NULL;
7244 u32 storm_dump_size = 0;
7246 for (i = 0; i < num_section_params; i++) {
7247 dump_buf += qed_read_param(dump_buf,
7251 if (!strcmp(param_name, "storm"))
7252 storm_letter = param_str_val;
7253 else if (!strcmp(param_name, "size"))
7254 storm_dump_size = param_num_val;
7257 DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7260 if (!storm_letter || !storm_dump_size)
7261 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
/* Print the raw assert dwords for this storm. */
7265 sprintf(qed_get_buf_ptr(results_buf,
7267 "\n%sSTORM_ASSERT: size=%d\n",
7268 storm_letter, storm_dump_size);
7269 for (i = 0; i < storm_dump_size; i++, dump_buf++)
7271 sprintf(qed_get_buf_ptr(results_buf,
7273 "%08x\n", *dump_buf);
7274 } else if (!strcmp(section_name, "last")) {
7275 last_section_found = true;
7277 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7281 /* Add 1 for string NULL termination */
7282 *parsed_results_bytes = results_offset + 1;
7284 return DBG_STATUS_OK;
7287 /***************************** Public Functions *******************************/
/* Registers the user-side debug binary blob: for every known buffer type
 * it records a pointer into the blob (via that entry's offset) and the
 * entry's length converted to dwords, in the s_user_dbg_arrays table.
 * The blob must outlive all subsequent parsing calls.
 */
7289 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
7291 struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
7294 /* Convert binary data to debug arrays */
7295 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
7296 s_user_dbg_arrays[buf_id].ptr =
7297 (u32 *)(bin_ptr + buf_array[buf_id].offset);
7298 s_user_dbg_arrays[buf_id].size_in_dwords =
7299 BYTES_TO_DWORDS(buf_array[buf_id].length);
7302 return DBG_STATUS_OK;
/* Maps a dbg_status code to its human-readable string; out-of-range
 * values yield the fixed "Invalid debug status" string.
 */
7305 const char *qed_dbg_get_status_str(enum dbg_status status)
7308 MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
/* Computes the buffer size required for printed idle-check results by
 * running the parser in size-only mode; the error/warning counts are
 * computed into locals and discarded here.
 */
7311 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7313 u32 num_dumped_dwords,
7314 u32 *results_buf_size)
7316 u32 num_errors, num_warnings;
7318 return qed_parse_idle_chk_dump(dump_buf,
7322 &num_errors, &num_warnings);
/* Prints parsed idle-check results into the caller's buffer and reports
 * error/warning counts; the required-size output of the parser goes to a
 * throwaway local.
 */
7325 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7327 u32 num_dumped_dwords,
7332 u32 parsed_buf_size;
7334 return qed_parse_idle_chk_dump(dump_buf,
7338 num_errors, num_warnings);
/* Registers an external MCP-trace meta-data buffer, used when a trace
 * dump carries no embedded meta section. The caller retains ownership;
 * the buffer must stay valid while trace parsing may occur.
 */
7341 void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size)
7343 s_mcp_trace_meta_arr.ptr = data;
7344 s_mcp_trace_meta_arr.size_in_dwords = size;
/* Computes the buffer size required for printed MCP-trace results by
 * running the dump parser with a NULL output buffer.
 */
7347 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7349 u32 num_dumped_dwords,
7350 u32 *results_buf_size)
7352 return qed_parse_mcp_trace_dump(p_hwfn,
7353 dump_buf, NULL, results_buf_size);
/* Prints parsed MCP-trace results into the caller's buffer; the parser's
 * size output goes to a throwaway local.
 */
7356 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7358 u32 num_dumped_dwords,
7361 u32 parsed_buf_size;
7363 return qed_parse_mcp_trace_dump(p_hwfn,
7365 results_buf, &parsed_buf_size);
/* Decodes raw MCP-trace bytes (already linearized, not a full dump) into
 * text using the previously loaded meta data, via the cyclic-buffer
 * parser. The byte count output goes to a throwaway local.
 */
7368 enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf,
7369 u32 num_dumped_bytes,
7374 return qed_parse_mcp_trace_buf(dump_buf,
7378 results_buf, &parsed_bytes);
/* Computes the buffer size required for printed Reg FIFO results by
 * running the parser with a NULL output buffer.
 */
7381 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7383 u32 num_dumped_dwords,
7384 u32 *results_buf_size)
7386 return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
/* Prints parsed Reg FIFO results into the caller's buffer; the parser's
 * size output goes to a throwaway local.
 */
7389 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7391 u32 num_dumped_dwords,
7394 u32 parsed_buf_size;
7396 return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
/* Computes the buffer size required for printed IGU FIFO results by
 * running the parser with a NULL output buffer.
 */
7399 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7401 u32 num_dumped_dwords,
7402 u32 *results_buf_size)
7404 return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
/* Prints parsed IGU FIFO results into the caller's buffer; the parser's
 * size output goes to a throwaway local.
 */
7407 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7409 u32 num_dumped_dwords,
7412 u32 parsed_buf_size;
7414 return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
/* Computes the buffer size required for printed protection-override
 * results by running the parser with a NULL output buffer.
 */
7418 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7420 u32 num_dumped_dwords,
7421 u32 *results_buf_size)
7423 return qed_parse_protection_override_dump(dump_buf,
7424 NULL, results_buf_size);
/* Prints parsed protection-override results into the caller's buffer;
 * the parser's size output goes to a throwaway local.
 */
7427 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7429 u32 num_dumped_dwords,
7432 u32 parsed_buf_size;
7434 return qed_parse_protection_override_dump(dump_buf,
/* Computes the buffer size required for printed FW Asserts results by
 * running the parser with a NULL output buffer.
 */
7439 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7441 u32 num_dumped_dwords,
7442 u32 *results_buf_size)
7444 return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
/* Prints parsed FW Asserts results into the caller's buffer; the
 * parser's size output goes to a throwaway local.
 */
7447 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7449 u32 num_dumped_dwords,
7452 u32 parsed_buf_size;
7454 return qed_parse_fw_asserts_dump(dump_buf,
7455 results_buf, &parsed_buf_size);
/* Decodes an attention (interrupt/parity) block result: for every dumped
 * status register, walks the register's attention-bit mapping and logs a
 * line per asserted status bit, resolving the bit's name through the
 * attention-name-offsets and parsing-strings debug arrays. Requires the
 * ATTN_INDEXES, ATTN_NAME_OFFSETS and PARSING_STRINGS arrays to have
 * been registered via qed_dbg_user_set_bin_ptr().
 */
7458 enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7459 struct dbg_attn_block_result *results)
7461 struct user_dbg_array *block_attn, *pstrings;
7462 const u32 *block_attn_name_offsets;
7463 enum dbg_attn_type attn_type;
7464 const char *block_name;
7467 num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7468 attn_type = (enum dbg_attn_type)
7469 GET_FIELD(results->data,
7470 DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7471 block_name = s_block_info_arr[results->block_id].name;
/* All three debug arrays must be present before any lookup. */
7473 if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7474 !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7475 !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7476 return DBG_STATUS_DBG_ARRAY_NOT_SET;
7478 block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
7479 block_attn_name_offsets = &block_attn->ptr[results->names_offset];
7481 /* Go over registers with a non-zero attention status */
7482 for (i = 0; i < num_regs; i++) {
7483 struct dbg_attn_bit_mapping *bit_mapping;
7484 struct dbg_attn_reg_result *reg_result;
7485 u8 num_reg_attn, bit_idx = 0;
7487 reg_result = &results->reg_results[i];
7488 num_reg_attn = GET_FIELD(reg_result->data,
7489 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7490 block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
7491 bit_mapping = &((struct dbg_attn_bit_mapping *)
7492 block_attn->ptr)[reg_result->block_attn_offset];
7494 pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
7496 /* Go over attention status bits */
7497 for (j = 0; j < num_reg_attn; j++) {
7498 u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
7499 DBG_ATTN_BIT_MAPPING_VAL);
7500 const char *attn_name, *attn_type_str, *masked_str;
7501 u32 attn_name_offset, sts_addr;
7503 /* Check if bit mask should be advanced (due to unused
7506 if (GET_FIELD(bit_mapping[j].data,
7507 DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
/* Mapping entry encodes a run of unused bits to skip. */
7508 bit_idx += (u8)attn_idx_val;
7512 /* Check current bit index */
7513 if (!(reg_result->sts_val & BIT(bit_idx))) {
7518 /* Find attention name */
7520 block_attn_name_offsets[attn_idx_val];
7521 attn_name = &((const char *)
7522 pstrings->ptr)[attn_name_offset];
7523 attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
7524 "Interrupt" : "Parity";
7525 masked_str = reg_result->mask_val & BIT(bit_idx) ?
7527 sts_addr = GET_FIELD(reg_result->data,
7528 DBG_ATTN_REG_RESULT_STS_ADDRESS);
7530 "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7531 block_name, attn_type_str, attn_name,
7532 sts_addr, bit_idx, masked_str);
7538 return DBG_STATUS_OK;
7541 /* Wrapper for unifying the idle_chk and mcp_trace api */
/* Adapts qed_print_idle_chk_results() to the common print_results
 * signature used by qed_features_lookup by discarding the error/warning
 * counts. NOTE(review): the local is spelled "num_warnnings" (sic);
 * left as-is since its other use is outside this view.
 */
7542 static enum dbg_status
7543 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7545 u32 num_dumped_dwords,
7548 u32 num_errors, num_warnnings;
7550 return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7551 results_buf, &num_errors,
7555 /* Feature meta data lookup table */
/* One entry per debug feature, in enum qed_dbg_features order:
 * grc, idle_chk, mcp_trace, reg_fifo, igu_fifo, protection_override,
 * fw_asserts. Each entry bundles the feature's size query, dump,
 * optional result printer and optional results-buffer-size callbacks
 * (NULL where a feature has no formatting support, e.g. grc).
 */
7558 enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7559 struct qed_ptt *p_ptt, u32 *size);
7560 enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7561 struct qed_ptt *p_ptt, u32 *dump_buf,
7562 u32 buf_size, u32 *dumped_dwords);
7563 enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7564 u32 *dump_buf, u32 num_dumped_dwords,
7566 enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7568 u32 num_dumped_dwords,
7569 u32 *results_buf_size);
7570 } qed_features_lookup[] = {
7572 "grc", qed_dbg_grc_get_dump_buf_size,
7573 qed_dbg_grc_dump, NULL, NULL}, {
7575 qed_dbg_idle_chk_get_dump_buf_size,
7576 qed_dbg_idle_chk_dump,
7577 qed_print_idle_chk_results_wrapper,
7578 qed_get_idle_chk_results_buf_size}, {
7580 qed_dbg_mcp_trace_get_dump_buf_size,
7581 qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7582 qed_get_mcp_trace_results_buf_size}, {
7584 qed_dbg_reg_fifo_get_dump_buf_size,
7585 qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7586 qed_get_reg_fifo_results_buf_size}, {
7588 qed_dbg_igu_fifo_get_dump_buf_size,
7589 qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7590 qed_get_igu_fifo_results_buf_size}, {
7591 "protection_override",
7592 qed_dbg_protection_override_get_dump_buf_size,
7593 qed_dbg_protection_override_dump,
7594 qed_print_protection_override_results,
7595 qed_get_protection_override_results_buf_size}, {
7597 qed_dbg_fw_asserts_get_dump_buf_size,
7598 qed_dbg_fw_asserts_dump,
7599 qed_print_fw_asserts_results,
7600 qed_get_fw_asserts_results_buf_size},};
/* Print a formatted feature text buffer to the kernel log in 80-character
 * slices: pr_notice() opens the dump, pr_cont() appends the remaining
 * slices so the output reads as one stream. The "%.*s" precision bounds
 * each read to 'precision' bytes of p_text_buf.
 * NOTE(review): original lines 7605-7608 are not visible in this listing;
 * presumably a NULL/empty check on p_text_buf - confirm against the full
 * file.
 */
7602 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7604 u32 i, precision = 80;
7609 pr_notice("\n%.*s", precision, p_text_buf);
7610 for (i = precision; i < text_size; i += precision)
7611 pr_cont("%.*s", precision, p_text_buf + i);
/* Minimum accepted size (bytes) of a formatted results buffer. */
7615 #define QED_RESULTS_BUF_MIN_SIZE 16
7616 /* Generic function for decoding debug feature info */
/* format_feature() - decode the previously collected binary dump of
 * 'feature_idx' into printable text, optionally print it to the log, and
 * swap the feature's dump_buf from binary data to the text buffer.
 * On success the old binary buffer is vfree()d and feature->dump_buf,
 * buf_size and dumped_dwords all describe the formatted text.
 * Returns DBG_STATUS_OK (including when no parser exists) or a
 * DBG_STATUS_* error.
 * NOTE(review): this listing omits several lines (e.g. 7635-7636,
 * 7650-7652, 7658-7661); error paths presumably release text_buf -
 * confirm against the complete file.
 */
7617 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7618 enum qed_dbg_features feature_idx)
7620 struct qed_dbg_feature *feature =
7621 &p_hwfn->cdev->dbg_params.features[feature_idx];
7622 u32 text_size_bytes, null_char_pos, i;
7626 /* Check if feature supports formatting capability */
7627 if (!qed_features_lookup[feature_idx].results_buf_size)
7628 return DBG_STATUS_OK;
7630 /* Obtain size of formatted output */
7631 rc = qed_features_lookup[feature_idx].
7632 results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
7633 feature->dumped_dwords, &text_size_bytes);
7634 if (rc != DBG_STATUS_OK)
/* Remember where the terminating NUL will land before rounding the
 * allocation up to a whole dword.
 */
7637 /* Make sure that the allocated size is a multiple of dword (4 bytes) */
7638 null_char_pos = text_size_bytes - 1;
7639 text_size_bytes = (text_size_bytes + 3) & ~0x3;
7641 if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7642 DP_NOTICE(p_hwfn->cdev,
7643 "formatted size of feature was too small %d. Aborting\n",
7645 return DBG_STATUS_INVALID_ARGS;
7648 /* Allocate temp text buf */
7649 text_buf = vzalloc(text_size_bytes);
7651 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7653 /* Decode feature opcodes to string on temp buf */
7654 rc = qed_features_lookup[feature_idx].
7655 print_results(p_hwfn, (u32 *)feature->dump_buf,
7656 feature->dumped_dwords, text_buf);
7657 if (rc != DBG_STATUS_OK) {
7662 /* Replace the original null character with a '\n' character.
7663 * The bytes that were added as a result of the dword alignment are also
7664 * padded with '\n' characters.
7666 for (i = null_char_pos; i < text_size_bytes; i++)
7669 /* Dump printable feature to log */
7670 if (p_hwfn->cdev->dbg_params.print_data)
7671 qed_dbg_print_feature(text_buf, text_size_bytes);
/* Ownership transfer: the feature now owns text_buf; the old binary dump
 * buffer is released here.
 */
7673 /* Free the old dump_buf and point the dump_buf to the newly allocated
7674 * and formatted text buffer.
7676 vfree(feature->dump_buf);
7677 feature->dump_buf = text_buf;
7678 feature->buf_size = text_size_bytes;
7679 feature->dumped_dwords = text_size_bytes / 4;
7683 /* Generic function for performing the dump of a debug feature. */
/* qed_dbg_dump() - size, allocate and collect the binary dump for
 * 'feature_idx' into feature->dump_buf, then format it to text via
 * format_feature(). Any previously allocated dump_buf is released first.
 * Returns DBG_STATUS_OK or a DBG_STATUS_* error; an MFW-unresponsive
 * NVRAM failure is deliberately treated as success (binary-only dump).
 */
7684 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
7685 struct qed_ptt *p_ptt,
7686 enum qed_dbg_features feature_idx)
7688 struct qed_dbg_feature *feature =
7689 &p_hwfn->cdev->dbg_params.features[feature_idx];
7690 u32 buf_size_dwords;
7693 DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
7694 qed_features_lookup[feature_idx].name);
7696 /* Dump_buf was already allocated need to free (this can happen if dump
7697 * was called but file was never read).
7698 * We can't use the buffer as is since size may have changed.
7700 if (feature->dump_buf) {
7701 vfree(feature->dump_buf);
7702 feature->dump_buf = NULL;
7705 /* Get buffer size from hsi, allocate accordingly, and perform the
7708 rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
/* NVRAM_GET_IMAGE_FAILED is tolerated here so the raw dump can still be
 * collected below even when the MFW cannot be queried.
 */
7710 if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7712 feature->buf_size = buf_size_dwords * sizeof(u32);
7713 feature->dump_buf = vmalloc(feature->buf_size);
7714 if (!feature->dump_buf)
7715 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7717 rc = qed_features_lookup[feature_idx].
7718 perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
7719 feature->buf_size / sizeof(u32),
7720 &feature->dumped_dwords);
7722 /* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
7723 * In this case the buffer holds valid binary data, but we won't be able
7724 * to parse it (since parsing relies on data in NVRAM which is only
7725 * accessible when MFW is responsive). skip the formatting but return
7726 * success so that binary data is provided.
7728 if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7729 return DBG_STATUS_OK;
7731 if (rc != DBG_STATUS_OK)
7735 rc = format_feature(p_hwfn, feature_idx);
/* debugfs entry point: dump the GRC feature into 'buffer'; the dumped
 * length in bytes is reported via 'num_dumped_bytes'. Propagates
 * qed_dbg_feature()'s return value.
 */
7739 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7741 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
/* Required buffer size (bytes) for a GRC dump; see qed_dbg_feature_size(). */
7744 int qed_dbg_grc_size(struct qed_dev *cdev)
7746 return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
/* debugfs entry point: dump the idle-check feature into 'buffer';
 * propagates qed_dbg_feature()'s return value.
 */
7749 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7751 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
/* Required buffer size (bytes) for an idle-check dump. */
7755 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7757 return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
/* debugfs entry point: dump the register-FIFO feature into 'buffer';
 * propagates qed_dbg_feature()'s return value.
 */
7760 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7762 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
/* Required buffer size (bytes) for a register-FIFO dump. */
7766 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7768 return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
/* debugfs entry point: dump the IGU-FIFO feature into 'buffer';
 * propagates qed_dbg_feature()'s return value.
 */
7771 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7773 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
/* Required buffer size (bytes) for an IGU-FIFO dump. */
7777 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7779 return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
/* Query the MFW for the stored length (bytes) of NVM image 'image_id' and
 * return it via '*length'.
 * NOTE(review): lines 7786-7792 are missing from this listing; presumably
 * 'rc' is checked before '*length' is written - confirm against the full
 * file.
 */
7782 int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
7783 enum qed_nvm_images image_id, u32 *length)
7785 struct qed_nvm_image_att image_att;
7789 rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
7793 *length = image_att.length;
/* Read NVM image 'image_id' from the MFW into 'buffer', rounding the
 * length up to a whole dword, and byte-swap each dword to big-endian for
 * all images except NVM_META. 'num_dumped_bytes' receives the rounded
 * length (0 on failure paths before the copy).
 * NOTE(review): arithmetic on a void* ('buffer + i') relies on the GCC
 * extension used throughout the kernel (void* arithmetic in byte units).
 */
7798 int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
7799 u32 *num_dumped_bytes, enum qed_nvm_images image_id)
7801 struct qed_hwfn *p_hwfn =
7802 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
7807 *num_dumped_bytes = 0;
7808 rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
7812 DP_NOTICE(p_hwfn->cdev,
7813 "Collecting a debug feature [\"nvram image %d\"]\n",
7816 len_rounded = roundup(len_rounded, sizeof(u32));
7817 rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
7821 /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
7822 if (image_id != QED_NVM_IMAGE_NVM_META)
7823 for (i = 0; i < len_rounded; i += 4) {
7824 val = cpu_to_be32(*(u32 *)(buffer + i));
7825 *(u32 *)(buffer + i) = val;
7828 *num_dumped_bytes = len_rounded;
/* debugfs entry point: dump the protection-override feature into 'buffer';
 * propagates qed_dbg_feature()'s return value.
 */
7833 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
7834 u32 *num_dumped_bytes)
7836 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
/* Required buffer size (bytes) for a protection-override dump. */
7840 int qed_dbg_protection_override_size(struct qed_dev *cdev)
7842 return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
/* debugfs entry point: dump the FW-asserts feature into 'buffer';
 * propagates qed_dbg_feature()'s return value.
 */
7845 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
7846 u32 *num_dumped_bytes)
7848 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
/* Required buffer size (bytes) for a FW-asserts dump. */
7852 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
7854 return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
/* debugfs entry point: dump the MCP-trace feature into 'buffer';
 * propagates qed_dbg_feature()'s return value.
 */
7857 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
7858 u32 *num_dumped_bytes)
7860 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
/* Required buffer size (bytes) for an MCP-trace dump. */
7864 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
7866 return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
7869 /* Defines the amount of bytes allocated for recording the length of debugfs
7872 #define REGDUMP_HEADER_SIZE sizeof(u32)
7873 #define REGDUMP_HEADER_FEATURE_SHIFT 24
7874 #define REGDUMP_HEADER_ENGINE_SHIFT 31
7875 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
7876 enum debug_print_features {
7882 PROTECTION_OVERRIDE = 5,
7891 static u32 qed_calc_regdump_header(enum debug_print_features feature,
7892 int engine, u32 feature_size, u8 omit_engine)
7894 /* Insert the engine, feature and mode inside the header and combine it
7895 * with feature size.
7897 return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
7898 (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
7899 (engine << REGDUMP_HEADER_ENGINE_SHIFT);
/* Collect every debug feature for all engines into one flat 'buffer':
 * per-engine (two idle_chk passes, reg_fifo, igu_fifo, protection_override,
 * fw_asserts, then GRC last), followed by engine-independent mcp_trace and
 * the three NVM images. Each feature is preceded by a header dword built
 * by qed_calc_regdump_header(). Individual feature failures are logged and
 * skipped rather than aborting the whole collection.
 * NOTE(review): the 'if (!rc)' / closing-brace lines around each feature
 * are missing from this listing - confirm control flow against the full
 * file.
 */
7902 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
7904 u8 cur_engine, omit_engine = 0, org_engine;
7905 u32 offset = 0, feature_size;
/* Single-engine devices omit the engine bit from headers. */
7908 if (cdev->num_hwfns == 1)
7911 org_engine = qed_get_debug_engine(cdev);
7912 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
7913 /* Collect idle_chks and grcDump for each hw function */
7914 DP_VERBOSE(cdev, QED_MSG_DEBUG,
7915 "obtaining idle_chk and grcdump for current engine\n");
7916 qed_set_debug_engine(cdev, cur_engine);
7918 /* First idle_chk */
7919 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7920 REGDUMP_HEADER_SIZE, &feature_size);
7922 *(u32 *)((u8 *)buffer + offset) =
7923 qed_calc_regdump_header(IDLE_CHK, cur_engine,
7924 feature_size, omit_engine);
7925 offset += (feature_size + REGDUMP_HEADER_SIZE);
7927 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
/* Second idle_chk pass is intentional (its size is also counted twice in
 * qed_dbg_all_data_size()); presumably to expose transient conditions -
 * TODO confirm rationale.
 */
7930 /* Second idle_chk */
7931 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7932 REGDUMP_HEADER_SIZE, &feature_size);
7934 *(u32 *)((u8 *)buffer + offset) =
7935 qed_calc_regdump_header(IDLE_CHK, cur_engine,
7936 feature_size, omit_engine);
7937 offset += (feature_size + REGDUMP_HEADER_SIZE);
7939 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7943 rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
7944 REGDUMP_HEADER_SIZE, &feature_size);
7946 *(u32 *)((u8 *)buffer + offset) =
7947 qed_calc_regdump_header(REG_FIFO, cur_engine,
7948 feature_size, omit_engine);
7949 offset += (feature_size + REGDUMP_HEADER_SIZE);
7951 DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
7955 rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
7956 REGDUMP_HEADER_SIZE, &feature_size);
7958 *(u32 *)((u8 *)buffer + offset) =
7959 qed_calc_regdump_header(IGU_FIFO, cur_engine,
7960 feature_size, omit_engine);
7961 offset += (feature_size + REGDUMP_HEADER_SIZE);
7963 DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
7966 /* protection_override dump */
7967 rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
7968 REGDUMP_HEADER_SIZE,
7971 *(u32 *)((u8 *)buffer + offset) =
7972 qed_calc_regdump_header(PROTECTION_OVERRIDE,
7974 feature_size, omit_engine);
7975 offset += (feature_size + REGDUMP_HEADER_SIZE);
7978 "qed_dbg_protection_override failed. rc = %d\n",
7982 /* fw_asserts dump */
7983 rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
7984 REGDUMP_HEADER_SIZE, &feature_size);
7986 *(u32 *)((u8 *)buffer + offset) =
7987 qed_calc_regdump_header(FW_ASSERTS, cur_engine,
7988 feature_size, omit_engine);
7989 offset += (feature_size + REGDUMP_HEADER_SIZE);
7991 DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
7995 /* GRC dump - must be last because when mcp stuck it will
7996 * clutter idle_chk, reg_fifo, ...
7998 rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
7999 REGDUMP_HEADER_SIZE, &feature_size);
8001 *(u32 *)((u8 *)buffer + offset) =
8002 qed_calc_regdump_header(GRC_DUMP, cur_engine,
8003 feature_size, omit_engine);
8004 offset += (feature_size + REGDUMP_HEADER_SIZE);
8006 DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
/* Engine-independent features below: restore the caller's engine first. */
8010 qed_set_debug_engine(cdev, org_engine);
8012 rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
8013 REGDUMP_HEADER_SIZE, &feature_size);
8015 *(u32 *)((u8 *)buffer + offset) =
8016 qed_calc_regdump_header(MCP_TRACE, cur_engine,
8017 feature_size, omit_engine);
8018 offset += (feature_size + REGDUMP_HEADER_SIZE);
8020 DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
/* NVM images: -ENOENT (image absent) is tolerated silently. */
8024 rc = qed_dbg_nvm_image(cdev,
8025 (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8026 &feature_size, QED_NVM_IMAGE_NVM_CFG1);
8028 *(u32 *)((u8 *)buffer + offset) =
8029 qed_calc_regdump_header(NVM_CFG1, cur_engine,
8030 feature_size, omit_engine);
8031 offset += (feature_size + REGDUMP_HEADER_SIZE);
8032 } else if (rc != -ENOENT) {
8034 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8035 QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
8039 rc = qed_dbg_nvm_image(cdev,
8040 (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8041 &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
8043 *(u32 *)((u8 *)buffer + offset) =
8044 qed_calc_regdump_header(DEFAULT_CFG, cur_engine,
8045 feature_size, omit_engine);
8046 offset += (feature_size + REGDUMP_HEADER_SIZE);
8047 } else if (rc != -ENOENT) {
8049 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8050 QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
8055 rc = qed_dbg_nvm_image(cdev,
8056 (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8057 &feature_size, QED_NVM_IMAGE_NVM_META);
8059 *(u32 *)((u8 *)buffer + offset) =
8060 qed_calc_regdump_header(NVM_META, cur_engine,
8061 feature_size, omit_engine);
8062 offset += (feature_size + REGDUMP_HEADER_SIZE);
8063 } else if (rc != -ENOENT) {
8065 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8066 QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
/* Compute the worst-case buffer size (bytes) needed by qed_dbg_all_data():
 * one REGDUMP_HEADER_SIZE header plus the feature size for every feature,
 * per engine where applicable. idle_chk is counted twice to match the two
 * idle_chk passes performed by qed_dbg_all_data().
 * NOTE(review): qed_dbg_nvm_image_length() return values are not checked
 * here; 'image_len' presumably stays 0 on failure - confirm against the
 * full file.
 */
8072 int qed_dbg_all_data_size(struct qed_dev *cdev)
8074 struct qed_hwfn *p_hwfn =
8075 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
8076 u32 regs_len = 0, image_len = 0;
8077 u8 cur_engine, org_engine;
8079 org_engine = qed_get_debug_engine(cdev);
8080 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8081 /* Engine specific */
8082 DP_VERBOSE(cdev, QED_MSG_DEBUG,
8083 "calculating idle_chk and grcdump register length for current engine\n");
8084 qed_set_debug_engine(cdev, cur_engine)
8085 regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8086 REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8087 REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
8088 REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
8089 REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
8090 REGDUMP_HEADER_SIZE +
8091 qed_dbg_protection_override_size(cdev) +
8092 REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
8095 qed_set_debug_engine(cdev, org_engine);
8098 regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
8099 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
8101 regs_len += REGDUMP_HEADER_SIZE + image_len;
8102 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
8104 regs_len += REGDUMP_HEADER_SIZE + image_len;
8105 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
8107 regs_len += REGDUMP_HEADER_SIZE + image_len;
/* Collect a single debug feature for the currently selected engine and
 * copy the result into the caller-supplied 'buffer'. Acquires a PTT
 * window for the duration of the dump; 'num_dumped_bytes' receives the
 * output length (0 on failure). The caller must size 'buffer' via
 * qed_dbg_feature_size() beforehand - no bound is checked here.
 */
8112 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
8113 enum qed_dbg_features feature, u32 *num_dumped_bytes)
8115 struct qed_hwfn *p_hwfn =
8116 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
8117 struct qed_dbg_feature *qed_feature =
8118 &cdev->dbg_params.features[feature];
8119 enum dbg_status dbg_rc;
8120 struct qed_ptt *p_ptt;
8124 p_ptt = qed_ptt_acquire(p_hwfn);
8129 dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
8130 if (dbg_rc != DBG_STATUS_OK) {
8131 DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
8132 qed_dbg_get_status_str(dbg_rc));
8133 *num_dumped_bytes = 0;
8138 DP_VERBOSE(cdev, QED_MSG_DEBUG,
8139 "copying debugfs feature to external buffer\n");
8140 memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
8141 *num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
/* PTT released on all paths (error path goes through the same label;
 * lines 8134-8137 are missing from this listing - confirm).
 */
8145 qed_ptt_release(p_hwfn, p_ptt);
/* Query the HSI for the buffer size (bytes) needed to dump 'feature',
 * cache it in the feature descriptor, and return it. On get_size()
 * failure the size reported is 0.
 * NOTE(review): buf_size is u32 but the return type is int - a dump
 * larger than INT_MAX would be misreported; harmless in practice but
 * worth confirming.
 */
8149 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8151 struct qed_hwfn *p_hwfn =
8152 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
8153 struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8154 struct qed_dbg_feature *qed_feature =
8155 &cdev->dbg_params.features[feature];
8156 u32 buf_size_dwords;
8162 rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8164 if (rc != DBG_STATUS_OK)
8165 buf_size_dwords = 0;
8167 qed_ptt_release(p_hwfn, p_ptt);
8168 qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8169 return qed_feature->buf_size;
/* Return the engine (hwfn index) currently selected for debug dumps. */
8172 u8 qed_get_debug_engine(struct qed_dev *cdev)
8174 return cdev->dbg_params.engine_for_debug;
/* Select which engine (hwfn index) subsequent debug dumps operate on.
 * NOTE(review): engine_number is not range-checked against num_hwfns here;
 * callers in this file only pass loop indices < num_hwfns.
 */
8177 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8179 DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8181 cdev->dbg_params.engine_for_debug = engine_number;
/* Point the debug and debug-user engines at the debug-values section of
 * the loaded firmware blob.
 * NOTE(review): the offset read from the blob's first dword is used
 * unvalidated against firmware->size, and const is cast away for the
 * set_bin_ptr() calls - both inherited from the firmware-loading contract;
 * confirm the APIs never write through the pointer.
 */
8184 void qed_dbg_pf_init(struct qed_dev *cdev)
8186 const u8 *dbg_values;
8188 /* Debug values are after init values.
8189 * The offset is the first dword of the file.
8191 dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8192 qed_dbg_set_bin_ptr((u8 *)dbg_values);
8193 qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
8196 void qed_dbg_pf_exit(struct qed_dev *cdev)
8198 struct qed_dbg_feature *feature = NULL;
8199 enum qed_dbg_features feature_idx;
8201 /* Debug features' buffers may be allocated if debug feature was used
8202 * but dump wasn't called.
8204 for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8205 feature = &cdev->dbg_params.features[feature_idx];
8206 if (feature->dump_buf) {
8207 vfree(feature->dump_buf);
8208 feature->dump_buf = NULL;