// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018-2019 Hisilicon Limited. */

#include <linux/device.h>

#include "hclge_debugfs.h"
#include "hclge_main.h"

static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
	{ .reg_type = "bios common",
	  .dfx_msg = &hclge_dbg_bios_common_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
		       .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
		       .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
	{ .reg_type = "ssu",
	  .dfx_msg = &hclge_dbg_ssu_reg_0[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
		       .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
	{ .reg_type = "ssu",
	  .dfx_msg = &hclge_dbg_ssu_reg_1[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
		       .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
	{ .reg_type = "ssu",
	  .dfx_msg = &hclge_dbg_ssu_reg_2[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
		       .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
	{ .reg_type = "igu egu",
	  .dfx_msg = &hclge_dbg_igu_egu_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
		       .offset = HCLGE_DBG_DFX_IGU_OFFSET,
		       .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
	{ .reg_type = "rpu",
	  .dfx_msg = &hclge_dbg_rpu_reg_0[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
		       .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
	{ .reg_type = "rpu",
	  .dfx_msg = &hclge_dbg_rpu_reg_1[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
		       .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
	{ .reg_type = "ncsi",
	  .dfx_msg = &hclge_dbg_ncsi_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
		       .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
		       .cmd = HCLGE_OPC_DFX_NCSI_REG } },
	{ .reg_type = "rtc",
	  .dfx_msg = &hclge_dbg_rtc_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
		       .offset = HCLGE_DBG_DFX_RTC_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RTC_REG } },
	{ .reg_type = "ppp",
	  .dfx_msg = &hclge_dbg_ppp_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
		       .offset = HCLGE_DBG_DFX_PPP_OFFSET,
		       .cmd = HCLGE_OPC_DFX_PPP_REG } },
	{ .reg_type = "rcb",
	  .dfx_msg = &hclge_dbg_rcb_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
		       .offset = HCLGE_DBG_DFX_RCB_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RCB_REG } },
	{ .reg_type = "tqp",
	  .dfx_msg = &hclge_dbg_tqp_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
		       .offset = HCLGE_DBG_DFX_TQP_OFFSET,
		       .cmd = HCLGE_OPC_DFX_TQP_REG } },
};
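/* Query how many command descriptors (BDs) the firmware needs for a given
 * DFX register block: the query returns one BD-count entry per block, and
 * @offset selects which entry of the returned descriptors to read.
 */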
static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
{
	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
	int entries_per_desc, index, ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get dfx bdnum fail, ret = %d\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	index = offset % entries_per_desc;
	return le32_to_cpu(desc[offset / entries_per_desc].data[index]);
}
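/* Read @bd_num chained descriptors for opcode @cmd, starting at @index.
 * Every descriptor except the last carries the NEXT flag so the firmware
 * treats them as one multi-BD request.
 */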
static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
			      struct hclge_desc *desc_src,
			      int index, int bd_num,
			      enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int ret, i;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	desc->data[0] = cpu_to_le32(index);

	for (i = 1; i < bd_num; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"cmd(0x%x) send fail, ret = %d\n", cmd, ret);
	return ret;
}
static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
				      const struct hclge_dbg_reg_type_info *reg_info,
				      const char *cmd_buf)
{
	const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET];
	const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
	const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
	struct hclge_desc *desc_src;
	struct hclge_desc *desc;
	int entries_per_desc;

	ret = kstrtouint(s, 0, &index);
	index = (ret != 0) ? 0 : index;

	bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset);
		dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n",
			reg_msg->offset, bd_num);

	buf_len = sizeof(struct hclge_desc) * bd_num;
	desc_src = kzalloc(buf_len, GFP_KERNEL);

	ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);

	entries_per_desc = ARRAY_SIZE(desc->data);
	min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num);

	for (i = 0; i < min_num; i++) {
		if (i > 0 && (i % entries_per_desc) == 0)
		if (dfx_message->flag)
			dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
				 dfx_message->message,
				 le32_to_cpu(desc->data[i % entries_per_desc]));
static void hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev)
	struct hclge_config_mac_mode_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev,
			"failed to dump mac enable status, ret = %d\n", ret);

	req = (struct hclge_config_mac_mode_cmd *)desc.data;
	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);

	dev_info(&hdev->pdev->dev, "config_mac_trans_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
	dev_info(&hdev->pdev->dev, "config_mac_rcv_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
	dev_info(&hdev->pdev->dev, "config_pad_trans_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
	dev_info(&hdev->pdev->dev, "config_pad_rcv_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
	dev_info(&hdev->pdev->dev, "config_1588_trans_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
	dev_info(&hdev->pdev->dev, "config_1588_rcv_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
	dev_info(&hdev->pdev->dev, "config_mac_app_loop_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
	dev_info(&hdev->pdev->dev, "config_mac_line_loop_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
	dev_info(&hdev->pdev->dev, "config_mac_fcs_tx_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
	dev_info(&hdev->pdev->dev, "config_mac_rx_oversize_truncate_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
	dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_strip_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
	dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
	dev_info(&hdev->pdev->dev, "config_mac_tx_under_min_err_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
	dev_info(&hdev->pdev->dev, "config_mac_tx_oversize_truncate_en: %#x\n",
		 hnae3_get_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
static void hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev)
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev,
			"failed to dump mac frame size, ret = %d\n", ret);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;

	dev_info(&hdev->pdev->dev, "max_frame_size: %u\n",
		 le16_to_cpu(req->max_frm_size));
	dev_info(&hdev->pdev->dev, "min_frame_size: %u\n", req->min_frm_size);
static void hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev)
#define HCLGE_MAC_SPEED_SHIFT	0
#define HCLGE_MAC_SPEED_MASK	GENMASK(5, 0)
#define HCLGE_MAC_DUPLEX_SHIFT	7
	struct hclge_config_mac_speed_dup_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev,
			"failed to dump mac speed duplex, ret = %d\n", ret);

	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;

	dev_info(&hdev->pdev->dev, "speed: %#lx\n",
		 hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
				 HCLGE_MAC_SPEED_SHIFT));
	dev_info(&hdev->pdev->dev, "duplex: %#x\n",
		 hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT));
static void hclge_dbg_dump_mac(struct hclge_dev *hdev)
	hclge_dbg_dump_mac_enable_status(hdev);

	hclge_dbg_dump_mac_frame_size(hdev);

	hclge_dbg_dump_mac_speed_duplex(hdev);
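/* cmd_buf is expected to hold six ids in the order
 * "<port_id> <pri_id> <pg_id> <rq_id> <nq_id> <qset_id>" (see the sscanf
 * below); each id selects the object whose DCB DFX status is dumped.
 */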
static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
	struct device *dev = &hdev->pdev->dev;
	struct hclge_dbg_bitmap_cmd *bitmap;
	enum hclge_opcode_type cmd;
	int rq_id, pri_id, qset_id;
	int port_id, nq_id, pg_id;
	struct hclge_desc desc[2];

	cnt = sscanf(cmd_buf, "%i %i %i %i %i %i",
		     &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id);
		dev_err(&hdev->pdev->dev,
			"dump dcb: bad command parameter, cnt=%d\n", cnt);

	cmd = HCLGE_OPC_QSET_DFX_STS;
	ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1, cmd);
		goto err_dcb_cmd_send;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1);
	dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2);
	dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3);

	cmd = HCLGE_OPC_PRI_DFX_STS;
	ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, cmd);
		goto err_dcb_cmd_send;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1);
	dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2);

	cmd = HCLGE_OPC_PG_DFX_STS;
	ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, cmd);
		goto err_dcb_cmd_send;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1);
	dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2);

	cmd = HCLGE_OPC_PORT_DFX_STS;
	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
		goto err_dcb_cmd_send;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1);

	cmd = HCLGE_OPC_SCH_NQ_CNT;
	ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd);
		goto err_dcb_cmd_send;

	dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));

	cmd = HCLGE_OPC_SCH_RQ_CNT;
	ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd);
		goto err_dcb_cmd_send;

	dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));

	cmd = HCLGE_OPC_TM_INTERNAL_STS;
	ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, cmd);
		goto err_dcb_cmd_send;

	dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1]));
	dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2]));
	dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n",
		 le32_to_cpu(desc[0].data[3]));
	dev_info(dev, "tx_private_waterline: 0x%x\n",
		 le32_to_cpu(desc[0].data[4]));
	dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5]));
	dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0]));
	dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1]));

	cmd = HCLGE_OPC_TM_INTERNAL_CNT;
	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
		goto err_dcb_cmd_send;

	dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1]));
	dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2]));

	cmd = HCLGE_OPC_TM_INTERNAL_STS_1;
	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
		goto err_dcb_cmd_send;

	dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1]));
	dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2]));
	dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3]));
	dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n",
		 le32_to_cpu(desc[0].data[4]));
	dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n",
		 le32_to_cpu(desc[0].data[5]));

	return;

err_dcb_cmd_send:
	dev_err(&hdev->pdev->dev,
		"failed to dump dcb dfx, cmd = %#x, ret = %d\n",
		cmd, ret);
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
	const struct hclge_dbg_reg_type_info *reg_info;
	bool has_dump = false;

	for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
		reg_info = &hclge_dbg_reg_info[i];
		if (!strncmp(cmd_buf, reg_info->reg_type,
			     strlen(reg_info->reg_type))) {
			hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf);

	if (strncmp(cmd_buf, "mac", strlen("mac")) == 0) {
		hclge_dbg_dump_mac(hdev);

	if (strncmp(cmd_buf, "dcb", 3) == 0) {
		hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);

		dev_info(&hdev->pdev->dev, "unknown command\n");
static void hclge_print_tc_info(struct hclge_dev *hdev, bool flag, int index)
	if (flag)
		dev_info(&hdev->pdev->dev, "tc(%d): no sp mode weight: %u\n",
			 index, hdev->tm_info.pg_info[0].tc_dwrr[index]);
	else
		dev_info(&hdev->pdev->dev, "tc(%d): sp mode\n", index);

static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports tc\n");

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret);

	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	dev_info(&hdev->pdev->dev, "dump tc: %u tc enabled\n",
		 hdev->tm_info.num_tc);
	dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
		 ets_weight->weight_offset);

	for (i = 0; i < HNAE3_MAX_TC; i++)
		hclge_print_tc_info(hdev, ets_weight->tc_weight[i], i);
static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;

	cmd = HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_pg_cmd_send;

	pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
	dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
		 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));

	cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_pg_cmd_send;

	pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
	dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
		 le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
	dev_info(&hdev->pdev->dev, "PG_P flag: %#x\n", pg_shap_cfg_cmd->flag);
	dev_info(&hdev->pdev->dev, "PG_P pg_rate: %u(Mbps)\n",
		 le32_to_cpu(pg_shap_cfg_cmd->pg_rate));

	cmd = HCLGE_OPC_TM_PORT_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_pg_cmd_send;

	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
		 le32_to_cpu(port_shap_cfg_cmd->port_shapping_para));
	dev_info(&hdev->pdev->dev, "PORT flag: %#x\n", port_shap_cfg_cmd->flag);
	dev_info(&hdev->pdev->dev, "PORT port_rate: %u(Mbps)\n",
		 le32_to_cpu(port_shap_cfg_cmd->port_rate));

	cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n",
		 le32_to_cpu(desc.data[0]));

	cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n",
		 le32_to_cpu(desc.data[0]));

	cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n",
		 le32_to_cpu(desc.data[0]));

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports tm mapping\n");

	cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_pg_cmd_send;

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
		 bp_to_qs_map_cmd->tc_id);
	dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
		 bp_to_qs_map_cmd->qs_group_id);
	dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
		 le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map));

	return;

err_tm_pg_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n",
		cmd, ret);
static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_pg_to_pri_link_cmd *pg_to_pri_map;
	struct hclge_qs_to_pri_link_cmd *qs_to_pri_map;
	struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_pg_weight_cmd *pg_weight;
	struct hclge_qs_weight_cmd *qs_weight;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;

	cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_cmd_send;

	pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump tm\n");
	dev_info(&hdev->pdev->dev, "PG_TO_PRI gp_id: %u\n",
		 pg_to_pri_map->pg_id);
	dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n",
		 pg_to_pri_map->pri_bit_map);

	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_cmd_send;

	qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
		 le16_to_cpu(qs_to_pri_map->qs_id));
	dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
		 qs_to_pri_map->priority);
	dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
		 qs_to_pri_map->link_vld);

	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_cmd_send;

	nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n",
		 le16_to_cpu(nq_to_qs_map->nq_id));
	dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
		 le16_to_cpu(nq_to_qs_map->qset_id));

	cmd = HCLGE_OPC_TM_PG_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_cmd_send;

	pg_weight = (struct hclge_pg_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id);
	dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr);

	cmd = HCLGE_OPC_TM_QS_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_cmd_send;

	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "QS qs_id: %u\n",
		 le16_to_cpu(qs_weight->qs_id));
	dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);

	cmd = HCLGE_OPC_TM_PRI_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_cmd_send;

	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id);
	dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr);

	cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_cmd_send;

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
	dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
		 le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
	dev_info(&hdev->pdev->dev, "PRI_C flag: %#x\n", shap_cfg_cmd->flag);
	dev_info(&hdev->pdev->dev, "PRI_C pri_rate: %u(Mbps)\n",
		 le32_to_cpu(shap_cfg_cmd->pri_rate));

	cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_cmd_send;

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
	dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
		 le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
	dev_info(&hdev->pdev->dev, "PRI_P flag: %#x\n", shap_cfg_cmd->flag);
	dev_info(&hdev->pdev->dev, "PRI_P pri_rate: %u(Mbps)\n",
		 le32_to_cpu(shap_cfg_cmd->pri_rate));

	hclge_dbg_dump_tm_pg(hdev);

	return;

err_tm_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n",
		cmd, ret);
static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
				  const char *cmd_buf)
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
	u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_tqp_tx_queue_tc_cmd *tc;
	u16 group_id, queue_id, qset_id;
	enum hclge_opcode_type cmd;
	u8 grp_num, pri_id, tc_id;
	struct hclge_desc desc;

	ret = kstrtou16(cmd_buf, 0, &queue_id);
	queue_id = (ret != 0) ? 0 : queue_id;

	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
	nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	nq_to_qs_map->nq_id = cpu_to_le16(queue_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_map_cmd_send;
	qset_id = le16_to_cpu(nq_to_qs_map->qset_id);

	/* convert qset_id to the following format, drop the vld bit
	 *            | qs_id_h | vld | qs_id_l |
	 * qset_id:   | 15 ~ 11 | 10  |  9 ~ 0  |
	 * to
	 * qset_id:   | 15 | 14 ~ 10 |  9 ~ 0  |
	 */
	qs_id_l = hnae3_get_field(qset_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
				  HCLGE_TM_QS_ID_H_EXT_S);
	hnae3_set_field(qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
			qs_id_h);
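	/* Worked example (assuming the usual GENMASK-based definitions of the
	 * HCLGE_TM_QS_ID_* fields, i.e. qs_id_l in bits 9:0, vld in bit 10,
	 * qs_id_h in bits 15:11): a raw qset_id of 0x8c05 yields
	 * qs_id_l = 0x005 and qs_id_h = 0x11, and is rewritten as
	 * (0x11 << 10) | 0x005 = 0x4405 with the vld bit dropped.
	 */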
	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	map->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_map_cmd_send;
	pri_id = map->priority;

	cmd = HCLGE_OPC_TQP_TX_QUEUE_TC;
	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	tc->queue_id = cpu_to_le16(queue_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		goto err_tm_map_cmd_send;
	tc_id = tc->tc_id & 0x7;

	dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n");
	dev_info(&hdev->pdev->dev, "%04u     | %04u    | %02u     | %02u\n",
		 queue_id, qset_id, pri_id, tc_id);

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports tm mapping\n");

	grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
		  HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
	cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
	for (group_id = 0; group_id < grp_num; group_id++) {
		hclge_cmd_setup_basic_desc(&desc, cmd, true);
		bp_to_qs_map_cmd->tc_id = tc_id;
		bp_to_qs_map_cmd->qs_group_id = group_id;
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
			goto err_tm_map_cmd_send;

		qset_mapping[group_id] =
			le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map);

	dev_info(&hdev->pdev->dev, "index | tm bp qset mapping:\n");

	i = 0;
	for (group_id = 0; group_id < grp_num / 8; group_id++) {
		dev_info(&hdev->pdev->dev,
			 "%04d  | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
			 group_id * 256, qset_mapping[(u32)(i + 7)],
			 qset_mapping[(u32)(i + 6)], qset_mapping[(u32)(i + 5)],
			 qset_mapping[(u32)(i + 4)], qset_mapping[(u32)(i + 3)],
			 qset_mapping[(u32)(i + 2)], qset_mapping[(u32)(i + 1)],
			 qset_mapping[i]);
		i += 8;
	}

	return;

err_tm_map_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n",
		cmd, ret);
static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev,
			"failed to dump tm nodes, ret = %d\n", ret);

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;

	pos += scnprintf(buf + pos, len - pos, "       BASE_ID  MAX_NUM\n");
	pos += scnprintf(buf + pos, len - pos, "PG    %4u        %4u\n",
			 nodes->pg_base_id, nodes->pg_num);
	pos += scnprintf(buf + pos, len - pos, "PRI   %4u        %4u\n",
			 nodes->pri_base_id, nodes->pri_num);
	pos += scnprintf(buf + pos, len - pos, "QSET  %4u        %4u\n",
			 le16_to_cpu(nodes->qset_base_id),
			 le16_to_cpu(nodes->qset_num));
	pos += scnprintf(buf + pos, len - pos, "QUEUE %4u        %4u\n",
			 le16_to_cpu(nodes->queue_base_id),
			 le16_to_cpu(nodes->queue_num));
static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
	struct hclge_pri_shaper_para c_shaper_para;
	struct hclge_pri_shaper_para p_shaper_para;
	u8 pri_num, sch_mode, weight;

	ret = hclge_tm_get_pri_num(hdev, &pri_num);

	pos += scnprintf(buf + pos, len - pos,
			 "ID    MODE  DWRR  C_IR_B  C_IR_U  C_IR_S  C_BS_B  ");
	pos += scnprintf(buf + pos, len - pos,
			 "C_BS_S  C_FLAG  C_RATE(Mbps)  P_IR_B  P_IR_U  ");
	pos += scnprintf(buf + pos, len - pos,
			 "P_IR_S  P_BS_B  P_BS_S  P_FLAG  P_RATE(Mbps)\n");

	for (i = 0; i < pri_num; i++) {
		ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);

		ret = hclge_tm_get_pri_weight(hdev, i, &weight);

		ret = hclge_tm_get_pri_shaper(hdev, i,
					      HCLGE_OPC_TM_PRI_C_SHAPPING,
					      &c_shaper_para);

		ret = hclge_tm_get_pri_shaper(hdev, i,
					      HCLGE_OPC_TM_PRI_P_SHAPPING,
					      &p_shaper_para);

		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
			       "sp";

		pos += scnprintf(buf + pos, len - pos,
				 "%04u  %4s  %3u   %3u     %3u     %3u     ",
				 i, sch_mode_str, weight, c_shaper_para.ir_b,
				 c_shaper_para.ir_u, c_shaper_para.ir_s);
		pos += scnprintf(buf + pos, len - pos,
				 c_shaper_para.bs_b, c_shaper_para.bs_s,
				 c_shaper_para.flag, c_shaper_para.rate);
		pos += scnprintf(buf + pos, len - pos,
				 "%3u     %3u     %3u     %3u     %3u     ",
				 p_shaper_para.ir_b, p_shaper_para.ir_u,
				 p_shaper_para.ir_s, p_shaper_para.bs_b,
				 p_shaper_para.bs_s);
		pos += scnprintf(buf + pos, len - pos, "%1u     %6u\n",
				 p_shaper_para.flag, p_shaper_para.rate);
static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
	u8 priority, link_vld, sch_mode, weight;

	ret = hclge_tm_get_qset_num(hdev, &qset_num);

	pos = scnprintf(buf, len, "ID    MAP_PRI  LINK_VLD  MODE  DWRR\n");

	for (i = 0; i < qset_num; i++) {
		ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);

		ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);

		ret = hclge_tm_get_qset_weight(hdev, i, &weight);

		sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
			       "sp";
		pos += scnprintf(buf + pos, len - pos,
				 "%04u  %4u        %1u      %4s  %3u\n",
				 i, priority, link_vld, sch_mode_str, weight);
static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev, "dump qos pause cfg fail, ret = %d\n",
			ret);

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump qos pause cfg\n");
	dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
		 pause_param->pause_trans_gap);
	dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
		 le16_to_cpu(pause_param->pause_trans_time));
static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
	struct hclge_qos_pri_map_cmd *pri_map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev,
			"dump qos pri map fail, ret = %d\n", ret);

	pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump qos pri map\n");
	dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
	dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc);
	dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc);
	dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc);
	dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc);
	dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc);
	dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc);
	dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc);
	dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev)
	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

	dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");

	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
			 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev)
	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

	dev_info(&hdev->pdev->dev, "\n");
	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
			 le16_to_cpu(rx_buf_cmd->buf_num[i]));

	dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
		 le16_to_cpu(rx_buf_cmd->shared_buf));
static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev)
	struct hclge_rx_com_wl *rx_com_wl;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

	rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
	dev_info(&hdev->pdev->dev, "\n");
	dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
		 le16_to_cpu(rx_com_wl->com_wl.high),
		 le16_to_cpu(rx_com_wl->com_wl.low));
static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev)
	struct hclge_rx_com_wl *rx_packet_cnt;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);

	rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
	dev_info(&hdev->pdev->dev,
		 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
		 le16_to_cpu(rx_packet_cnt->com_wl.high),
		 le16_to_cpu(rx_packet_cnt->com_wl.low));
static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev)
	struct hclge_rx_priv_wl_buf *rx_priv_wl;
	struct hclge_desc desc[2];

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 2);

	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));

	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
			 i + HCLGE_TC_NUM_ONE_DESC,
			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev)
	struct hclge_rx_com_thrd *rx_com_thrd;
	struct hclge_desc desc[2];

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 2);

	dev_info(&hdev->pdev->dev, "\n");
	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));

	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
			 i + HCLGE_TC_NUM_ONE_DESC,
			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
	enum hclge_opcode_type cmd;

	cmd = HCLGE_OPC_TX_BUFF_ALLOC;
	ret = hclge_dbg_dump_tx_buf_cfg(hdev);
		goto err_qos_cmd_send;

	cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
	ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev);
		goto err_qos_cmd_send;

	cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
	ret = hclge_dbg_dump_rx_common_wl_cfg(hdev);
		goto err_qos_cmd_send;

	cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
	ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev);
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "\n");
	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports rx priv wl\n");

	cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
	ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev);
		goto err_qos_cmd_send;

	cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
	ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev);
		goto err_qos_cmd_send;

	return;

err_qos_cmd_send:
	dev_err(&hdev->pdev->dev,
		"dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret);
static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
	struct hclge_mac_ethertype_idx_rd_cmd *req0;
	char printf_buf[HCLGE_DBG_BUF_LEN];
	struct hclge_desc desc;
	u32 msg_egress_port;

	dev_info(&hdev->pdev->dev, "mng tab:\n");
	memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
	strncat(printf_buf,
		"entry|mac_addr         |mask|ether|mask|vlan|mask",
		HCLGE_DBG_BUF_LEN - 1);
	strncat(printf_buf + strlen(printf_buf),
		"|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n",
		HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1);

	dev_info(&hdev->pdev->dev, "%s", printf_buf);

	for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
					   true);
		req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
		req0->index = cpu_to_le16(i);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
			dev_err(&hdev->pdev->dev,
				"call hclge_cmd_send fail, ret = %d\n", ret);

		if (!req0->resp_code)
			continue;

		memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
		snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
			 "%02u   |%02x:%02x:%02x:%02x:%02x:%02x|",
			 le16_to_cpu(req0->index),
			 req0->mac_addr[0], req0->mac_addr[1],
			 req0->mac_addr[2], req0->mac_addr[3],
			 req0->mac_addr[4], req0->mac_addr[5]);

		snprintf(printf_buf + strlen(printf_buf),
			 HCLGE_DBG_BUF_LEN - strlen(printf_buf),
			 "%x   |%04x |%x   |%04x|%x   |%02x   |%02x   |",
			 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
			 le16_to_cpu(req0->ethter_type),
			 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
			 le16_to_cpu(req0->vlan_tag) & HCLGE_DBG_MNG_VLAN_TAG,
			 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
			 req0->i_port_bitmap, req0->i_port_direction);

		msg_egress_port = le16_to_cpu(req0->egress_port);
		snprintf(printf_buf + strlen(printf_buf),
			 HCLGE_DBG_BUF_LEN - strlen(printf_buf),
			 "%x     |%x    |%02x   |%04x|%x\n",
			 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
			 msg_egress_port & HCLGE_DBG_MNG_PF_ID,
			 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
			 le16_to_cpu(req0->egress_queue),
			 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));

		dev_info(&hdev->pdev->dev, "%s", printf_buf);
static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
				  bool sel_x, u32 loc)
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	req1->index = cpu_to_le32(loc);

	ret = hclge_cmd_send(&hdev->hw, desc, 3);

	dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
		 sel_x ? "x" : "y", loc);

	/* tcam_data0 ~ tcam_data1 */
	req = (u32 *)req1->tcam_data;
	for (i = 0; i < 2; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);

	/* tcam_data2 ~ tcam_data7 */
	req = (u32 *)req2->tcam_data;
	for (i = 0; i < 6; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);

	/* tcam_data8 ~ tcam_data12 */
	req = (u32 *)req3->tcam_data;
	for (i = 0; i < 5; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);
static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	spin_lock_bh(&hdev->fd_rule_lock);
	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		rule_locs[cnt] = rule->location;
	spin_unlock_bh(&hdev->fd_rule_lock);

	if (cnt != hdev->hclge_fd_rule_num)
static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
	int i, ret, rule_cnt;

	if (!hnae3_dev_fd_supported(hdev)) {
		dev_err(&hdev->pdev->dev,
			"Only FD-supported dev supports dump fd tcam\n");

	if (!hdev->hclge_fd_rule_num ||
	    !hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])

	rule_locs = kcalloc(hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
			    sizeof(u16), GFP_KERNEL);

	rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
	if (rule_cnt <= 0) {
		dev_err(&hdev->pdev->dev,
			"failed to get rule number, ret = %d\n", rule_cnt);

	for (i = 0; i < rule_cnt; i++) {
		ret = hclge_dbg_fd_tcam_read(hdev, 0, true, rule_locs[i]);
			dev_err(&hdev->pdev->dev,
				"failed to get fd tcam key x, ret = %d\n", ret);

		ret = hclge_dbg_fd_tcam_read(hdev, 0, false, rule_locs[i]);
			dev_err(&hdev->pdev->dev,
				"failed to get fd tcam key y, ret = %d\n", ret);
void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
	dev_info(&hdev->pdev->dev, "PF reset count: %u\n",
		 hdev->rst_stats.pf_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "GLOBAL reset count: %u\n",
		 hdev->rst_stats.global_rst_cnt);
	dev_info(&hdev->pdev->dev, "IMP reset count: %u\n",
		 hdev->rst_stats.imp_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.reset_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_reset_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.reset_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.reset_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "reset interrupt source: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
	dev_info(&hdev->pdev->dev, "reset interrupt status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
	dev_info(&hdev->pdev->dev, "hardware reset status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
	dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev)
	dev_info(&hdev->pdev->dev, "last_serv_processed: %lu\n",
		 hdev->last_serv_processed);
	dev_info(&hdev->pdev->dev, "last_serv_cnt: %lu\n",
		 hdev->serv_processed_cnt);

static void hclge_dbg_dump_interrupt(struct hclge_dev *hdev)
	dev_info(&hdev->pdev->dev, "num_nic_msi: %u\n", hdev->num_nic_msi);
	dev_info(&hdev->pdev->dev, "num_roce_msi: %u\n", hdev->num_roce_msi);
	dev_info(&hdev->pdev->dev, "num_msi_used: %u\n", hdev->num_msi_used);
	dev_info(&hdev->pdev->dev, "num_msi_left: %u\n", hdev->num_msi_left);
static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
	struct hclge_desc *desc_src, *desc_tmp;
	struct hclge_get_m7_bd_cmd *req;
	struct hclge_desc desc;
	u32 bd_num, buf_len;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true);

	req = (struct hclge_get_m7_bd_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev,
			"get firmware statistics bd number failed, ret = %d\n",
			ret);

	bd_num = le32_to_cpu(req->bd_num);

	buf_len = sizeof(struct hclge_desc) * bd_num;
	desc_src = kzalloc(buf_len, GFP_KERNEL);

	desc_tmp = desc_src;
	ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num,
				 HCLGE_OPC_M7_STATS_INFO);
		dev_err(&hdev->pdev->dev,
			"get firmware statistics failed, ret = %d\n", ret);

	for (i = 0; i < bd_num; i++) {
		dev_info(&hdev->pdev->dev, "0x%08x  0x%08x  0x%08x\n",
			 le32_to_cpu(desc_tmp->data[0]),
			 le32_to_cpu(desc_tmp->data[1]),
			 le32_to_cpu(desc_tmp->data[2]));
		dev_info(&hdev->pdev->dev, "0x%08x  0x%08x  0x%08x\n",
			 le32_to_cpu(desc_tmp->data[3]),
			 le32_to_cpu(desc_tmp->data[4]),
			 le32_to_cpu(desc_tmp->data[5]));
#define HCLGE_CMD_NCL_CONFIG_BD_NUM	5

static void hclge_ncl_config_data_print(struct hclge_dev *hdev,
					struct hclge_desc *desc, int *offset,
					int *length)
#define HCLGE_CMD_DATA_NUM		6

	for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
		for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
			if (i == 0 && j == 0)
				continue;

			dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
				 *offset,
				 le32_to_cpu(desc[i].data[j]));
			*offset += sizeof(u32);
			*length -= sizeof(u32);
/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
 * @hdev: pointer to struct hclge_dev
 * @cmd_buf: string that contains offset and length
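 *
 * For example, a cmd_buf of "0x0 0x80" (offset and length are parsed as hex
 * by the sscanf below) would dump 0x80 bytes of NCL_CONFIG starting at
 * offset 0.
 */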
static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
				      const char *cmd_buf)
#define HCLGE_MAX_NCL_CONFIG_OFFSET	4096
#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD	(20 + 24 * 4)
#define HCLGE_NCL_CONFIG_PARAM_NUM	2

	struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
	int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;

	ret = sscanf(cmd_buf, "%x %x", &offset, &length);
	if (ret != HCLGE_NCL_CONFIG_PARAM_NUM) {
		dev_err(&hdev->pdev->dev,
			"Too few parameters, num = %d.\n", ret);

	if (offset < 0 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
	    length <= 0 || length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
		dev_err(&hdev->pdev->dev,
			"Invalid input, offset = %d, length = %d.\n",
			offset, length);

	dev_info(&hdev->pdev->dev, "offset |    data\n");

	while (length > 0) {
		data0 = offset;
		if (length >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
			data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
		else
			data0 |= length << 16;
		ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
					 HCLGE_OPC_QUERY_NCL_CONFIG);

		hclge_ncl_config_data_print(hdev, desc, &offset, &length);
static void hclge_dbg_dump_loopback(struct hclge_dev *hdev)
	struct phy_device *phydev = hdev->hw.mac.phydev;
	struct hclge_config_mac_mode_cmd *req_app;
	struct hclge_common_lb_cmd *req_common;
	struct hclge_desc desc;

	req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
	req_common = (struct hclge_common_lb_cmd *)desc.data;

	dev_info(&hdev->pdev->dev, "mac id: %u\n", hdev->hw.mac.mac_id);

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev,
			"failed to dump app loopback status, ret = %d\n", ret);

	loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
				    HCLGE_MAC_APP_LP_B);
	dev_info(&hdev->pdev->dev, "app loopback: %s\n",
		 loopback_en ? "on" : "off");

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev,
			"failed to dump common loopback status, ret = %d\n",
			ret);

	loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
	dev_info(&hdev->pdev->dev, "serdes serial loopback: %s\n",
		 loopback_en ? "on" : "off");

	loopback_en = req_common->enable &
			HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
	dev_info(&hdev->pdev->dev, "serdes parallel loopback: %s\n",
		 loopback_en ? "on" : "off");

	if (phydev) {
		dev_info(&hdev->pdev->dev, "phy loopback: %s\n",
			 phydev->loopback_enabled ? "on" : "off");
	} else if (hnae3_dev_phy_imp_supported(hdev)) {
		loopback_en = req_common->enable &
			      HCLGE_CMD_GE_PHY_INNER_LOOP_B;
		dev_info(&hdev->pdev->dev, "phy loopback: %s\n",
			 loopback_en ? "on" : "off");
/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
 * @hdev: pointer to struct hclge_dev
 */
static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
#define HCLGE_BILLION_NANO_SECONDS 1000000000

	struct hclge_mac_tnl_stats stats;
	unsigned long rem_nsec;

	dev_info(&hdev->pdev->dev, "Recently generated mac tnl interruption:\n");

	while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
		rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
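		/* stats.time holds a nanosecond timestamp; do_div() leaves
		 * the whole seconds in stats.time and returns the sub-second
		 * remainder in nanoseconds, which is scaled below for the
		 * fractional part of the log line.
		 */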
		dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n",
			 (unsigned long)stats.time, rem_nsec / 1000,
			 stats.status);
static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid)
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	u8 ir_u, ir_b, ir_s, bs_b, bs_s;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);

	shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
	shap_cfg_cmd->qs_id = cpu_to_le16(qsid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		dev_err(&hdev->pdev->dev,
			"qs%u failed to get tx_rate, ret=%d\n",
			qsid, ret);

	shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
	ir_b = hclge_tm_get_field(shapping_para, IR_B);
	ir_u = hclge_tm_get_field(shapping_para, IR_U);
	ir_s = hclge_tm_get_field(shapping_para, IR_S);
	bs_b = hclge_tm_get_field(shapping_para, BS_B);
	bs_s = hclge_tm_get_field(shapping_para, BS_S);
	rate = le32_to_cpu(shap_cfg_cmd->qs_rate);

	dev_info(&hdev->pdev->dev,
		 "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u, flag:%#x, rate:%u(Mbps)\n",
		 qsid, ir_b, ir_u, ir_s, bs_b, bs_s, shap_cfg_cmd->flag, rate);
static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
	struct hnae3_knic_private_info *kinfo;
	struct hclge_vport *vport;

	for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) {
		vport = &hdev->vport[vport_id];
		kinfo = &vport->nic.kinfo;

		dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id);

		for (i = 0; i < kinfo->tc_info.num_tc; i++) {
			u16 qsid = vport->qs_offset + i;

			hclge_dbg_dump_qs_shaper_single(hdev, qsid);

static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
				     const char *cmd_buf)
	ret = kstrtou16(cmd_buf, 0, &qsid);
		hclge_dbg_dump_qs_shaper_all(hdev);

	if (qsid >= hdev->ae_dev->dev_specs.max_qset_num) {
		dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-%u]\n",
			qsid, hdev->ae_dev->dev_specs.max_qset_num - 1);

	hclge_dbg_dump_qs_shaper_single(hdev, qsid);
static int hclge_dbg_dump_mac_list(struct hclge_dev *hdev, const char *cmd_buf,
				   bool is_unicast)
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_vport *vport;
	struct list_head *list;

	ret = kstrtouint(cmd_buf, 0, &func_id);
		dev_err(&hdev->pdev->dev,
			"dump mac list: bad command string, ret = %d\n", ret);

	if (func_id >= hdev->num_alloc_vport) {
		dev_err(&hdev->pdev->dev,
			"function id(%u) is out of range(0-%u)\n", func_id,
			hdev->num_alloc_vport - 1);

	vport = &hdev->vport[func_id];

	list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;

	dev_info(&hdev->pdev->dev, "vport %u %s mac list:\n",
		 func_id, is_unicast ? "uc" : "mc");
	dev_info(&hdev->pdev->dev, "mac address                 state\n");

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		dev_info(&hdev->pdev->dev, "%pM         %d\n",
			 mac_node->mac_addr, mac_node->state);

	spin_unlock_bh(&vport->mac_list_lock);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
#define DUMP_REG	"dump reg"
#define DUMP_TM_MAP	"dump tm map"
#define DUMP_LOOPBACK	"dump loopback"
#define DUMP_INTERRUPT	"dump intr"

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) {
		hclge_dbg_fd_tcam(hdev);
	} else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
		hclge_dbg_dump_tc(hdev);
	} else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) {
		hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]);
	} else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
		hclge_dbg_dump_tm(hdev);
	} else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
		hclge_dbg_dump_qos_pause_cfg(hdev);
	} else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) {
		hclge_dbg_dump_qos_pri_map(hdev);
	} else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) {
		hclge_dbg_dump_qos_buf_cfg(hdev);
	} else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
		hclge_dbg_dump_mng_table(hdev);
	} else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) {
		hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]);
	} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
		hclge_dbg_dump_rst_info(hdev);
	} else if (strncmp(cmd_buf, "dump serv info", 14) == 0) {
		hclge_dbg_dump_serv_info(hdev);
	} else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
		hclge_dbg_get_m7_stats_info(hdev);
	} else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
		hclge_dbg_dump_ncl_config(hdev,
					  &cmd_buf[sizeof("dump ncl_config")]);
	} else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
		hclge_dbg_dump_mac_tnl_status(hdev);
	} else if (strncmp(cmd_buf, DUMP_LOOPBACK,
		   strlen(DUMP_LOOPBACK)) == 0) {
		hclge_dbg_dump_loopback(hdev);
	} else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
		hclge_dbg_dump_qs_shaper(hdev,
					 &cmd_buf[sizeof("dump qs shaper")]);
	} else if (strncmp(cmd_buf, "dump uc mac list", 16) == 0) {
		hclge_dbg_dump_mac_list(hdev,
					&cmd_buf[sizeof("dump uc mac list")],
					true);
	} else if (strncmp(cmd_buf, "dump mc mac list", 16) == 0) {
		hclge_dbg_dump_mac_list(hdev,
					&cmd_buf[sizeof("dump mc mac list")],
					false);
	} else if (strncmp(cmd_buf, DUMP_INTERRUPT,
		   strlen(DUMP_INTERRUPT)) == 0) {
		hclge_dbg_dump_interrupt(hdev);
		dev_info(&hdev->pdev->dev, "unknown command\n");
int hclge_dbg_read_cmd(struct hnae3_handle *handle, const char *cmd_buf,
		       char *buf, int len)
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (strncmp(cmd_buf, HNAE3_DBG_TM_NODES,
		    strlen(HNAE3_DBG_TM_NODES)) == 0)
		return hclge_dbg_dump_tm_nodes(hdev, buf, len);
	else if (strncmp(cmd_buf, HNAE3_DBG_TM_PRI,
			 strlen(HNAE3_DBG_TM_PRI)) == 0)
		return hclge_dbg_dump_tm_pri(hdev, buf, len);
	else if (strncmp(cmd_buf, HNAE3_DBG_TM_QSET,
			 strlen(HNAE3_DBG_TM_QSET)) == 0)
		return hclge_dbg_dump_tm_qset(hdev, buf, len);