1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
4 * Copyright (C) 2019 Marvell.
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
15 #include "rvu_struct.h"
19 #include "lmac_common.h"
21 #include "rvu_npc_hash.h"
24 #define DEBUGFS_DIR_NAME "octeontx2"
/* Human-readable labels for CGX MAC RX hardware counters, indexed by the
 * CGX_STATx enum ids. Used when dumping per-LMAC RX stats via debugfs.
 */
75 static char *cgx_rx_stats_fields[] = {
76 [CGX_STAT0] = "Received packets",
77 [CGX_STAT1] = "Octets of received packets",
78 [CGX_STAT2] = "Received PAUSE packets",
79 [CGX_STAT3] = "Received PAUSE and control packets",
80 [CGX_STAT4] = "Filtered DMAC0 (NIX-bound) packets",
81 [CGX_STAT5] = "Filtered DMAC0 (NIX-bound) octets",
82 [CGX_STAT6] = "Packets dropped due to RX FIFO full",
83 [CGX_STAT7] = "Octets dropped due to RX FIFO full",
84 [CGX_STAT8] = "Error packets",
85 [CGX_STAT9] = "Filtered DMAC1 (NCSI-bound) packets",
86 [CGX_STAT10] = "Filtered DMAC1 (NCSI-bound) octets",
87 [CGX_STAT11] = "NCSI-bound packets dropped",
88 [CGX_STAT12] = "NCSI-bound octets dropped",
/* Human-readable labels for CGX MAC TX hardware counters, indexed by the
 * CGX_STATx enum ids. Used when dumping per-LMAC TX stats via debugfs.
 */
91 static char *cgx_tx_stats_fields[] = {
92 [CGX_STAT0] = "Packets dropped due to excessive collisions",
93 [CGX_STAT1] = "Packets dropped due to excessive deferral",
94 [CGX_STAT2] = "Multiple collisions before successful transmission",
95 [CGX_STAT3] = "Single collisions before successful transmission",
96 [CGX_STAT4] = "Total octets sent on the interface",
97 [CGX_STAT5] = "Total frames sent on the interface",
98 [CGX_STAT6] = "Packets sent with an octet count < 64",
99 [CGX_STAT7] = "Packets sent with an octet count == 64",
100 [CGX_STAT8] = "Packets sent with an octet count of 65-127",
101 [CGX_STAT9] = "Packets sent with an octet count of 128-255",
102 [CGX_STAT10] = "Packets sent with an octet count of 256-511",
103 [CGX_STAT11] = "Packets sent with an octet count of 512-1023",
104 [CGX_STAT12] = "Packets sent with an octet count of 1024-1518",
105 [CGX_STAT13] = "Packets sent with an octet count of > 1518",
106 [CGX_STAT14] = "Packets sent to a broadcast DMAC",
107 [CGX_STAT15] = "Packets sent to the multicast DMAC",
108 [CGX_STAT16] = "Transmit underflow and were truncated",
109 [CGX_STAT17] = "Control/PAUSE packets sent",
/* Human-readable labels for RPM MAC RX counters (CN10K-family MAC block).
 * Unlike the CGX tables above, entries are positional: index order must
 * match the hardware counter order read by the dump code.
 */
112 static char *rpm_rx_stats_fields[] = {
113 "Octets of received packets",
114 "Octets of received packets with out error",
115 "Received packets with alignment errors",
116 "Control/PAUSE packets received",
117 "Packets received with Frame too long Errors",
118 "Packets received with a1nrange length Errors",
120 "Packets received with FrameCheckSequenceErrors",
121 "Packets received with VLAN header",
123 "Packets received with unicast DMAC",
124 "Packets received with multicast DMAC",
125 "Packets received with broadcast DMAC",
127 "Total frames received on interface",
128 "Packets received with an octet count < 64",
129 "Packets received with an octet count == 64",
130 "Packets received with an octet count of 65-127",
131 "Packets received with an octet count of 128-255",
132 "Packets received with an octet count of 256-511",
133 "Packets received with an octet count of 512-1023",
134 "Packets received with an octet count of 1024-1518",
135 "Packets received with an octet count of > 1518",
138 "Fragmented Packets",
139 "CBFC(class based flow control) pause frames received for class 0",
140 "CBFC pause frames received for class 1",
141 "CBFC pause frames received for class 2",
142 "CBFC pause frames received for class 3",
143 "CBFC pause frames received for class 4",
144 "CBFC pause frames received for class 5",
145 "CBFC pause frames received for class 6",
146 "CBFC pause frames received for class 7",
147 "CBFC pause frames received for class 8",
148 "CBFC pause frames received for class 9",
149 "CBFC pause frames received for class 10",
150 "CBFC pause frames received for class 11",
151 "CBFC pause frames received for class 12",
152 "CBFC pause frames received for class 13",
153 "CBFC pause frames received for class 14",
154 "CBFC pause frames received for class 15",
155 "MAC control packets received",
/* Human-readable labels for RPM MAC TX counters; positional, must stay in
 * hardware counter order (see rpm_rx_stats_fields above for the RX set).
 */
158 static char *rpm_tx_stats_fields[] = {
159 "Total octets sent on the interface",
160 "Total octets transmitted OK",
161 "Control/Pause frames sent",
162 "Total frames transmitted OK",
163 "Total frames sent with VLAN header",
165 "Packets sent to unicast DMAC",
166 "Packets sent to the multicast DMAC",
167 "Packets sent to a broadcast DMAC",
168 "Packets sent with an octet count == 64",
169 "Packets sent with an octet count of 65-127",
170 "Packets sent with an octet count of 128-255",
171 "Packets sent with an octet count of 256-511",
172 "Packets sent with an octet count of 512-1023",
173 "Packets sent with an octet count of 1024-1518",
174 "Packets sent with an octet count of > 1518",
175 "CBFC(class based flow control) pause frames transmitted for class 0",
176 "CBFC pause frames transmitted for class 1",
177 "CBFC pause frames transmitted for class 2",
178 "CBFC pause frames transmitted for class 3",
179 "CBFC pause frames transmitted for class 4",
180 "CBFC pause frames transmitted for class 5",
181 "CBFC pause frames transmitted for class 6",
182 "CBFC pause frames transmitted for class 7",
183 "CBFC pause frames transmitted for class 8",
184 "CBFC pause frames transmitted for class 9",
185 "CBFC pause frames transmitted for class 10",
186 "CBFC pause frames transmitted for class 11",
187 "CBFC pause frames transmitted for class 12",
188 "CBFC pause frames transmitted for class 13",
189 "CBFC pause frames transmitted for class 14",
190 "CBFC pause frames transmitted for class 15",
191 "MAC control packets sent",
192 "Total frames sent on the interface"
/* Stub names so a NULL read_op/write_op argument to the FOPS macros below
 * token-pastes into plain NULL.
 */
201 #define rvu_dbg_NULL NULL
202 #define rvu_dbg_open_NULL NULL
/* Generate a seq_file-based file_operations: defines rvu_dbg_open_<name>()
 * wrapping single_open() with rvu_dbg_<read_op> as the show callback, and a
 * const rvu_dbg_<name>_fops using it. write_op may be NULL (see stubs above).
 */
204 #define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \
205 static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
207 return single_open(file, rvu_dbg_##read_op, inode->i_private); \
209 static const struct file_operations rvu_dbg_##name##_fops = { \
210 .owner = THIS_MODULE, \
211 .open = rvu_dbg_open_##name, \
213 .write = rvu_dbg_##write_op, \
214 .llseek = seq_lseek, \
215 .release = single_release, \
/* Generate a raw (non-seq_file) file_operations using simple_open, for
 * handlers that implement read()/write() directly on the file.
 */
218 #define RVU_DEBUG_FOPS(name, read_op, write_op) \
219 static const struct file_operations rvu_dbg_##name##_fops = { \
220 .owner = THIS_MODULE, \
221 .open = simple_open, \
222 .read = rvu_dbg_##read_op, \
223 .write = rvu_dbg_##write_op \
/* Forward declaration; defined later in the file (NIX counterpart of
 * print_npa_qsize).
 */
226 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
/* Dump per-port (per-LMAC) MCS statistics for the given direction.
 * @filp: seq_file whose ->private is the struct mcs instance.
 * @dir:  MCS_RX or MCS_TX.
 * Walks every LMAC set in the hardware LMAC bitmap under stats_lock and
 * prints TCAM-miss and parser-error counts; preempt errors are RX-only and
 * only exist on multi-block MCS hardware (mcs_blks > 1).
 */
228 static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
230 struct mcs *mcs = filp->private;
231 struct mcs_port_stats stats;
234 seq_puts(filp, "\n port stats\n");
/* stats_lock serialises against other readers of the hardware counters */
235 mutex_lock(&mcs->stats_lock);
236 for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
237 mcs_get_port_stats(mcs, &stats, lmac, dir);
238 seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
239 seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
241 if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
242 seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
243 stats.preempt_err_cnt);
/* NOTE(review): sectag insert errors are a TX-side counter; presumably
 * guarded by a dir == MCS_TX check not visible in this excerpt — confirm.
 */
245 seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
246 stats.sectag_insert_err_cnt);
248 mutex_unlock(&mcs->stats_lock);
/* Direction-specific seq_file show wrappers and their file_operations. */
252 static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
254 return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
257 RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
259 static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
261 return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
264 RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
/* Dump per-SA (Security Association) MCS statistics for the given direction.
 * @filp: seq_file whose ->private is the struct mcs instance.
 * @dir:  MCS_RX or MCS_TX; selects which SA bitmap is walked and which
 *        counters are printed (TX: encrypt/protect, RX: validation results).
 * Only SA ids currently allocated (set in the rsrc_bmap) are reported.
 */
266 static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
268 struct mcs *mcs = filp->private;
269 struct mcs_sa_stats stats;
270 struct rsrc_bmap *map;
/* TX path: walk allocated SAs and print encrypt/protect packet counts */
275 mutex_lock(&mcs->stats_lock);
276 for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
277 seq_puts(filp, "\n TX SA stats\n");
278 mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
279 seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
280 stats.pkt_encrypt_cnt);
282 seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
283 stats.pkt_protected_cnt);
285 mutex_unlock(&mcs->stats_lock);
/* RX path: walk allocated SAs and print validation/error packet counts */
291 mutex_lock(&mcs->stats_lock);
292 for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
293 seq_puts(filp, "\n RX SA stats\n");
294 mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
295 seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
296 seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
297 seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
298 seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
299 seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
301 mutex_unlock(&mcs->stats_lock);
/* Direction-specific seq_file show wrappers and their file_operations. */
305 static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
307 return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
310 RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
312 static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
314 return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
317 RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
/* Dump TX Secure Channel (SC) statistics for every allocated SC id.
 * seq_file ->private is the struct mcs instance. Octet counters only exist
 * on single-block MCS hardware (mcs_blks == 1), hence the extra guard.
 */
319 static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
321 struct mcs *mcs = filp->private;
322 struct mcs_sc_stats stats;
323 struct rsrc_bmap *map;
327 seq_puts(filp, "\n SC stats\n");
329 mutex_lock(&mcs->stats_lock);
330 for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
331 mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
332 seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
333 seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
334 seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
/* Octet-level counters: single-block MCS variants only */
336 if (mcs->hw->mcs_blks == 1) {
337 seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
338 stats.octet_encrypt_cnt);
339 seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
340 stats.octet_protected_cnt);
343 mutex_unlock(&mcs->stats_lock);
347 RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
/* Dump RX Secure Channel statistics for every allocated SC id.
 * Counter availability depends on the hardware variant: delay/ok packet
 * counts exist only when mcs_blks > 1, octet counters only when == 1.
 */
349 static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
351 struct mcs *mcs = filp->private;
352 struct mcs_sc_stats stats;
353 struct rsrc_bmap *map;
357 seq_puts(filp, "\n SC stats\n");
359 mutex_lock(&mcs->stats_lock);
360 for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
361 mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
362 seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
363 seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
364 seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
365 seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
366 seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
367 seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);
369 if (mcs->hw->mcs_blks > 1) {
370 seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
371 seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
373 if (mcs->hw->mcs_blks == 1) {
374 seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
375 stats.octet_decrypt_cnt);
376 seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
377 stats.octet_validate_cnt);
380 mutex_unlock(&mcs->stats_lock);
384 RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
/* Dump TCAM flow-id hit counters for every allocated flow id in @dir
 * (MCS_RX or MCS_TX); picks the rx/tx flow_ids bitmap accordingly.
 */
386 static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
388 struct mcs *mcs = filp->private;
389 struct mcs_flowid_stats stats;
390 struct rsrc_bmap *map;
393 seq_puts(filp, "\n Flowid stats\n");
396 map = &mcs->rx.flow_ids;
398 map = &mcs->tx.flow_ids;
400 mutex_lock(&mcs->stats_lock);
401 for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
402 mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
403 seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
405 mutex_unlock(&mcs->stats_lock);
/* Direction-specific seq_file show wrappers and their file_operations. */
409 static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
411 return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
414 RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
416 static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
418 return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
421 RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
/* Dump TX SecY (Security Entity) statistics for every allocated secy id:
 * controlled/uncontrolled port packet and octet counters, plus
 * encrypt/protect results and error drops.
 */
423 static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
425 struct mcs *mcs = filp->private;
426 struct mcs_secy_stats stats;
427 struct rsrc_bmap *map;
431 seq_puts(filp, "\n MCS TX secy stats\n");
433 mutex_lock(&mcs->stats_lock);
434 for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
435 mcs_get_tx_secy_stats(mcs, &stats, secy_id);
436 seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
437 seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
438 stats.ctl_pkt_bcast_cnt);
439 seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
440 stats.ctl_pkt_mcast_cnt);
441 seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
442 stats.ctl_pkt_ucast_cnt);
443 seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
444 seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
445 stats.unctl_pkt_bcast_cnt);
446 seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
447 stats.unctl_pkt_mcast_cnt);
448 seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
449 stats.unctl_pkt_ucast_cnt);
450 seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
451 seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
452 stats.octet_encrypted_cnt);
453 seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
454 stats.octet_protected_cnt);
/* NOTE(review): label reads "Pkts on active sa" but the counter is
 * pkt_noactivesa_cnt (packets with NO active SA) — label looks misleading.
 */
455 seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
456 stats.pkt_noactivesa_cnt);
457 seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
458 seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
460 mutex_unlock(&mcs->stats_lock);
464 RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
/* Dump RX SecY statistics for every allocated secy id. The "pkts notag"
 * counter exists only on multi-block MCS hardware (mcs_blks > 1).
 */
466 static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
468 struct mcs *mcs = filp->private;
469 struct mcs_secy_stats stats;
470 struct rsrc_bmap *map;
474 seq_puts(filp, "\n MCS secy stats\n");
476 mutex_lock(&mcs->stats_lock);
477 for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
478 mcs_get_rx_secy_stats(mcs, &stats, secy_id);
479 seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
480 seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
481 stats.ctl_pkt_bcast_cnt);
482 seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
483 stats.ctl_pkt_mcast_cnt);
484 seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
485 stats.ctl_pkt_ucast_cnt);
486 seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
487 seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
488 stats.unctl_pkt_bcast_cnt);
489 seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
490 stats.unctl_pkt_mcast_cnt);
491 seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
492 stats.unctl_pkt_ucast_cnt);
493 seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
494 seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
495 stats.octet_decrypted_cnt);
496 seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
497 stats.octet_validated_cnt);
498 seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
499 stats.pkt_port_disabled_cnt);
500 seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
501 seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
503 seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
504 stats.pkt_nosaerror_cnt);
505 seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
506 stats.pkt_tagged_ctl_cnt);
507 seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
508 seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
509 if (mcs->hw->mcs_blks > 1)
510 seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
511 stats.pkt_notag_cnt);
513 mutex_unlock(&mcs->stats_lock);
517 RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
/* Create the MCS debugfs hierarchy:
 *   <root>/mcs/mcs<i>/{rx_stats,tx_stats}/{flowid,secy,sc,sa,port}
 * One "mcs<i>" directory per MCS block; each stats file gets the
 * corresponding struct mcs as its private data. No-op when the SoC has
 * no MCS blocks.
 */
519 static void rvu_dbg_mcs_init(struct rvu *rvu)
525 if (!rvu->mcs_blk_cnt)
528 rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
530 for (i = 0; i < rvu->mcs_blk_cnt; i++) {
531 mcs = mcs_get_pdata(i);
533 sprintf(dname, "mcs%d", i);
534 rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
535 rvu->rvu_dbg.mcs_root);
537 rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
539 debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
540 &rvu_dbg_mcs_rx_flowid_stats_fops);
542 debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
543 &rvu_dbg_mcs_rx_secy_stats_fops);
545 debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
546 &rvu_dbg_mcs_rx_sc_stats_fops);
548 debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
549 &rvu_dbg_mcs_rx_sa_stats_fops);
551 debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
552 &rvu_dbg_mcs_rx_port_stats_fops);
554 rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
556 debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
557 &rvu_dbg_mcs_tx_flowid_stats_fops);
559 debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
560 &rvu_dbg_mcs_tx_secy_stats_fops);
562 debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
563 &rvu_dbg_mcs_tx_sc_stats_fops);
565 debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
566 &rvu_dbg_mcs_tx_sa_stats_fops);
568 debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
569 &rvu_dbg_mcs_tx_port_stats_fops);
/* Bytes per entry in the hardware LMTST map table. */
573 #define LMT_MAPTBL_ENTRY_SIZE 16
574 /* Dump LMTST map table */
/* debugfs read() handler: formats the APR LMT map table (one row per
 * PF/VF pcifunc: table index, LMT line base word, map entry word) into a
 * fixed 10 KiB kernel buffer, then copies up to @count bytes to userspace.
 * The table is read through a temporary ioremap of the 128 KiB region at
 * APR_AF_LMT_MAP_BASE. Partial reads are not supported (see below).
 */
575 static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
577 size_t count, loff_t *ppos)
579 struct rvu *rvu = filp->private_data;
580 u64 lmt_addr, val, tbl_base;
581 int pf, vf, num_vfs, hw_vfs;
582 void __iomem *lmt_map_base;
/* NOTE(review): fixed-size output buffer; large PF/VF counts could
 * truncate output (scnprintf caps each write at buf_size - 1 - off).
 */
583 int buf_size = 10240;
589 /* don't allow partial reads */
593 buf = kzalloc(buf_size, GFP_KERNEL);
597 tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
599 lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
601 dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
/* Column headers */
606 off += scnprintf(&buf[off], buf_size - 1 - off,
607 "\n\t\t\t\t\tLmtst Map Table Entries");
608 off += scnprintf(&buf[off], buf_size - 1 - off,
609 "\n\t\t\t\t\t=======================");
610 off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
611 off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
612 off += scnprintf(&buf[off], buf_size - 1 - off,
613 "Lmtline Base (word 0)\t\t");
614 off += scnprintf(&buf[off], buf_size - 1 - off,
615 "Lmt Map Entry (word 1)");
616 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
617 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
618 off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
/* PF entry sits at the start of that PF's stride of (total_vfs * 16) bytes */
621 index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
622 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
624 lmt_addr = readq(lmt_map_base + index);
625 off += scnprintf(&buf[off], buf_size - 1 - off,
626 " 0x%016llx\t\t", lmt_addr);
628 val = readq(lmt_map_base + index);
629 off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
631 /* Reading num of VFs per PF */
632 rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
633 for (vf = 0; vf < num_vfs; vf++) {
/* VF entries follow the PF entry: slot (vf + 1) within the PF's stride.
 * NOTE(review): literal 16 here duplicates LMT_MAPTBL_ENTRY_SIZE.
 */
634 index = (pf * rvu->hw->total_vfs * 16) +
635 ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
636 off += scnprintf(&buf[off], buf_size - 1 - off,
637 "PF%d:VF%d \t\t", pf, vf);
638 off += scnprintf(&buf[off], buf_size - 1 - off,
639 " 0x%llx\t\t", (tbl_base + index));
640 lmt_addr = readq(lmt_map_base + index);
641 off += scnprintf(&buf[off], buf_size - 1 - off,
642 " 0x%016llx\t\t", lmt_addr);
644 val = readq(lmt_map_base + index);
645 off += scnprintf(&buf[off], buf_size - 1 - off,
646 " 0x%016llx\n", val);
649 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
/* Hand at most @count bytes to userspace in a single shot */
651 ret = min(off, count);
652 if (copy_to_user(buffer, buf, ret))
656 iounmap(lmt_map_base);
664 RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
/* Build a compact string listing the LFs of @block attached to @pcifunc,
 * written into @lfs. Consecutive LF numbers are collapsed into "a-b"
 * ranges; isolated LFs are comma-separated (e.g. "0-3,7,9").
 * prev_lf starts at block.lf.max so the first match never looks contiguous.
 */
666 static void get_lf_str_list(struct rvu_block block, int pcifunc,
669 int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
671 for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
672 if (lf >= block.lf.max)
/* Skip LFs owned by a different PF/VF function */
675 if (block.fn_map[lf] != pcifunc)
/* Contiguous with the previous match: extend the current run */
678 if (lf == prev_lf + 1) {
685 len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
/* First entry gets no leading comma */
687 len += (len ? sprintf(lfs + len, ",%d", lf) :
688 sprintf(lfs + len, "%d", lf));
/* Close a trailing open range, if any */
695 len += sprintf(lfs + len, "-%d", prev_lf);
/* Compute the widest LF-list string any PF/VF produces across all RVU
 * blocks, so the resource-status table columns can be aligned. Starts from
 * a 12-character minimum; uses a scratch buffer to render each candidate.
 * Returns the column width (or a negative errno on allocation failure —
 * caller path not visible in this excerpt).
 */
700 static int get_max_column_width(struct rvu *rvu)
702 int index, pf, vf, lf_str_size = 12, buf_size = 256;
703 struct rvu_block block;
707 buf = kzalloc(buf_size, GFP_KERNEL);
711 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
/* vf == 0 represents the PF itself; VFs are 1..total_vfs */
712 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
/* RVU pcifunc encoding: PF number in bits [15:10], VF id below */
713 pcifunc = pf << 10 | vf;
717 for (index = 0; index < BLK_COUNT; index++) {
718 block = rvu->hw->block[index];
/* Unimplemented blocks have an empty name */
719 if (!strlen(block.name))
722 get_lf_str_list(block, pcifunc, buf);
723 if (lf_str_size <= strlen(buf))
724 lf_str_size = strlen(buf) + 1;
733 /* Dumps current provisioning status of all RVU block LFs */
/* debugfs read() handler producing a table: one row per PF/VF pcifunc,
 * one column per RVU block, each cell the LF list from get_lf_str_list().
 * Output is staged in a kernel buffer and copied to userspace row by row;
 * partial reads are refused. Column width comes from get_max_column_width().
 */
734 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
736 size_t count, loff_t *ppos)
738 int index, off = 0, flag = 0, len = 0, i = 0;
739 struct rvu *rvu = filp->private_data;
740 int bytes_not_copied = 0;
741 struct rvu_block block;
748 /* don't allow partial reads */
752 buf = kzalloc(buf_size, GFP_KERNEL);
756 /* Get the maximum width of a column */
757 lf_str_size = get_max_column_width(rvu);
759 lfs = kzalloc(lf_str_size, GFP_KERNEL);
/* Header row: one column heading per implemented block */
764 off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
766 for (index = 0; index < BLK_COUNT; index++)
767 if (strlen(rvu->hw->block[index].name)) {
768 off += scnprintf(&buf[off], buf_size - 1 - off,
770 rvu->hw->block[index].name);
773 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
774 bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
775 if (bytes_not_copied)
/* One row per PF and per VF under it */
780 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
781 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
/* vf == 0 is the PF itself; RVU pcifunc: PF in bits [15:10] */
784 pcifunc = pf << 10 | vf;
789 sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
790 off = scnprintf(&buf[off],
792 "%-*s", lf_str_size, lfs);
794 sprintf(lfs, "PF%d", pf);
795 off = scnprintf(&buf[off],
797 "%-*s", lf_str_size, lfs);
800 for (index = 0; index < BLK_COUNT; index++) {
801 block = rvu->hw->block[index];
802 if (!strlen(block.name))
806 get_lf_str_list(block, pcifunc, lfs);
810 off += scnprintf(&buf[off], buf_size - 1 - off,
811 "%-*s", lf_str_size, lfs);
814 off += scnprintf(&buf[off],
815 buf_size - 1 - off, "\n");
/* Flush this row to userspace at its running offset */
816 bytes_not_copied = copy_to_user(buffer +
819 if (bytes_not_copied)
831 if (bytes_not_copied)
837 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
/* seq_file show handler: for each CGX-mapped PF, print its PCI device name,
 * pcifunc, bound NIX block, and the CGX/RPM + LMAC it is mapped to.
 * mac_ops->name distinguishes CGX (OcteonTX2) from RPM (CN10K) naming.
 */
839 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
841 struct rvu *rvu = filp->private;
842 struct pci_dev *pdev = NULL;
843 struct mac_ops *mac_ops;
844 char cgx[10], lmac[10];
845 struct rvu_pfvf *pfvf;
846 int pf, domain, blkid;
851 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
852 /* There can be no CGX devices at all */
855 seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
857 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
858 if (!is_pf_cgxmapped(rvu, pf))
/* PF n is PCI device n+1 on the RVU bus */
861 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
868 pfvf = rvu_get_pfvf(rvu, pcifunc);
870 if (pfvf->nix_blkaddr == BLKADDR_NIX0)
/* Decode this PF's CGX and LMAC ids from the pf2cgxlmac map */
875 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
877 sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
878 sprintf(lmac, "LMAC%d", lmac_id);
879 seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
880 dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
887 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
/* Validate that @lf is a legal, attached LF of the block at @blkaddr.
 * On success, stores the owning PF/VF function in *pcifunc.
 * Warns and returns false when the LF index is out of range or the LF is
 * not attached to any RVU PF/VF.
 */
889 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
892 struct rvu_block *block;
893 struct rvu_hwinfo *hw;
896 block = &hw->block[blkaddr];
898 if (lf < 0 || lf >= block->lf.max) {
899 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
/* fn_map[lf] == 0 means the LF is unattached */
904 *pcifunc = block->fn_map[lf];
907 "This LF is not attached to any RVU PFFUNC\n");
/* Print the NPA LF's aura and pool queue sizes plus their enable/disable
 * bitmaps (rendered via bitmap_print_to_pagebuf into a PAGE_SIZE scratch
 * buffer). Each context type is reported as uninitialized when its ctx
 * pointer is NULL.
 */
913 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
917 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
921 if (!pfvf->aura_ctx) {
922 seq_puts(m, "Aura context is not initialized\n");
924 bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
925 pfvf->aura_ctx->qsize);
926 seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
927 seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
930 if (!pfvf->pool_ctx) {
931 seq_puts(m, "Pool context is not initialized\n");
933 bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
934 pfvf->pool_ctx->qsize);
935 seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
936 seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
941 /* The 'qsize' entry dumps current Aura/Pool context Qsize
942 * and each context's current enable/disable status in a bitmap.
/* Common show handler for the NPA and NIX 'qsize' debugfs files.
 * @blktype selects BLKTYPE_NPA or a NIX block; for NIX the block address
 * (NIX0 vs NIX1) is inferred from the debugfs parent directory name.
 * The LF to dump was previously selected via the qsize write handler
 * (npa_qsize_id / nix_qsize_id) and is validated before printing.
 */
944 static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
947 void (*print_qsize)(struct seq_file *filp,
948 struct rvu_pfvf *pfvf) = NULL;
949 struct dentry *current_dir;
950 struct rvu_pfvf *pfvf;
959 qsize_id = rvu->rvu_dbg.npa_qsize_id;
960 print_qsize = print_npa_qsize;
964 qsize_id = rvu->rvu_dbg.nix_qsize_id;
965 print_qsize = print_nix_qsize;
972 if (blktype == BLKTYPE_NPA) {
973 blkaddr = BLKADDR_NPA;
/* NIX: parent dir is "nix0" or "nix1" — map it to the block address */
975 current_dir = filp->file->f_path.dentry->d_parent;
976 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
977 BLKADDR_NIX1 : BLKADDR_NIX0);
980 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
983 pfvf = rvu_get_pfvf(rvu, pcifunc);
984 print_qsize(filp, pfvf);
/* Common write handler for the 'qsize' debugfs files: parses an LF index
 * from the user string ("<lf>" or "help"), validates it against the NPA or
 * NIX block (NIX0 vs NIX1 chosen by parent directory name), and stores it
 * as the LF the matching qsize display handler will dump.
 * Returns @count on success, negative errno on parse/validation failure.
 */
989 static ssize_t rvu_dbg_qsize_write(struct file *filp,
990 const char __user *buffer, size_t count,
991 loff_t *ppos, int blktype)
993 char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
994 struct seq_file *seqfile = filp->private_data;
995 char *cmd_buf, *cmd_buf_tmp, *subtoken;
996 struct rvu *rvu = seqfile->private;
997 struct dentry *current_dir;
/* Copy and NUL-terminate the user buffer */
1002 cmd_buf = memdup_user_nul(buffer, count);
1003 if (IS_ERR(cmd_buf))
/* Strip a trailing newline and shrink the consumed count to match */
1006 cmd_buf_tmp = strchr(cmd_buf, '\n');
1008 *cmd_buf_tmp = '\0';
1009 count = cmd_buf_tmp - cmd_buf + 1;
/* Keep the original pointer for kfree; strsep advances cmd_buf */
1012 cmd_buf_tmp = cmd_buf;
1013 subtoken = strsep(&cmd_buf, " ");
1014 ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
1018 if (ret < 0 || !strncmp(subtoken, "help", 4)) {
1019 dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
1020 goto qsize_write_done;
1023 if (blktype == BLKTYPE_NPA) {
1024 blkaddr = BLKADDR_NPA;
/* NIX: parent dir "nix1" selects NIX1, anything else NIX0 */
1026 current_dir = filp->f_path.dentry->d_parent;
1027 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
1028 BLKADDR_NIX1 : BLKADDR_NIX0);
1031 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
1033 goto qsize_write_done;
/* Remember the selected LF for the corresponding display handler */
1035 if (blktype == BLKTYPE_NPA)
1036 rvu->rvu_dbg.npa_qsize_id = lf;
1038 rvu->rvu_dbg.nix_qsize_id = lf;
1042 return ret ? ret : count;
/* NPA-specific thin wrappers binding the shared qsize read/write handlers
 * to BLKTYPE_NPA, plus their seq_file file_operations.
 */
1045 static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
1046 const char __user *buffer,
1047 size_t count, loff_t *ppos)
1049 return rvu_dbg_qsize_write(filp, buffer, count, ppos,
1053 static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
1055 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
1058 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
1060 /* Dumps given NPA Aura's context */
/* Pretty-print every field of an NPA aura context (one seq_printf group per
 * context word W0..W6), from an admin-queue read response. Fields fc_be and
 * fc_msh_dst exist only on newer (non-OcteonTX2) silicon, hence the
 * is_rvu_otx2() guards.
 */
1061 static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
1063 struct npa_aura_s *aura = &rsp->aura;
1064 struct rvu *rvu = m->private;
1066 seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
1068 seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
1069 aura->ena, aura->pool_caching);
1070 seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
1071 aura->pool_way_mask, aura->avg_con);
1072 seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
1073 aura->pool_drop_ena, aura->aura_drop_ena);
1074 seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
1075 aura->bp_ena, aura->aura_drop);
1076 seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
1077 aura->shift, aura->avg_level);
1079 seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
1080 (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);
1082 seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
1083 (u64)aura->limit, aura->bp, aura->fc_ena);
/* fc_be: CN10K-and-later only */
1085 if (!is_rvu_otx2(rvu))
1086 seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
1087 seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
1088 aura->fc_up_crossing, aura->fc_stype);
1089 seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
1091 seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
1093 seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
1094 aura->pool_drop, aura->update_time);
1095 seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
1096 aura->err_int, aura->err_int_ena);
1097 seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
1098 aura->thresh_int, aura->thresh_int_ena);
1099 seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
1100 aura->thresh_up, aura->thresh_qint_idx);
1101 seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
1103 seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
/* fc_msh_dst: CN10K-and-later only */
1104 if (!is_rvu_otx2(rvu))
1105 seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
1108 /* Dumps given NPA Pool's context */
/* Pretty-print every field of an NPA pool context (grouped by context word
 * W0..W8) from an admin-queue read response. As with the aura dump above,
 * fc_be and fc_msh_dst are guarded to non-OcteonTX2 silicon.
 */
1109 static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
1111 struct npa_pool_s *pool = &rsp->pool;
1112 struct rvu *rvu = m->private;
1114 seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
1116 seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
1117 pool->ena, pool->nat_align);
1118 seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
1119 pool->stack_caching, pool->stack_way_mask);
1120 seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
1121 pool->buf_offset, pool->buf_size);
1123 seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
1124 pool->stack_max_pages, pool->stack_pages);
1126 seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);
1128 seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
1129 pool->stack_offset, pool->shift, pool->avg_level);
1130 seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
1131 pool->avg_con, pool->fc_ena, pool->fc_stype);
1132 seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
1133 pool->fc_hyst_bits, pool->fc_up_crossing);
/* fc_be: CN10K-and-later only */
1134 if (!is_rvu_otx2(rvu))
1135 seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
1136 seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
1138 seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
1140 seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
1142 seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
1144 seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
1145 pool->err_int, pool->err_int_ena);
1146 seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
1147 seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
1148 pool->thresh_int_ena, pool->thresh_up);
1149 seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
1150 pool->thresh_qint_idx, pool->err_qint_idx);
/* fc_msh_dst: CN10K-and-later only */
1151 if (!is_rvu_otx2(rvu))
1152 seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
1155 /* Reads aura/pool's ctx from admin queue */
/* seq_file show handler for NPA aura/pool context dumps.
 * @ctype: NPA_AQ_CTYPE_AURA or NPA_AQ_CTYPE_POOL. The LF/id/all selection
 * was stored earlier by the matching write handler (npa_aura_ctx /
 * npa_pool_ctx in rvu_dbg). For each selected id, issues an AQ READ via
 * rvu_npa_aq_enq_inst() and pretty-prints the response with the
 * context-type-specific printer chosen above.
 */
1156 static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
1158 void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
1159 struct npa_aq_enq_req aq_req;
1160 struct npa_aq_enq_rsp rsp;
1161 struct rvu_pfvf *pfvf;
1162 int aura, rc, max_id;
/* Fetch the user-selected LF, starting id, and "all" flag per ctype */
1170 case NPA_AQ_CTYPE_AURA:
1171 npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
1172 id = rvu->rvu_dbg.npa_aura_ctx.id;
1173 all = rvu->rvu_dbg.npa_aura_ctx.all;
1176 case NPA_AQ_CTYPE_POOL:
1177 npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
1178 id = rvu->rvu_dbg.npa_pool_ctx.id;
1179 all = rvu->rvu_dbg.npa_pool_ctx.all;
1185 if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
1188 pfvf = rvu_get_pfvf(rvu, pcifunc);
1189 if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
1190 seq_puts(m, "Aura context is not initialized\n");
1192 } else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
1193 seq_puts(m, "Pool context is not initialized\n");
/* Build the admin-queue READ request once; only aura_id varies per loop */
1197 memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
1198 aq_req.hdr.pcifunc = pcifunc;
1199 aq_req.ctype = ctype;
1200 aq_req.op = NPA_AQ_INSTOP_READ;
1201 if (ctype == NPA_AQ_CTYPE_AURA) {
1202 max_id = pfvf->aura_ctx->qsize;
1203 print_npa_ctx = print_npa_aura_ctx;
1205 max_id = pfvf->pool_ctx->qsize;
1206 print_npa_ctx = print_npa_pool_ctx;
1209 if (id < 0 || id >= max_id) {
1210 seq_printf(m, "Invalid %s, valid range is 0-%d\n",
1211 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
1221 for (aura = id; aura < max_id; aura++) {
1222 aq_req.aura_id = aura;
1224 /* Skip if queue is uninitialized */
1225 if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
1228 seq_printf(m, "======%s : %d=======\n",
1229 (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
1231 rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
1233 seq_puts(m, "Failed to read context\n");
1236 print_npa_ctx(m, &rsp);
/* Validate and cache which NPA LF and aura/pool id the next read of the
 * aura/pool _ctx debugfs file should dump.
 */
static int write_npa_ctx(struct rvu *rvu, bool all,
			 int npalf, int id, int ctype)
	struct rvu_pfvf *pfvf;

	/* Reject LFs that are not provisioned to any PF/VF */
	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	if (ctype == NPA_AQ_CTYPE_AURA) {
		if (!pfvf->aura_ctx) {
			dev_warn(rvu->dev, "Aura context is not initialized\n");
		max_id = pfvf->aura_ctx->qsize;
	} else if (ctype == NPA_AQ_CTYPE_POOL) {
		if (!pfvf->pool_ctx) {
			dev_warn(rvu->dev, "Pool context is not initialized\n");
		max_id = pfvf->pool_ctx->qsize;

	/* id must fall inside the LF's configured queue range */
	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
	/* Cache the selection; the matching _display handler reads it back */
	case NPA_AQ_CTYPE_AURA:
		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
		rvu->rvu_dbg.npa_aura_ctx.id = id;
		rvu->rvu_dbg.npa_aura_ctx.all = all;
	case NPA_AQ_CTYPE_POOL:
		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
		rvu->rvu_dbg.npa_pool_ctx.id = id;
		rvu->rvu_dbg.npa_pool_ctx.all = all;
/* Parse a user write of the form "<lf> [<id>|all]" into @npalf, @id and the
 * all flag.  @count is trimmed to the bytes actually consumed (up to and
 * including the first newline, if present).
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
	int bytes_not_copied;

	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (bytes_not_copied)

	/* NUL-terminate, then cut the command off at the first newline */
	cmd_buf[*count] = '\0';
	cmd_buf_tmp = strchr(cmd_buf, '\n');
		*cmd_buf_tmp = '\0';
		*count = cmd_buf_tmp - cmd_buf + 1;

	/* First token: the LF number */
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
	/* Optional second token: the literal "all", or a numeric queue id */
	subtoken = strsep(&cmd_buf, " ");
	if (subtoken && strcmp(subtoken, "all") == 0) {
		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
/* debugfs write handler: select which NPA LF and aura/pool context the
 * matching read handler will dump.  Input format: "<npalf> [<id>|all]".
 */
static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
				     const char __user *buffer,
				     size_t count, loff_t *ppos, int ctype)
	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
	struct seq_file *seqfp = filp->private_data;
	struct rvu *rvu = seqfp->private;
	int npalf, id = 0, ret;

	/* Only whole, single writes from offset zero are supported */
	if ((*ppos != 0) || !count)

	/* +1 leaves room for the terminating NUL added by the parser */
	cmd_buf = kzalloc(count + 1, GFP_KERNEL);

	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
		"Usage: echo <npalf> [%s number/all] > %s_ctx\n",
		ctype_string, ctype_string);
	ret = write_npa_ctx(rvu, all, npalf, id, ctype);
	/* On success report the full write as consumed */
	return ret ? ret : count;
/* debugfs "aura_ctx" write: forward to the common NPA ctx writer */
static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,

/* debugfs "aura_ctx" read: dump the selected aura context(s) */
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);

RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
/* debugfs "pool_ctx" write: forward to the common NPA ctx writer */
static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,

/* debugfs "pool_ctx" read: dump the selected pool context(s) */
static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);

RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
1392 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
1393 int ctype, int transaction)
1395 u64 req, out_req, lat, cant_alloc;
1396 struct nix_hw *nix_hw;
1400 if (blk_addr == BLKADDR_NDC_NPA0) {
1403 nix_hw = s->private;
1407 for (port = 0; port < NDC_MAX_PORT; port++) {
1408 req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
1409 (port, ctype, transaction));
1410 lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
1411 (port, ctype, transaction));
1412 out_req = rvu_read64(rvu, blk_addr,
1413 NDC_AF_PORTX_RTX_RWX_OSTDN_PC
1414 (port, ctype, transaction));
1415 cant_alloc = rvu_read64(rvu, blk_addr,
1416 NDC_AF_PORTX_RTX_CANT_ALLOC_PC
1417 (port, transaction));
1418 seq_printf(s, "\nPort:%d\n", port);
1419 seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
1420 seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
1421 seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
1422 seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
1423 seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
1427 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
1429 seq_puts(s, "\n***** CACHE mode read stats *****\n");
1430 ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
1431 seq_puts(s, "\n***** CACHE mode write stats *****\n");
1432 ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
1433 seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
1434 ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
1435 seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
1436 ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
/* debugfs read: NPA NDC cache statistics */
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);

RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
/* Print per-bank hit/miss counters for the given NDC block.
 * NOTE(review): @idx is not used in the visible body — confirm whether it
 * is kept only for symmetry with ndc_blk_cache_stats().
 */
static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
	struct nix_hw *nix_hw;

	/* seq_file private data is rvu for NPA, nix_hw for NIX blocks */
	if (blk_addr == BLKADDR_NDC_NPA0) {
		nix_hw = s->private;

	/* Bank count is advertised by the NDC constants register */
	ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
	for (bank = 0; bank < max_bank; bank++) {
		seq_printf(s, "BANK:%d\n", bank);
		seq_printf(s, "\tHits:\t%lld\n",
			   (u64)rvu_read64(rvu, blk_addr,
					   NDC_AF_BANKX_HIT_PC(bank)));
		seq_printf(s, "\tMiss:\t%lld\n",
			   (u64)rvu_read64(rvu, blk_addr,
					   NDC_AF_BANKX_MISS_PC(bank)));
/* debugfs read: NIX RX NDC cache stats; picks the NIX0 or NIX1 block */
static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
	struct nix_hw *nix_hw = filp->private;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);

	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);

RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
/* debugfs read: NIX TX NDC cache stats; picks the NIX0 or NIX1 block */
static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
	struct nix_hw *nix_hw = filp->private;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);

	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);

RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
/* debugfs read: NPA NDC per-bank hit/miss counters */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);

RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
/* debugfs read: NIX RX NDC per-bank hit/miss counters (NIX0 or NIX1) */
static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
	struct nix_hw *nix_hw = filp->private;
	/* NOTE(review): idx stays NPA0_U even for NIX; it appears unused by
	 * ndc_blk_hits_miss_stats() — confirm.
	 */
	int ndc_idx = NPA0_U;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);

	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);

RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
/* debugfs read: NIX TX NDC per-bank hit/miss counters (NIX0 or NIX1) */
static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
	struct nix_hw *nix_hw = filp->private;
	/* NOTE(review): idx stays NPA0_U even for NIX; it appears unused by
	 * ndc_blk_hits_miss_stats() — confirm.
	 */
	int ndc_idx = NPA0_U;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);

	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);

RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1543 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1544 struct nix_cn10k_sq_ctx_s *sq_ctx)
1546 seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1547 sq_ctx->ena, sq_ctx->qint_idx);
1548 seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1549 sq_ctx->substream, sq_ctx->sdp_mcast);
1550 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1551 sq_ctx->cq, sq_ctx->sqe_way_mask);
1553 seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1554 sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1555 seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1556 sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1557 seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1558 sq_ctx->default_chan, sq_ctx->sqb_count);
1560 seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1561 seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1562 seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1563 sq_ctx->sqb_aura, sq_ctx->sq_int);
1564 seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1565 sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1567 seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1568 sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1569 seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1570 sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1571 seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1572 sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1573 seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1574 sq_ctx->tail_offset, sq_ctx->smenq_offset);
1575 seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1576 sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1578 seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1579 sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1580 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1581 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1582 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1583 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1584 sq_ctx->smenq_next_sqb);
1586 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1588 seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1589 seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1590 sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1591 seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1592 sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1593 seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1594 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1596 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1597 (u64)sq_ctx->scm_lso_rem);
1598 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1599 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1600 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1601 (u64)sq_ctx->dropped_octs);
1602 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1603 (u64)sq_ctx->dropped_pkts);
/* Dumps given nix_sq's context */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K parts lay the SQ context out differently; hand off */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);

	/* Otx2 layout: words W0..W15, field by field */
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	/* W4..W8 hold SQB pointers (physical addresses) */
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
1676 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1677 struct nix_cn10k_rq_ctx_s *rq_ctx)
1679 seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1680 rq_ctx->ena, rq_ctx->sso_ena);
1681 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1682 rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1683 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1684 rq_ctx->cq, rq_ctx->lenerr_dis);
1685 seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1686 rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1687 seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1688 rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1689 seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1690 rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1691 seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1693 seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1694 rq_ctx->spb_aura, rq_ctx->lpb_aura);
1695 seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1696 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1697 rq_ctx->sso_grp, rq_ctx->sso_tt);
1698 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1699 rq_ctx->pb_caching, rq_ctx->wqe_caching);
1700 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1701 rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1702 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1703 rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1704 seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1705 rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1707 seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1708 seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1709 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1710 seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1711 rq_ctx->wqe_skip, rq_ctx->spb_ena);
1712 seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1713 rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1714 seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1715 rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1716 seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1717 rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1719 seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1720 rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1721 seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1722 rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1723 seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1724 rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1725 seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1726 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1728 seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1729 rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1730 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1731 rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1732 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1733 rq_ctx->rq_int, rq_ctx->rq_int_ena);
1734 seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1736 seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1737 rq_ctx->ltag, rq_ctx->good_utag);
1738 seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1739 rq_ctx->bad_utag, rq_ctx->flow_tagw);
1740 seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1741 rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1742 seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1743 rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1744 seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1746 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1747 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1748 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1749 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1750 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
/* Dumps given nix_rq's context */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K parts lay the RQ context out differently; hand off */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);

	/* Otx2 layout: words W0..W10, field by field */
	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	/* W6..W10 hold the RQ statistics counters */
	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
/* Dumps given nix_cq's context */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	/* lbpid/lbp fields are printed only for non-Otx2 silicon */
	if (!is_rvu_otx2(rvu)) {
		seq_printf(m, "W1: lbpid_high \t\t\t0x%03x\n", cq_ctx->lbpid_high);
		seq_printf(m, "W1: lbpid_med \t\t\t0x%03x\n", cq_ctx->lbpid_med);
		seq_printf(m, "W1: lbpid_low \t\t\t0x%03x\n", cq_ctx->lbpid_low);
		/* Recombine the three lbpid slices into one value */
		seq_printf(m, "(W1: lbpid) \t\t\t0x%03x\n",
			   cq_ctx->lbpid_high << 6 | cq_ctx->lbpid_med << 3 |
		seq_printf(m, "W1: lbp_ena \t\t\t\t%d\n\n", cq_ctx->lbp_ena);

	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	if (!is_rvu_otx2(rvu)) {
		seq_printf(m, "W3: lbp_frac \t\t\t%d\n", cq_ctx->lbp_frac);
		seq_printf(m, "W3: cpt_drop_err_en \t\t\t%d\n",
			   cq_ctx->cpt_drop_err_en);
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
/* debugfs read handler: dump the previously selected SQ/RQ/CQ context(s)
 * of a NIX LF by issuing admin-queue READ instructions.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	/* Fetch the LF/id/all selection cached by the write handler */
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
	/* Make sure the selected NIX LF is actually attached */
	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");

	/* Bind queue-size limit and pretty-printer to the context type */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;

	/* AQ READ returns the queue context in @rsp */
	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	for (qidx = id; qidx < max_id; qidx++) {
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
			seq_puts(filp, "Failed to read the context\n");
		print_nix_ctx(filp, &rsp);
/* Validate and cache the LF/queue-id selection for the next read of the
 * NIX sq/rq/cq_ctx debugfs file.
 */
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
			       int id, int ctype, char *ctype_string,
	struct nix_hw *nix_hw = m->private;
	struct rvu_pfvf *pfvf;

	/* Reject LFs that are not provisioned to any PF/VF */
	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	if (ctype == NIX_AQ_CTYPE_SQ) {
		if (!pfvf->sq_ctx) {
			dev_warn(rvu->dev, "SQ context is not initialized\n");
		max_id = pfvf->sq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		if (!pfvf->rq_ctx) {
			dev_warn(rvu->dev, "RQ context is not initialized\n");
		max_id = pfvf->rq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		if (!pfvf->cq_ctx) {
			dev_warn(rvu->dev, "CQ context is not initialized\n");
		max_id = pfvf->cq_ctx->qsize;

	/* Queue id must fall inside the LF's configured range */
	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
			 ctype_string, max_id - 1);
	/* Cache the selection for the matching _display handler */
	case NIX_AQ_CTYPE_CQ:
		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_cq_ctx.id = id;
		rvu->rvu_dbg.nix_cq_ctx.all = all;
	case NIX_AQ_CTYPE_SQ:
		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_sq_ctx.id = id;
		rvu->rvu_dbg.nix_sq_ctx.all = all;
	case NIX_AQ_CTYPE_RQ:
		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_rq_ctx.id = id;
		rvu->rvu_dbg.nix_rq_ctx.all = all;
/* Common write handler for the NIX sq/rq/cq_ctx debugfs files.
 * Parses "<nixlf> [<qid>|all]" and caches the selection for display.
 */
static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
					   const char __user *buffer,
					   size_t count, loff_t *ppos,
	struct seq_file *m = filp->private_data;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;
	char *cmd_buf, *ctype_string;
	int nixlf, id = 0, ret;

	/* Only whole, single writes from offset zero are supported */
	if ((*ppos != 0) || !count)

	/* Human-readable name used in error/usage messages */
	case NIX_AQ_CTYPE_SQ:
		ctype_string = "sq";
	case NIX_AQ_CTYPE_RQ:
		ctype_string = "rq";
	case NIX_AQ_CTYPE_CQ:
		ctype_string = "cq";

	/* +1 leaves room for the terminating NUL added by the parser */
	cmd_buf = kzalloc(count + 1, GFP_KERNEL);

	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
		"Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
		ctype_string, ctype_string);
	ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
	/* On success report the full write as consumed */
	return ret ? ret : count;
/* debugfs "sq_ctx" write: forward to the common NIX queue-ctx writer */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,

/* debugfs "sq_ctx" read: dump the selected SQ context(s) */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);

RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
/* debugfs "rq_ctx" write: forward to the common NIX queue-ctx writer */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,

/* debugfs "rq_ctx" read: dump the selected RQ context(s) */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);

RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
/* debugfs "cq_ctx" write: forward to the common NIX queue-ctx writer */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,

/* debugfs "cq_ctx" read: dump the selected CQ context(s) */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);

RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
/* Print one queue type's context count and its enable/disable bitmap */
static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
				 unsigned long *bmap, char *qtype)
	/* Temporary page-sized buffer to render the bitmap as text */
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

	bitmap_print_to_pagebuf(false, buf, bmap, qsize);
	seq_printf(filp, "%s context count : %d\n", qtype, qsize);
	seq_printf(filp, "%s context ena/dis bitmap : %s\n",
/* Dump CQ/RQ/SQ sizes and enable bitmaps for one PF/VF's NIX LF */
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
		seq_puts(filp, "cq context is not initialized\n");
		print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,

		seq_puts(filp, "rq context is not initialized\n");
		print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,

		seq_puts(filp, "sq context is not initialized\n");
		print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
/* debugfs "qsize" write: forward to the generic qsize writer */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,

/* debugfs "qsize" read: show queue sizes for the NIX block */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);

RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
/* Pretty-print one NIX bandwidth-profile (ingress policer) context into the
 * seq_file, word by word (W0..W15) as laid out in nix_bandprof_s.
 */
2169 static void print_band_prof_ctx(struct seq_file *m,
2170 struct nix_bandprof_s *prof)
/* Decode the packet-coloring mode into a human-readable string. */
2174 switch (prof->pc_mode) {
2175 case NIX_RX_PC_MODE_VLAN:
2178 case NIX_RX_PC_MODE_DSCP:
2181 case NIX_RX_PC_MODE_GEN:
2184 case NIX_RX_PC_MODE_RSVD:
2188 seq_printf(m, "W0: pc_mode\t\t%s\n", str);
/* icolor: 3 = color blind, 0 = green, 1 = yellow, otherwise red */
2189 str = (prof->icolor == 3) ? "Color blind" :
2190 (prof->icolor == 0) ? "Green" :
2191 (prof->icolor == 1) ? "Yellow" : "Red";
2192 seq_printf(m, "W0: icolor\t\t%s\n", str);
2193 seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
2194 seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
2195 seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
2196 seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
2197 seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
2198 seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
2199 seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
2200 seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
2202 seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
/* lmode 0 selects byte-based policing, otherwise packet-based */
2203 str = (prof->lmode == 0) ? "byte" : "packet";
2204 seq_printf(m, "W1: lmode\t\t%s\n", str);
/* "l_sellect" is the (misspelled) hardware-struct field name */
2205 seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
2206 seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
2207 seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
2208 seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
/* Per-color actions: 0 = PASS, 1 = DROP, otherwise RED */
2209 str = (prof->gc_action == 0) ? "PASS" :
2210 (prof->gc_action == 1) ? "DROP" : "RED";
2211 seq_printf(m, "W1: gc_action\t\t%s\n", str);
2212 str = (prof->yc_action == 0) ? "PASS" :
2213 (prof->yc_action == 1) ? "DROP" : "RED";
2214 seq_printf(m, "W1: yc_action\t\t%s\n", str);
2215 str = (prof->rc_action == 0) ? "PASS" :
2216 (prof->rc_action == 1) ? "DROP" : "RED";
2217 seq_printf(m, "W1: rc_action\t\t%s\n", str);
2218 seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
2219 seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
2220 seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
/* Accumulators and per-color pass/drop statistics (W2..W15). */
2222 seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
2223 seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
2224 seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
2225 seq_printf(m, "W4: green_pkt_pass\t%lld\n",
2226 (u64)prof->green_pkt_pass);
2227 seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
2228 (u64)prof->yellow_pkt_pass);
2229 seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
2230 seq_printf(m, "W7: green_octs_pass\t%lld\n",
2231 (u64)prof->green_octs_pass);
2232 seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
2233 (u64)prof->yellow_octs_pass);
2234 seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
2235 seq_printf(m, "W10: green_pkt_drop\t%lld\n",
2236 (u64)prof->green_pkt_drop);
2237 seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
2238 (u64)prof->yellow_pkt_drop);
2239 seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
2240 seq_printf(m, "W13: green_octs_drop\t%lld\n",
2241 (u64)prof->green_octs_drop);
2242 seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
2243 (u64)prof->yellow_octs_drop);
2244 seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
2245 seq_puts(m, "==============================\n");
/* debugfs read handler: walk every ingress-policer layer (leaf/mid/top),
 * fetch each allocated bandwidth-profile context through the NIX AQ and
 * print its owner (PF/VF) and decoded contents.
 */
2248 static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
2250 struct nix_hw *nix_hw = m->private;
2251 struct nix_cn10k_aq_enq_req aq_req;
2252 struct nix_cn10k_aq_enq_rsp aq_rsp;
2253 struct rvu *rvu = nix_hw->rvu;
2254 struct nix_ipolicer *ipolicer;
2255 int layer, prof_idx, idx, rc;
2259 /* Ingress policers do not exist on all platforms */
2260 if (!nix_hw->ipolicer)
2263 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2264 if (layer == BAND_PROF_INVAL_LAYER)
2266 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2267 (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
2269 seq_printf(m, "\n%s bandwidth profiles\n", str);
2270 seq_puts(m, "=======================\n");
2272 ipolicer = &nix_hw->ipolicer[layer];
/* Only dump profiles that are actually allocated in this layer. */
2274 for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
2275 if (is_rsrc_free(&ipolicer->band_prof, idx))
/* Profile index encodes the layer in bits [15:14]. */
2278 prof_idx = (idx & 0x3FFF) | (layer << 14);
2279 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
2280 0x00, NIX_AQ_CTYPE_BANDPROF,
2284 "%s: Failed to fetch context of %s profile %d, err %d\n",
2285 __func__, str, idx, rc);
2288 seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
/* pfvf_map low bits distinguish PF (0) from VF (func - 1). */
2289 pcifunc = ipolicer->pfvf_map[idx];
2290 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2291 seq_printf(m, "Allocated to :: PF %d\n",
2292 rvu_get_pf(pcifunc));
2294 seq_printf(m, "Allocated to :: PF %d VF %d\n",
2295 rvu_get_pf(pcifunc),
2296 (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2297 print_band_prof_ctx(m, &aq_rsp.prof);
2303 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
/* debugfs read handler: per ingress-policer layer, report the maximum and
 * currently-free bandwidth-profile counts.
 */
2305 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
2307 struct nix_hw *nix_hw = m->private;
2308 struct nix_ipolicer *ipolicer;
2312 /* Ingress policers do not exist on all platforms */
2313 if (!nix_hw->ipolicer)
2316 seq_puts(m, "\nBandwidth profile resource free count\n");
2317 seq_puts(m, "=====================================\n");
2318 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2319 if (layer == BAND_PROF_INVAL_LAYER)
/* Trailing spaces keep the layer names column-aligned in the output. */
2321 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2322 (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
2324 ipolicer = &nix_hw->ipolicer[layer];
2325 seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
2326 ipolicer->band_prof.max,
2327 rvu_rsrc_free_count(&ipolicer->band_prof));
2329 seq_puts(m, "=====================================\n");
2334 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
/* Create the per-NIX-block debugfs directory ("nix" for NIX0, "nix1" for
 * NIX1) and populate it with context/cache/qsize/policer files.
 * Note: debugfs_create_* return values are intentionally not checked,
 * per kernel debugfs convention.
 */
2336 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
2338 struct nix_hw *nix_hw;
2340 if (!is_block_implemented(rvu->hw, blkaddr))
2343 if (blkaddr == BLKADDR_NIX0) {
2344 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
2345 nix_hw = &rvu->hw->nix[0];
2347 rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
2349 nix_hw = &rvu->hw->nix[1];
2352 debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2353 &rvu_dbg_nix_sq_ctx_fops);
2354 debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2355 &rvu_dbg_nix_rq_ctx_fops);
2356 debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2357 &rvu_dbg_nix_cq_ctx_fops);
2358 debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2359 &rvu_dbg_nix_ndc_tx_cache_fops);
2360 debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2361 &rvu_dbg_nix_ndc_rx_cache_fops);
2362 debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2363 &rvu_dbg_nix_ndc_tx_hits_miss_fops);
2364 debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2365 &rvu_dbg_nix_ndc_rx_hits_miss_fops);
/* "qsize" takes the rvu pointer (not nix_hw) as its private data. */
2366 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
2367 &rvu_dbg_nix_qsize_fops);
2368 debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2369 &rvu_dbg_nix_band_prof_ctx_fops);
2370 debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
2371 &rvu_dbg_nix_band_prof_rsrc_fops);
/* Create the "npa" debugfs directory and its qsize/context/NDC files; all
 * files use the rvu pointer as private data.
 */
2374 static void rvu_dbg_npa_init(struct rvu *rvu)
2376 rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
2378 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
2379 &rvu_dbg_npa_qsize_fops);
2380 debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2381 &rvu_dbg_npa_aura_ctx_fops);
2382 debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2383 &rvu_dbg_npa_pool_ctx_fops);
2384 debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
2385 &rvu_dbg_npa_ndc_cache_fops);
2386 debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
2387 &rvu_dbg_npa_ndc_hits_miss_fops);
/* Helper macros for cgx_print_stats(): fetch one cumulative NIX RX/TX
 * statistic for the current cgx/lmac via rvu_cgx_nix_cuml_stats() and print
 * it as "name: value". They rely on err/cnt/rvu/cgxd/lmac_id/s being in
 * scope at the expansion site. No comments are inserted between the
 * backslash-continued lines below to avoid altering macro tokenization.
 */
2390 #define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \
2393 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
2394 NIX_STATS_RX, &(cnt)); \
2396 seq_printf(s, "%s: %llu\n", name, cnt); \
2400 #define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \
2403 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
2404 NIX_STATS_TX, &(cnt)); \
2406 seq_printf(s, "%s: %llu\n", name, cnt); \
/* Print the full statistics view for one CGX/RPM LMAC: link status,
 * cumulative NIX RX/TX counters, then the MAC-level RX/TX counters using
 * the CGX or RPM field-name tables depending on the silicon.
 */
2410 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2412 struct cgx_link_user_info linfo;
2413 struct mac_ops *mac_ops;
2414 void *cgxd = s->private;
2415 u64 ucast, mcast, bcast;
2416 int stat = 0, err = 0;
2417 u64 tx_stat, rx_stat;
/* Locate the AF's rvu handle via the RVU AF PCI device. */
2420 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2421 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2425 mac_ops = get_mac_ops(cgxd);
2426 /* There can be no CGX devices at all */
/* Link status section */
2431 seq_puts(s, "\n=======Link Status======\n\n");
2432 err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2434 seq_puts(s, "Failed to read link status\n");
2435 seq_printf(s, "\nLink is %s %d Mbps\n\n",
2436 linfo.link_up ? "UP" : "DOWN", linfo.speed);
/* Cumulative NIX RX stats for this port. */
2439 seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2441 ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2444 mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2447 bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2450 seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2451 PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2454 PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2457 PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
/* Cumulative NIX TX stats for this port. */
2462 seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2464 ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2467 mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2470 bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2473 seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2474 PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2477 PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
/* MAC-level RX stats; name table depends on CGX (otx2) vs RPM silicon. */
2482 seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2483 while (stat < mac_ops->rx_stats_cnt) {
2484 err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2487 if (is_rvu_otx2(rvu))
2488 seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2491 seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
/* MAC-level TX stats. */
2498 seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2499 while (stat < mac_ops->tx_stats_cnt) {
2500 err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2504 if (is_rvu_otx2(rvu))
2505 seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2508 seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
/* Derive the LMAC id from the parent debugfs directory name ("lmacN"):
 * find the last 'c' in the name and parse the digits that follow it.
 */
2516 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2518 struct dentry *current_dir;
2521 current_dir = filp->file->f_path.dentry->d_parent;
2522 buf = strrchr(current_dir->d_name.name, 'c');
2526 return kstrtoint(buf + 1, 10, lmac_id);
/* debugfs read handler for the per-LMAC "stats" file: resolve the LMAC id
 * from the directory name, then dump all stats for it.
 */
2529 static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
2533 err = rvu_dbg_derive_lmacid(filp, &lmac_id);
2535 return cgx_print_stats(filp, lmac_id);
2540 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
/* Print the DMAC filter configuration for one LMAC: owning PF's PCI device,
 * broadcast/multicast accept mode, filter mode, then every enabled DMAC CAM
 * entry that belongs to this LMAC.
 */
2542 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2544 struct pci_dev *pdev = NULL;
2545 void *cgxd = s->private;
2546 char *bcast, *mcast;
/* Locate the AF's rvu handle via the RVU AF PCI device. */
2553 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2554 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2558 pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
/* PF's PCI bus number is pf + 1 in this layout. */
2561 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2565 cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2566 bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2567 mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2570 "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
2571 seq_printf(s, "%s PF%d %9s %9s",
2572 dev_name(&pdev->dev), pf, bcast, mcast);
2573 if (cfg & CGX_DMAC_CAM_ACCEPT)
2574 seq_printf(s, "%12s\n\n", "UNICAST");
2576 seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2578 seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
/* NOTE(review): CAM scan is bounded to 32 entries here — confirm this
 * matches the hardware's DMAC CAM depth on all supported silicon.
 */
2580 for (index = 0 ; index < 32 ; index++) {
2581 cfg = cgx_read_dmac_entry(cgxd, index);
2582 /* Display enabled dmac entries associated with current lmac */
2583 if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2584 FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2585 mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2586 u64_to_ether_addr(mac, dmac);
2587 seq_printf(s, "%7d %pM\n", index, dmac);
/* debugfs read handler for the per-LMAC "mac_filter" file. */
2595 static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
2599 err = rvu_dbg_derive_lmacid(filp, &lmac_id);
2601 return cgx_print_dmac_flt(filp, lmac_id);
2606 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
/* Build the CGX/RPM debugfs tree: one root named after the MAC type
 * ("cgx" or "rpm"), a per-CGX directory (e.g. "cgx0"), and inside it one
 * directory per active LMAC holding "stats" and "mac_filter" files.
 */
2608 static void rvu_dbg_cgx_init(struct rvu *rvu)
2610 struct mac_ops *mac_ops;
2611 unsigned long lmac_bmap;
2616 if (!cgx_get_cgxcnt_max())
2619 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
2623 rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2626 for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2627 cgx = rvu_cgx_pdata(i, rvu);
2630 lmac_bmap = cgx_get_lmac_bmap(cgx);
2631 /* cgx debugfs dir */
2632 sprintf(dname, "%s%d", mac_ops->name, i);
2633 rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2634 rvu->rvu_dbg.cgx_root);
2636 for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
2637 /* lmac debugfs dir */
2638 sprintf(dname, "lmac%d", lmac_id);
2640 debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
/* Per-LMAC files take the cgx device pointer as private data. */
2642 debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
2643 cgx, &rvu_dbg_cgx_stat_fops);
2644 debugfs_create_file("mac_filter", 0600,
2645 rvu->rvu_dbg.lmac, cgx,
2646 &rvu_dbg_cgx_dmac_flt_fops);
2651 /* NPC debugfs APIs */
/* Print MCAM entry/counter allocation info for one pcifunc; silently skips
 * functions with nothing allocated.
 */
2652 static void rvu_print_npc_mcam_info(struct seq_file *s,
2653 u16 pcifunc, int blkaddr)
2655 struct rvu *rvu = s->private;
2656 int entry_acnt, entry_ecnt;
2657 int cntr_acnt, cntr_ecnt;
2659 rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
2660 &entry_acnt, &entry_ecnt);
2661 rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
2662 &cntr_acnt, &cntr_ecnt);
/* Nothing allocated for this function: keep the output terse. */
2663 if (!entry_acnt && !cntr_acnt)
2666 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2667 seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
2668 rvu_get_pf(pcifunc))
2670 seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
2671 rvu_get_pf(pcifunc),
2672 (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2675 seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
2676 seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
2679 seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
2680 seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
/* debugfs read handler for "mcam_info": global MCAM geometry (key widths,
 * entry/counter totals) followed by per-PF/VF allocation details, all under
 * the mcam lock.
 */
2684 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
2686 struct rvu *rvu = filp->private;
2687 int pf, vf, numvfs, blkaddr;
2688 struct npc_mcam *mcam;
2689 u16 pcifunc, counters;
2692 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2696 mcam = &rvu->hw->mcam;
2697 counters = rvu->hw->npc_counters;
2699 seq_puts(filp, "\nNPC MCAM info:\n");
2700 /* MCAM keywidth on receive and transmit sides */
2701 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
/* Key width lives in bits [34:32] of the KEX config register. */
2702 cfg = (cfg >> 32) & 0x07;
2703 seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2704 "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2705 "224bits" : "448bits"));
2706 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
2707 cfg = (cfg >> 32) & 0x07;
2708 seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2709 "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2710 "224bits" : "448bits"));
/* Hold mcam->lock while reading allocation bookkeeping. */
2712 mutex_lock(&mcam->lock);
2714 seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
2715 seq_printf(filp, "\t\t Reserved \t: %d\n",
2716 mcam->total_entries - mcam->bmap_entries);
2717 seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
2720 seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
2721 seq_printf(filp, "\t\t Reserved \t: %d\n",
2722 counters - mcam->counters.max);
2723 seq_printf(filp, "\t\t Available \t: %d\n",
2724 rvu_rsrc_free_count(&mcam->counters));
/* If nothing is allocated at all, skip the per-function walk. */
2726 if (mcam->bmap_entries == mcam->bmap_fcnt) {
2727 mutex_unlock(&mcam->lock);
2731 seq_puts(filp, "\n\t\t Current allocation\n");
2732 seq_puts(filp, "\t\t====================\n");
2733 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2734 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2735 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
/* numvfs for this PF is in bits [19:12] of RVU_PRIV_PFX_CFG. */
2737 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2738 numvfs = (cfg >> 12) & 0xFF;
2739 for (vf = 0; vf < numvfs; vf++) {
2740 pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
2741 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2745 mutex_unlock(&mcam->lock);
2749 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
/* debugfs read handler for "rx_miss_act_stats": print the match-stat
 * counter attached to the MCAM RX miss action.
 */
2751 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
2754 struct rvu *rvu = filp->private;
2755 struct npc_mcam *mcam;
2758 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2762 mcam = &rvu->hw->mcam;
2764 seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
2765 seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
2766 rvu_read64(rvu, blkaddr,
2767 NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
2772 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
/* Helper macros for rvu_dbg_npc_mcam_show_flows(): print the MPLS TTL or
 * the LB/TC/BOS triplet of an LSE word together with its mask, using the
 * OTX2_FLOWER_MASK_MPLS_* field definitions. The second macro snapshots
 * its arguments into locals to avoid double evaluation. No comments are
 * inserted between the backslash-continued lines to keep the macro bodies
 * byte-identical.
 */
2774 #define RVU_DBG_PRINT_MPLS_TTL(pkt, mask) \
2776 seq_printf(s, "%ld ", FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, pkt)); \
2777 seq_printf(s, "mask 0x%lx\n", \
2778 FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, mask)); \
2781 #define RVU_DBG_PRINT_MPLS_LBTCBOS(_pkt, _mask) \
2783 typeof(_pkt) (pkt) = (_pkt); \
2784 typeof(_mask) (mask) = (_mask); \
2785 seq_printf(s, "%ld %ld %ld\n", \
2786 FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, pkt), \
2787 FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, pkt), \
2788 FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, pkt)); \
2789 seq_printf(s, "\tmask 0x%lx 0x%lx 0x%lx\n", \
2790 FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, mask), \
2791 FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, mask), \
2792 FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, mask)); \
/* For each feature bit set in the rule, print the feature name followed by
 * the matched packet value and its mask. Network-order fields are converted
 * with ntohs/ntohl before printing. Case labels for some features are on
 * lines elided from this extract.
 */
2795 static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
2796 struct rvu_npc_mcam_rule *rule)
2800 for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
2801 seq_printf(s, "\t%s ", npc_get_field_name(bit));
/* lxmb flags whether the layer-2 multicast/broadcast nibble matched. */
2804 if (rule->lxmb == 1)
2805 seq_puts(s, "\tL2M nibble is set\n");
2807 seq_puts(s, "\tL2B nibble is set\n");
2810 seq_printf(s, "%pM ", rule->packet.dmac);
2811 seq_printf(s, "mask %pM\n", rule->mask.dmac);
2814 seq_printf(s, "%pM ", rule->packet.smac);
2815 seq_printf(s, "mask %pM\n", rule->mask.smac);
2818 seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
2819 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
2822 seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
2823 seq_printf(s, "mask 0x%x\n",
2824 ntohs(rule->mask.vlan_tci));
2827 seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
2828 seq_printf(s, "mask 0x%x\n",
2829 ntohs(rule->mask.vlan_itci));
2832 seq_printf(s, "%d ", rule->packet.tos);
2833 seq_printf(s, "mask 0x%x\n", rule->mask.tos);
2836 seq_printf(s, "%pI4 ", &rule->packet.ip4src);
2837 seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
2840 seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
2841 seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
2844 seq_printf(s, "%pI6 ", rule->packet.ip6src);
2845 seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
2848 seq_printf(s, "%pI6 ", rule->packet.ip6dst);
2849 seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
2851 case NPC_IPFRAG_IPV6:
2852 seq_printf(s, "0x%x ", rule->packet.next_header);
2853 seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
2855 case NPC_IPFRAG_IPV4:
2856 seq_printf(s, "0x%x ", rule->packet.ip_flag);
2857 seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
2861 case NPC_SPORT_SCTP:
2862 seq_printf(s, "%d ", ntohs(rule->packet.sport));
2863 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
2867 case NPC_DPORT_SCTP:
2868 seq_printf(s, "%d ", ntohs(rule->packet.dport));
2869 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
2872 seq_printf(s, "%d ", rule->packet.tcp_flags);
2873 seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
2876 seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
2877 seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
/* MPLS LSE words 0-3: label/TC/BOS triplet and TTL per stack entry. */
2879 case NPC_MPLS1_LBTCBOS:
2880 RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[0],
2881 rule->mask.mpls_lse[0]);
2884 RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[0],
2885 rule->mask.mpls_lse[0]);
2887 case NPC_MPLS2_LBTCBOS:
2888 RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[1],
2889 rule->mask.mpls_lse[1]);
2892 RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[1],
2893 rule->mask.mpls_lse[1]);
2895 case NPC_MPLS3_LBTCBOS:
2896 RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[2],
2897 rule->mask.mpls_lse[2]);
2900 RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[2],
2901 rule->mask.mpls_lse[2]);
2903 case NPC_MPLS4_LBTCBOS:
2904 RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[3],
2905 rule->mask.mpls_lse[3]);
2908 RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
2909 rule->mask.mpls_lse[3]);
2912 seq_printf(s, "%d ", rule->packet.icmp_type);
2913 seq_printf(s, "mask 0x%x\n", rule->mask.icmp_type);
2916 seq_printf(s, "%d ", rule->packet.icmp_code);
2917 seq_printf(s, "mask 0x%x\n", rule->mask.icmp_code);
/* Print the rule's action in human-readable form, decoding TX action ops
 * for TX-interface rules and RX action ops otherwise.
 */
2926 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
2927 struct rvu_npc_mcam_rule *rule)
2929 if (is_npc_intf_tx(rule->intf)) {
2930 switch (rule->tx_action.op) {
2931 case NIX_TX_ACTIONOP_DROP:
2932 seq_puts(s, "\taction: Drop\n");
2934 case NIX_TX_ACTIONOP_UCAST_DEFAULT:
2935 seq_puts(s, "\taction: Unicast to default channel\n");
2937 case NIX_TX_ACTIONOP_UCAST_CHAN:
2938 seq_printf(s, "\taction: Unicast to channel %d\n",
2939 rule->tx_action.index);
2941 case NIX_TX_ACTIONOP_MCAST:
2942 seq_puts(s, "\taction: Multicast\n");
2944 case NIX_TX_ACTIONOP_DROP_VIOL:
2945 seq_puts(s, "\taction: Lockdown Violation Drop\n");
/* RX-interface rules decode the rx_action instead. */
2951 switch (rule->rx_action.op) {
2952 case NIX_RX_ACTIONOP_DROP:
2953 seq_puts(s, "\taction: Drop\n");
2955 case NIX_RX_ACTIONOP_UCAST:
2956 seq_printf(s, "\taction: Direct to queue %d\n",
2957 rule->rx_action.index);
2959 case NIX_RX_ACTIONOP_RSS:
2960 seq_puts(s, "\taction: RSS\n");
2962 case NIX_RX_ACTIONOP_UCAST_IPSEC:
2963 seq_puts(s, "\taction: Unicast ipsec\n");
2965 case NIX_RX_ACTIONOP_MCAST:
2966 seq_puts(s, "\taction: Multicast\n");
/* Map an NPC interface id to a printable name (NIX0/NIX1 RX/TX); the
 * returned strings are on lines elided from this extract.
 */
2974 static const char *rvu_dbg_get_intf_name(int intf)
2977 case NIX_INTFX_RX(0):
2979 case NIX_INTFX_RX(1):
2981 case NIX_INTFX_TX(0):
2983 case NIX_INTFX_TX(1):
/* debugfs read handler for "mcam_rules": under the mcam lock, print every
 * installed MCAM rule — owner PF/VF, direction, interface, entry index,
 * matched flows, forward target/channel (RX only), action, enabled state
 * and hit counter (when one is attached).
 */
2992 static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
2994 struct rvu_npc_mcam_rule *iter;
2995 struct rvu *rvu = s->private;
2996 struct npc_mcam *mcam;
3003 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3007 mcam = &rvu->hw->mcam;
3009 mutex_lock(&mcam->lock);
3010 list_for_each_entry(iter, &mcam->mcam_rules, list) {
3011 pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
3012 seq_printf(s, "\n\tInstalled by: PF%d ", pf);
3014 if (iter->owner & RVU_PFVF_FUNC_MASK) {
3015 vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
3016 seq_printf(s, "VF%d", vf);
3020 seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
3022 seq_printf(s, "\tinterface: %s\n",
3023 rvu_dbg_get_intf_name(iter->intf));
3024 seq_printf(s, "\tmcam entry: %d\n", iter->entry);
3026 rvu_dbg_npc_mcam_show_flows(s, iter);
/* RX rules also report the forwarding target and channel match. */
3027 if (is_npc_intf_rx(iter->intf)) {
3028 target = iter->rx_action.pf_func;
3029 pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
3030 seq_printf(s, "\tForward to: PF%d ", pf);
3032 if (target & RVU_PFVF_FUNC_MASK) {
3033 vf = (target & RVU_PFVF_FUNC_MASK) - 1;
3034 seq_printf(s, "VF%d", vf);
3037 seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
3038 seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
3041 rvu_dbg_npc_mcam_show_action(s, iter);
3043 enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
3044 seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
3046 if (!iter->has_cntr)
3048 seq_printf(s, "\tcounter: %d\n", iter->cntr);
3050 hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
3051 seq_printf(s, "\thits: %lld\n", hits);
3053 mutex_unlock(&mcam->lock);
3058 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
/* debugfs read handler for "exact_entries": dump the exact-match MEM table
 * (row by row across all hash ways) and then the CAM table, under the
 * exact-match table lock.
 */
3060 static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
3062 struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
3063 struct npc_exact_table_entry *cam_entry;
3064 struct npc_exact_table *table;
3065 struct rvu *rvu = s->private;
3070 table = rvu->hw->table;
3072 mutex_lock(&table->lock);
3074 /* Check if there is at least one entry in mem table */
3075 if (!table->mem_tbl_entry_cnt)
3076 goto dump_cam_table;
3078 /* Print table headers */
3079 seq_puts(s, "\n\tExact Match MEM Table\n");
3080 seq_puts(s, "Index\t");
/* Seed a per-way cursor with the first entry of each way's list. */
3082 for (i = 0; i < table->mem_table.ways; i++) {
3083 mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
3084 struct npc_exact_table_entry, list);
3086 seq_printf(s, "Way-%d\t\t\t\t\t", i);
3090 for (i = 0; i < table->mem_table.ways; i++)
3091 seq_puts(s, "\tChan MAC \t");
3093 seq_puts(s, "\n\n");
3095 /* Print mem table entries */
3096 for (i = 0; i < table->mem_table.depth; i++) {
/* Build a per-row bitmap of ways that hold an entry at index i. */
3098 for (j = 0; j < table->mem_table.ways; j++) {
3102 if (mem_entry[j]->index != i)
3108 /* No valid entries */
3112 seq_printf(s, "%d\t", i);
3113 for (j = 0; j < table->mem_table.ways; j++) {
3114 if (!(bitmap & BIT(j))) {
3115 seq_puts(s, "nil\t\t\t\t\t");
3119 seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
/* Advance this way's cursor to its next list entry. */
3121 mem_entry[j] = list_next_entry(mem_entry[j], list);
3128 if (!table->cam_tbl_entry_cnt)
3131 seq_puts(s, "\n\tExact Match CAM Table\n");
3132 seq_puts(s, "index\tchan\tMAC\n");
3134 /* Traverse cam table entries */
3135 list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
3136 seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
3141 mutex_unlock(&table->lock);
3145 RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
/* debugfs read handler for "exact_info": feature status, per-drop-rule MAC
 * filter counts and promisc state, plus MEM/CAM table geometry.
 */
3147 static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
3149 struct npc_exact_table *table;
3150 struct rvu *rvu = s->private;
3153 table = rvu->hw->table;
3155 seq_puts(s, "\n\tExact Table Info\n");
3156 seq_printf(s, "Exact Match Feature : %s\n",
3157 rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disable");
/* Nothing more to report when the feature is off. */
3158 if (!rvu->hw->cap.npc_exact_match_enabled)
3161 seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
3162 for (i = 0; i < table->num_drop_rules; i++)
3163 seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
3165 seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
3166 for (i = 0; i < table->num_drop_rules; i++)
3167 seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
3169 seq_puts(s, "\n\tMEM Table Info\n");
3170 seq_printf(s, "Ways : %d\n", table->mem_table.ways);
3171 seq_printf(s, "Depth : %d\n", table->mem_table.depth);
3172 seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
3173 seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
3174 seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
3176 seq_puts(s, "\n\tCAM Table Info\n");
3177 seq_printf(s, "Depth : %d\n", table->cam_table.depth);
3182 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
/* debugfs read handler for "exact_drop_cnt": for each exact-match drop
 * rule, print owning pcifunc, hit counter, matched channel (from CAM word
 * 0) and whether the MCAM entry is enabled.
 */
3184 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
3186 struct npc_exact_table *table;
3187 struct rvu *rvu = s->private;
3188 struct npc_key_field *field;
3194 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3195 table = rvu->hw->table;
3197 field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
3199 seq_puts(s, "\n\t Exact Hit on drop status\n");
3200 seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
3202 for (i = 0; i < table->num_drop_rules; i++) {
3203 pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
3204 cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
3206 /* channel will be always in keyword 0 */
3207 cam1 = rvu_read64(rvu, blkaddr,
3208 NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
3209 chan = field->kw_mask[0] & cam1;
/* Bit 0 of the bank config is the entry-enable flag. */
3211 str = (cfg & 1) ? "enabled" : "disabled";
3213 seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
3214 rvu_read64(rvu, blkaddr,
3215 NPC_AF_MATCH_STATX(table->counter_idx[i])),
3222 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
/* Create the "npc" debugfs directory; exact-match files are only added
 * when the hardware/firmware reports the feature as enabled.
 */
3224 static void rvu_dbg_npc_init(struct rvu *rvu)
3226 rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
3228 debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
3229 &rvu_dbg_npc_mcam_info_fops);
3230 debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
3231 &rvu_dbg_npc_mcam_rules_fops);
3233 debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
3234 &rvu_dbg_npc_rx_miss_act_fops);
3236 if (!rvu->hw->cap.npc_exact_match_enabled)
3239 debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
3240 &rvu_dbg_npc_exact_entries_fops);
3242 debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
3243 &rvu_dbg_npc_exact_info_fops);
3245 debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
3246 &rvu_dbg_npc_exact_drop_cnt_fops);
/* Print FREE/BUSY bitmasks for all CPT engines of one type (SE/IE/AE).
 * Engines are numbered SE first, then IE, then AE, so [e_min, e_max) is
 * derived from the per-type counts in CPT_AF_CONSTANTS1; the selection of
 * the range per eng_type happens on lines elided from this extract.
 */
3250 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
3252 struct cpt_ctx *ctx = filp->private;
3253 u64 busy_sts = 0, free_sts = 0;
3254 u32 e_min = 0, e_max = 0, e, i;
3255 u16 max_ses, max_ies, max_aes;
3256 struct rvu *rvu = ctx->rvu;
3257 int blkaddr = ctx->blkaddr;
/* CPT_AF_CONSTANTS1: SE count [15:0], IE count [31:16], AE count [47:32] */
3260 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3261 max_ses = reg & 0xffff;
3262 max_ies = (reg >> 16) & 0xffff;
3263 max_aes = (reg >> 32) & 0xffff;
3267 e_min = max_ses + max_ies;
3268 e_max = max_ses + max_ies + max_aes;
3276 e_max = max_ses + max_ies;
/* Collect a busy/free bit per engine from its STS register. */
3282 for (e = e_min, i = 0; e < e_max; e++, i++) {
3283 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
3285 busy_sts |= 1ULL << i;
3288 free_sts |= 1ULL << i;
3290 seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
3291 seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
/* Thin wrappers exposing engine-status per type as separate debugfs files. */
3296 static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
3298 return cpt_eng_sts_display(filp, CPT_AE_TYPE);
3301 RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
3303 static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
3305 return cpt_eng_sts_display(filp, CPT_SE_TYPE);
3308 RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
3310 static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
3312 return cpt_eng_sts_display(filp, CPT_IE_TYPE);
3315 RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
/* debugfs read handler: per CPT engine (all types), print group-enable,
 * active-info and control registers.
 */
3317 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
3319 struct cpt_ctx *ctx = filp->private;
3320 u16 max_ses, max_ies, max_aes;
3321 struct rvu *rvu = ctx->rvu;
3322 int blkaddr = ctx->blkaddr;
/* Total engine count = SE + IE + AE from CPT_AF_CONSTANTS1. */
3326 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3327 max_ses = reg & 0xffff;
3328 max_ies = (reg >> 16) & 0xffff;
3329 max_aes = (reg >> 32) & 0xffff;
3331 e_max = max_ses + max_ies + max_aes;
3333 seq_puts(filp, "===========================================\n");
3334 for (e = 0; e < e_max; e++) {
3335 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
3336 seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
3338 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
3339 seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
3341 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
3342 seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
3344 seq_puts(filp, "===========================================\n");
3349 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
/* debugfs read handler: per CPT LF, print CTL/CTL2/PTR_CTL and the block's
 * generic LF config register; bails out if the LF bitmap is absent.
 */
3351 static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
3353 struct cpt_ctx *ctx = filp->private;
3354 int blkaddr = ctx->blkaddr;
3355 struct rvu *rvu = ctx->rvu;
3356 struct rvu_block *block;
3357 struct rvu_hwinfo *hw;
3362 block = &hw->block[blkaddr];
3363 if (!block->lf.bmap)
3366 seq_puts(filp, "===========================================\n");
3367 for (lf = 0; lf < block->lf.max; lf++) {
3368 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
3369 seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
3370 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
3371 seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
3372 reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
3373 seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
/* Generic per-LF config at lfcfg_reg + (lf << lfshift). */
3374 reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
3375 (lf << block->lfshift));
3376 seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
3377 seq_puts(filp, "===========================================\n");
3382 RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
/* debugfs read handler: dump the CPT fault/poison/RVU/RAS interrupt and
 * execution-error status registers.
 */
3384 static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
3386 struct cpt_ctx *ctx = filp->private;
3387 struct rvu *rvu = ctx->rvu;
3388 int blkaddr = ctx->blkaddr;
3391 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
3392 reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
3393 seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
3394 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
3395 reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
3396 seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
3397 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
3398 seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
3399 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
3400 seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
3401 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
3402 seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
3403 reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
3404 seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);
3409 RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
3411 static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
3413 struct cpt_ctx *ctx = filp->private;
3414 struct rvu *rvu = ctx->rvu;
3415 int blkaddr = ctx->blkaddr;
3418 reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
3419 seq_printf(filp, "CPT instruction requests %llu\n", reg);
3420 reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
3421 seq_printf(filp, "CPT instruction latency %llu\n", reg);
3422 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
3423 seq_printf(filp, "CPT NCB read requests %llu\n", reg);
3424 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
3425 seq_printf(filp, "CPT NCB read latency %llu\n", reg);
3426 reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
3427 seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
3428 reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
3429 seq_printf(filp, "CPT active cycles pc %llu\n", reg);
3430 reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
3431 seq_printf(filp, "CPT clock count pc %llu\n", reg);
3436 RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
3438 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
3440 struct cpt_ctx *ctx;
3442 if (!is_block_implemented(rvu->hw, blkaddr))
3445 if (blkaddr == BLKADDR_CPT0) {
3446 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
3447 ctx = &rvu->rvu_dbg.cpt_ctx[0];
3448 ctx->blkaddr = BLKADDR_CPT0;
3451 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
3453 ctx = &rvu->rvu_dbg.cpt_ctx[1];
3454 ctx->blkaddr = BLKADDR_CPT1;
3458 debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
3459 &rvu_dbg_cpt_pc_fops);
3460 debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3461 &rvu_dbg_cpt_ae_sts_fops);
3462 debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3463 &rvu_dbg_cpt_se_sts_fops);
3464 debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
3465 &rvu_dbg_cpt_ie_sts_fops);
3466 debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
3467 &rvu_dbg_cpt_engines_info_fops);
3468 debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
3469 &rvu_dbg_cpt_lfs_info_fops);
3470 debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
3471 &rvu_dbg_cpt_err_info_fops);
3474 static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
3476 if (!is_rvu_otx2(rvu))
3482 void rvu_dbg_init(struct rvu *rvu)
3484 rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
3486 debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
3487 &rvu_dbg_rsrc_status_fops);
3489 if (!is_rvu_otx2(rvu))
3490 debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
3491 rvu, &rvu_dbg_lmtst_map_table_fops);
3493 if (!cgx_get_cgxcnt_max())
3496 if (is_rvu_otx2(rvu))
3497 debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
3498 rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3500 debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
3501 rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
3504 rvu_dbg_npa_init(rvu);
3505 rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
3507 rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
3508 rvu_dbg_cgx_init(rvu);
3509 rvu_dbg_npc_init(rvu);
3510 rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
3511 rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
3512 rvu_dbg_mcs_init(rvu);
3515 void rvu_dbg_exit(struct rvu *rvu)
3517 debugfs_remove_recursive(rvu->rvu_dbg.root);
3520 #endif /* CONFIG_DEBUG_FS */