net/mlx5e: Add local loopback counter to vport stats
[linux-2.6-block.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_stats.c
CommitLineData
c0752f2b
KH
1/*
2 * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
69c1280b 33#include "lib/mlx5.h"
c0752f2b 34#include "en.h"
943aa7bd 35#include "en_accel/ktls.h"
0aab3e1b 36#include "en_accel/en_accel.h"
dd1979cf 37#include "en/ptp.h"
0a1498eb 38#include "en/port.h"
c0752f2b 39
cc10e84b
JD
40#ifdef CONFIG_PAGE_POOL_STATS
41#include <net/page_pool.h>
42#endif
43
3460c184
SM
44static unsigned int stats_grps_num(struct mlx5e_priv *priv)
45{
46 return !priv->profile->stats_grps_num ? 0 :
47 priv->profile->stats_grps_num(priv);
48}
49
50unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
51{
f0ff8e8c 52 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
3460c184
SM
53 const unsigned int num_stats_grps = stats_grps_num(priv);
54 unsigned int total = 0;
55 int i;
56
57 for (i = 0; i < num_stats_grps; i++)
f0ff8e8c 58 total += stats_grps[i]->get_num_stats(priv);
3460c184
SM
59
60 return total;
61}
62
b521105b
AH
63void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
64{
65 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
66 const unsigned int num_stats_grps = stats_grps_num(priv);
67 int i;
68
69 for (i = num_stats_grps - 1; i >= 0; i--)
70 if (stats_grps[i]->update_stats &&
71 stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
72 stats_grps[i]->update_stats(priv);
73}
74
3460c184
SM
75void mlx5e_stats_update(struct mlx5e_priv *priv)
76{
f0ff8e8c 77 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
3460c184
SM
78 const unsigned int num_stats_grps = stats_grps_num(priv);
79 int i;
80
81 for (i = num_stats_grps - 1; i >= 0; i--)
f0ff8e8c
SM
82 if (stats_grps[i]->update_stats)
83 stats_grps[i]->update_stats(priv);
3460c184
SM
84}
85
86void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
87{
f0ff8e8c 88 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
3460c184
SM
89 const unsigned int num_stats_grps = stats_grps_num(priv);
90 int i;
91
92 for (i = 0; i < num_stats_grps; i++)
f0ff8e8c 93 idx = stats_grps[i]->fill_stats(priv, data, idx);
3460c184
SM
94}
95
96void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
97{
f0ff8e8c 98 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
3460c184
SM
99 const unsigned int num_stats_grps = stats_grps_num(priv);
100 int i, idx = 0;
101
102 for (i = 0; i < num_stats_grps; i++)
f0ff8e8c 103 idx = stats_grps[i]->fill_strings(priv, data, idx);
3460c184
SM
104}
105
106/* Concrete NIC Stats */
107
c0752f2b
KH
/* Software counters aggregated from the per-ring (RQ/SQ/XDP/XSK/channel)
 * statistics.  Order here defines the ethtool -S output order; each entry
 * maps a display name to its offset inside struct mlx5e_sw_stats.
 * Conditionally compiled sections mirror the matching fields in the struct.
 */
static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

#ifdef CONFIG_MLX5_EN_TLS
	/* TX kTLS offload counters */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
#ifdef CONFIG_PAGE_POOL_STATS
	/* page_pool allocation/recycle counters, snapshotted per RQ */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
#endif
#ifdef CONFIG_MLX5_EN_TLS
	/* RX kTLS offload counters */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
	/* per-channel (NAPI/EQ) counters */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	/* AF_XDP zero-copy counters */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};
242
243#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
244
/* The sw group size is fixed at compile time: every descriptor in
 * sw_stats_desc is always reported.
 */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
	return NUM_SW_COUNTERS;
}
249
96b12796 250static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
c0752f2b
KH
251{
252 int i;
253
254 for (i = 0; i < NUM_SW_COUNTERS; i++)
255 strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
256 return idx;
257}
258
96b12796 259static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
c0752f2b
KH
260{
261 int i;
262
263 for (i = 0; i < NUM_SW_COUNTERS; i++)
264 data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
265 return idx;
266}
267
1a7f5124
EBE
268static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
269 struct mlx5e_xdpsq_stats *xdpsq_red_stats)
270{
271 s->tx_xdp_xmit += xdpsq_red_stats->xmit;
272 s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
273 s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
274 s->tx_xdp_nops += xdpsq_red_stats->nops;
275 s->tx_xdp_full += xdpsq_red_stats->full;
276 s->tx_xdp_err += xdpsq_red_stats->err;
277 s->tx_xdp_cqes += xdpsq_red_stats->cqes;
278}
279
/* Fold one RQ-attached XDP_TX SQ's counters into the aggregated sw stats. */
static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xdpsq_stats)
{
	s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
	s->rx_xdp_tx_nops += xdpsq_stats->nops;
	s->rx_xdp_tx_full  += xdpsq_stats->full;
	s->rx_xdp_tx_err   += xdpsq_stats->err;
	s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
}
291
/* Fold one AF_XDP TX SQ's counters into the aggregated sw stats.
 * Note: no nops counter is exposed for XSK SQs, unlike XDP SQs.
 */
static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xsksq_stats)
{
	s->tx_xsk_xmit  += xsksq_stats->xmit;
	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
	s->tx_xsk_inlnw += xsksq_stats->inlnw;
	s->tx_xsk_full  += xsksq_stats->full;
	s->tx_xsk_err   += xsksq_stats->err;
	s->tx_xsk_cqes  += xsksq_stats->cqes;
}
302
/* Fold one AF_XDP RQ's counters into the aggregated sw stats. */
static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
						  struct mlx5e_rq_stats *xskrq_stats)
{
	s->rx_xsk_packets                += xskrq_stats->packets;
	s->rx_xsk_bytes                  += xskrq_stats->bytes;
	s->rx_xsk_csum_complete          += xskrq_stats->csum_complete;
	s->rx_xsk_csum_unnecessary       += xskrq_stats->csum_unnecessary;
	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
	s->rx_xsk_csum_none              += xskrq_stats->csum_none;
	s->rx_xsk_ecn_mark               += xskrq_stats->ecn_mark;
	s->rx_xsk_removed_vlan_packets   += xskrq_stats->removed_vlan_packets;
	s->rx_xsk_xdp_drop               += xskrq_stats->xdp_drop;
	s->rx_xsk_xdp_redirect           += xskrq_stats->xdp_redirect;
	s->rx_xsk_wqe_err                += xskrq_stats->wqe_err;
	s->rx_xsk_mpwqe_filler_cqes      += xskrq_stats->mpwqe_filler_cqes;
	s->rx_xsk_mpwqe_filler_strides   += xskrq_stats->mpwqe_filler_strides;
	s->rx_xsk_oversize_pkts_sw_drop  += xskrq_stats->oversize_pkts_sw_drop;
	s->rx_xsk_buff_alloc_err         += xskrq_stats->buff_alloc_err;
	s->rx_xsk_cqe_compress_blks      += xskrq_stats->cqe_compress_blks;
	s->rx_xsk_cqe_compress_pkts      += xskrq_stats->cqe_compress_pkts;
	s->rx_xsk_congst_umr             += xskrq_stats->congst_umr;
	s->rx_xsk_arfs_err               += xskrq_stats->arfs_err;
}
326
/* Fold one regular RQ's counters into the aggregated sw stats.
 * page_pool and TLS sections are compiled in only when the matching
 * fields exist in both structs.
 */
static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_rq_stats *rq_stats)
{
	s->rx_packets                 += rq_stats->packets;
	s->rx_bytes                   += rq_stats->bytes;
	s->rx_lro_packets             += rq_stats->lro_packets;
	s->rx_lro_bytes               += rq_stats->lro_bytes;
	s->rx_gro_packets             += rq_stats->gro_packets;
	s->rx_gro_bytes               += rq_stats->gro_bytes;
	s->rx_gro_skbs                += rq_stats->gro_skbs;
	s->rx_gro_match_packets       += rq_stats->gro_match_packets;
	s->rx_gro_large_hds           += rq_stats->gro_large_hds;
	s->rx_ecn_mark                += rq_stats->ecn_mark;
	s->rx_removed_vlan_packets    += rq_stats->removed_vlan_packets;
	s->rx_csum_none               += rq_stats->csum_none;
	s->rx_csum_complete           += rq_stats->csum_complete;
	s->rx_csum_complete_tail      += rq_stats->csum_complete_tail;
	s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
	s->rx_csum_unnecessary        += rq_stats->csum_unnecessary;
	s->rx_csum_unnecessary_inner  += rq_stats->csum_unnecessary_inner;
	s->rx_xdp_drop                += rq_stats->xdp_drop;
	s->rx_xdp_redirect            += rq_stats->xdp_redirect;
	s->rx_wqe_err                 += rq_stats->wqe_err;
	s->rx_mpwqe_filler_cqes       += rq_stats->mpwqe_filler_cqes;
	s->rx_mpwqe_filler_strides    += rq_stats->mpwqe_filler_strides;
	s->rx_oversize_pkts_sw_drop   += rq_stats->oversize_pkts_sw_drop;
	s->rx_buff_alloc_err          += rq_stats->buff_alloc_err;
	s->rx_cqe_compress_blks       += rq_stats->cqe_compress_blks;
	s->rx_cqe_compress_pkts       += rq_stats->cqe_compress_pkts;
	s->rx_congst_umr              += rq_stats->congst_umr;
	s->rx_arfs_err                += rq_stats->arfs_err;
	s->rx_recover                 += rq_stats->recover;
#ifdef CONFIG_PAGE_POOL_STATS
	s->rx_pp_alloc_fast            += rq_stats->pp_alloc_fast;
	s->rx_pp_alloc_slow            += rq_stats->pp_alloc_slow;
	s->rx_pp_alloc_empty           += rq_stats->pp_alloc_empty;
	s->rx_pp_alloc_refill          += rq_stats->pp_alloc_refill;
	s->rx_pp_alloc_waive           += rq_stats->pp_alloc_waive;
	s->rx_pp_alloc_slow_high_order += rq_stats->pp_alloc_slow_high_order;
	s->rx_pp_recycle_cached        += rq_stats->pp_recycle_cached;
	s->rx_pp_recycle_cache_full    += rq_stats->pp_recycle_cache_full;
	s->rx_pp_recycle_ring          += rq_stats->pp_recycle_ring;
	s->rx_pp_recycle_ring_full     += rq_stats->pp_recycle_ring_full;
	s->rx_pp_recycle_released_ref  += rq_stats->pp_recycle_released_ref;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
	s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
	s->rx_tls_resync_req_pkt      += rq_stats->tls_resync_req_pkt;
	s->rx_tls_resync_req_start    += rq_stats->tls_resync_req_start;
	s->rx_tls_resync_req_end      += rq_stats->tls_resync_req_end;
	s->rx_tls_resync_req_skip     += rq_stats->tls_resync_req_skip;
	s->rx_tls_resync_res_ok       += rq_stats->tls_resync_res_ok;
	s->rx_tls_resync_res_retry    += rq_stats->tls_resync_res_retry;
	s->rx_tls_resync_res_skip     += rq_stats->tls_resync_res_skip;
	s->rx_tls_err                 += rq_stats->tls_err;
#endif
}
385
/* Fold one channel's NAPI/EQ counters into the aggregated sw stats. */
static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_ch_stats *ch_stats)
{
	s->ch_events     += ch_stats->events;
	s->ch_poll       += ch_stats->poll;
	s->ch_arm        += ch_stats->arm;
	s->ch_aff_change += ch_stats->aff_change;
	s->ch_force_irq  += ch_stats->force_irq;
	s->ch_eq_rearm   += ch_stats->eq_rearm;
}
396
/* Fold one TX SQ's counters into the aggregated sw stats.  TLS counters
 * are compiled in only when kTLS offload support is built.
 */
static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
					       struct mlx5e_sq_stats *sq_stats)
{
	s->tx_packets            += sq_stats->packets;
	s->tx_bytes              += sq_stats->bytes;
	s->tx_tso_packets        += sq_stats->tso_packets;
	s->tx_tso_bytes          += sq_stats->tso_bytes;
	s->tx_tso_inner_packets  += sq_stats->tso_inner_packets;
	s->tx_tso_inner_bytes    += sq_stats->tso_inner_bytes;
	s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
	s->tx_nop                += sq_stats->nop;
	s->tx_mpwqe_blks         += sq_stats->mpwqe_blks;
	s->tx_mpwqe_pkts         += sq_stats->mpwqe_pkts;
	s->tx_queue_stopped      += sq_stats->stopped;
	s->tx_queue_wake         += sq_stats->wake;
	s->tx_queue_dropped      += sq_stats->dropped;
	s->tx_cqe_err            += sq_stats->cqe_err;
	s->tx_recover            += sq_stats->recover;
	s->tx_xmit_more          += sq_stats->xmit_more;
	s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
	s->tx_csum_none          += sq_stats->csum_none;
	s->tx_csum_partial       += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
	s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
	s->tx_tls_ooo               += sq_stats->tls_ooo;
	s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
	s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
	s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
	s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
	s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
#endif
	s->tx_cqes               += sq_stats->cqes;
}
432
145e5637
EBE
433static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
434 struct mlx5e_sw_stats *s)
435{
436 int i;
437
a28359e9 438 if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
145e5637
EBE
439 return;
440
b0d35de4 441 mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);
145e5637 442
a28359e9
AL
443 if (priv->tx_ptp_opened) {
444 for (i = 0; i < priv->max_opened_tc; i++) {
445 mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);
446
447 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
448 barrier();
449 }
450 }
451 if (priv->rx_ptp_opened) {
452 mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);
145e5637
EBE
453
454 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
455 barrier();
456 }
457}
458
214baf22
MM
459static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
460 struct mlx5e_sw_stats *s)
461{
462 struct mlx5e_sq_stats **stats;
463 u16 max_qos_sqs;
464 int i;
465
466 /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
db83f24d
MT
467 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
468 stats = READ_ONCE(priv->htb_qos_sq_stats);
214baf22
MM
469
470 for (i = 0; i < max_qos_sqs; i++) {
471 mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));
472
473 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
474 barrier();
475 }
476}
477
cc10e84b
JD
#ifdef CONFIG_PAGE_POOL_STATS
/* Snapshot the page_pool core's counters into the channel's RQ stats.
 * Values are copied (not accumulated) because page_pool_get_stats()
 * already returns running totals for the pool's lifetime.
 */
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
	struct mlx5e_rq_stats *rq_stats = c->rq.stats;
	struct page_pool *pool = c->rq.page_pool;
	struct page_pool_stats stats = { 0 };

	/* Returns false (and leaves @stats untouched) if the pool has no
	 * stats support; keep the previous snapshot in that case.
	 */
	if (!page_pool_get_stats(pool, &stats))
		return;

	rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
	rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
	rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
	rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
	rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
	rq_stats->pp_alloc_refill = stats.alloc_stats.refill;

	rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
	rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
	rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
	rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
	rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
}
#else
/* Stub when the kernel is built without page_pool statistics. */
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
}
#endif
506
/* Rebuild the aggregated sw stats from scratch by summing every per-ring
 * counter set.  Iterates priv->stats_nch (all channel stats ever opened,
 * so counters survive channel close), but refreshes page_pool snapshots
 * only for currently active channels.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->channels.num; i++) /* for active channels only */
		mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);

	for (i = 0; i < priv->stats_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			priv->channel_stats[i];

		int j;

		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
		/* xdp redirect */
		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
		/* AF_XDP zero-copy */
		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);

		for (j = 0; j < priv->max_opened_tc; j++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
	mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}
542
fd8dcdb8
KH
/* Device queue counters: the regular q_counter tracks RX drops due to
 * lack of WQEs; the drop RQ counter tracks packets that arrived while
 * the interface was down.
 */
static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS		ARRAY_SIZE(drop_rq_stats_desc)
fd8dcdb8 553
96b12796 554static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
fd8dcdb8 555{
7cbaf9a3
MS
556 int num_stats = 0;
557
558 if (priv->q_counter)
559 num_stats += NUM_Q_COUNTERS;
560
561 if (priv->drop_rq_q_counter)
562 num_stats += NUM_DROP_RQ_COUNTERS;
563
564 return num_stats;
fd8dcdb8
KH
565}
566
96b12796 567static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
fd8dcdb8
KH
568{
569 int i;
570
571 for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
7cbaf9a3
MS
572 strcpy(data + (idx++) * ETH_GSTRING_LEN,
573 q_stats_desc[i].format);
574
575 for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
576 strcpy(data + (idx++) * ETH_GSTRING_LEN,
577 drop_rq_stats_desc[i].format);
578
fd8dcdb8
KH
579 return idx;
580}
581
96b12796 582static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
fd8dcdb8
KH
583{
584 int i;
585
586 for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
7cbaf9a3
MS
587 data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
588 q_stats_desc, i);
589 for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
590 data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
591 drop_rq_stats_desc, i);
fd8dcdb8
KH
592 return idx;
593}
594
/* Query both queue counters from firmware.  Failures are ignored and the
 * previously cached values are kept (best effort).
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	int ret;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

	if (priv->q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
							  out, out_of_buffer);
	}

	if (priv->drop_rq_q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		/* The drop RQ's out_of_buffer field counts packets received
		 * while the interface was down, hence the different name.
		 */
		if (!ret)
			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
							    out, out_of_buffer);
	}
}
622
/* vNIC environment counters.  Each descriptor set is reported only when
 * the matching device capability bit is set (see the NUM_* macros below).
 */
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)

static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
	  VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
	  VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

static const struct counter_desc vnic_env_stats_drop_desc[] = {
	{ "rx_oversize_pkts_buffer",
	  VNIC_ENV_OFF(vport_env.eth_wqe_too_small) },
};

/* Each count macro yields its descriptor array size when the device
 * advertises the counter, otherwise 0 (the section is omitted).
 */
#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
#define NUM_VNIC_ENV_DROP_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, eth_wqe_too_small) ? \
	 ARRAY_SIZE(vnic_env_stats_drop_desc) : 0)
5c298143 648
96b12796 649static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
5c298143 650{
0cfafd4b 651 return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
16ab85e7
GP
652 NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev) +
653 NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev);
5c298143
MS
654}
655
96b12796 656static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
5c298143
MS
657{
658 int i;
659
0cfafd4b
MS
660 for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
661 strcpy(data + (idx++) * ETH_GSTRING_LEN,
662 vnic_env_stats_steer_desc[i].format);
5c298143 663
0cfafd4b 664 for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
5c298143 665 strcpy(data + (idx++) * ETH_GSTRING_LEN,
0cfafd4b 666 vnic_env_stats_dev_oob_desc[i].format);
16ab85e7
GP
667
668 for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
669 strcpy(data + (idx++) * ETH_GSTRING_LEN,
670 vnic_env_stats_drop_desc[i].format);
671
5c298143
MS
672 return idx;
673}
674
/* Emit the vNIC env counter values from the last firmware query.  The
 * steering counter is 64-bit in the vnic_env layout; the oob and drop
 * counters are 32-bit, hence the different READ_CTR accessors.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_steer_desc, i);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_dev_oob_desc, i);

	for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_drop_desc, i);

	return idx;
}
693
/* Query the vNIC environment counters from firmware into the cached
 * query_vnic_env_out buffer.  Best effort: the command's return value is
 * ignored, leaving the previous snapshot intact on failure.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	/* Skip the firmware command entirely when no vnic_env counter is
	 * supported by this device.
	 */
	if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}
706
40cab9f1
KH
/* Per-vport hardware counters, read from the QUERY_VPORT_COUNTER command
 * output; names are grouped as unicast/multicast/broadcast for both
 * Ethernet and RDMA (IB) traffic.
 */
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};
750
c8013a1f
OHT
751static const struct counter_desc vport_loopback_stats_desc[] = {
752 { "vport_loopback_packets",
753 VPORT_COUNTER_OFF(local_loopback.packets) },
754 { "vport_loopback_bytes",
755 VPORT_COUNTER_OFF(local_loopback.octets) },
756};
757
40cab9f1 758#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
c8013a1f
OHT
759#define NUM_VPORT_LOOPBACK_COUNTERS(dev) \
760 (MLX5_CAP_GEN(dev, vport_counter_local_loopback) ? \
761 ARRAY_SIZE(vport_loopback_stats_desc) : 0)
40cab9f1 762
96b12796 763static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
40cab9f1 764{
c8013a1f
OHT
765 return NUM_VPORT_COUNTERS +
766 NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev);
40cab9f1
KH
767}
768
96b12796 769static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
40cab9f1
KH
770{
771 int i;
772
773 for (i = 0; i < NUM_VPORT_COUNTERS; i++)
774 strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
c8013a1f
OHT
775
776 for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
777 strcpy(data + (idx++) * ETH_GSTRING_LEN,
778 vport_loopback_stats_desc[i].format);
779
40cab9f1
KH
780 return idx;
781}
782
96b12796 783static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
40cab9f1
KH
784{
785 int i;
786
787 for (i = 0; i < NUM_VPORT_COUNTERS; i++)
788 data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
789 vport_stats_desc, i);
c8013a1f
OHT
790
791 for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
792 data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
793 vport_loopback_stats_desc, i);
794
40cab9f1
KH
795 return idx;
796}
797
96b12796 798static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
19386177 799{
19386177 800 u32 *out = (u32 *)priv->stats.vport.query_vport_out;
a184cda1 801 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
19386177
KH
802 struct mlx5_core_dev *mdev = priv->mdev;
803
804 MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
a184cda1 805 mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
19386177
KH
806}
807
6e6ef814
KH
/* Byte offset of the high dword of 64-bit counter @c in the PPCNT
 * IEEE 802.3 counter group layout.
 */
#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
/* ethtool name/offset pairs for the IEEE 802.3 physical-port counters. */
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)

/* All 802.3 counters are unconditional; the group size is fixed. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
	return NUM_PPORT_802_3_COUNTERS;
}
838
96b12796 839static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
6e6ef814
KH
840{
841 int i;
842
843 for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
844 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
845 return idx;
846}
847
96b12796 848static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
6e6ef814
KH
849{
850 int i;
851
852 for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
853 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
854 pport_802_3_stats_desc, i);
855 return idx;
856}
857
75370eb0
ED
/* Basic PPCNT groups are available either when the PCAM register reports
 * PPCNT support, or unconditionally on devices without a PCAM register.
 */
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
860
7c453526 861static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
19386177
KH
862{
863 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
864 struct mlx5_core_dev *mdev = priv->mdev;
865 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
866 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
867 void *out;
868
75370eb0
ED
869 if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
870 return;
871
19386177
KH
872 MLX5_SET(ppcnt_reg, in, local_port, 1);
873 out = pstats->IEEE_802_3_counters;
874 MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
875 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
876}
877
/* Read a big-endian 64-bit PPCNT counter @c from counter set @set inside
 * raw register buffer @ptr and convert it to host order. The "_high" field
 * name addresses the start of the 64-bit value in the register layout.
 */
#define MLX5E_READ_CTR64_BE_F(ptr, set, c)		\
	be64_to_cpu(*(__be64 *)((char *)ptr +		\
		MLX5_BYTE_OFF(ppcnt_reg,		\
			      counter_set.set.c##_high)))
098d9ed9 882
b572ec9f
JK
883static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
884 u32 *ppcnt_ieee_802_3)
098d9ed9 885{
098d9ed9
JK
886 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
887 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
888
889 if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
b572ec9f 890 return -EOPNOTSUPP;
098d9ed9
JK
891
892 MLX5_SET(ppcnt_reg, in, local_port, 1);
893 MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
b572ec9f
JK
894 return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
895 sz, MLX5_REG_PPCNT, 0, 0);
896}
897
898void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
899 struct ethtool_pause_stats *pause_stats)
900{
901 u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
902 struct mlx5_core_dev *mdev = priv->mdev;
903
904 if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
905 return;
098d9ed9
JK
906
907 pause_stats->tx_pause_frames =
908 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
1703bb50 909 eth_802_3_cntrs_grp_data_layout,
098d9ed9
JK
910 a_pause_mac_ctrl_frames_transmitted);
911 pause_stats->rx_pause_frames =
912 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
1703bb50 913 eth_802_3_cntrs_grp_data_layout,
098d9ed9
JK
914 a_pause_mac_ctrl_frames_received);
915}
916
b572ec9f
JK
917void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
918 struct ethtool_eth_phy_stats *phy_stats)
919{
920 u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
921 struct mlx5_core_dev *mdev = priv->mdev;
922
923 if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
924 return;
925
926 phy_stats->SymbolErrorDuringCarrier =
927 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
928 eth_802_3_cntrs_grp_data_layout,
929 a_symbol_error_during_carrier);
930}
931
/* Standard ethtool MAC statistics, mapped one-to-one from the IEEE 802.3
 * PPCNT counter group. All fields are left untouched on query failure.
 */
void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_mac_stats *mac_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

/* Shorthand: read one 64-bit 802.3 counter from the local snapshot. */
#define RD(name) \
	MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, \
			      eth_802_3_cntrs_grp_data_layout, \
			      name)

	mac_stats->FramesTransmittedOK = RD(a_frames_transmitted_ok);
	mac_stats->FramesReceivedOK = RD(a_frames_received_ok);
	mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
	mac_stats->OctetsTransmittedOK = RD(a_octets_transmitted_ok);
	mac_stats->OctetsReceivedOK = RD(a_octets_received_ok);
	mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
	mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
	mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
	mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
	mac_stats->InRangeLengthErrors = RD(a_in_range_length_errors);
	mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
	mac_stats->FrameTooLongErrors = RD(a_frame_too_long_errors);
#undef RD
}
960
961void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
962 struct ethtool_eth_ctrl_stats *ctrl_stats)
963{
964 u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
965 struct mlx5_core_dev *mdev = priv->mdev;
966
967 if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
968 return;
969
970 ctrl_stats->MACControlFramesTransmitted =
971 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
972 eth_802_3_cntrs_grp_data_layout,
973 a_mac_control_frames_transmitted);
974 ctrl_stats->MACControlFramesReceived =
975 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
976 eth_802_3_cntrs_grp_data_layout,
977 a_mac_control_frames_received);
978 ctrl_stats->UnsupportedOpcodesReceived =
979 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
980 eth_802_3_cntrs_grp_data_layout,
981 a_unsupported_opcodes_received);
982}
983
fc8e64a3
KH
/* Byte offset of the high dword of 64-bit counter @c in the PPCNT
 * RFC 2863 counter group layout.
 */
#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
/* ethtool name/offset pairs for the RFC 2863 interface counters. */
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS	ARRAY_SIZE(pport_2863_stats_desc)

/* All RFC 2863 counters are unconditional; the group size is fixed. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
	return NUM_PPORT_2863_COUNTERS;
}
999
96b12796 1000static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
fc8e64a3
KH
1001{
1002 int i;
1003
1004 for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
1005 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
1006 return idx;
1007}
1008
96b12796 1009static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
fc8e64a3
KH
1010{
1011 int i;
1012
1013 for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
1014 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
1015 pport_2863_stats_desc, i);
1016 return idx;
1017}
1018
96b12796 1019static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
19386177
KH
1020{
1021 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1022 struct mlx5_core_dev *mdev = priv->mdev;
1023 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1024 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1025 void *out;
1026
1027 MLX5_SET(ppcnt_reg, in, local_port, 1);
1028 out = pstats->RFC_2863_counters;
1029 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
1030 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1031}
1032
e0e0def9
KH
/* Byte offset of the high dword of 64-bit counter @c in the PPCNT
 * RFC 2819 counter group layout.
 */
#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
/* ethtool name/offset pairs for the RFC 2819 (RMON) counters, including
 * the receive packet-size histogram buckets.
 */
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS	ARRAY_SIZE(pport_2819_stats_desc)

/* All RFC 2819 counters are unconditional; the group size is fixed. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
	return NUM_PPORT_2819_COUNTERS;
}
1058
96b12796 1059static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
e0e0def9
KH
1060{
1061 int i;
1062
1063 for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
1064 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
1065 return idx;
1066}
1067
96b12796 1068static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
e0e0def9
KH
1069{
1070 int i;
1071
1072 for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
1073 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
1074 pport_2819_stats_desc, i);
1075 return idx;
1076}
1077
96b12796 1078static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
19386177
KH
1079{
1080 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1081 struct mlx5_core_dev *mdev = priv->mdev;
1082 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1083 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1084 void *out;
1085
75370eb0
ED
1086 if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
1087 return;
1088
19386177
KH
1089 MLX5_SET(ppcnt_reg, in, local_port, 1);
1090 out = pstats->RFC_2819_counters;
1091 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
1092 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1093}
1094
b572ec9f
JK
/* Packet-size buckets reported via ethtool RMON stats; must stay in the
 * same order as the rmon->hist[] assignments in mlx5e_stats_rmon_get().
 * Terminated by an all-zero sentinel entry.
 */
static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519,  2047 },
	{ 2048,  4095 },
	{ 4096,  8191 },
	{ 8192, 10239 },
	{}
};
1108
/* Standard ethtool RMON statistics from a fresh RFC 2819 PPCNT snapshot.
 * On query failure all fields (and *ranges) are left untouched. The
 * hist[] indices must match the bucket order in mlx5e_rmon_ranges[].
 */
void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
			  struct ethtool_rmon_stats *rmon,
			  const struct ethtool_rmon_hist_range **ranges)
{
	u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

/* Shorthand: read one 64-bit RFC 2819 counter from the local snapshot. */
#define RD(name)						\
	MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters,		\
			      eth_2819_cntrs_grp_data_layout,	\
			      name)

	rmon->undersize_pkts	= RD(ether_stats_undersize_pkts);
	rmon->fragments		= RD(ether_stats_fragments);
	rmon->jabbers		= RD(ether_stats_jabbers);

	rmon->hist[0]		= RD(ether_stats_pkts64octets);
	rmon->hist[1]		= RD(ether_stats_pkts65to127octets);
	rmon->hist[2]		= RD(ether_stats_pkts128to255octets);
	rmon->hist[3]		= RD(ether_stats_pkts256to511octets);
	rmon->hist[4]		= RD(ether_stats_pkts512to1023octets);
	rmon->hist[5]		= RD(ether_stats_pkts1024to1518octets);
	rmon->hist[6]		= RD(ether_stats_pkts1519to2047octets);
	rmon->hist[7]		= RD(ether_stats_pkts2048to4095octets);
	rmon->hist[8]		= RD(ether_stats_pkts4096to8191octets);
	rmon->hist[9]		= RD(ether_stats_pkts8192to10239octets);
#undef RD

	*ranges = mlx5e_rmon_ranges;
}
1147
2e4df0b2
KH
/* Byte offset of the high dword of 64-bit counter @c in the PPCNT
 * physical-layer statistical counter group layout.
 */
#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
/* Aggregate PHY statistical counters (ppcnt_statistical_group cap). */
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

/* Per-lane corrected-bits counters (per_lane_error_counters cap). */
static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
2e4df0b2 1168
96b12796 1169static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
2e4df0b2 1170{
4cb4e98e
SA
1171 struct mlx5_core_dev *mdev = priv->mdev;
1172 int num_stats;
1173
6ab75516 1174 /* "1" for link_down_events special counter */
4cb4e98e
SA
1175 num_stats = 1;
1176
1177 num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
1178 NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
1179
1180 num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
1181 NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
1182
1183 return num_stats;
2e4df0b2
KH
1184}
1185
96b12796 1186static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
2e4df0b2 1187{
4cb4e98e 1188 struct mlx5_core_dev *mdev = priv->mdev;
2e4df0b2
KH
1189 int i;
1190
6ab75516
SM
1191 strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
1192
4cb4e98e 1193 if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
6ab75516
SM
1194 return idx;
1195
1196 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
1197 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1198 pport_phy_statistical_stats_desc[i].format);
4cb4e98e
SA
1199
1200 if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
1201 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
1202 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1203 pport_phy_statistical_err_lanes_stats_desc[i].format);
1204
2e4df0b2
KH
1205 return idx;
1206}
1207
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	/* link_down_events_phy has special handling since it is not stored in __be64 format */
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	/* Remaining counters exist only with the statistical PPCNT group;
	 * must mirror the capability checks in the num_stats/fill_strs ops.
	 */
	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);

	/* Per-lane corrected-bits counters are a further capability. */
	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_err_lanes_stats_desc,
						    i);
	return idx;
}
1233
96b12796 1234static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
19386177
KH
1235{
1236 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1237 struct mlx5_core_dev *mdev = priv->mdev;
1238 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1239 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1240 void *out;
1241
1242 MLX5_SET(ppcnt_reg, in, local_port, 1);
1243 out = pstats->phy_counters;
1244 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1245 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1246
1247 if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1248 return;
1249
1250 out = pstats->phy_statistical_counters;
1251 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1252 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1253}
1254
e07c4924
SM
1255void mlx5e_get_link_ext_stats(struct net_device *dev,
1256 struct ethtool_link_ext_stats *stats)
1257{
1258 struct mlx5e_priv *priv = netdev_priv(dev);
1259 u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1260 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1261 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1262
1263 MLX5_SET(ppcnt_reg, in, local_port, 1);
1264 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1265 mlx5_core_access_reg(priv->mdev, in, sz, out,
1266 MLX5_ST_SZ_BYTES(ppcnt_reg), MLX5_REG_PPCNT, 0, 0);
1267
1268 stats->link_down_events = MLX5_GET(ppcnt_reg, out,
1269 counter_set.phys_layer_cntrs.link_down_events);
1270}
1271
0a1498eb
LK
1272static int fec_num_lanes(struct mlx5_core_dev *dev)
1273{
1274 u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
1275 u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {};
1276 int err;
1277
1278 MLX5_SET(pmlp_reg, in, local_port, 1);
1279 err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
1280 MLX5_REG_PMLP, 0, 0);
1281 if (err)
1282 return 0;
1283
1284 return MLX5_GET(pmlp_reg, out, width);
1285}
1286
/* Translate the active-FEC bitmask into a mode index (the lowest set bit).
 * Returns MLX5E_FEC_NOFEC when the query fails. NOTE(review): presumably
 * the active mask is one-hot so first bit == active mode; if no bit is
 * set, find_first_bit() returns BITS_PER_LONG, which falls outside the
 * modes handled by fec_set_block_stats() — confirm against
 * mlx5e_get_fec_mode() semantics.
 */
static int fec_active_mode(struct mlx5_core_dev *mdev)
{
	unsigned long fec_active_long;
	u32 fec_active;

	if (mlx5e_get_fec_mode(mdev, &fec_active, NULL))
		return MLX5E_FEC_NOFEC;

	/* Widen to unsigned long so the bitops helper can scan it. */
	fec_active_long = fec_active;
	return find_first_bit(&fec_active_long, sizeof(unsigned long) * BITS_PER_BYTE);
}
1298
/* Fill the Firecode corrected/uncorrectable block counters for lane @idx
 * from the physical-layer counter group. Expects local variables
 * `fec_stats` and `ppcnt` in the expansion context; @idx must be a
 * literal (it is token-pasted into the counter field name).
 */
#define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \
	fec_stats->corrected_blocks.lanes[(idx)] = \
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
				      fc_fec_corrected_blocks_lane##idx); \
	fec_stats->uncorrectable_blocks.lanes[(idx)] = \
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
				      fc_fec_uncorrectable_blocks_lane##idx); \
})
1307
/* Report Firecode per-lane FEC block counters for however many of the
 * (up to 4) lanes are active; lanes beyond @lanes are left untouched.
 */
static void fec_set_fc_stats(struct ethtool_fec_stats *fec_stats,
			     u32 *ppcnt, u8 lanes)
{
	if (lanes > 3) { /* 4 lanes */
		MLX5E_STATS_SET_FEC_BLOCK(3);
		MLX5E_STATS_SET_FEC_BLOCK(2);
	}
	if (lanes > 1) /* 2 lanes */
		MLX5E_STATS_SET_FEC_BLOCK(1);
	if (lanes > 0) /* 1 lane */
		MLX5E_STATS_SET_FEC_BLOCK(0);
}
1320
/* Report Reed-Solomon FEC block totals; RS counters are not per-lane. */
static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
{
	fec_stats->corrected_blocks.total =
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
				      rs_fec_corrected_blocks);
	fec_stats->uncorrectable_blocks.total =
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
				      rs_fec_uncorrectable_blocks);
}
1330
/* Fill FEC block counters according to the active FEC mode: RS modes use
 * the aggregate counters, Firecode uses per-lane counters. Counters are
 * left untouched when FEC is off, the mode is unrecognized, or the PPCNT
 * query fails.
 */
static void fec_set_block_stats(struct mlx5e_priv *priv,
				struct ethtool_fec_stats *fec_stats)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int mode = fec_active_mode(mdev);

	if (mode == MLX5E_FEC_NOFEC)
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
		return;

	switch (mode) {
	case MLX5E_FEC_RS_528_514:
	case MLX5E_FEC_RS_544_514:
	case MLX5E_FEC_LLRS_272_257_1:
		fec_set_rs_stats(fec_stats, out);
		return;
	case MLX5E_FEC_FIRECODE:
		/* last case, falls off the switch; other modes: no-op */
		fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev));
	}
}
1358
1359static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
1360 struct ethtool_fec_stats *fec_stats)
1703bb50
JK
1361{
1362 u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
1363 struct mlx5_core_dev *mdev = priv->mdev;
0a1498eb 1364 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1703bb50
JK
1365 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1366
1703bb50
JK
1367 MLX5_SET(ppcnt_reg, in, local_port, 1);
1368 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1369 if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
1370 sz, MLX5_REG_PPCNT, 0, 0))
1371 return;
1372
1373 fec_stats->corrected_bits.total =
1374 MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
1375 phys_layer_statistical_cntrs,
1376 phy_corrected_bits);
1377}
1378
0a1498eb
LK
/* Standard ethtool FEC statistics. Requires the statistical PPCNT group;
 * when absent, all fields are left untouched.
 */
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
			 struct ethtool_fec_stats *fec_stats)
{
	if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
		return;

	fec_set_corrected_bits_total(priv, fec_stats);
	fec_set_block_stats(priv, fec_stats);
}
1388
3488bd4c
KH
/* Byte offset of the high dword of 64-bit counter @c in the PPCNT
 * Ethernet extended counter group layout.
 */
#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
/* Extended counters, gated on the rx_buffer_fullness_counters PCAM cap. */
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)

/* The whole group is present only with rx_buffer_fullness_counters. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		return NUM_PPORT_ETH_EXT_COUNTERS;

	return 0;
}
1405
96b12796 1406static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
3488bd4c
KH
1407{
1408 int i;
1409
1410 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1411 for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1412 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1413 pport_eth_ext_stats_desc[i].format);
1414 return idx;
1415}
1416
96b12796 1417static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
3488bd4c
KH
1418{
1419 int i;
1420
1421 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1422 for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1423 data[idx++] =
1424 MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
1425 pport_eth_ext_stats_desc, i);
1426 return idx;
1427}
1428
96b12796 1429static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
19386177
KH
1430{
1431 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1432 struct mlx5_core_dev *mdev = priv->mdev;
1433 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1434 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1435 void *out;
1436
1437 if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
1438 return;
1439
1440 MLX5_SET(ppcnt_reg, in, local_port, 1);
1441 out = pstats->eth_ext_counters;
1442 MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
1443 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1444}
1445
9fd2b5f1
KH
/* Byte offset of 32-bit counter @c in the MPCNT PCIe performance group. */
#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
/* 32-bit PCIe counters (pcie_performance_group MCAM cap). */
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

/* Byte offset of the high dword of 64-bit counter @c in the same group. */
#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
/* 64-bit PCIe counters (tx_overflow_buffer_pkt MCAM cap). */
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

/* Outbound stall counters (pcie_outbound_stalled MCAM cap). */
static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)
1469
96b12796 1470static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
9fd2b5f1
KH
1471{
1472 int num_stats = 0;
1473
1474 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1475 num_stats += NUM_PCIE_PERF_COUNTERS;
1476
1477 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1478 num_stats += NUM_PCIE_PERF_COUNTERS64;
1479
1480 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1481 num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
1482
1483 return num_stats;
1484}
1485
96b12796 1486static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
9fd2b5f1
KH
1487{
1488 int i;
1489
1490 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1491 for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1492 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1493 pcie_perf_stats_desc[i].format);
1494
1495 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1496 for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1497 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1498 pcie_perf_stats_desc64[i].format);
1499
1500 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1501 for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1502 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1503 pcie_perf_stall_stats_desc[i].format);
1504 return idx;
1505}
1506
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
	int i;

	/* Decode the cached MPCNT snapshot; each sub-group is gated on the
	 * same MCAM capability checks as num_stats/fill_strs, and the
	 * 64-bit overflow counter uses the wide read helper.
	 */
	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);
	return idx;
}
1530
96b12796 1531static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
19386177
KH
1532{
1533 struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
1534 struct mlx5_core_dev *mdev = priv->mdev;
1535 u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
1536 int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
1537 void *out;
1538
1539 if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
1540 return;
1541
1542 out = pcie_stats->pcie_perf_counters;
1543 MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
1544 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
1545}
1546
1297d97f
AL
/* Byte offset of the high dword of 64-bit counter @c in the PPCNT
 * per-TC-priority counter group layout.
 */
#define PPORT_PER_TC_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

/* Name templates; "%d" is substituted with the priority at string-fill
 * time via sprintf().
 */
static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS	ARRAY_SIZE(pport_per_tc_prio_stats_desc)

/* Byte offset of the high dword of 64-bit counter @c in the PPCNT
 * per-TC congestion-priority counter group layout.
 */
#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)

/* Per-TC prio counters exist only on devices exposing the SBCAM register;
 * one counter set per priority.
 */
static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}
1578
96b12796 1579static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
1297d97f
AL
1580{
1581 struct mlx5_core_dev *mdev = priv->mdev;
1582 int i, prio;
1583
1584 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1585 return idx;
1586
1587 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1588 for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1589 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1590 pport_per_tc_prio_stats_desc[i].format, prio);
1591 for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1592 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1593 pport_per_tc_congest_prio_stats_desc[i].format, prio);
1594 }
1595
1596 return idx;
1597}
1598
96b12796 1599static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
1297d97f
AL
1600{
1601 struct mlx5e_pport_stats *pport = &priv->stats.pport;
1602 struct mlx5_core_dev *mdev = priv->mdev;
1603 int i, prio;
1604
1605 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1606 return idx;
1607
1608 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1609 for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1610 data[idx++] =
1611 MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
1612 pport_per_tc_prio_stats_desc, i);
1613 for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++)
1614 data[idx++] =
1615 MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
1616 pport_per_tc_congest_prio_stats_desc, i);
1617 }
1618
1619 return idx;
1620}
1621
1622static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
1623{
1624 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1625 struct mlx5_core_dev *mdev = priv->mdev;
1626 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1627 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1628 void *out;
1629 int prio;
1630
1631 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1632 return;
1633
1634 MLX5_SET(ppcnt_reg, in, pnat, 2);
1635 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
1636 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1637 out = pstats->per_tc_prio_counters[prio];
1638 MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1639 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1640 }
1641}
1642
1643static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
1644{
1645 struct mlx5_core_dev *mdev = priv->mdev;
1646
1647 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1648 return 0;
1649
1650 return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
1651}
1652
1653static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
1654{
1655 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1656 struct mlx5_core_dev *mdev = priv->mdev;
1657 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1658 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1659 void *out;
1660 int prio;
1661
1662 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1663 return;
1664
1665 MLX5_SET(ppcnt_reg, in, pnat, 2);
1666 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
1667 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1668 out = pstats->per_tc_congest_prio_counters[prio];
1669 MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1670 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1671 }
1672}
1673
96b12796 1674static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
1297d97f
AL
1675{
1676 return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
1677 mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
1678}
1679
96b12796 1680static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
1297d97f
AL
1681{
1682 mlx5e_grp_per_tc_prio_update_stats(priv);
1683 mlx5e_grp_per_tc_congest_prio_update_stats(priv);
1684}
1685
4377bea2
KH
1686#define PPORT_PER_PRIO_OFF(c) \
1687 MLX5_BYTE_OFF(ppcnt_reg, \
1688 counter_set.eth_per_prio_grp_data_layout.c##_high)
e6000651
KH
1689static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
1690 { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
1691 { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
827a8cb2 1692 { "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
e6000651
KH
1693 { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
1694 { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
1695};
1696
1697#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
1698
54c73f86 1699static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
e6000651
KH
1700{
1701 return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
1702}
1703
1704static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
1705 u8 *data,
1706 int idx)
1707{
1708 int i, prio;
1709
1710 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1711 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1712 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1713 pport_per_prio_traffic_stats_desc[i].format, prio);
1714 }
1715
1716 return idx;
1717}
1718
1719static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
1720 u64 *data,
1721 int idx)
1722{
1723 int i, prio;
1724
1725 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1726 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1727 data[idx++] =
1728 MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1729 pport_per_prio_traffic_stats_desc, i);
1730 }
1731
1732 return idx;
1733}
1734
4377bea2
KH
1735static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
1736 /* %s is "global" or "prio{i}" */
1737 { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
1738 { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
1739 { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
1740 { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
1741 { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
1742};
1743
2fcb12df 1744static const struct counter_desc pport_pfc_stall_stats_desc[] = {
8742c7eb 1745 { "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
2fcb12df
IK
1746 { "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
1747};
1748
4377bea2 1749#define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
2fcb12df
IK
1750#define NUM_PPORT_PFC_STALL_COUNTERS(priv) (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
1751 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
1752 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
4377bea2
KH
1753
1754static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
1755{
1756 struct mlx5_core_dev *mdev = priv->mdev;
1757 u8 pfc_en_tx;
1758 u8 pfc_en_rx;
1759 int err;
1760
1761 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1762 return 0;
1763
1764 err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
1765
1766 return err ? 0 : pfc_en_tx | pfc_en_rx;
1767}
1768
1769static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
1770{
1771 struct mlx5_core_dev *mdev = priv->mdev;
1772 u32 rx_pause;
1773 u32 tx_pause;
1774 int err;
1775
1776 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1777 return false;
1778
1779 err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
1780
1781 return err ? false : rx_pause | tx_pause;
1782}
1783
1784static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
1785{
1786 return (mlx5e_query_global_pause_combined(priv) +
1787 hweight8(mlx5e_query_pfc_combined(priv))) *
2fcb12df
IK
1788 NUM_PPORT_PER_PRIO_PFC_COUNTERS +
1789 NUM_PPORT_PFC_STALL_COUNTERS(priv);
4377bea2
KH
1790}
1791
1792static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
1793 u8 *data,
1794 int idx)
1795{
1796 unsigned long pfc_combined;
1797 int i, prio;
1798
1799 pfc_combined = mlx5e_query_pfc_combined(priv);
1800 for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1801 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1802 char pfc_string[ETH_GSTRING_LEN];
1803
1804 snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
1805 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1806 pport_per_prio_pfc_stats_desc[i].format, pfc_string);
1807 }
1808 }
1809
1810 if (mlx5e_query_global_pause_combined(priv)) {
1811 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1812 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1813 pport_per_prio_pfc_stats_desc[i].format, "global");
1814 }
1815 }
1816
2fcb12df
IK
1817 for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1818 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1819 pport_pfc_stall_stats_desc[i].format);
1820
4377bea2
KH
1821 return idx;
1822}
1823
1824static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
1825 u64 *data,
1826 int idx)
1827{
1828 unsigned long pfc_combined;
1829 int i, prio;
1830
1831 pfc_combined = mlx5e_query_pfc_combined(priv);
1832 for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1833 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1834 data[idx++] =
1835 MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1836 pport_per_prio_pfc_stats_desc, i);
1837 }
1838 }
1839
1840 if (mlx5e_query_global_pause_combined(priv)) {
1841 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1842 data[idx++] =
1843 MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1844 pport_per_prio_pfc_stats_desc, i);
1845 }
1846 }
1847
2fcb12df
IK
1848 for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1849 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1850 pport_pfc_stall_stats_desc, i);
1851
4377bea2
KH
1852 return idx;
1853}
1854
96b12796 1855static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
a8984281 1856{
54c73f86 1857 return mlx5e_grp_per_prio_traffic_get_num_stats() +
a8984281
KH
1858 mlx5e_grp_per_prio_pfc_get_num_stats(priv);
1859}
1860
96b12796 1861static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
a8984281
KH
1862{
1863 idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
1864 idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
1865 return idx;
1866}
1867
96b12796 1868static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
a8984281
KH
1869{
1870 idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
1871 idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
1872 return idx;
1873}
1874
96b12796 1875static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
19386177
KH
1876{
1877 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1878 struct mlx5_core_dev *mdev = priv->mdev;
1879 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1880 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1881 int prio;
1882 void *out;
1883
75370eb0
ED
1884 if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
1885 return;
1886
19386177
KH
1887 MLX5_SET(ppcnt_reg, in, local_port, 1);
1888 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
1889 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1890 out = pstats->per_prio_counters[prio];
1891 MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1892 mlx5_core_access_reg(mdev, in, sz, out, sz,
1893 MLX5_REG_PPCNT, 0, 0);
1894 }
1895}
1896
0e6f01a4 1897static const struct counter_desc mlx5e_pme_status_desc[] = {
c2fb3db2 1898 { "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
0e6f01a4
KH
1899};
1900
1901static const struct counter_desc mlx5e_pme_error_desc[] = {
c2fb3db2
MG
1902 { "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
1903 { "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
1904 { "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
0e6f01a4
KH
1905};
1906
1907#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
1908#define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)
1909
96b12796 1910static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
0e6f01a4
KH
1911{
1912 return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
1913}
1914
96b12796 1915static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
0e6f01a4
KH
1916{
1917 int i;
1918
1919 for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1920 strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
1921
1922 for (i = 0; i < NUM_PME_ERR_STATS; i++)
1923 strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
1924
1925 return idx;
1926}
1927
96b12796 1928static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
0e6f01a4 1929{
69c1280b 1930 struct mlx5_pme_stats pme_stats;
0e6f01a4
KH
1931 int i;
1932
69c1280b
SM
1933 mlx5_get_pme_stats(priv->mdev, &pme_stats);
1934
0e6f01a4 1935 for (i = 0; i < NUM_PME_STATUS_STATS; i++)
69c1280b 1936 data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
0e6f01a4
KH
1937 mlx5e_pme_status_desc, i);
1938
1939 for (i = 0; i < NUM_PME_ERR_STATS; i++)
69c1280b 1940 data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
0e6f01a4
KH
1941 mlx5e_pme_error_desc, i);
1942
1943 return idx;
1944}
1945
/* PME counters are read on demand in fill_stats; nothing to cache here. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
1947
96b12796 1948static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
43585a41 1949{
943aa7bd 1950 return mlx5e_ktls_get_count(priv);
43585a41
IL
1951}
1952
96b12796 1953static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
43585a41 1954{
943aa7bd 1955 return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
43585a41
IL
1956}
1957
96b12796 1958static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
43585a41 1959{
943aa7bd 1960 return idx + mlx5e_ktls_get_stats(priv, data + idx);
43585a41
IL
1961}
1962
/* kTLS maintains its own counters; no periodic update needed. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
1964
1fe85006
KH
1965static const struct counter_desc rq_stats_desc[] = {
1966 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
1967 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
1968 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
0aa1d186
SM
1969 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
1970 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
1fe85006
KH
1971 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
1972 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
1973 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
1974 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
86690b4b 1975 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
1fe85006
KH
1976 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
1977 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
def09e7b
KM
1978 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
1979 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
1980 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
1981 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
1982 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
f007c13d 1983 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
f24686e8 1984 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
1fe85006 1985 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
b71ba6b4
TT
1986 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
1987 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
0073c8f7 1988 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
1fe85006
KH
1989 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
1990 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
1991 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
dc983f0e 1992 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
94563847 1993 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
be5323c8 1994 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
cc10e84b
JD
1995#ifdef CONFIG_PAGE_POOL_STATS
1996 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
1997 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
1998 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
1999 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
2000 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
2001 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
2002 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
2003 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
2004 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
2005 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
2006 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
2007#endif
76c1e1ac
TT
2008#ifdef CONFIG_MLX5_EN_TLS
2009 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
2010 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
76c1e1ac
TT
2011 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
2012 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
2013 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
2014 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
2015 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
e9ce991b 2016 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
76c1e1ac
TT
2017 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
2018 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
2019#endif
1fe85006
KH
2020};
2021
2022static const struct counter_desc sq_stats_desc[] = {
2023 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
2024 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
2025 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
2026 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
2027 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
2028 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
2029 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2030 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
f24686e8 2031 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
1fe85006 2032 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
5af75c74
MM
2033 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
2034 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
d2ead1f3
TT
2035#ifdef CONFIG_MLX5_EN_TLS
2036 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
2037 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
d2ead1f3 2038 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
d2ead1f3
TT
2039 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
2040 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
46a3ea98
TT
2041 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
2042 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
2043 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
2044 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
d2ead1f3 2045#endif
1fe85006
KH
2046 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2047 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
1fe85006
KH
2048 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
2049 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
db75373c 2050 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
86155656 2051 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
f65a59ff
TT
2052 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
2053 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
1fe85006
KH
2054};
2055
890388ad
TT
2056static const struct counter_desc rq_xdpsq_stats_desc[] = {
2057 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
73cab880 2058 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
c2273219 2059 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
6c085a8a 2060 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
890388ad
TT
2061 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2062 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2063 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2064};
2065
58b99ee3
TT
2066static const struct counter_desc xdpsq_stats_desc[] = {
2067 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
73cab880 2068 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
c2273219 2069 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
6c085a8a 2070 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
58b99ee3
TT
2071 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2072 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2073 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2074};
2075
db05815b
MM
2076static const struct counter_desc xskrq_stats_desc[] = {
2077 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
2078 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
2079 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
2080 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2081 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2082 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
2083 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
2084 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2085 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
2086 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2087 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
2088 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2089 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2090 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2091 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2092 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2093 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2094 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
2095 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
2096};
2097
2098static const struct counter_desc xsksq_stats_desc[] = {
2099 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2100 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2101 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2102 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2103 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2104 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2105};
2106
57d689a8 2107static const struct counter_desc ch_stats_desc[] = {
a1bf74dc 2108 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
2d7103c8
TT
2109 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
2110 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
2111 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
db05815b 2112 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
57d689a8
EBE
2113 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
2114};
2115
145e5637
EBE
2116static const struct counter_desc ptp_sq_stats_desc[] = {
2117 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
2118 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
2119 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2120 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2121 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2122 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
2123 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2124 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
2125 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
2126 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2127 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
2128 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
2129 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
2130 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2131};
2132
2133static const struct counter_desc ptp_ch_stats_desc[] = {
2134 { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
2135 { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
2136 { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
2137 { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
2138};
2139
1880bc4e
EBE
2140static const struct counter_desc ptp_cq_stats_desc[] = {
2141 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
2142 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
2143 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
2144 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
58a51894
AL
2145 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
2146 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
3a50cf1e 2147 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, ooo_cqe_drop) },
1880bc4e
EBE
2148};
2149
a28359e9
AL
2150static const struct counter_desc ptp_rq_stats_desc[] = {
2151 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
2152 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
2153 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
2154 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
2155 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
2156 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2157 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2158 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
2159 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
2160 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2161 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
2162 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
2163 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
2164 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2165 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
2166 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2167 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2168 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2169 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2170 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2171 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
a28359e9
AL
2172 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
2173 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
2174 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
2175};
2176
214baf22
MM
2177static const struct counter_desc qos_sq_stats_desc[] = {
2178 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
2179 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
2180 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
2181 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
2182 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
2183 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
2184 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2185 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2186 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2187 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
2188 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
2189 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
2190#ifdef CONFIG_MLX5_EN_TLS
2191 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
2192 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
214baf22
MM
2193 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
2194 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
2195 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
2196 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
2197 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
2198 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
2199 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
2200#endif
2201 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2202 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
2203 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
2204 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2205 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
2206 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
2207 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
2208 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2209};
2210
/* Table sizes, used by the group num_stats/fill helpers below. */
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
#define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc)
#define NUM_PTP_RQ_STATS ARRAY_SIZE(ptp_rq_stats_desc)
#define NUM_QOS_SQ_STATS ARRAY_SIZE(qos_sq_stats_desc)
2223
2224static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
2225{
2226 /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
db83f24d 2227 return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
214baf22
MM
2228}
2229
2230static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
2231{
2232 /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
db83f24d 2233 u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
214baf22
MM
2234 int i, qid;
2235
2236 for (qid = 0; qid < max_qos_sqs; qid++)
2237 for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2238 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2239 qos_sq_stats_desc[i].format, qid);
2240
2241 return idx;
2242}
2243
2244static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
2245{
2246 struct mlx5e_sq_stats **stats;
2247 u16 max_qos_sqs;
2248 int i, qid;
2249
2250 /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
db83f24d
MT
2251 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
2252 stats = READ_ONCE(priv->htb_qos_sq_stats);
214baf22
MM
2253
2254 for (qid = 0; qid < max_qos_sqs; qid++) {
2255 struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
2256
2257 for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2258 data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
2259 }
2260
2261 return idx;
2262}
2263
/* QoS SQ counters are updated by the datapath; nothing to do here. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
145e5637
EBE
2265
2266static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
2267{
a28359e9
AL
2268 int num = NUM_PTP_CH_STATS;
2269
2270 if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2271 return 0;
2272
2273 if (priv->tx_ptp_opened)
2274 num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
2275 if (priv->rx_ptp_opened)
2276 num += NUM_PTP_RQ_STATS;
2277
2278 return num;
145e5637
EBE
2279}
2280
2281static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
2282{
2283 int i, tc;
2284
a28359e9 2285 if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
145e5637
EBE
2286 return idx;
2287
2288 for (i = 0; i < NUM_PTP_CH_STATS; i++)
2289 sprintf(data + (idx++) * ETH_GSTRING_LEN,
aef0f8c6 2290 "%s", ptp_ch_stats_desc[i].format);
145e5637 2291
a28359e9
AL
2292 if (priv->tx_ptp_opened) {
2293 for (tc = 0; tc < priv->max_opened_tc; tc++)
2294 for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2295 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2296 ptp_sq_stats_desc[i].format, tc);
145e5637 2297
a28359e9
AL
2298 for (tc = 0; tc < priv->max_opened_tc; tc++)
2299 for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2300 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2301 ptp_cq_stats_desc[i].format, tc);
2302 }
2303 if (priv->rx_ptp_opened) {
2304 for (i = 0; i < NUM_PTP_RQ_STATS; i++)
1880bc4e 2305 sprintf(data + (idx++) * ETH_GSTRING_LEN,
dd1979cf 2306 ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
a28359e9 2307 }
145e5637
EBE
2308 return idx;
2309}
2310
2311static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
2312{
2313 int i, tc;
2314
a28359e9 2315 if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
145e5637
EBE
2316 return idx;
2317
2318 for (i = 0; i < NUM_PTP_CH_STATS; i++)
2319 data[idx++] =
b0d35de4 2320 MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
145e5637
EBE
2321 ptp_ch_stats_desc, i);
2322
a28359e9
AL
2323 if (priv->tx_ptp_opened) {
2324 for (tc = 0; tc < priv->max_opened_tc; tc++)
2325 for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2326 data[idx++] =
2327 MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
2328 ptp_sq_stats_desc, i);
2329
2330 for (tc = 0; tc < priv->max_opened_tc; tc++)
2331 for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2332 data[idx++] =
2333 MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
2334 ptp_cq_stats_desc, i);
2335 }
2336 if (priv->rx_ptp_opened) {
2337 for (i = 0; i < NUM_PTP_RQ_STATS; i++)
1880bc4e 2338 data[idx++] =
a28359e9
AL
2339 MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
2340 ptp_rq_stats_desc, i);
2341 }
145e5637
EBE
2342 return idx;
2343}
2344
/* No-op: PTP counters are software counters read directly at fill time. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
1fe85006 2346
96b12796 2347static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
1fe85006 2348{
9d758d4a 2349 int max_nch = priv->stats_nch;
05909bab 2350
05909bab
EBE
2351 return (NUM_RQ_STATS * max_nch) +
2352 (NUM_CH_STATS * max_nch) +
890388ad 2353 (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
58b99ee3 2354 (NUM_RQ_XDPSQ_STATS * max_nch) +
db05815b
MM
2355 (NUM_XDPSQ_STATS * max_nch) +
2356 (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
2357 (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
1fe85006
KH
2358}
2359
96b12796 2360static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
1fe85006 2361{
db05815b 2362 bool is_xsk = priv->xsk.ever_used;
9d758d4a 2363 int max_nch = priv->stats_nch;
1fe85006
KH
2364 int i, j, tc;
2365
05909bab 2366 for (i = 0; i < max_nch; i++)
57d689a8
EBE
2367 for (j = 0; j < NUM_CH_STATS; j++)
2368 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2369 ch_stats_desc[j].format, i);
2370
890388ad 2371 for (i = 0; i < max_nch; i++) {
1fe85006 2372 for (j = 0; j < NUM_RQ_STATS; j++)
890388ad
TT
2373 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2374 rq_stats_desc[j].format, i);
db05815b
MM
2375 for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2376 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2377 xskrq_stats_desc[j].format, i);
890388ad
TT
2378 for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2379 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2380 rq_xdpsq_stats_desc[j].format, i);
2381 }
1fe85006 2382
05909bab
EBE
2383 for (tc = 0; tc < priv->max_opened_tc; tc++)
2384 for (i = 0; i < max_nch; i++)
1fe85006
KH
2385 for (j = 0; j < NUM_SQ_STATS; j++)
2386 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2387 sq_stats_desc[j].format,
c55d8b10 2388 i + tc * max_nch);
1fe85006 2389
db05815b
MM
2390 for (i = 0; i < max_nch; i++) {
2391 for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2392 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2393 xsksq_stats_desc[j].format, i);
58b99ee3
TT
2394 for (j = 0; j < NUM_XDPSQ_STATS; j++)
2395 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2396 xdpsq_stats_desc[j].format, i);
db05815b 2397 }
58b99ee3 2398
1fe85006
KH
2399 return idx;
2400}
2401
96b12796 2402static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
1fe85006 2403{
db05815b 2404 bool is_xsk = priv->xsk.ever_used;
9d758d4a 2405 int max_nch = priv->stats_nch;
1fe85006
KH
2406 int i, j, tc;
2407
05909bab 2408 for (i = 0; i < max_nch; i++)
57d689a8
EBE
2409 for (j = 0; j < NUM_CH_STATS; j++)
2410 data[idx++] =
be98737a 2411 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
57d689a8
EBE
2412 ch_stats_desc, j);
2413
890388ad 2414 for (i = 0; i < max_nch; i++) {
1fe85006
KH
2415 for (j = 0; j < NUM_RQ_STATS; j++)
2416 data[idx++] =
be98737a 2417 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
1fe85006 2418 rq_stats_desc, j);
db05815b
MM
2419 for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2420 data[idx++] =
be98737a 2421 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
db05815b 2422 xskrq_stats_desc, j);
890388ad
TT
2423 for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2424 data[idx++] =
be98737a 2425 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
890388ad
TT
2426 rq_xdpsq_stats_desc, j);
2427 }
1fe85006 2428
05909bab
EBE
2429 for (tc = 0; tc < priv->max_opened_tc; tc++)
2430 for (i = 0; i < max_nch; i++)
1fe85006
KH
2431 for (j = 0; j < NUM_SQ_STATS; j++)
2432 data[idx++] =
be98737a 2433 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
1fe85006
KH
2434 sq_stats_desc, j);
2435
db05815b
MM
2436 for (i = 0; i < max_nch; i++) {
2437 for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2438 data[idx++] =
be98737a 2439 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
db05815b 2440 xsksq_stats_desc, j);
58b99ee3
TT
2441 for (j = 0; j < NUM_XDPSQ_STATS; j++)
2442 data[idx++] =
be98737a 2443 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
58b99ee3 2444 xdpsq_stats_desc, j);
db05815b 2445 }
58b99ee3 2446
1fe85006
KH
2447 return idx;
2448}
2449
/* No-op: channel counters are software counters read directly at fill time. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
2451
/* Stats group descriptor definitions. NOTE(review): the second argument is
 * presumably a flags field — MLX5E_NDO_UPDATE_STATS appears to mark groups
 * refreshed from the ndo_get_stats64 path; confirm against
 * MLX5E_DEFINE_STATS_GRP in en_stats.h.
 */
MLX5E_DEFINE_STATS_GRP(sw, 0);
MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(2863, 0);
MLX5E_DEFINE_STATS_GRP(2819, 0);
MLX5E_DEFINE_STATS_GRP(phy, 0);
MLX5E_DEFINE_STATS_GRP(pcie, 0);
MLX5E_DEFINE_STATS_GRP(per_prio, 0);
MLX5E_DEFINE_STATS_GRP(pme, 0);
MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
MLX5E_DEFINE_STATS_GRP(ptp, 0);
static MLX5E_DEFINE_STATS_GRP(qos, 0);
f0ff8e8c 2469
/* The stats groups order is opposite to the update_stats() order calls */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_hw),
	&MLX5E_STATS_GRP(ipsec_sw),
#endif
	&MLX5E_STATS_GRP(tls),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
	&MLX5E_STATS_GRP(ptp),
	&MLX5E_STATS_GRP(qos),
#ifdef CONFIG_MLX5_EN_MACSEC
	&MLX5E_STATS_GRP(macsec_hw),
#endif
};
2497
/* Number of NIC-profile stats groups. @priv is unused here but kept to
 * match the profile's stats_grps_num callback signature.
 */
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_nic_stats_grps);
}