1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/dcbnl.h>
7 #include <linux/if_ether.h>
8 #include <linux/list.h>
/* Pool registration state: threshold mode (static vs. dynamic/alpha).
 * NOTE(review): further members (e.g. configured size) are elided in this
 * view of the file.
 */
15 struct mlxsw_sp_sb_pr {
16 enum mlxsw_reg_sbpr_mode mode;
/* Occupancy watermark snapshot (current and maximum, in cells).
 * NOTE(review): the "cp" in the struct tag looks like a historical typo for
 * "sp"; it is used consistently below, so it is kept as-is.
 */
20 struct mlxsw_cp_sb_occ {
/* Per-{port, PG/TC} buffer binding: quota, bound pool and its occupancy. */
25 struct mlxsw_sp_sb_cm {
29 struct mlxsw_cp_sb_occ occ;
/* Sentinel meaning "infinite" for sizes/quotas (all-ones as u32). */
32 #define MLXSW_SP_SB_INFI -1U
/* Per-{port, pool} quota and its occupancy readout. */
34 struct mlxsw_sp_sb_pm {
37 struct mlxsw_cp_sb_occ occ;
/* Multicast (SBMM) per-priority configuration entry. */
40 struct mlxsw_sp_sb_mm {
/* Pool descriptor: direction (ingress/egress) plus the HW pool number;
 * the index of a descriptor in the per-ASIC table below is the driver-wide
 * pool_index used throughout this file.
 */
46 struct mlxsw_sp_sb_pool_des {
47 enum mlxsw_reg_sbxx_dir dir;
51 /* Order ingress pools before egress pools. */
/* Spectrum-1 pool layout: four ingress pools (0-3), four egress pools (0-3)
 * and egress pool 15. pool_index == position in this array.
 */
52 static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
53 {MLXSW_REG_SBXX_DIR_INGRESS, 0},
54 {MLXSW_REG_SBXX_DIR_INGRESS, 1},
55 {MLXSW_REG_SBXX_DIR_INGRESS, 2},
56 {MLXSW_REG_SBXX_DIR_INGRESS, 3},
57 {MLXSW_REG_SBXX_DIR_EGRESS, 0},
58 {MLXSW_REG_SBXX_DIR_EGRESS, 1},
59 {MLXSW_REG_SBXX_DIR_EGRESS, 2},
60 {MLXSW_REG_SBXX_DIR_EGRESS, 3},
61 {MLXSW_REG_SBXX_DIR_EGRESS, 15},
/* Spectrum-2 pool layout: same as SP1 but without the extra egress pool 15. */
64 static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
65 {MLXSW_REG_SBXX_DIR_INGRESS, 0},
66 {MLXSW_REG_SBXX_DIR_INGRESS, 1},
67 {MLXSW_REG_SBXX_DIR_INGRESS, 2},
68 {MLXSW_REG_SBXX_DIR_INGRESS, 3},
69 {MLXSW_REG_SBXX_DIR_EGRESS, 0},
70 {MLXSW_REG_SBXX_DIR_EGRESS, 1},
71 {MLXSW_REG_SBXX_DIR_EGRESS, 2},
72 {MLXSW_REG_SBXX_DIR_EGRESS, 3},
/* Number of priority groups (ingress) and traffic classes (egress) tracked
 * per port; these bound the ing_cms[]/eg_cms[] arrays and the SBSR masks.
 */
75 #define MLXSW_SP_SB_ING_TC_COUNT 8
76 #define MLXSW_SP_SB_EG_TC_COUNT 16
/* Per-port shared-buffer state: fixed CM arrays per direction plus a
 * dynamically allocated PM array (one entry per pool, see sb_port_init).
 */
78 struct mlxsw_sp_sb_port {
79 struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
80 struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
81 struct mlxsw_sp_sb_pm *pms;
/* Global shared-buffer state: per-pool PRs, per-port array, headroom limit.
 * NOTE(review): the enclosing struct's header and remaining members (e.g.
 * cell_size, sb_size used below) are elided in this view.
 */
85 struct mlxsw_sp_sb_pr *prs;
86 struct mlxsw_sp_sb_port *ports;
88 u32 max_headroom_cells;
/* Per-ASIC (SP1/SP2) table bundle: pool descriptors, default PR/PM/CM/MM
 * settings and their element counts. Instantiated at the bottom of the file.
 */
92 struct mlxsw_sp_sb_vals {
93 unsigned int pool_count;
94 const struct mlxsw_sp_sb_pool_des *pool_dess;
95 const struct mlxsw_sp_sb_pm *pms;
96 const struct mlxsw_sp_sb_pr *prs;
97 const struct mlxsw_sp_sb_mm *mms;
98 const struct mlxsw_sp_sb_cm *cms_ingress;
99 const struct mlxsw_sp_sb_cm *cms_egress;
100 const struct mlxsw_sp_sb_cm *cms_cpu;
101 unsigned int mms_count;
102 unsigned int cms_ingress_count;
103 unsigned int cms_egress_count;
104 unsigned int cms_cpu_count;
/* Convert a cell count to bytes using the ASIC's cell size. */
107 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
109 return mlxsw_sp->sb->cell_size * cells;
/* Convert bytes to cells, rounding up so the byte amount always fits. */
112 u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
114 return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
/* Maximum per-port headroom, in cells (derived from HW resource at init). */
117 u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
119 return mlxsw_sp->sb->max_headroom_cells;
/* Look up the cached PR entry for a driver pool index. */
122 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
125 return &mlxsw_sp->sb->prs[pool_index];
/* An ingress PG exists only below ING_TC_COUNT; an egress TC below
 * EG_TC_COUNT (notably, ingress PG 8 does not exist — see cms init below).
 */
128 static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
130 if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
131 return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
133 return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
/* Look up the cached CM entry for {port, PG/TC, direction}; warns (but still
 * indexes) when the PG/TC is out of range for the direction.
 */
136 static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
137 u8 local_port, u8 pg_buff,
138 enum mlxsw_reg_sbxx_dir dir)
140 struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
142 WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
143 if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
144 return &sb_port->ing_cms[pg_buff];
146 return &sb_port->eg_cms[pg_buff];
/* Look up the cached PM entry for {port, pool}. */
149 static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
150 u8 local_port, u16 pool_index)
152 return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
/* Program one pool via SBPR, then mirror the result into the cached PR.
 * When infi_size is set, the cached size is the whole shared buffer
 * converted to cells. Returns 0 or a register-write error.
 * NOTE(review): some lines of the body are elided in this view.
 */
155 static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
156 enum mlxsw_reg_sbpr_mode mode,
157 u32 size, bool infi_size)
159 const struct mlxsw_sp_sb_pool_des *des =
160 &mlxsw_sp->sb_vals->pool_dess[pool_index];
161 char sbpr_pl[MLXSW_REG_SBPR_LEN];
162 struct mlxsw_sp_sb_pr *pr;
165 mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
167 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
172 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
173 pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
/* Program one {port, PG/TC} binding via SBCM, then update the CM cache
 * (only for PG/TCs that actually exist in the given direction). With
 * infi_max, the cached max is the full shared buffer in cells.
 */
179 static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
180 u8 pg_buff, u32 min_buff, u32 max_buff,
181 bool infi_max, u16 pool_index)
183 const struct mlxsw_sp_sb_pool_des *des =
184 &mlxsw_sp->sb_vals->pool_dess[pool_index];
185 char sbcm_pl[MLXSW_REG_SBCM_LEN];
186 struct mlxsw_sp_sb_cm *cm;
189 mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
190 min_buff, max_buff, infi_max, des->pool);
191 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
195 if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
197 max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
198 mlxsw_sp->sb->sb_size);
200 cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
202 cm->min_buff = min_buff;
203 cm->max_buff = max_buff;
204 cm->pool_index = pool_index;
/* Program one {port, pool} quota via SBPM, then update the PM cache. */
209 static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
210 u16 pool_index, u32 min_buff, u32 max_buff)
212 const struct mlxsw_sp_sb_pool_des *des =
213 &mlxsw_sp->sb_vals->pool_dess[pool_index];
214 char sbpm_pl[MLXSW_REG_SBPM_LEN];
215 struct mlxsw_sp_sb_pm *pm;
218 mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
220 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
224 pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
225 pm->min_buff = min_buff;
226 pm->max_buff = max_buff;
/* Queue an SBPM transaction that clears the max-occupancy watermark for one
 * {port, pool}; completion is gathered later via bulk_list.
 */
230 static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
231 u16 pool_index, struct list_head *bulk_list)
233 const struct mlxsw_sp_sb_pool_des *des =
234 &mlxsw_sp->sb_vals->pool_dess[pool_index];
235 char sbpm_pl[MLXSW_REG_SBPM_LEN];
237 mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
239 return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
/* Transaction completion: unpack cur/max occupancy into the PM entry that
 * was stashed in cb_priv by mlxsw_sp_sb_pm_occ_query() below.
 */
243 static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
244 char *sbpm_pl, size_t sbpm_pl_len,
245 unsigned long cb_priv)
247 struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
249 mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
/* Queue an SBPM occupancy query for one {port, pool}; the cached PM entry
 * is passed as the callback's private data.
 */
252 static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
253 u16 pool_index, struct list_head *bulk_list)
255 const struct mlxsw_sp_sb_pool_des *des =
256 &mlxsw_sp->sb_vals->pool_dess[pool_index];
257 char sbpm_pl[MLXSW_REG_SBPM_LEN];
258 struct mlxsw_sp_sb_pm *pm;
260 pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
261 mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
263 return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
265 mlxsw_sp_sb_pm_occ_query_cb,
269 /* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
270 #define MLXSW_SP_PB_HEADROOM 25632
271 #define MLXSW_SP_PB_UNUSED 8
/* Program the port's priority buffers (PBMC): buffer 0 gets headroom scaled
 * by port width, buffer 9 holds two max-MTU frames, buffer 8 is skipped as
 * unused, and the port shared buffer is set to zero.
 */
273 static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
276 [0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
277 [9] = 2 * MLXSW_PORT_MAX_MTU,
279 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
280 char pbmc_pl[MLXSW_REG_PBMC_LEN];
283 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
285 for (i = 0; i < ARRAY_SIZE(pbs); i++) {
286 u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);
288 if (i == MLXSW_SP_PB_UNUSED)
290 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
292 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
293 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
294 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
/* Map all IEEE priorities to buffer 0 via PPTB. */
297 static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
299 char pptb_pl[MLXSW_REG_PPTB_LEN];
302 mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
303 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
304 mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
305 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
/* Headroom init = priority buffers then prio->buffer mapping. */
309 static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
313 err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
316 return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
/* Allocate the per-port PM array (one entry per pool). */
319 static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
320 struct mlxsw_sp_sb_port *sb_port)
322 struct mlxsw_sp_sb_pm *pms;
324 pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
/* Free the per-port PM array.
 * NOTE(review): body elided in this view.
 */
332 static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
/* Allocate global per-port and per-pool caches; on per-port init failure,
 * unwind the ports already initialized, then the PR array and ports array.
 */
337 static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
339 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
340 struct mlxsw_sp_sb_pr *prs;
344 mlxsw_sp->sb->ports = kcalloc(max_ports,
345 sizeof(struct mlxsw_sp_sb_port),
347 if (!mlxsw_sp->sb->ports)
350 prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
356 mlxsw_sp->sb->prs = prs;
358 for (i = 0; i < max_ports; i++) {
359 err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
361 goto err_sb_port_init;
/* Error unwind: free ports [0, i) in reverse, then prs and ports arrays. */
367 for (i--; i >= 0; i--)
368 mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
369 kfree(mlxsw_sp->sb->prs);
371 kfree(mlxsw_sp->sb->ports);
/* Mirror of ports_init: fini every port in reverse, free prs and ports. */
375 static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
377 int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
380 for (i = max_ports - 1; i >= 0; i--)
381 mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
382 kfree(mlxsw_sp->sb->prs);
383 kfree(mlxsw_sp->sb->ports);
/* Initializer for one PR table entry (mode + size in bytes).
 * NOTE(review): the macro expansion body is elided in this view.
 */
386 #define MLXSW_SP_SB_PR(_mode, _size) \
/* Spectrum-1 default pool sizes, in bytes. */
392 #define MLXSW_SP1_SB_PR_INGRESS_SIZE 12440000
393 #define MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
394 #define MLXSW_SP1_SB_PR_EGRESS_SIZE 13232000
/* SP1 PR defaults, indexed like mlxsw_sp1_sb_pool_dess: data pool, two
 * unused pools, management pool (ingress); then the egress equivalents plus
 * the infinite static pool (egress pool 15).
 */
396 static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
398 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
399 MLXSW_SP1_SB_PR_INGRESS_SIZE),
400 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
401 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
402 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
403 MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE),
405 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
406 MLXSW_SP1_SB_PR_EGRESS_SIZE),
407 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
408 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
409 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
410 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
/* Spectrum-2 default pool sizes, in bytes. */
413 #define MLXSW_SP2_SB_PR_INGRESS_SIZE 40960000
414 #define MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
415 #define MLXSW_SP2_SB_PR_EGRESS_SIZE 40960000
/* SP2 PR defaults; unused pools are static/0 here, unlike SP1. */
417 static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
419 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
420 MLXSW_SP2_SB_PR_INGRESS_SIZE),
421 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
422 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
423 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
424 MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE),
426 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
427 MLXSW_SP2_SB_PR_EGRESS_SIZE),
428 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
429 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
430 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
/* Write every PR table entry to HW; MLXSW_SP_SB_INFI sizes are written with
 * the infinite flag instead of a byte count converted to cells.
 */
433 static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
434 const struct mlxsw_sp_sb_pr *prs,
440 for (i = 0; i < prs_len; i++) {
441 u32 size = prs[i].size;
444 if (size == MLXSW_SP_SB_INFI) {
445 err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
448 size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
449 err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
/* Initializer for one CM table entry: min quota (bytes, converted to cells
 * at init), max quota (cells or dynamic alpha), and bound pool index.
 */
458 #define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \
460 .min_buff = _min_buff, \
461 .max_buff = _max_buff, \
462 .pool_index = _pool, \
/* SP1 ingress CM defaults: PGs 0-7 bound to ingress pool 0, a dummy for the
 * nonexistent PG 8, and PG 9 bound to the ingress management pool (3).
 */
465 static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
466 MLXSW_SP_SB_CM(10000, 8, 0),
467 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
468 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
469 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
470 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
471 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
472 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
473 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
474 MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
475 MLXSW_SP_SB_CM(20000, 1, 3),
/* SP2 ingress CM defaults; same layout as SP1 with SP2-specific quotas. */
478 static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
479 MLXSW_SP_SB_CM(0, 7, 0),
480 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
481 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
482 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
483 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
484 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
485 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
486 MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
487 MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
488 MLXSW_SP_SB_CM(20000, 1, 3),
/* SP1 egress CM defaults: unicast TCs 0-7 on egress pool 4, TCs 8-15
 * (multicast) infinite on pool 8, plus a final control entry on pool 4.
 */
491 static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
492 MLXSW_SP_SB_CM(1500, 9, 4),
493 MLXSW_SP_SB_CM(1500, 9, 4),
494 MLXSW_SP_SB_CM(1500, 9, 4),
495 MLXSW_SP_SB_CM(1500, 9, 4),
496 MLXSW_SP_SB_CM(1500, 9, 4),
497 MLXSW_SP_SB_CM(1500, 9, 4),
498 MLXSW_SP_SB_CM(1500, 9, 4),
499 MLXSW_SP_SB_CM(1500, 9, 4),
500 MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
501 MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
502 MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
503 MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
504 MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
505 MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
506 MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
507 MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
508 MLXSW_SP_SB_CM(1, 0xff, 4),
/* SP2 egress CM defaults: all 16 TCs on egress pool 4 with dynamic alpha. */
511 static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
512 MLXSW_SP_SB_CM(0, 7, 4),
513 MLXSW_SP_SB_CM(0, 7, 4),
514 MLXSW_SP_SB_CM(0, 7, 4),
515 MLXSW_SP_SB_CM(0, 7, 4),
516 MLXSW_SP_SB_CM(0, 7, 4),
517 MLXSW_SP_SB_CM(0, 7, 4),
518 MLXSW_SP_SB_CM(0, 7, 4),
519 MLXSW_SP_SB_CM(0, 7, 4),
520 MLXSW_SP_SB_CM(0, 7, 4),
521 MLXSW_SP_SB_CM(0, 7, 4),
522 MLXSW_SP_SB_CM(0, 7, 4),
523 MLXSW_SP_SB_CM(0, 7, 4),
524 MLXSW_SP_SB_CM(0, 7, 4),
525 MLXSW_SP_SB_CM(0, 7, 4),
526 MLXSW_SP_SB_CM(0, 7, 4),
527 MLXSW_SP_SB_CM(0, 7, 4),
528 MLXSW_SP_SB_CM(1, 0xff, 4),
/* CPU port default: zero quotas on egress pool 4. */
531 #define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 4)
/* CPU port egress CM table; selected TCs reserve one max-MTU frame so
 * trapped packets can always reach the host.
 */
533 static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
534 MLXSW_SP_CPU_PORT_SB_CM,
535 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
536 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
537 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
538 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
539 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
540 MLXSW_SP_CPU_PORT_SB_CM,
541 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
542 MLXSW_SP_CPU_PORT_SB_CM,
543 MLXSW_SP_CPU_PORT_SB_CM,
544 MLXSW_SP_CPU_PORT_SB_CM,
545 MLXSW_SP_CPU_PORT_SB_CM,
546 MLXSW_SP_CPU_PORT_SB_CM,
547 MLXSW_SP_CPU_PORT_SB_CM,
548 MLXSW_SP_CPU_PORT_SB_CM,
549 MLXSW_SP_CPU_PORT_SB_CM,
550 MLXSW_SP_CPU_PORT_SB_CM,
551 MLXSW_SP_CPU_PORT_SB_CM,
552 MLXSW_SP_CPU_PORT_SB_CM,
553 MLXSW_SP_CPU_PORT_SB_CM,
554 MLXSW_SP_CPU_PORT_SB_CM,
555 MLXSW_SP_CPU_PORT_SB_CM,
556 MLXSW_SP_CPU_PORT_SB_CM,
557 MLXSW_SP_CPU_PORT_SB_CM,
558 MLXSW_SP_CPU_PORT_SB_CM,
559 MLXSW_SP_CPU_PORT_SB_CM,
560 MLXSW_SP_CPU_PORT_SB_CM,
561 MLXSW_SP_CPU_PORT_SB_CM,
562 MLXSW_SP_CPU_PORT_SB_CM,
563 MLXSW_SP_CPU_PORT_SB_CM,
564 MLXSW_SP_CPU_PORT_SB_CM,
565 MLXSW_SP_CPU_PORT_SB_CM,
/* True when the pool uses static (cell-count) thresholds rather than
 * dynamic alpha values; decides how max_buff values are interpreted below.
 */
569 mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
571 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
573 return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
/* Program a CM table for one port and direction. Skips the nonexistent
 * ingress PG 8, warns on direction mismatches between entry and pool,
 * converts min_buff bytes to cells, and converts max_buff only for static
 * pools (dynamic pools take an alpha value, infinite uses the infi flag).
 */
576 static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
577 enum mlxsw_reg_sbxx_dir dir,
578 const struct mlxsw_sp_sb_cm *cms,
581 const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
585 for (i = 0; i < cms_len; i++) {
586 const struct mlxsw_sp_sb_cm *cm;
590 if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
591 continue; /* PG number 8 does not exist, skip it */
593 if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
596 min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
597 max_buff = cm->max_buff;
598 if (max_buff == MLXSW_SP_SB_INFI) {
599 err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
601 true, cm->pool_index);
603 if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
605 max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
607 err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
609 false, cm->pool_index);
/* Program both the ingress and egress CM tables for one front-panel port. */
617 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
619 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
622 err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
623 mlxsw_sp_port->local_port,
624 MLXSW_REG_SBXX_DIR_INGRESS,
625 mlxsw_sp->sb_vals->cms_ingress,
626 mlxsw_sp->sb_vals->cms_ingress_count);
629 return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
630 mlxsw_sp_port->local_port,
631 MLXSW_REG_SBXX_DIR_EGRESS,
632 mlxsw_sp->sb_vals->cms_egress,
633 mlxsw_sp->sb_vals->cms_egress_count);
/* Program the CPU port (local port 0) egress CM table. */
636 static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
638 return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
639 mlxsw_sp->sb_vals->cms_cpu,
640 mlxsw_sp->sb_vals->cms_cpu_count);
/* Initializer for one PM table entry (per-port, per-pool quotas). */
643 #define MLXSW_SP_SB_PM(_min_buff, _max_buff) \
645 .min_buff = _min_buff, \
646 .max_buff = _max_buff, \
/* SP1 PM defaults, indexed by pool_index: four ingress then egress entries
 * (the last one is for the static egress pool 15, quotas in bytes).
 */
649 static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
651 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
652 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
653 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
654 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
656 MLXSW_SP_SB_PM(0, 7),
657 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
658 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
659 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
660 MLXSW_SP_SB_PM(10000, 90000),
/* SP2 PM defaults; unused pools get zero quotas (their pools are static). */
663 static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
665 MLXSW_SP_SB_PM(0, 7),
666 MLXSW_SP_SB_PM(0, 0),
667 MLXSW_SP_SB_PM(0, 0),
668 MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
670 MLXSW_SP_SB_PM(0, 7),
671 MLXSW_SP_SB_PM(0, 0),
672 MLXSW_SP_SB_PM(0, 0),
673 MLXSW_SP_SB_PM(0, 0),
/* Program the PM table for one port: min_buff bytes -> cells always;
 * max_buff is converted only for static pools (dynamic pools take alpha).
 */
676 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
678 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
682 for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
683 const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp->sb_vals->pms[i];
687 min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
688 max_buff = pm->max_buff;
689 if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
690 max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
691 err = mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port,
692 i, min_buff, max_buff);
/* Initializer for one MM (multicast) table entry. */
699 #define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \
701 .min_buff = _min_buff, \
702 .max_buff = _max_buff, \
703 .pool_index = _pool, \
/* Shared multicast defaults (both ASICs): one entry per priority, all bound
 * to egress pool 4 with dynamic threshold 6.
 */
706 static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
707 MLXSW_SP_SB_MM(0, 6, 4),
708 MLXSW_SP_SB_MM(0, 6, 4),
709 MLXSW_SP_SB_MM(0, 6, 4),
710 MLXSW_SP_SB_MM(0, 6, 4),
711 MLXSW_SP_SB_MM(0, 6, 4),
712 MLXSW_SP_SB_MM(0, 6, 4),
713 MLXSW_SP_SB_MM(0, 6, 4),
714 MLXSW_SP_SB_MM(0, 6, 4),
715 MLXSW_SP_SB_MM(0, 6, 4),
716 MLXSW_SP_SB_MM(0, 6, 4),
717 MLXSW_SP_SB_MM(0, 6, 4),
718 MLXSW_SP_SB_MM(0, 6, 4),
719 MLXSW_SP_SB_MM(0, 6, 4),
720 MLXSW_SP_SB_MM(0, 6, 4),
721 MLXSW_SP_SB_MM(0, 6, 4),
/* Write the MM table via SBMM, one register write per priority. */
724 static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
726 char sbmm_pl[MLXSW_REG_SBMM_LEN];
730 for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
731 const struct mlxsw_sp_sb_pool_des *des;
732 const struct mlxsw_sp_sb_mm *mc;
735 mc = &mlxsw_sp->sb_vals->mms[i];
736 des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
737 /* All pools used by sb_mm's are initialized using dynamic
738 * thresholds, therefore 'max_buff' isn't specified in cells.
740 min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
741 mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
743 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
/* Split pool_count into ingress/egress counts by scanning the descriptor
 * table for the first egress pool (ingress pools come first by convention);
 * WARNs if no egress pool exists.
 */
750 static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
751 u16 *p_ingress_len, u16 *p_egress_len)
755 for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i)
756 if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
757 MLXSW_REG_SBXX_DIR_EGRESS)
759 WARN(1, "No egress pools\n");
763 *p_egress_len = mlxsw_sp->sb_vals->pool_count - i;
/* Spectrum-1 table bundle, referenced from the SP1 driver profile. */
766 const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
767 .pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
768 .pool_dess = mlxsw_sp1_sb_pool_dess,
769 .pms = mlxsw_sp1_sb_pms,
770 .prs = mlxsw_sp1_sb_prs,
771 .mms = mlxsw_sp_sb_mms,
772 .cms_ingress = mlxsw_sp1_sb_cms_ingress,
773 .cms_egress = mlxsw_sp1_sb_cms_egress,
774 .cms_cpu = mlxsw_sp_cpu_port_sb_cms,
775 .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
776 .cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
777 .cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
778 .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
/* Spectrum-2 table bundle; MM and CPU CM tables are shared with SP1. */
781 const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
782 .pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
783 .pool_dess = mlxsw_sp2_sb_pool_dess,
784 .pms = mlxsw_sp2_sb_pms,
785 .prs = mlxsw_sp2_sb_prs,
786 .mms = mlxsw_sp_sb_mms,
787 .cms_ingress = mlxsw_sp2_sb_cms_ingress,
788 .cms_egress = mlxsw_sp2_sb_cms_egress,
789 .cms_cpu = mlxsw_sp_cpu_port_sb_cms,
790 .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
791 .cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
792 .cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
793 .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
/* Driver-wide shared-buffer init: validate HW resources, allocate state,
 * program pools/CPU CMs/MMs and register the devlink sb object. Unwinds in
 * reverse on any failure (goto labels at the bottom).
 */
796 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
798 u32 max_headroom_size;
803 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
806 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
809 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
812 mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
815 mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
816 mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
818 max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
820 /* Round down, because this limit must not be overstepped. */
821 mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
822 mlxsw_sp->sb->cell_size;
824 err = mlxsw_sp_sb_ports_init(mlxsw_sp);
826 goto err_sb_ports_init;
827 err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
828 mlxsw_sp->sb_vals->pool_count);
830 goto err_sb_prs_init;
831 err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
833 goto err_sb_cpu_port_sb_cms_init;
834 err = mlxsw_sp_sb_mms_init(mlxsw_sp);
836 goto err_sb_mms_init;
837 mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
838 err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
839 mlxsw_sp->sb->sb_size,
842 MLXSW_SP_SB_ING_TC_COUNT,
843 MLXSW_SP_SB_EG_TC_COUNT);
845 goto err_devlink_sb_register;
/* Error unwind in reverse order of setup. */
849 err_devlink_sb_register:
851 err_sb_cpu_port_sb_cms_init:
853 mlxsw_sp_sb_ports_fini(mlxsw_sp);
/* Teardown: devlink unregister, port state free.
 * NOTE(review): the final kfree of mlxsw_sp->sb is elided in this view.
 */
859 void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
861 devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
862 mlxsw_sp_sb_ports_fini(mlxsw_sp);
/* Per-port buffer init: headroom, then CM and PM tables. */
866 int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
870 err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
873 err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
876 err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
/* devlink: report one pool's type, size (bytes), threshold type and cell
 * size. Relies on devlink pool/threshold enums matching the mlxsw register
 * enums (the casts below).
 */
881 int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
882 unsigned int sb_index, u16 pool_index,
883 struct devlink_sb_pool_info *pool_info)
885 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
886 enum mlxsw_reg_sbxx_dir dir;
887 struct mlxsw_sp_sb_pr *pr;
889 dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
890 pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
891 pool_info->pool_type = (enum devlink_sb_pool_type) dir;
892 pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
893 pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
894 pool_info->cell_size = mlxsw_sp->sb->cell_size;
/* devlink: resize/reconfigure a pool; rejects sizes beyond the HW maximum
 * before converting bytes to cells and writing SBPR.
 */
898 int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
899 unsigned int sb_index, u16 pool_index, u32 size,
900 enum devlink_sb_threshold_type threshold_type)
902 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
903 u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
904 enum mlxsw_reg_sbpr_mode mode;
906 if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
909 mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
910 return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
/* Offset between devlink dynamic thresholds and HW alpha encoding. */
914 #define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
/* Convert a HW max_buff to a devlink threshold: shift by the alpha offset
 * for dynamic pools, otherwise convert cells to bytes.
 */
916 static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
919 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
921 if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
922 return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
923 return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
/* Inverse of threshold_out: validate the alpha range for dynamic pools, or
 * convert a byte threshold to cells for static pools.
 */
926 static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
927 u32 threshold, u32 *p_max_buff)
929 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
931 if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
934 val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
935 if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
936 val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
940 *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
/* devlink: read a port's per-pool threshold from the PM cache. */
945 int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
946 unsigned int sb_index, u16 pool_index,
949 struct mlxsw_sp_port *mlxsw_sp_port =
950 mlxsw_core_port_driver_priv(mlxsw_core_port);
951 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
952 u8 local_port = mlxsw_sp_port->local_port;
953 struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
956 *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
/* devlink: set a port's per-pool threshold; validates/converts the devlink
 * threshold, then writes SBPM.
 */
961 int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
962 unsigned int sb_index, u16 pool_index,
965 struct mlxsw_sp_port *mlxsw_sp_port =
966 mlxsw_core_port_driver_priv(mlxsw_core_port);
967 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
968 u8 local_port = mlxsw_sp_port->local_port;
972 err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
973 threshold, &max_buff);
977 return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
/* devlink: report which pool a {port, TC} is bound to and its threshold,
 * straight from the CM cache.
 */
981 int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
982 unsigned int sb_index, u16 tc_index,
983 enum devlink_sb_pool_type pool_type,
984 u16 *p_pool_index, u32 *p_threshold)
986 struct mlxsw_sp_port *mlxsw_sp_port =
987 mlxsw_core_port_driver_priv(mlxsw_core_port);
988 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
989 u8 local_port = mlxsw_sp_port->local_port;
990 u8 pg_buff = tc_index;
991 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
992 struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
995 *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
997 *p_pool_index = cm->pool_index;
/* devlink: bind a {port, TC} to a pool with a threshold; the pool's
 * direction must match the requested pool_type. min_buff is forced to 0.
 */
1001 int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
1002 unsigned int sb_index, u16 tc_index,
1003 enum devlink_sb_pool_type pool_type,
1004 u16 pool_index, u32 threshold)
1006 struct mlxsw_sp_port *mlxsw_sp_port =
1007 mlxsw_core_port_driver_priv(mlxsw_core_port);
1008 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1009 u8 local_port = mlxsw_sp_port->local_port;
1010 u8 pg_buff = tc_index;
1011 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1015 if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir)
1018 err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
1019 threshold, &max_buff);
1023 return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
1024 0, max_buff, false, pool_index);
/* How many ports fit in one SBSR query: total record capacity divided by
 * records per port (ingress PGs + egress TCs).
 */
1027 #define MASKED_COUNT_MAX \
1028 (MLXSW_REG_SBSR_REC_MAX_COUNT / \
1029 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))
/* Context smuggled through the transaction's unsigned long cb_priv via
 * memcpy: first port of the batch and how many ports were masked.
 */
1031 struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
/* SBSR completion: unpack occupancy records into the CM cache. Records are
 * laid out as all ingress PGs for each batched port, then all egress TCs
 * for the same ports, so the port loop runs twice from local_port_1.
 */
1036 static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
1037 char *sbsr_pl, size_t sbsr_pl_len,
1038 unsigned long cb_priv)
1040 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1041 struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
1045 struct mlxsw_sp_sb_cm *cm;
1048 memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));
1051 for (local_port = cb_ctx.local_port_1;
1052 local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
1053 if (!mlxsw_sp->ports[local_port])
1055 for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
1056 cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
1057 MLXSW_REG_SBXX_DIR_INGRESS);
1058 mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
1059 &cm->occ.cur, &cm->occ.max);
1061 if (++masked_count == cb_ctx.masked_count)
/* Second pass: egress TC records for the same batch of ports. */
1065 for (local_port = cb_ctx.local_port_1;
1066 local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
1067 if (!mlxsw_sp->ports[local_port])
1069 for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
1070 cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
1071 MLXSW_REG_SBXX_DIR_EGRESS);
1072 mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
1073 &cm->occ.cur, &cm->occ.max);
1075 if (++masked_count == cb_ctx.masked_count)
/* devlink occ snapshot: batch ports into SBSR queries (at most
 * MASKED_COUNT_MAX per query), also queue per-pool SBPM queries, then wait
 * for the whole bulk list. Loops until every port has been covered.
 */
1080 int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
1081 unsigned int sb_index)
1083 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1084 struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
1085 unsigned long cb_priv;
1086 LIST_HEAD(bulk_list);
1095 sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
/* Start of one batch: remember its first port for the callback. */
1101 local_port_1 = local_port;
1103 mlxsw_reg_sbsr_pack(sbsr_pl, false);
1104 for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
1105 mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
1106 for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
1107 mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
1108 for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
1109 if (!mlxsw_sp->ports[local_port])
1111 mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
1112 mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
1113 for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
1114 err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
1119 if (++masked_count == MASKED_COUNT_MAX)
/* Ship this batch, with the ctx packed into cb_priv. */
1124 cb_ctx.masked_count = masked_count;
1125 cb_ctx.local_port_1 = local_port_1;
1126 memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
1127 err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
1128 &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
1132 if (local_port < mlxsw_core_max_ports(mlxsw_core))
1136 err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
/* devlink occ max clear: same batching as the snapshot path, but SBSR is
 * packed with clr=true, SBPM entries are cleared, and no unpack callback is
 * needed.
 */
1143 int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
1144 unsigned int sb_index)
1146 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1147 LIST_HEAD(bulk_list);
1149 unsigned int masked_count;
1155 sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
1162 mlxsw_reg_sbsr_pack(sbsr_pl, true);
1163 for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
1164 mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
1165 for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
1166 mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
1167 for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
1168 if (!mlxsw_sp->ports[local_port])
1170 mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
1171 mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
1172 for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
1173 err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
1178 if (++masked_count == MASKED_COUNT_MAX)
1183 err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
1184 &bulk_list, NULL, 0);
1187 if (local_port < mlxsw_core_max_ports(mlxsw_core))
1191 err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
/* devlink: report a {port, pool} occupancy (cur/max) in bytes, from the PM
 * cache filled by the snapshot path.
 */
1198 int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1199 unsigned int sb_index, u16 pool_index,
1200 u32 *p_cur, u32 *p_max)
1202 struct mlxsw_sp_port *mlxsw_sp_port =
1203 mlxsw_core_port_driver_priv(mlxsw_core_port);
1204 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1205 u8 local_port = mlxsw_sp_port->local_port;
1206 struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1209 *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
1210 *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
/* devlink: report a {port, TC} occupancy (cur/max) in bytes, from the CM
 * cache filled by the snapshot path.
 */
1214 int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1215 unsigned int sb_index, u16 tc_index,
1216 enum devlink_sb_pool_type pool_type,
1217 u32 *p_cur, u32 *p_max)
1219 struct mlxsw_sp_port *mlxsw_sp_port =
1220 mlxsw_core_port_driver_priv(mlxsw_core_port);
1221 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1222 u8 local_port = mlxsw_sp_port->local_port;
1223 u8 pg_buff = tc_index;
1224 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1225 struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1228 *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
1229 *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);