1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/dcbnl.h>
7 #include <linux/if_ether.h>
8 #include <linux/list.h>
9 #include <linux/netlink.h>
/* Shared-buffer bookkeeping types.
 * NOTE(review): this listing is a truncated excerpt -- the embedded original
 * line numbers jump, so struct bodies below are missing members and closing
 * braces; confirm against the full file.
 */
/* Per-pool configuration cache (state last written via the SBPR register). */
struct mlxsw_sp_sb_pr {
enum mlxsw_reg_sbpr_mode mode; /* static vs. dynamic threshold mode */
/* Occupancy sample: current value and watermark (see *_occ_query helpers). */
struct mlxsw_cp_sb_occ {
/* Per-{port, PG/TC} quota cache (SBCM register state). */
struct mlxsw_sp_sb_cm {
struct mlxsw_cp_sb_occ occ;
/* Sentinel meaning "infinite size"; triggers the infi_size/infi_max paths. */
#define MLXSW_SP_SB_INFI -1U
/* Per-{port, pool} quota cache (SBPM register state). */
struct mlxsw_sp_sb_pm {
struct mlxsw_cp_sb_occ occ;
/* Multicast (SBMM) quota description. */
struct mlxsw_sp_sb_mm {
/* Pool descriptor: direction plus the per-direction HW pool number. */
struct mlxsw_sp_sb_pool_des {
enum mlxsw_reg_sbxx_dir dir;
/* Logical pool indices into the mlxsw_sp*_sb_pool_dess arrays below. */
#define MLXSW_SP_SB_POOL_ING 0
#define MLXSW_SP_SB_POOL_EGR 4
#define MLXSW_SP_SB_POOL_EGR_MC 8
#define MLXSW_SP_SB_POOL_ING_CPU 9
#define MLXSW_SP_SB_POOL_EGR_CPU 10
/* Pool descriptor tables: entry order defines the logical pool index used by
 * MLXSW_SP_SB_POOL_* above. Four ingress pools, four egress pools, the
 * egress multicast pool (HW pool 15), then the CPU ingress/egress pools.
 * NOTE(review): closing "};" lines are missing from this excerpt.
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
{MLXSW_REG_SBXX_DIR_INGRESS, 0},
{MLXSW_REG_SBXX_DIR_INGRESS, 1},
{MLXSW_REG_SBXX_DIR_INGRESS, 2},
{MLXSW_REG_SBXX_DIR_INGRESS, 3},
{MLXSW_REG_SBXX_DIR_EGRESS, 0},
{MLXSW_REG_SBXX_DIR_EGRESS, 1},
{MLXSW_REG_SBXX_DIR_EGRESS, 2},
{MLXSW_REG_SBXX_DIR_EGRESS, 3},
{MLXSW_REG_SBXX_DIR_EGRESS, 15}, /* MLXSW_SP_SB_POOL_EGR_MC */
{MLXSW_REG_SBXX_DIR_INGRESS, 4}, /* MLXSW_SP_SB_POOL_ING_CPU */
{MLXSW_REG_SBXX_DIR_EGRESS, 4},  /* MLXSW_SP_SB_POOL_EGR_CPU */
/* Spectrum-2 uses the same direction/pool layout as Spectrum-1. */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
{MLXSW_REG_SBXX_DIR_INGRESS, 0},
{MLXSW_REG_SBXX_DIR_INGRESS, 1},
{MLXSW_REG_SBXX_DIR_INGRESS, 2},
{MLXSW_REG_SBXX_DIR_INGRESS, 3},
{MLXSW_REG_SBXX_DIR_EGRESS, 0},
{MLXSW_REG_SBXX_DIR_EGRESS, 1},
{MLXSW_REG_SBXX_DIR_EGRESS, 2},
{MLXSW_REG_SBXX_DIR_EGRESS, 3},
{MLXSW_REG_SBXX_DIR_EGRESS, 15},
{MLXSW_REG_SBXX_DIR_INGRESS, 4},
{MLXSW_REG_SBXX_DIR_EGRESS, 4},
/* Number of traffic classes / PGs tracked per direction. */
#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16
/* Per-port cache of CM quotas and (dynamically allocated) PM quotas. */
struct mlxsw_sp_sb_port {
struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
struct mlxsw_sp_sb_pm *pms;
/* NOTE(review): the two fields below belong to an enclosing struct (likely
 * "struct mlxsw_sp_sb") whose header line is missing from this excerpt.
 */
struct mlxsw_sp_sb_pr *prs;
struct mlxsw_sp_sb_port *ports;
u32 max_headroom_cells;
/* Per-ASIC-generation description of all shared-buffer defaults. */
struct mlxsw_sp_sb_vals {
unsigned int pool_count;
const struct mlxsw_sp_sb_pool_des *pool_dess;
const struct mlxsw_sp_sb_pm *pms;
const struct mlxsw_sp_sb_pm *pms_cpu;
const struct mlxsw_sp_sb_pr *prs;
const struct mlxsw_sp_sb_mm *mms;
const struct mlxsw_sp_sb_cm *cms_ingress;
const struct mlxsw_sp_sb_cm *cms_egress;
const struct mlxsw_sp_sb_cm *cms_cpu;
unsigned int mms_count;
unsigned int cms_ingress_count;
unsigned int cms_egress_count;
unsigned int cms_cpu_count;
123 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
125 return mlxsw_sp->sb->cell_size * cells;
128 u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
130 return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
133 u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
135 return mlxsw_sp->sb->max_headroom_cells;
138 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
141 return &mlxsw_sp->sb->prs[pool_index];
144 static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
146 if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
147 return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
149 return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
152 static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
153 u8 local_port, u8 pg_buff,
154 enum mlxsw_reg_sbxx_dir dir)
156 struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];
158 WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
159 if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
160 return &sb_port->ing_cms[pg_buff];
162 return &sb_port->eg_cms[pg_buff];
165 static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
166 u8 local_port, u16 pool_index)
168 return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
/* Program a pool via SBPR and mirror the result into the local cache.
 * NOTE(review): excerpt drops braces, the "int err;" declarations, error
 * returns and the cache-assignment tail -- confirm against the full file.
 */
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
enum mlxsw_reg_sbpr_mode mode,
u32 size, bool infi_size)
const struct mlxsw_sp_sb_pool_des *des =
&mlxsw_sp->sb_vals->pool_dess[pool_index];
char sbpr_pl[MLXSW_REG_SBPR_LEN];
struct mlxsw_sp_sb_pr *pr;
mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
/* For an "infinite" pool, cache the full shared-buffer size in cells. */
size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
/* Program a per-{port, PG/TC} quota via SBCM; cache only if the PG/TC exists
 * for the pool's direction (the cache is also corrected for infi_max).
 */
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u8 pg_buff, u32 min_buff, u32 max_buff,
bool infi_max, u16 pool_index)
const struct mlxsw_sp_sb_pool_des *des =
&mlxsw_sp->sb_vals->pool_dess[pool_index];
char sbcm_pl[MLXSW_REG_SBCM_LEN];
struct mlxsw_sp_sb_cm *cm;
mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
min_buff, max_buff, infi_max, des->pool);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
mlxsw_sp->sb->sb_size);
cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
cm->min_buff = min_buff;
cm->max_buff = max_buff;
cm->pool_index = pool_index;
/* Program a per-{port, pool} quota via SBPM and cache min/max on success. */
static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u16 pool_index, u32 min_buff, u32 max_buff)
const struct mlxsw_sp_sb_pool_des *des =
&mlxsw_sp->sb_vals->pool_dess[pool_index];
char sbpm_pl[MLXSW_REG_SBPM_LEN];
struct mlxsw_sp_sb_pm *pm;
mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
pm->min_buff = min_buff;
pm->max_buff = max_buff;
/* Queue an SBPM occupancy-clear query; CPU-port ingress is skipped (its
 * ingress quotas are not supported -- see the same test in *_occ_query).
 */
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u16 pool_index, struct list_head *bulk_list)
const struct mlxsw_sp_sb_pool_des *des =
&mlxsw_sp->sb_vals->pool_dess[pool_index];
char sbpm_pl[MLXSW_REG_SBPM_LEN];
if (local_port == MLXSW_PORT_CPU_PORT &&
des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
/* Completion callback: unpack current/max occupancy into the PM cache
 * entry passed via cb_priv.
 */
static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
char *sbpm_pl, size_t sbpm_pl_len,
unsigned long cb_priv)
struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
/* Queue an SBPM occupancy read whose result lands in the PM cache via the
 * callback above; CPU-port ingress is skipped as unsupported.
 */
static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u16 pool_index, struct list_head *bulk_list)
const struct mlxsw_sp_sb_pool_des *des =
&mlxsw_sp->sb_vals->pool_dess[pool_index];
char sbpm_pl[MLXSW_REG_SBPM_LEN];
struct mlxsw_sp_sb_pm *pm;
if (local_port == MLXSW_PORT_CPU_PORT &&
des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
mlxsw_sp_sb_pm_occ_query_cb,
/* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
#define MLXSW_SP_PB_HEADROOM 25632
#define MLXSW_SP_PB_UNUSED 8
/* Program the port's priority-buffer (headroom) sizes via PBMC. Buffer 0 is
 * scaled by the port width; buffer 9 gets one max-MTU; MLXSW_SP_PB_UNUSED is
 * skipped; the port shared buffer is set to 0.
 * NOTE(review): excerpt drops braces and the "pbs[]" array declaration head.
 */
static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
[0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
[9] = MLXSW_PORT_MAX_MTU,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pbmc_pl[MLXSW_REG_PBMC_LEN];
mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
for (i = 0; i < ARRAY_SIZE(pbs); i++) {
u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);
if (i == MLXSW_SP_PB_UNUSED)
mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
/* Map all IEEE priorities to buffer 0 via PPTB. */
static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
char pptb_pl[MLXSW_REG_PPTB_LEN];
mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
/* Headroom init = PB sizes followed by prio->buffer mapping. */
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
/* Allocate the per-port PM cache array (one entry per pool). */
static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_sb_port *sb_port)
struct mlxsw_sp_sb_pm *pms;
pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
/* Free the per-port PM cache array. */
static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
/* Allocate ports[] and prs[] caches, then init each port's PM array;
 * unwinds already-initialized ports on failure (goto-cleanup, partly elided
 * in this excerpt).
 */
static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
struct mlxsw_sp_sb_pr *prs;
mlxsw_sp->sb->ports = kcalloc(max_ports,
sizeof(struct mlxsw_sp_sb_port),
if (!mlxsw_sp->sb->ports)
prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
mlxsw_sp->sb->prs = prs;
for (i = 0; i < max_ports; i++) {
err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
goto err_sb_port_init;
/* Error unwind: finalize ports initialized so far, then free caches. */
for (i--; i >= 0; i--)
mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
kfree(mlxsw_sp->sb->prs);
kfree(mlxsw_sp->sb->ports);
/* Tear down all per-port PM caches and free ports[]/prs[]. */
static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
for (i = max_ports - 1; i >= 0; i--)
mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
kfree(mlxsw_sp->sb->prs);
kfree(mlxsw_sp->sb->ports);
/* Pool (PR) initializers. The _EXT variant additionally records whether
 * devlink may later change the pool's mode and/or size (freeze flags).
 * NOTE(review): macro bodies are partly elided in this excerpt.
 */
#define MLXSW_SP_SB_PR(_mode, _size) \
#define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size) \
.freeze_mode = _freeze_mode, \
.freeze_size = _freeze_size, \
#define MLXSW_SP1_SB_PR_INGRESS_SIZE 12440000
#define MLXSW_SP1_SB_PR_EGRESS_SIZE 13232000
#define MLXSW_SP1_SB_PR_CPU_SIZE (256 * 1000)
/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP1_SB_PR_INGRESS_SIZE),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP1_SB_PR_EGRESS_SIZE, true, false),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
#define MLXSW_SP2_SB_PR_INGRESS_SIZE 35297568
#define MLXSW_SP2_SB_PR_EGRESS_SIZE 35297568
#define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000)
/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP2_SB_PR_INGRESS_SIZE),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP2_SB_PR_EGRESS_SIZE, true, false),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
/* Write all default pool configurations; infinite pools use the infi_size
 * path, finite sizes are converted from bytes to cells first.
 */
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_sb_pr *prs,
for (i = 0; i < prs_len; i++) {
u32 size = prs[i].size;
if (size == MLXSW_SP_SB_INFI) {
err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
/* CM (per-PG/TC quota) initializers. The _ING/_EGR/_EGR_MC variants bind to
 * a fixed logical pool; _EGR_MC additionally freezes pool binding and
 * threshold against devlink changes.
 */
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \
.min_buff = _min_buff, \
.max_buff = _max_buff, \
.pool_index = _pool, \
#define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff) \
.min_buff = _min_buff, \
.max_buff = _max_buff, \
.pool_index = MLXSW_SP_SB_POOL_ING, \
#define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff) \
.min_buff = _min_buff, \
.max_buff = _max_buff, \
.pool_index = MLXSW_SP_SB_POOL_EGR, \
#define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff) \
.min_buff = _min_buff, \
.max_buff = _max_buff, \
.pool_index = MLXSW_SP_SB_POOL_EGR_MC, \
.freeze_pool = true, \
.freeze_thresh = true, \
/* Ingress quotas: 8 PGs (entry 8 is a placeholder) + CPU-pool entry. */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
MLXSW_SP_SB_CM_ING(10000, 8),
MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
MLXSW_SP_SB_CM_ING(0, 7),
MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
/* Egress quotas: 8 unicast TCs, 8 multicast TCs (infinite, frozen),
 * plus a final control entry.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
MLXSW_SP_SB_CM_EGR(1500, 9),
MLXSW_SP_SB_CM_EGR(1500, 9),
MLXSW_SP_SB_CM_EGR(1500, 9),
MLXSW_SP_SB_CM_EGR(1500, 9),
MLXSW_SP_SB_CM_EGR(1500, 9),
MLXSW_SP_SB_CM_EGR(1500, 9),
MLXSW_SP_SB_CM_EGR(1500, 9),
MLXSW_SP_SB_CM_EGR(1500, 9),
MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
MLXSW_SP_SB_CM_EGR(1, 0xff),
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
MLXSW_SP_SB_CM_EGR(0, 7),
MLXSW_SP_SB_CM_EGR(0, 7),
MLXSW_SP_SB_CM_EGR(0, 7),
MLXSW_SP_SB_CM_EGR(0, 7),
MLXSW_SP_SB_CM_EGR(0, 7),
MLXSW_SP_SB_CM_EGR(0, 7),
MLXSW_SP_SB_CM_EGR(0, 7),
MLXSW_SP_SB_CM_EGR(0, 7),
MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
MLXSW_SP_SB_CM_EGR(1, 0xff),
/* CPU port egress quotas; a handful of TCs get a non-zero min quota. */
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
631 mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
633 struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
635 return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
/* Program one direction's CM table for a port. Ingress PG 8 is skipped
 * (does not exist); static-pool thresholds are converted bytes->cells;
 * infinite max uses the infi_max path.
 * NOTE(review): braces, "cm = &cms[i];" and error returns are elided in
 * this excerpt.
 */
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
enum mlxsw_reg_sbxx_dir dir,
const struct mlxsw_sp_sb_cm *cms,
const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
for (i = 0; i < cms_len; i++) {
const struct mlxsw_sp_sb_cm *cm;
if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
continue; /* PG number 8 does not exist, skip it */
if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
max_buff = cm->max_buff;
if (max_buff == MLXSW_SP_SB_INFI) {
err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
true, cm->pool_index);
if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
false, cm->pool_index);
/* Program both directions' CM tables for a front-panel port. */
static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
mlxsw_sp_port->local_port,
MLXSW_REG_SBXX_DIR_INGRESS,
mlxsw_sp->sb_vals->cms_ingress,
mlxsw_sp->sb_vals->cms_ingress_count);
return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_port->local_port,
MLXSW_REG_SBXX_DIR_EGRESS,
mlxsw_sp->sb_vals->cms_egress,
mlxsw_sp->sb_vals->cms_egress_count);
698 static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
700 return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
701 mlxsw_sp->sb_vals->cms_cpu,
702 mlxsw_sp->sb_vals->cms_cpu_count);
/* PM (per-{port, pool} quota) initializer. */
#define MLXSW_SP_SB_PM(_min_buff, _max_buff) \
.min_buff = _min_buff, \
.max_buff = _max_buff, \
/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, 7),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(10000, 90000),
MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
MLXSW_SP_SB_PM(0, 7),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 7),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(10000, 90000),
MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
/* Order according to mlxsw_sp*_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 90000),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
/* Program a port's PM table for every pool; optionally skip ingress pools
 * (used for the CPU port). Static-pool max thresholds are converted
 * bytes->cells.
 * NOTE(review): braces and error returns are elided in this excerpt.
 */
static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
const struct mlxsw_sp_sb_pm *pms,
for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
const struct mlxsw_sp_sb_pm *pm = &pms[i];
const struct mlxsw_sp_sb_pool_des *des;
des = &mlxsw_sp->sb_vals->pool_dess[i];
if (skip_ingress && des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
max_buff = pm->max_buff;
if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, min_buff,
784 static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
786 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
788 return mlxsw_sp_sb_pms_init(mlxsw_sp, mlxsw_sp_port->local_port,
789 mlxsw_sp->sb_vals->pms, false);
/* Program the CPU port's (local port 0) PM table.
 * NOTE(review): the trailing argument (presumably skip_ingress == true)
 * is missing from this excerpt -- confirm.
 */
static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp)
return mlxsw_sp_sb_pms_init(mlxsw_sp, 0, mlxsw_sp->sb_vals->pms_cpu,
/* Multicast (SBMM) quota initializer; always bound to the egress pool. */
#define MLXSW_SP_SB_MM(_min_buff, _max_buff) \
.min_buff = _min_buff, \
.max_buff = _max_buff, \
.pool_index = MLXSW_SP_SB_POOL_EGR, \
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
MLXSW_SP_SB_MM(0, 6),
MLXSW_SP_SB_MM(0, 6),
MLXSW_SP_SB_MM(0, 6),
MLXSW_SP_SB_MM(0, 6),
MLXSW_SP_SB_MM(0, 6),
MLXSW_SP_SB_MM(0, 6),
MLXSW_SP_SB_MM(0, 6),
MLXSW_SP_SB_MM(0, 6),
MLXSW_SP_SB_MM(0, 6),
MLXSW_SP_SB_MM(0, 6),
MLXSW_SP_SB_MM(0, 6),
MLXSW_SP_SB_MM(0, 6),
MLXSW_SP_SB_MM(0, 6),
MLXSW_SP_SB_MM(0, 6),
MLXSW_SP_SB_MM(0, 6),
/* Program all SBMM (multicast) quotas. */
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
char sbmm_pl[MLXSW_REG_SBMM_LEN];
for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
const struct mlxsw_sp_sb_pool_des *des;
const struct mlxsw_sp_sb_mm *mc;
mc = &mlxsw_sp->sb_vals->mms[i];
des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
/* All pools used by sb_mm's are initialized using dynamic
* thresholds, therefore 'max_buff' isn't specified in cells.
min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
/* Count ingress vs. egress pools for devlink_sb_register; warns if no
 * egress pool is described.
 */
static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
u16 *p_ingress_len, u16 *p_egress_len)
for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) {
if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
MLXSW_REG_SBXX_DIR_INGRESS)
WARN(*p_egress_len == 0, "No egress pools\n");
/* Spectrum-1 shared-buffer defaults, exported for the per-ASIC driver ops. */
const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
.pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
.pool_dess = mlxsw_sp1_sb_pool_dess,
.pms = mlxsw_sp1_sb_pms,
.pms_cpu = mlxsw_sp_cpu_port_sb_pms,
.prs = mlxsw_sp1_sb_prs,
.mms = mlxsw_sp_sb_mms,
.cms_ingress = mlxsw_sp1_sb_cms_ingress,
.cms_egress = mlxsw_sp1_sb_cms_egress,
.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
.cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
.cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
/* Spectrum-2 shared-buffer defaults; MM and CPU tables are shared with
 * Spectrum-1.
 */
const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
.pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
.pool_dess = mlxsw_sp2_sb_pool_dess,
.pms = mlxsw_sp2_sb_pms,
.pms_cpu = mlxsw_sp_cpu_port_sb_pms,
.prs = mlxsw_sp2_sb_prs,
.mms = mlxsw_sp_sb_mms,
.cms_ingress = mlxsw_sp2_sb_cms_ingress,
.cms_egress = mlxsw_sp2_sb_cms_egress,
.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
.cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
.cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
/* Top-level shared-buffer init: validate HW resources, cache cell size /
 * SB size / headroom budget, allocate caches, program defaults, and
 * register the buffer with devlink. Uses goto-based unwind (labels partly
 * elided in this excerpt).
 */
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
u32 max_headroom_size;
u16 ing_pool_count = 0;
u16 eg_pool_count = 0;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
/* Round down, because this limit must not be overstepped. */
mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
mlxsw_sp->sb->cell_size;
err = mlxsw_sp_sb_ports_init(mlxsw_sp);
goto err_sb_ports_init;
err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
mlxsw_sp->sb_vals->pool_count);
goto err_sb_prs_init;
err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
goto err_sb_cpu_port_sb_cms_init;
err = mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp);
goto err_sb_cpu_port_pms_init;
err = mlxsw_sp_sb_mms_init(mlxsw_sp);
goto err_sb_mms_init;
mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
mlxsw_sp->sb->sb_size,
MLXSW_SP_SB_ING_TC_COUNT,
MLXSW_SP_SB_EG_TC_COUNT);
goto err_devlink_sb_register;
err_devlink_sb_register:
err_sb_cpu_port_pms_init:
err_sb_cpu_port_sb_cms_init:
mlxsw_sp_sb_ports_fini(mlxsw_sp);
/* Tear down: devlink unregister, port caches, then (elided here,
 * presumably) kfree of mlxsw_sp->sb -- confirm against the full file.
 */
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
mlxsw_sp_sb_ports_fini(mlxsw_sp);
/* Per-port init: headroom, then CM quotas, then PM quotas. */
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
/* devlink sb_pool_get op: report cached pool type, size (converted to
 * bytes), threshold type and cell size.
 */
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
enum mlxsw_reg_sbxx_dir dir;
struct mlxsw_sp_sb_pr *pr;
dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
pool_info->pool_type = (enum devlink_sb_pool_type) dir;
pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
pool_info->cell_size = mlxsw_sp->sb->cell_size;
/* devlink sb_pool_set op: validate against max buffer size and the pool's
 * freeze flags, then reprogram via SBPR.
 */
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type,
struct netlink_ext_ack *extack)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
const struct mlxsw_sp_sb_pr *pr;
enum mlxsw_reg_sbpr_mode mode;
mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
pr = &mlxsw_sp->sb_vals->prs[pool_index];
if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
if (pr->freeze_mode && pr->mode != mode) {
NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
if (pr->freeze_size && pr->size != size) {
NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
/* Dynamic thresholds are exchanged with devlink shifted by this offset. */
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
/* Convert an internal max_buff to the devlink representation: alpha offset
 * for dynamic pools, cells->bytes for static pools.
 */
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
/* Convert a devlink threshold to internal max_buff, range-checking dynamic
 * alpha values; static pools convert bytes->cells.
 */
static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
u32 threshold, u32 *p_max_buff,
struct netlink_ext_ack *extack)
struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value");
*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
/* devlink sb_port_pool_get op: report the cached per-{port, pool} threshold
 * in devlink representation.
 */
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
/* devlink sb_port_pool_set op: forbidden on the CPU port; otherwise convert
 * the threshold and reprogram via SBPM.
 */
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
u32 threshold, struct netlink_ext_ack *extack)
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
if (local_port == MLXSW_PORT_CPU_PORT) {
NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's threshold is forbidden");
err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
threshold, &max_buff, extack);
return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
/* devlink sb_tc_pool_bind_get op: report the cached pool binding and
 * threshold for a {port, TC, direction}.
 */
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold)
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
*p_pool_index = cm->pool_index;
/* devlink sb_tc_pool_bind_set op: forbidden on the CPU port, on direction
 * mismatches, and on frozen (multicast) bindings/thresholds; otherwise
 * reprogram via SBCM with min_buff 0.
 */
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold,
struct netlink_ext_ack *extack)
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
const struct mlxsw_sp_sb_cm *cm;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
if (local_port == MLXSW_PORT_CPU_PORT) {
NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's binding is forbidden");
if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden");
if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];
if (cm->freeze_pool && cm->pool_index != pool_index) {
NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden");
if (cm->freeze_thresh && cm->max_buff != threshold) {
NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden");
err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
threshold, &max_buff, extack);
return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
0, max_buff, false, pool_index);
/* Maximum number of ports that can be masked into a single SBSR query:
 * each masked port consumes one occupancy record per ingress TC plus one
 * per egress TC, and the register holds at most
 * MLXSW_REG_SBSR_REC_MAX_COUNT records.
 */
1180 #define MASKED_COUNT_MAX \
1181 (MLXSW_REG_SBSR_REC_MAX_COUNT / \
1182 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))

/* Context passed to the SBSR completion callback through the unsigned
 * long cb_priv (copied byte-wise with memcpy() on both sides): the first
 * local port of the batch and how many ports were masked in it.
 */
1184 struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
/* Completion callback for an SBSR occupancy query issued by
 * mlxsw_sp_sb_occ_snapshot().  Unpacks the returned occupancy records
 * into the driver's cached per-port, per-TC state (cm->occ.cur/max).
 *
 * Records are consumed in the same order the request was built: for each
 * masked port starting at cb_ctx.local_port_1, first all ingress TCs,
 * then (second pass) all egress TCs, until cb_ctx.masked_count ports have
 * been processed.  The CPU port contributes no ingress records, matching
 * the request packed by the caller.
 */
1189 static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
1190 char *sbsr_pl, size_t sbsr_pl_len,
1191 unsigned long cb_priv)
1193 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1194 struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
1198 struct mlxsw_sp_sb_cm *cm;
/* cb_priv is an unsigned long used as raw storage for the ctx struct. */
1201 memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));
/* First pass: ingress per-TC occupancy for each masked port. */
1204 for (local_port = cb_ctx.local_port_1;
1205 local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
1206 if (!mlxsw_sp->ports[local_port])
1208 if (local_port == MLXSW_PORT_CPU_PORT) {
1209 /* Ingress quotas are not supported for the CPU port */
1213 for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
1214 cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
1215 MLXSW_REG_SBXX_DIR_INGRESS);
1216 mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
1217 &cm->occ.cur, &cm->occ.max);
/* Stop after the last port that was masked into this batch. */
1219 if (++masked_count == cb_ctx.masked_count)
/* Second pass: egress per-TC occupancy, same port walk. */
1223 for (local_port = cb_ctx.local_port_1;
1224 local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
1225 if (!mlxsw_sp->ports[local_port])
1227 for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
1228 cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
1229 MLXSW_REG_SBXX_DIR_EGRESS);
1230 mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
1231 &cm->occ.cur, &cm->occ.max);
1233 if (++masked_count == cb_ctx.masked_count)
/* mlxsw_sp_sb_occ_snapshot() - devlink .sb_occ_snapshot callback.
 * Take an occupancy snapshot of the whole shared buffer into the
 * driver's caches.
 *
 * Ports are walked in batches of at most MASKED_COUNT_MAX so each SBSR
 * query fits the register's record limit.  Per batch: an SBSR request is
 * packed with every ingress PG and egress TC masked and each present
 * port's ingress/egress port masks set (ingress skipped for the CPU
 * port), per-pool PM occupancy is queried for every port, and the SBSR
 * transaction is queued with mlxsw_sp_sb_sr_occ_query_cb() as the
 * completion handler.  All queued transactions are awaited on bulk_list
 * before returning.
 */
1238 int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
1239 unsigned int sb_index)
1241 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1242 struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
1243 unsigned long cb_priv;
1244 LIST_HEAD(bulk_list);
1253 sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
/* Start the port walk from the CPU port. */
1257 local_port = MLXSW_PORT_CPU_PORT;
/* Remember the batch's first port for the unpack callback. */
1259 local_port_1 = local_port;
/* false: query only — do not clear the max-occupancy watermarks
 * (contrast with mlxsw_sp_sb_occ_max_clear(), which passes true).
 */
1261 mlxsw_reg_sbsr_pack(sbsr_pl, false);
1262 for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
1263 mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
1264 for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
1265 mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
1266 for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
1267 if (!mlxsw_sp->ports[local_port])
1269 if (local_port != MLXSW_PORT_CPU_PORT) {
1270 /* Ingress quotas are not supported for the CPU port */
1271 mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
1274 mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
/* Per-pool (PM) occupancy is fetched separately, port by port. */
1275 for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
1276 err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
/* Batch full — stop masking and fire the query below. */
1281 if (++masked_count == MASKED_COUNT_MAX)
1286 cb_ctx.masked_count = masked_count;
1287 cb_ctx.local_port_1 = local_port_1;
/* Smuggle the ctx struct through the unsigned long cb_priv. */
1288 memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
1289 err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
1290 &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
/* Ports remain — presumably loops back for another batch (the jump
 * target is elided from this view; confirm against the full file).
 */
1294 if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
1300 err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
/* mlxsw_sp_sb_occ_max_clear() - devlink .sb_occ_max_clear callback.
 * Clear the max-occupancy watermarks across the shared buffer.
 *
 * Same batched port walk as mlxsw_sp_sb_occ_snapshot(), but the SBSR
 * request is packed with the clear flag set, per-pool PM watermarks are
 * cleared via mlxsw_sp_sb_pm_occ_clear(), and the response needs no
 * unpack callback (NULL passed to mlxsw_reg_trans_query()).
 */
1307 int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
1308 unsigned int sb_index)
1310 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1311 LIST_HEAD(bulk_list);
1313 unsigned int masked_count;
1319 sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
/* Start the port walk from the CPU port. */
1323 local_port = MLXSW_PORT_CPU_PORT;
/* true: clear the max-occupancy watermarks as part of the query. */
1326 mlxsw_reg_sbsr_pack(sbsr_pl, true);
1327 for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
1328 mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
1329 for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
1330 mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
1331 for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
1332 if (!mlxsw_sp->ports[local_port])
1334 if (local_port != MLXSW_PORT_CPU_PORT) {
1335 /* Ingress quotas are not supported for the CPU port */
1336 mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
1339 mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
/* Per-pool (PM) watermarks are cleared separately, port by port. */
1340 for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
1341 err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
/* Batch full — stop masking and fire the query below. */
1346 if (++masked_count == MASKED_COUNT_MAX)
/* No completion callback: nothing to unpack for a clear. */
1351 err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
1352 &bulk_list, NULL, 0);
/* Ports remain — presumably loops back for another batch (the jump
 * target is elided from this view; confirm against the full file).
 */
1355 if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
1361 err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
/* mlxsw_sp_sb_occ_port_pool_get() - devlink .sb_occ_port_pool_get callback.
 * Report the cached per-port, per-pool occupancy (current and max),
 * converted from hardware cells to bytes.  The cache (pm->occ) is filled
 * by a prior mlxsw_sp_sb_occ_snapshot().
 */
1368 int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
1369 unsigned int sb_index, u16 pool_index,
1370 u32 *p_cur, u32 *p_max)
1372 struct mlxsw_sp_port *mlxsw_sp_port =
1373 mlxsw_core_port_driver_priv(mlxsw_core_port);
1374 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1375 u8 local_port = mlxsw_sp_port->local_port;
1376 struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
1379 *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
1380 *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
1384 int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
1385 unsigned int sb_index, u16 tc_index,
1386 enum devlink_sb_pool_type pool_type,
1387 u32 *p_cur, u32 *p_max)
1389 struct mlxsw_sp_port *mlxsw_sp_port =
1390 mlxsw_core_port_driver_priv(mlxsw_core_port);
1391 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1392 u8 local_port = mlxsw_sp_port->local_port;
1393 u8 pg_buff = tc_index;
1394 enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
1395 struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
1398 *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
1399 *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);