Merge tag 'wireless-drivers-for-davem-2017-04-03' of git://git.kernel.org/pub/scm...
[linux-2.6-block.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_buffers.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the names of the copyright holders nor the names of its
15  *    contributors may be used to endorse or promote products derived from
16  *    this software without specific prior written permission.
17  *
18  * Alternatively, this software may be distributed under the terms of the
19  * GNU General Public License ("GPL") version 2 as published by the Free
20  * Software Foundation.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34
35 #include <linux/kernel.h>
36 #include <linux/types.h>
37 #include <linux/dcbnl.h>
38 #include <linux/if_ether.h>
39 #include <linux/list.h>
40
41 #include "spectrum.h"
42 #include "core.h"
43 #include "port.h"
44 #include "reg.h"
45
46 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
47                                                  u8 pool,
48                                                  enum mlxsw_reg_sbxx_dir dir)
49 {
50         return &mlxsw_sp->sb.prs[dir][pool];
51 }
52
53 static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
54                                                  u8 local_port, u8 pg_buff,
55                                                  enum mlxsw_reg_sbxx_dir dir)
56 {
57         return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff];
58 }
59
60 static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
61                                                  u8 local_port, u8 pool,
62                                                  enum mlxsw_reg_sbxx_dir dir)
63 {
64         return &mlxsw_sp->sb.ports[local_port].pms[dir][pool];
65 }
66
67 static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
68                                 enum mlxsw_reg_sbxx_dir dir,
69                                 enum mlxsw_reg_sbpr_mode mode, u32 size)
70 {
71         char sbpr_pl[MLXSW_REG_SBPR_LEN];
72         struct mlxsw_sp_sb_pr *pr;
73         int err;
74
75         mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
76         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
77         if (err)
78                 return err;
79
80         pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
81         pr->mode = mode;
82         pr->size = size;
83         return 0;
84 }
85
/* Configure the shared buffer binding of a {port, PG/TC} (SBCM register)
 * and, on success, mirror the configuration in the local cache. Entries
 * whose pg_buff falls outside the cached TC range are written to the
 * device only.
 */
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
				u32 min_buff, u32 max_buff, u8 pool)
{
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
			    min_buff, max_buff, pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;
	/* Only the first MLXSW_SP_SB_TC_COUNT entries have cache slots. */
	if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
		struct mlxsw_sp_sb_cm *cm;

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool = pool;
	}
	return 0;
}
108
109 static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
110                                 u8 pool, enum mlxsw_reg_sbxx_dir dir,
111                                 u32 min_buff, u32 max_buff)
112 {
113         char sbpm_pl[MLXSW_REG_SBPM_LEN];
114         struct mlxsw_sp_sb_pm *pm;
115         int err;
116
117         mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
118                             min_buff, max_buff);
119         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
120         if (err)
121                 return err;
122
123         pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
124         pm->min_buff = min_buff;
125         pm->max_buff = max_buff;
126         return 0;
127 }
128
/* Queue an SBPM query with the clear-on-read bit set, so the device
 * resets the per-{port, pool} max occupancy watermark. The transaction
 * is added to bulk_list and executed asynchronously; no completion
 * callback is registered since the result is discarded.
 */
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
				    struct list_head *bulk_list)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}
139
/* Completion callback for an SBPM occupancy query. cb_priv carries the
 * cached mlxsw_sp_sb_pm entry into which the current and max occupancy
 * values are unpacked.
 */
static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}
148
/* Queue an SBPM occupancy query for a {port, pool}. On completion,
 * mlxsw_sp_sb_pm_occ_query_cb() stores the result in the cached entry,
 * which is passed as the callback's private data.
 */
static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
				    struct list_head *bulk_list)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}
163
/* Per-port headroom (packet) buffer sizes, in bytes, indexed by buffer
 * number. Unlisted indices implicitly get zero size.
 */
static const u16 mlxsw_sp_pbs[] = {
	[0] = 2 * ETH_FRAME_LEN,
	[9] = 2 * MLXSW_PORT_MAX_MTU,
};

#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
/* Buffer number 8 does not exist and must be skipped during init. */
#define MLXSW_SP_PB_UNUSED 8
171
172 static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
173 {
174         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
175         char pbmc_pl[MLXSW_REG_PBMC_LEN];
176         int i;
177
178         mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
179                             0xffff, 0xffff / 2);
180         for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
181                 u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]);
182
183                 if (i == MLXSW_SP_PB_UNUSED)
184                         continue;
185                 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
186         }
187         mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
188                                          MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
189         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
190 }
191
192 static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
193 {
194         char pptb_pl[MLXSW_REG_PPTB_LEN];
195         int i;
196
197         mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
198         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
199                 mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
200         return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
201                                pptb_pl);
202 }
203
/* Initialize the port headroom: buffer sizes first, then the
 * priority-to-buffer mapping.
 */
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
	if (!err)
		err = mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
	return err;
}
213
214 static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
215 {
216         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
217
218         mlxsw_sp->sb.ports = kcalloc(max_ports, sizeof(struct mlxsw_sp_sb_port),
219                                      GFP_KERNEL);
220         if (!mlxsw_sp->sb.ports)
221                 return -ENOMEM;
222         return 0;
223 }
224
/* Free the per-port shared buffer cache allocated by
 * mlxsw_sp_sb_ports_init().
 */
static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->sb.ports);
}
229
/* Default shared buffer pool sizes, in bytes. */
#define MLXSW_SP_SB_PR_INGRESS_SIZE     12440000
#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP_SB_PR_EGRESS_SIZE      13232000

#define MLXSW_SP_SB_PR(_mode, _size)    \
	{                               \
		.mode = _mode,          \
		.size = _size,          \
	}

/* Default ingress pool configuration; index is the pool number. Pool 0
 * carries data traffic, pool 3 is reserved for management traffic.
 */
static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_SB_PR_INGRESS_MNG_SIZE),
};

#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)
250
/* Default egress pool configuration; only pool 0 gets a size. */
static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
};

#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)
259
260 static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
261                                   enum mlxsw_reg_sbxx_dir dir,
262                                   const struct mlxsw_sp_sb_pr *prs,
263                                   size_t prs_len)
264 {
265         int i;
266         int err;
267
268         for (i = 0; i < prs_len; i++) {
269                 u32 size = mlxsw_sp_bytes_cells(mlxsw_sp, prs[i].size);
270
271                 err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, prs[i].mode, size);
272                 if (err)
273                         return err;
274         }
275         return 0;
276 }
277
/* Initialize all shared buffer pools: ingress table first, then
 * egress.
 */
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
				     mlxsw_sp_sb_prs_ingress,
				     MLXSW_SP_SB_PRS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_sb_prs_egress,
				      MLXSW_SP_SB_PRS_EGRESS_LEN);
}
291
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)     \
	{                                               \
		.min_buff = _min_buff,                  \
		.max_buff = _max_buff,                  \
		.pool = _pool,                          \
	}

/* Default ingress {PG -> pool} bindings; index is the PG number.
 * min_buff is in bytes (converted to cells at init time); max_buff is
 * a dynamic-threshold alpha value, not a byte count. PG 9 binds
 * management traffic to pool 3.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM(10000, 8, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(20000, 1, 3),
};

#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)
313
/* Default egress {TC -> pool} bindings; index is the TC number. The
 * first eight entries are unicast TCs, the next eight are zeroed, and
 * the final entry covers the remaining traffic class.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(1, 0xff, 0),
};

#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)
335
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)

/* Egress {TC -> pool} bindings for the CPU port; only TC 7 gets a
 * non-zero minimum quota.
 */
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(10000, 0, 0),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};

/* NOTE(review): "MCS" looks like a transposition of "CMS" - kept for
 * compatibility with existing users.
 */
#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
	ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
375
/* Write a table of default {PG/TC -> pool} bindings for one port and
 * direction. min_buff values are given in bytes and converted to
 * cells; ingress PG 8 is skipped because it does not exist in
 * hardware.
 */
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
		u32 min_buff;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		/* All pools are initialized using dynamic thresholds,
		 * therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
		err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
					   min_buff, cm->max_buff, cm->pool);
		if (err)
			return err;
	}
	return 0;
}
402
403 static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
404 {
405         int err;
406
407         err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
408                                      mlxsw_sp_port->local_port,
409                                      MLXSW_REG_SBXX_DIR_INGRESS,
410                                      mlxsw_sp_sb_cms_ingress,
411                                      MLXSW_SP_SB_CMS_INGRESS_LEN);
412         if (err)
413                 return err;
414         return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
415                                       mlxsw_sp_port->local_port,
416                                       MLXSW_REG_SBXX_DIR_EGRESS,
417                                       mlxsw_sp_sb_cms_egress,
418                                       MLXSW_SP_SB_CMS_EGRESS_LEN);
419 }
420
/* Apply the CPU port's egress binding table (the CPU port is local
 * port 0).
 */
static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_cpu_port_sb_cms,
				      MLXSW_SP_CPU_PORT_SB_MCS_LEN);
}
427
#define MLXSW_SP_SB_PM(_min_buff, _max_buff)    \
	{                                       \
		.min_buff = _min_buff,          \
		.max_buff = _max_buff,          \
	}

/* Default ingress per-port pool quotas; index is the pool number.
 * max_buff is a dynamic-threshold alpha value.
 */
static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};

#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)
442
/* Default egress per-port pool quotas; index is the pool number. */
static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};

#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)
451
452 static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
453                                        enum mlxsw_reg_sbxx_dir dir,
454                                        const struct mlxsw_sp_sb_pm *pms,
455                                        size_t pms_len)
456 {
457         int i;
458         int err;
459
460         for (i = 0; i < pms_len; i++) {
461                 const struct mlxsw_sp_sb_pm *pm;
462
463                 pm = &pms[i];
464                 err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
465                                            pm->min_buff, pm->max_buff);
466                 if (err)
467                         return err;
468         }
469         return 0;
470 }
471
/* Apply the default ingress and egress per-port pool quota tables to a
 * port.
 */
static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
					  mlxsw_sp_port->local_port,
					  MLXSW_REG_SBXX_DIR_INGRESS,
					  mlxsw_sp_sb_pms_ingress,
					  MLXSW_SP_SB_PMS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
					   mlxsw_sp_port->local_port,
					   MLXSW_REG_SBXX_DIR_EGRESS,
					   mlxsw_sp_sb_pms_egress,
					   MLXSW_SP_SB_PMS_EGRESS_LEN);
}
489
/* Shared buffer configuration of a multicast switch priority. */
struct mlxsw_sp_sb_mm {
	u32 min_buff;	/* minimum quota, in bytes (converted to cells) */
	u32 max_buff;	/* maximum quota; dynamic-threshold alpha value */
	u8 pool;	/* pool number the priority is bound to */
};

#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool)     \
	{                                               \
		.min_buff = _min_buff,                  \
		.max_buff = _max_buff,                  \
		.pool = _pool,                          \
	}
502
/* Default multicast configuration; index is the switch priority. All
 * priorities share the same settings.
 */
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
};

#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
522
/* Write the multicast shared buffer configuration (SBMM register) for
 * every switch priority, converting min_buff from bytes to cells.
 */
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp_sb_mms[i];
		/* All pools are initialized using dynamic thresholds,
		 * therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
				    mc->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}
546
/* Device-wide shared buffer initialization: read the cell size and
 * total buffer size from device resources, allocate the per-port
 * cache, program the default pool/CM/MM tables and register the shared
 * buffer with devlink.
 *
 * Returns 0 on success; on failure everything allocated so far is
 * released via the goto-cleanup chain below and a negative errno is
 * returned.
 */
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 sb_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;
	mlxsw_sp->sb.cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EIO;
	sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE);

	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
	if (err)
		return err;
	err = mlxsw_sp_sb_prs_init(mlxsw_sp);
	if (err)
		goto err_sb_prs_init;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_sb_cms_init;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		goto err_sb_mms_init;
	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, sb_size,
				  MLXSW_SP_SB_POOL_COUNT,
				  MLXSW_SP_SB_POOL_COUNT,
				  MLXSW_SP_SB_TC_COUNT,
				  MLXSW_SP_SB_TC_COUNT);
	if (err)
		goto err_devlink_sb_register;

	return 0;

	/* The register writes above configure hardware only; the sole
	 * resource to release on any failure is the per-port cache.
	 */
err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	return err;
}
589
/* Tear down in reverse order of mlxsw_sp_buffers_init(): unregister
 * from devlink, then free the per-port cache.
 */
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
}
595
/* Per-port shared buffer initialization: headroom, {PG/TC -> pool}
 * bindings and per-port pool quotas, in that order.
 *
 * Returns 0 on success or the first negative errno encountered.
 */
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;
	/* Return the last step directly, matching the other init
	 * wrappers in this file.
	 */
	return mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
}
610
/* Convert a devlink pool index to a per-direction pool number; devlink
 * indexes ingress pools as [0, COUNT) and egress pools as
 * [COUNT, 2*COUNT).
 */
static u8 pool_get(u16 pool_index)
{
	return pool_index % MLXSW_SP_SB_POOL_COUNT;
}
615
616 static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
617 {
618         u16 pool_index;
619
620         pool_index = pool;
621         if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
622                 pool_index += MLXSW_SP_SB_POOL_COUNT;
623         return pool_index;
624 }
625
626 static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
627 {
628         return pool_index < MLXSW_SP_SB_POOL_COUNT ?
629                MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
630 }
631
/* devlink op: report a pool's type, size (in bytes) and threshold mode
 * from the local cache; no hardware access is needed.
 */
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	/* The devlink enums are defined to match the register encodings,
	 * hence the direct casts.
	 */
	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
	return 0;
}
646
/* devlink op: set a pool's size (given in bytes, converted to cells)
 * and threshold mode, after bounds-checking against the device's
 * maximum buffer size.
 */
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	enum mlxsw_reg_sbpr_mode mode;

	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EINVAL;

	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
}
663
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */

/* Translate a cached max_buff into the value exposed to devlink: for a
 * dynamic pool, shift the alpha encoding by the fixed offset; for a
 * static pool, convert cells to bytes.
 */
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
				     enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}
675
/* Translate a devlink threshold into a max_buff value for the device:
 * for a dynamic pool, apply the alpha offset and range-check the
 * result; for a static pool, convert bytes to cells.
 *
 * Returns 0 and fills *p_max_buff on success, -EINVAL if a dynamic
 * threshold falls outside the supported alpha range.
 */
static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
				    enum mlxsw_reg_sbxx_dir dir, u32 threshold,
				    u32 *p_max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
		int val;

		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
			return -EINVAL;
		*p_max_buff = val;
	} else {
		*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
	}
	return 0;
}
695
/* devlink op: report a port's threshold on a pool, translated from the
 * cached per-{port, pool} quota.
 */
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
						 pm->max_buff);
	return 0;
}
713
/* devlink op: set a port's threshold on a pool. The threshold is
 * validated and translated to device units, then written with a zero
 * minimum quota.
 */
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	u32 max_buff;
	int err;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
				    0, max_buff);
}
735
/* devlink op: report which pool a {port, TC} is bound to and its
 * threshold, from the cached binding.
 */
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	/* devlink pool types are defined to match the register encoding. */
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
						 cm->max_buff);
	*p_pool_index = pool_index_get(cm->pool, dir);
	return 0;
}
755
/* devlink op: bind a {port, TC} to a pool with the given threshold.
 * The pool's direction must agree with the requested pool type;
 * otherwise -EINVAL is returned.
 */
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	u8 pool = pool_get(pool_index);
	u32 max_buff;
	int err;

	/* An ingress TC cannot be bound to an egress pool and vice versa. */
	if (dir != dir_get(pool_index))
		return -EINVAL;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
				    0, max_buff, pool);
}
782
/* Maximum number of ports that can be covered by a single SBSR query:
 * each masked port contributes MLXSW_SP_SB_TC_COUNT ingress plus
 * MLXSW_SP_SB_TC_COUNT egress occupancy records to the response, which
 * holds at most MLXSW_REG_SBSR_REC_MAX_COUNT records in total.
 */
#define MASKED_COUNT_MAX \
        (MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))
785
/* Context handed from the SBSR query issuer to its completion callback.
 * It is packed into the unsigned long cb_priv with memcpy(), so it must
 * stay no larger than sizeof(unsigned long).
 */
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
        u8 masked_count;        /* number of ports masked in this batch */
        u8 local_port_1;        /* first local port of this batch */
};
790
/* Completion callback for the SBSR occupancy query issued by
 * mlxsw_sp_sb_occ_snapshot(): unpack the returned occupancy records
 * into the per-port, per-TC CM cache.
 *
 * The unpack order must mirror the order the device emits records for
 * the batch of ports masked in the query: for the ports starting at
 * local_port_1, all ingress records come first, then all egress
 * records, MLXSW_SP_SB_TC_COUNT records per port per direction. Hence
 * rec_index runs monotonically across both loops below.
 */
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
                                        char *sbsr_pl, size_t sbsr_pl_len,
                                        unsigned long cb_priv)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
        u8 masked_count;
        u8 local_port;
        int rec_index = 0;
        struct mlxsw_sp_sb_cm *cm;
        int i;

        /* The issuer packed the context into cb_priv with memcpy();
         * recover it the same way.
         */
        memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

        /* First pass: ingress records for every port of the batch. */
        masked_count = 0;
        for (local_port = cb_ctx.local_port_1;
             local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
                if (!mlxsw_sp->ports[local_port])
                        continue;
                for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
                        cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
                                                MLXSW_REG_SBXX_DIR_INGRESS);
                        mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
                                                  &cm->occ.cur, &cm->occ.max);
                }
                /* Stop after consuming every port of this batch. */
                if (++masked_count == cb_ctx.masked_count)
                        break;
        }
        /* Second pass: egress records, walking the same ports again. */
        masked_count = 0;
        for (local_port = cb_ctx.local_port_1;
             local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
                if (!mlxsw_sp->ports[local_port])
                        continue;
                for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
                        cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
                                                MLXSW_REG_SBXX_DIR_EGRESS);
                        mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
                                                  &cm->occ.cur, &cm->occ.max);
                }
                if (++masked_count == cb_ctx.masked_count)
                        break;
        }
}
834
/* devlink sb_occ_snapshot op: capture current shared-buffer occupancy
 * for all ports into the driver cache.
 *
 * Ports are processed in batches of at most MASKED_COUNT_MAX, because
 * one SBSR response can carry only MLXSW_REG_SBSR_REC_MAX_COUNT
 * records. Per-pool (PM) occupancy is queried via dedicated
 * transactions appended to bulk_list; per-TC (CM) occupancy arrives in
 * the SBSR response and is unpacked by mlxsw_sp_sb_sr_occ_query_cb().
 * All transactions are awaited together at "out".
 */
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
                             unsigned int sb_index)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
        unsigned long cb_priv;
        LIST_HEAD(bulk_list);
        char *sbsr_pl;
        u8 masked_count;
        u8 local_port_1;
        u8 local_port = 0;
        int i;
        int err;
        int err2;

        sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
        if (!sbsr_pl)
                return -ENOMEM;

next_batch:
        /* Resume the port walk one past the last port of the previous
         * batch (local port numbering starts at 1).
         */
        local_port++;
        local_port_1 = local_port;
        masked_count = 0;
        /* false: query occupancy without clearing the max watermarks. */
        mlxsw_reg_sbsr_pack(sbsr_pl, false);
        for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
                mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
                mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
        }
        for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
                if (!mlxsw_sp->ports[local_port])
                        continue;
                mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
                mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
                /* Pool occupancy is fetched with separate SBPM queries,
                 * one per pool per direction, batched on bulk_list.
                 */
                for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
                        err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
                                                       MLXSW_REG_SBXX_DIR_INGRESS,
                                                       &bulk_list);
                        if (err)
                                goto out;
                        err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
                                                       MLXSW_REG_SBXX_DIR_EGRESS,
                                                       &bulk_list);
                        if (err)
                                goto out;
                }
                /* Batch full: fire the SBSR query for these ports. */
                if (++masked_count == MASKED_COUNT_MAX)
                        goto do_query;
        }
        /* Fewer than MASKED_COUNT_MAX ports left; falls through to
         * query the final (partial) batch.
         */

do_query:
        cb_ctx.masked_count = masked_count;
        cb_ctx.local_port_1 = local_port_1;
        memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
        err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
                                    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
                                    cb_priv);
        if (err)
                goto out;
        if (local_port < mlxsw_core_max_ports(mlxsw_core))
                goto next_batch;

out:
        /* Wait for all queued transactions even on error, so none is
         * left in flight when sbsr_pl is freed.
         */
        err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
        if (!err)
                err = err2;
        kfree(sbsr_pl);
        return err;
}
903
/* devlink sb_occ_max_clear op: reset the maximum-occupancy watermarks
 * for all ports.
 *
 * Same batched SBSR walk as mlxsw_sp_sb_occ_snapshot(), but the SBSR
 * is packed with clr=true and per-pool watermarks are cleared with
 * mlxsw_sp_sb_pm_occ_clear(); the response records are discarded (no
 * completion callback), so only the clearing side effect matters.
 */
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
                              unsigned int sb_index)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        LIST_HEAD(bulk_list);
        char *sbsr_pl;
        unsigned int masked_count;
        u8 local_port = 0;
        int i;
        int err;
        int err2;

        sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
        if (!sbsr_pl)
                return -ENOMEM;

next_batch:
        /* Resume the port walk one past the last port of the previous
         * batch (local port numbering starts at 1).
         */
        local_port++;
        masked_count = 0;
        /* true: clear the max watermarks as part of the query. */
        mlxsw_reg_sbsr_pack(sbsr_pl, true);
        for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
                mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
                mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
        }
        for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
                if (!mlxsw_sp->ports[local_port])
                        continue;
                mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
                mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
                /* Per-pool watermarks are cleared via separate SBPM
                 * transactions batched on bulk_list.
                 */
                for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
                        err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
                                                       MLXSW_REG_SBXX_DIR_INGRESS,
                                                       &bulk_list);
                        if (err)
                                goto out;
                        err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
                                                       MLXSW_REG_SBXX_DIR_EGRESS,
                                                       &bulk_list);
                        if (err)
                                goto out;
                }
                /* Batch full: fire the SBSR for these ports. */
                if (++masked_count == MASKED_COUNT_MAX)
                        goto do_query;
        }
        /* Falls through to handle the final (partial) batch. */

do_query:
        err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
                                    &bulk_list, NULL, 0);
        if (err)
                goto out;
        if (local_port < mlxsw_core_max_ports(mlxsw_core))
                goto next_batch;

out:
        /* Wait for all queued transactions even on error, so none is
         * left in flight when sbsr_pl is freed.
         */
        err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
        if (!err)
                err = err2;
        kfree(sbsr_pl);
        return err;
}
964
965 int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
966                                   unsigned int sb_index, u16 pool_index,
967                                   u32 *p_cur, u32 *p_max)
968 {
969         struct mlxsw_sp_port *mlxsw_sp_port =
970                         mlxsw_core_port_driver_priv(mlxsw_core_port);
971         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
972         u8 local_port = mlxsw_sp_port->local_port;
973         u8 pool = pool_get(pool_index);
974         enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
975         struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
976                                                        pool, dir);
977
978         *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
979         *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
980         return 0;
981 }
982
983 int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
984                                      unsigned int sb_index, u16 tc_index,
985                                      enum devlink_sb_pool_type pool_type,
986                                      u32 *p_cur, u32 *p_max)
987 {
988         struct mlxsw_sp_port *mlxsw_sp_port =
989                         mlxsw_core_port_driver_priv(mlxsw_core_port);
990         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
991         u8 local_port = mlxsw_sp_port->local_port;
992         u8 pg_buff = tc_index;
993         enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
994         struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
995                                                        pg_buff, dir);
996
997         *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
998         *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
999         return 0;
1000 }