net/mlx5: Introduce termination table bits
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "ecpf.h"
#include "lib/eq.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)

#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

#define UPLINK_REP_INDEX 0

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);

bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
{
	return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
}

u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_CHAIN;

	return 0;
}

u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_PRIO;

	return 1;
}

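/* Add an offloaded rule to the per-chain/prio FDB table: build the
 * destination list (vports and/or a goto-chain table, plus an optional
 * counter), emulate VLAN push/pop when the device lacks per-flow support,
 * and match on the source vport (and source e-switch owner on a merged
 * e-switch).
 */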
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;
	void *misc;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		if (attr->dest_chain) {
			struct mlx5_flow_table *ft;

			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.reformat_id = attr->dests[j].encap_id;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.reformat_id =
						attr->dests[j].encap_id;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
		if (attr->tunnel_match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
		if (attr->match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
	} else if (attr->match_level != MLX5_MATCH_NONE) {
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
	if (attr->dest_chain)
		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

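/* Add a rule to the per-chain/prio "fast" FDB table that forwards to the
 * first attr->split_count vport destinations and then chains into the
 * level-1 forwarding table of the same chain/prio.
 */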
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i;

	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.reformat_id = attr->dests[i].encap_id;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb,
	i++;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	esw->offloads.num_flows++;

	return rule;
add_err:
	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

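/* Common teardown for offloaded and fwd rules: delete the rule and drop
 * the references taken on the chain/prio tables when it was added.
 */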
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);

	mlx5_del_flow_rules(rule);
	esw->offloads.num_flows--;

	if (fwd_rule) {
		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
	} else {
		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
		if (attr->dest_chain)
			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (atomic_read(&rep->rep_if[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep  = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep  = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

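/* Emulate per-flow VLAN push/pop on devices without per-flow support:
 * track push/pop refcounts, program the vport VLAN (insert/strip) for
 * push, and apply the global VLAN pop policy for the first VLAN rule.
 */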
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		  !attr->dest_chain);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

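/* Add a slow path FDB rule that matches packets sent by the e-switch
 * manager on the given SQ (source_sqn) and forwards them to the given
 * vport.
 */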
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

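/* Peer miss rules (merged e-switch): match traffic whose source is a
 * vport of the peer device and send it back to the peer e-switch manager
 * vport, one rule per peer vport (PF, ECPF and VFs).
 */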
static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				  misc_parameters);

	MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
		 MLX5_CAP_GEN(peer_dev, vhca_id));

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id);

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_PF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, i);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

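/* Add the two match-all miss rules (unicast and multicast dst mac) that
 * steer traffic not matched anywhere else in the FDB to the e-switch
 * manager vport.
 */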
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently supports 4 pools of 4 sizes (ESW_POOLS) and a virtual
 * memory region of 16M (ESW_SIZE); this region is duplicated for each flow
 * table pool. We can allocate up to 16M of each pool, and we keep track of
 * how much we used via put/get_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOLS is expected to be sorted from large to small.
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				    64 * 1024, 4 * 1024 };

static int
get_sz_from_pool(struct mlx5_eswitch *esw)
{
	int sz = 0, i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (esw->fdb_table.offloads.fdb_left[i]) {
			--esw->fdb_table.offloads.fdb_left[i];
			sz = ESW_POOLS[i];
			break;
		}
	}

	return sz;
}

static void
put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (sz >= ESW_POOLS[i]) {
			++esw->fdb_table.offloads.fdb_left[i];
			break;
		}
	}
}

static struct mlx5_flow_table *
create_next_size_table(struct mlx5_eswitch *esw,
		       struct mlx5_flow_namespace *ns,
		       u16 table_prio,
		       int level,
		       u32 flags)
{
	struct mlx5_flow_table *fdb;
	int sz;

	sz = get_sz_from_pool(esw);
	if (!sz)
		return ERR_PTR(-ENOSPC);

	fdb = mlx5_create_auto_grouped_flow_table(ns,
						  table_prio,
						  sz,
						  ESW_OFFLOADS_NUM_GROUPS,
						  level,
						  flags);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
			 (int)PTR_ERR(fdb), table_prio, level, sz);
		put_sz_to_pool(esw, sz);
	}

	return fdb;
}

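/* Get (and reference) the FDB table of a given chain/prio/level, creating
 * it lazily from the size pools if it doesn't exist yet. Earlier levels of
 * the same chain/prio are referenced/created too, so fs_core can connect
 * the tables correctly.
 */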
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_namespace *ns;
	int table_prio, l = 0;
	u32 flags = 0;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return esw->fdb_table.offloads.slow_fdb;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
	if (fdb) {
		/* take ref on earlier levels as well */
		while (level >= 0)
			fdb_prio_table(esw, chain, prio, level--).num_rules++;
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return fdb;
	}

	ns = mlx5_get_fdb_sub_ns(dev, chain);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB sub namespace\n");
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	table_prio = (chain * FDB_MAX_PRIO) + prio - 1;

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables
	 */
	for (l = 0; l <= level; l++) {
		if (fdb_prio_table(esw, chain, prio, l).fdb) {
			fdb_prio_table(esw, chain, prio, l).num_rules++;
			continue;
		}

		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
		if (IS_ERR(fdb)) {
			l--;
			goto err_create_fdb;
		}

		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	return fdb;

err_create_fdb:
	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	if (l >= 0)
		esw_put_prio_table(esw, chain, prio, l);

	return fdb;
}

static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	int l;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	for (l = level; l >= 0; l--) {
		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
			continue;

		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}

static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
	/* If lazy creation isn't supported, deref the fast path tables */
	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
		esw_put_prio_table(esw, 0, 1, 1);
		esw_put_prio_table(esw, 0, 1, 0);
	}
}

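/* Create the slow path FDB table and its flow groups (send-to-vport,
 * peer miss and miss), install the miss rules, and either open the fast
 * path tables immediately or defer them to lazy per-chain/prio creation,
 * depending on FW support for multiple encap-enabled FDB tables.
 */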
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	u32 *flow_group_in, max_flow_counter;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0, i;
	struct mlx5_flow_group *g;
	u32 flags = 0, fdb_max;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
		  fdb_max);

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
		esw->fdb_table.offloads.fdb_left[i] =
			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		     MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* If lazy creation isn't supported, open the fast path tables now */
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
		esw_get_prio_table(esw, 0, 1, 0);
		esw_get_prio_table(esw, 0, 1, 1);
	} else {
		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
	}

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_port);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_eswitch_owner_vhca_id);

	MLX5_SET(create_flow_group_in, flow_group_in,
		 source_eswitch_owner_vhca_id_valid, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_destroy_offloads_fast_fdb_tables(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_tables(esw);
}

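/* The offloads table (MLX5_FLOW_NAMESPACE_OFFLOADS) holds the per-vport
 * RX rules added via mlx5_eswitch_create_vport_rx_rule(), which match on
 * the source vport.
 */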
static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

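/* Add a rule to the offloads table that matches on the given source vport
 * and forwards to the caller-supplied destination.
 */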
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

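/* Switch the e-switch into offloads mode: disable SRIOV in the current
 * mode, re-enable it in offloads mode, and fall back to legacy mode
 * (reporting through extack) if that fails.
 */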
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN], rep_type;
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	mlx5_esw_for_all_reps(esw, vport, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport);
		ether_addr_copy(rep->hw_id, hw_id);

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_if[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}

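/* Representor load/unload helpers. The per-rep-type state
 * (REP_UNREGISTERED -> REP_REGISTERED -> REP_LOADED) is driven with
 * atomic cmpxchg so each rep is loaded/unloaded at most once per type.
 */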
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		rep->rep_if[rep_type].unload(rep);
}

static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				   u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
				    u8 rep_type)
{
	__unload_reps_vf_vport(esw, nvports, rep_type);

	/* Special vports must be the last to unload. */
	__unload_reps_special_vport(esw, rep_type);
}

static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, nvports, rep_type);
}

static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
				   struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	int err = 0;

	if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
			   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
		err = rep->rep_if[rep_type].load(esw->dev, rep);
		if (err)
			atomic_set(&rep->rep_if[rep_type].state,
				   REP_REGISTERED);
	}

	return err;
}

static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	err = __esw_offloads_load_rep(esw, rep, rep_type);
	if (err)
		return err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_pf;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_ecpf;
	}

	return 0;

err_ecpf:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

err_pf:
	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err, i;

	mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_vf;
	}

	return 0;

err_vf:
	__unload_reps_vf_vport(esw, --i, rep_type);
	return err;
}

static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_vf_vport(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
	return err;
}

static int __load_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
				 u8 rep_type)
{
	int err;

	/* Special vports must be loaded first. */
	err = __load_reps_special_vport(esw, rep_type);
	if (err)
		return err;

	err = __load_reps_vf_vport(esw, nvports, rep_type);
	if (err)
		goto err_vfs;

	return 0;

err_vfs:
	__unload_reps_special_vport(esw, rep_type);
	return err;
}

static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_all_vport(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, nvports, rep_type);
	return err;
}

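/* devcom pairing: when two devices share a merged e-switch, PAIR/UNPAIR
 * events are exchanged and peer miss rules are installed in both
 * directions.
 */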
#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
	mlx5e_tc_clean_fdb_peer_flows(esw);
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_eswitch *peer_esw = event_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_out;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);

err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}

static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

18486737
EB
1577static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
1578 struct mlx5_vport *vport)
1579{
1580 struct mlx5_core_dev *dev = esw->dev;
1581 struct mlx5_flow_act flow_act = {0};
1582 struct mlx5_flow_spec *spec;
1583 int err = 0;
1584
1585 /* For prio tag mode, there is only 1 FTEs:
1586 * 1) Untagged packets - push prio tag VLAN, allow
1587 * Unmatched traffic is allowed by default
1588 */
1589
1590 if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
1591 return -EOPNOTSUPP;
1592
1593 esw_vport_cleanup_ingress_rules(esw, vport);
1594
1595 err = esw_vport_enable_ingress_acl(esw, vport);
1596 if (err) {
1597 mlx5_core_warn(esw->dev,
1598 "failed to enable prio tag ingress acl (%d) on vport[%d]\n",
1599 err, vport->vport);
1600 return err;
1601 }
1602
1603 esw_debug(esw->dev,
1604 "vport[%d] configure ingress rules\n", vport->vport);
1605
1606 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1607 if (!spec) {
1608 err = -ENOMEM;
1609 goto out_no_mem;
1610 }
1611
1612 /* Untagged packets - push prio tag VLAN, allow */
1613 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1614 MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
1615 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1616 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1617 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1618 flow_act.vlan[0].ethtype = ETH_P_8021Q;
1619 flow_act.vlan[0].vid = 0;
1620 flow_act.vlan[0].prio = 0;
1621 vport->ingress.allow_rule =
1622 mlx5_add_flow_rules(vport->ingress.acl, spec,
1623 &flow_act, NULL, 0);
1624 if (IS_ERR(vport->ingress.allow_rule)) {
1625 err = PTR_ERR(vport->ingress.allow_rule);
1626 esw_warn(esw->dev,
1627 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
1628 vport->vport, err);
1629 vport->ingress.allow_rule = NULL;
1630 goto out;
1631 }
1632
1633out:
1634 kvfree(spec);
1635out_no_mem:
1636 if (err)
1637 esw_vport_cleanup_ingress_rules(esw, vport);
1638 return err;
1639}
1640
1641static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
1642 struct mlx5_vport *vport)
1643{
1644 struct mlx5_flow_act flow_act = {0};
1645 struct mlx5_flow_spec *spec;
1646 int err = 0;
1647
1648 /* For prio tag mode, there is only 1 FTEs:
1649 * 1) prio tag packets - pop the prio tag VLAN, allow
1650 * Unmatched traffic is allowed by default
1651 */
1652
1653 esw_vport_cleanup_egress_rules(esw, vport);
1654
1655 err = esw_vport_enable_egress_acl(esw, vport);
1656 if (err) {
1657 mlx5_core_warn(esw->dev,
1658 "failed to enable egress acl (%d) on vport[%d]\n",
1659 err, vport->vport);
1660 return err;
1661 }
1662
1663 esw_debug(esw->dev,
1664 "vport[%d] configure prio tag egress rules\n", vport->vport);
1665
1666 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1667 if (!spec) {
1668 err = -ENOMEM;
1669 goto out_no_mem;
1670 }
1671
1672 /* prio tag vlan rule - pop it so VF receives untagged packets */
1673 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
1674 MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
1675 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
1676 MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);
1677
1678 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
1679 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1680 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
1681 vport->egress.allowed_vlan =
1682 mlx5_add_flow_rules(vport->egress.acl, spec,
1683 &flow_act, NULL, 0);
1684 if (IS_ERR(vport->egress.allowed_vlan)) {
1685 err = PTR_ERR(vport->egress.allowed_vlan);
1686 esw_warn(esw->dev,
1687 "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
1688 vport->vport, err);
1689 vport->egress.allowed_vlan = NULL;
1690 goto out;
1691 }
1692
1693out:
1694 kvfree(spec);
1695out_no_mem:
1696 if (err)
1697 esw_vport_cleanup_egress_rules(esw, vport);
1698 return err;
1699}
1700
1701static int esw_prio_tag_acls_config(struct mlx5_eswitch *esw, int nvports)
1702{
786ef904 1703 struct mlx5_vport *vport = NULL;
18486737
EB
1704 int i, j;
1705 int err;
1706
786ef904
PP
1707 mlx5_esw_for_each_vf_vport(esw, i, vport, nvports) {
1708 err = esw_vport_ingress_prio_tag_config(esw, vport);
18486737
EB
1709 if (err)
1710 goto err_ingress;
786ef904 1711 err = esw_vport_egress_prio_tag_config(esw, vport);
18486737
EB
1712 if (err)
1713 goto err_egress;
1714 }
1715
1716 return 0;
1717
1718err_egress:
786ef904 1719 esw_vport_disable_ingress_acl(esw, vport);
18486737 1720err_ingress:
786ef904
PP
1721 mlx5_esw_for_each_vf_vport_reverse(esw, j, vport, i - 1) {
1722 esw_vport_disable_egress_acl(esw, vport);
1723 esw_vport_disable_ingress_acl(esw, vport);
18486737
EB
1724 }
1725
1726 return err;
1727}
1728
1729static void esw_prio_tag_acls_cleanup(struct mlx5_eswitch *esw)
1730{
786ef904 1731 struct mlx5_vport *vport;
18486737
EB
1732 int i;
1733
786ef904
PP
1734 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->nvports) {
1735 esw_vport_disable_egress_acl(esw, vport);
1736 esw_vport_disable_ingress_acl(esw, vport);
18486737
EB
1737 }
1738}
1739
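/* Create the steering objects needed for offloads (switchdev) mode: the
 * prio-tag ACLs when the prio_tag_required capability is set, the offloads
 * FDB tables, the offloads flow table and the per-vport RX flow group.
 * Objects created so far are destroyed on error.
 */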
eca8cc38 1740static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
6ed1803a
MB
1741{
1742 int err;
1743
5c1d260e 1744 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
e52c2802
PB
1745 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
1746
18486737
EB
1747 if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
1748 err = esw_prio_tag_acls_config(esw, nvports);
1749 if (err)
1750 return err;
1751 }
1752
1967ce6e 1753 err = esw_create_offloads_fdb_tables(esw, nvports);
c930a3ad 1754 if (err)
c5447c70 1755 return err;
c930a3ad 1756
cd7e4186 1757 err = esw_create_offloads_table(esw, nvports);
c930a3ad
OG
1758 if (err)
1759 goto create_ft_err;
1760
cd7e4186 1761 err = esw_create_vport_rx_group(esw, nvports);
c930a3ad
OG
1762 if (err)
1763 goto create_fg_err;
1764
1765 return 0;
1766
1767create_fg_err:
1768 esw_destroy_offloads_table(esw);
1769
1770create_ft_err:
1967ce6e 1771 esw_destroy_offloads_fdb_tables(esw);
5bae8c03 1772
c930a3ad
OG
1773 return err;
1774}
1775
eca8cc38
BW
1776static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
1777{
1778 esw_destroy_vport_rx_group(esw);
1779 esw_destroy_offloads_table(esw);
1780 esw_destroy_offloads_fdb_tables(esw);
18486737
EB
1781 if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
1782 esw_prio_tag_acls_cleanup(esw);
eca8cc38
BW
1783}
1784
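/* Work handler for HOST_PARAMS_CHANGE events: re-query the number of host
 * VFs and load or unload the VF representors accordingly. As noted below,
 * the count is only expected to change between zero and a non-zero value.
 */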
a3888f33
BW
1785static void esw_host_params_event_handler(struct work_struct *work)
1786{
1787 struct mlx5_host_work *host_work;
1788 struct mlx5_eswitch *esw;
1789 int err, num_vf = 0;
1790
1791 host_work = container_of(work, struct mlx5_host_work, work);
1792 esw = host_work->esw;
1793
1794 err = mlx5_query_host_params_num_vfs(esw->dev, &num_vf);
1795 if (err || num_vf == esw->host_info.num_vfs)
1796 goto out;
1797
1798 /* Number of VFs can only change from "0 to x" or "x to 0". */
1799 if (esw->host_info.num_vfs > 0) {
1800 esw_offloads_unload_vf_reps(esw, esw->host_info.num_vfs);
1801 } else {
1802 err = esw_offloads_load_vf_reps(esw, num_vf);
1803
1804 if (err)
1805 goto out;
1806 }
1807
1808 esw->host_info.num_vfs = num_vf;
1809
1810out:
1811 kfree(host_work);
1812}
1813
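/* Notifier callback for HOST_PARAMS_CHANGE: allocates with GFP_ATOMIC since
 * it may be called from atomic context, and defers the actual handling to
 * the eswitch work queue.
 */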
1814static int esw_host_params_event(struct notifier_block *nb,
1815 unsigned long type, void *data)
1816{
1817 struct mlx5_host_work *host_work;
1818 struct mlx5_host_info *host_info;
1819 struct mlx5_eswitch *esw;
1820
1821 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
1822 if (!host_work)
1823 return NOTIFY_DONE;
1824
1825 host_info = mlx5_nb_cof(nb, struct mlx5_host_info, nb);
1826 esw = container_of(host_info, struct mlx5_eswitch, host_info);
1827
1828 host_work->esw = esw;
1829
1830 INIT_WORK(&host_work->work, esw_host_params_event_handler);
1831 queue_work(esw->work_queue, &host_work->work);
1832
1833 return NOTIFY_OK;
1834}
1835
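/* Entry point for offloads (switchdev) mode: initialize the steering
 * objects, load the representors, initialize devcom state, register for
 * HOST_PARAMS_CHANGE events when the device is an ECPF eswitch manager,
 * and enable RoCE for the device.
 */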
c9b99abc
BW
1836int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
1837 int total_nvports)
eca8cc38
BW
1838{
1839 int err;
1840
c9b99abc 1841 err = esw_offloads_steering_init(esw, total_nvports);
eca8cc38
BW
1842 if (err)
1843 return err;
1844
29d9fd7d 1845 err = esw_offloads_load_all_reps(esw, vf_nvports);
eca8cc38
BW
1846 if (err)
1847 goto err_reps;
1848
1849 esw_offloads_devcom_init(esw);
a3888f33
BW
1850
1851 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1852 MLX5_NB_INIT(&esw->host_info.nb, esw_host_params_event,
1853 HOST_PARAMS_CHANGE);
1854 mlx5_eq_notifier_register(esw->dev, &esw->host_info.nb);
1855 esw->host_info.num_vfs = vf_nvports;
1856 }
1857
80f09dfc
MG
1858 mlx5_rdma_enable_roce(esw->dev);
1859
eca8cc38
BW
1860 return 0;
1861
1862err_reps:
1863 esw_offloads_steering_cleanup(esw);
1864 return err;
1865}
1866
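/* Switch the eswitch back to legacy SR-IOV mode. If re-enabling in legacy
 * mode fails, try to fall back to offloads mode so the eswitch is not left
 * disabled.
 */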
db7ff19e
EB
1867static int esw_offloads_stop(struct mlx5_eswitch *esw,
1868 struct netlink_ext_ack *extack)
c930a3ad 1869{
6c419ba8 1870 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
c930a3ad
OG
1871
1872 mlx5_eswitch_disable_sriov(esw);
1873 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
6c419ba8 1874 if (err) {
8c98ee77 1875 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
6c419ba8 1876 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
8c98ee77
EB
1877 if (err1) {
1878 NL_SET_ERR_MSG_MOD(extack,
1879 "Failed setting eswitch back to offloads");
1880 }
6c419ba8 1881 }
c930a3ad
OG
1882
1883 return err;
1884}
1885
c9b99abc 1886void esw_offloads_cleanup(struct mlx5_eswitch *esw)
c930a3ad 1887{
a3888f33
BW
1888 u16 num_vfs;
1889
1890 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1891 mlx5_eq_notifier_unregister(esw->dev, &esw->host_info.nb);
1892 flush_workqueue(esw->work_queue);
1893 num_vfs = esw->host_info.num_vfs;
1894 } else {
1895 num_vfs = esw->dev->priv.sriov.num_vfs;
1896 }
c9b99abc 1897
80f09dfc 1898 mlx5_rdma_disable_roce(esw->dev);
ac004b83 1899 esw_offloads_devcom_cleanup(esw);
29d9fd7d 1900 esw_offloads_unload_all_reps(esw, num_vfs);
eca8cc38 1901 esw_offloads_steering_cleanup(esw);
c930a3ad
OG
1902}
1903
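/* Translate the devlink eswitch mode values to the internal
 * SRIOV_LEGACY/SRIOV_OFFLOADS modes, and back (below).
 */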
ef78618b 1904static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
c930a3ad
OG
1905{
1906 switch (mode) {
1907 case DEVLINK_ESWITCH_MODE_LEGACY:
1908 *mlx5_mode = SRIOV_LEGACY;
1909 break;
1910 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
1911 *mlx5_mode = SRIOV_OFFLOADS;
1912 break;
1913 default:
1914 return -EINVAL;
1915 }
1916
1917 return 0;
1918}
1919
ef78618b
OG
1920static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
1921{
1922 switch (mlx5_mode) {
1923 case SRIOV_LEGACY:
1924 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
1925 break;
1926 case SRIOV_OFFLOADS:
1927 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
1928 break;
1929 default:
1930 return -EINVAL;
1931 }
1932
1933 return 0;
1934}
1935
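/* Same translation for the min-inline mode: the devlink inline-mode values
 * map onto the MLX5_INLINE_MODE_* constants programmed into the NIC vport
 * context.
 */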
bffaa916
RD
1936static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
1937{
1938 switch (mode) {
1939 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
1940 *mlx5_mode = MLX5_INLINE_MODE_NONE;
1941 break;
1942 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
1943 *mlx5_mode = MLX5_INLINE_MODE_L2;
1944 break;
1945 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
1946 *mlx5_mode = MLX5_INLINE_MODE_IP;
1947 break;
1948 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
1949 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
1950 break;
1951 default:
1952 return -EINVAL;
1953 }
1954
1955 return 0;
1956}
1957
1958static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
1959{
1960 switch (mlx5_mode) {
1961 case MLX5_INLINE_MODE_NONE:
1962 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
1963 break;
1964 case MLX5_INLINE_MODE_L2:
1965 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
1966 break;
1967 case MLX5_INLINE_MODE_IP:
1968 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
1969 break;
1970 case MLX5_INLINE_MODE_TCP_UDP:
1971 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
1972 break;
1973 default:
1974 return -EINVAL;
1975 }
1976
1977 return 0;
1978}
1979
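/* Common validation for the devlink eswitch callbacks: the port must be
 * Ethernet, the function must be an eswitch manager, and the eswitch must
 * already be enabled unless the device is an ECPF eswitch manager.
 */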
9d1cef19 1980static int mlx5_devlink_eswitch_check(struct devlink *devlink)
feae9087 1981{
9d1cef19 1982 struct mlx5_core_dev *dev = devlink_priv(devlink);
c930a3ad 1983
9d1cef19
OG
1984 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1985 return -EOPNOTSUPP;
c930a3ad 1986
733d3e54
OG
1987	if (!MLX5_ESWITCH_MANAGER(dev))
1988 return -EPERM;
c930a3ad 1989
c96692fb
BW
1990 if (dev->priv.eswitch->mode == SRIOV_NONE &&
1991 !mlx5_core_is_ecpf_esw_manager(dev))
c930a3ad
OG
1992 return -EOPNOTSUPP;
1993
9d1cef19
OG
1994 return 0;
1995}
1996
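/* devlink callback for switching between legacy and switchdev mode.
 * Typically driven from userspace by something like (PCI address is only
 * illustrative):
 *
 *	devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 */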
db7ff19e
EB
1997int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
1998 struct netlink_ext_ack *extack)
9d1cef19
OG
1999{
2000 struct mlx5_core_dev *dev = devlink_priv(devlink);
2001 u16 cur_mlx5_mode, mlx5_mode = 0;
2002 int err;
2003
2004 err = mlx5_devlink_eswitch_check(devlink);
2005 if (err)
2006 return err;
2007
2008 cur_mlx5_mode = dev->priv.eswitch->mode;
2009
ef78618b 2010 if (esw_mode_from_devlink(mode, &mlx5_mode))
c930a3ad
OG
2011 return -EINVAL;
2012
2013 if (cur_mlx5_mode == mlx5_mode)
2014 return 0;
2015
2016 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
db7ff19e 2017 return esw_offloads_start(dev->priv.eswitch, extack);
c930a3ad 2018 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
db7ff19e 2019 return esw_offloads_stop(dev->priv.eswitch, extack);
c930a3ad
OG
2020 else
2021 return -EINVAL;
feae9087
OG
2022}
2023
2024int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
2025{
9d1cef19
OG
2026 struct mlx5_core_dev *dev = devlink_priv(devlink);
2027 int err;
c930a3ad 2028
9d1cef19
OG
2029 err = mlx5_devlink_eswitch_check(devlink);
2030 if (err)
2031 return err;
c930a3ad 2032
ef78618b 2033 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
feae9087 2034}
127ea380 2035
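/* devlink callback for the eswitch min-inline mode, e.g. (illustrative
 * PCI address):
 *
 *	devlink dev eswitch set pci/0000:03:00.0 inline-mode transport
 *
 * The change is refused while offloaded flows exist and is only applied
 * when the device takes the inline mode from the vport context.
 */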
db7ff19e
EB
2036int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
2037 struct netlink_ext_ack *extack)
bffaa916
RD
2038{
2039 struct mlx5_core_dev *dev = devlink_priv(devlink);
2040 struct mlx5_eswitch *esw = dev->priv.eswitch;
c415f704 2041 int err, vport;
bffaa916
RD
2042 u8 mlx5_mode;
2043
9d1cef19
OG
2044 err = mlx5_devlink_eswitch_check(devlink);
2045 if (err)
2046 return err;
bffaa916 2047
c415f704
OG
2048 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2049 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2050 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
2051 return 0;
2052 /* fall through */
2053 case MLX5_CAP_INLINE_MODE_L2:
8c98ee77 2054 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
bffaa916 2055 return -EOPNOTSUPP;
c415f704
OG
2056 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2057 break;
2058 }
bffaa916 2059
375f51e2 2060 if (esw->offloads.num_flows > 0) {
8c98ee77
EB
2061 NL_SET_ERR_MSG_MOD(extack,
2062 "Can't set inline mode when flows are configured");
375f51e2
RD
2063 return -EOPNOTSUPP;
2064 }
2065
bffaa916
RD
2066 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
2067 if (err)
2068 goto out;
2069
9d1cef19 2070 for (vport = 1; vport < esw->enabled_vports; vport++) {
bffaa916
RD
2071 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
2072 if (err) {
8c98ee77
EB
2073 NL_SET_ERR_MSG_MOD(extack,
2074 "Failed to set min inline on vport");
bffaa916
RD
2075 goto revert_inline_mode;
2076 }
2077 }
2078
2079 esw->offloads.inline_mode = mlx5_mode;
2080 return 0;
2081
2082revert_inline_mode:
2083 while (--vport > 0)
2084 mlx5_modify_nic_vport_min_inline(dev,
2085 vport,
2086 esw->offloads.inline_mode);
2087out:
2088 return err;
2089}
2090
2091int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
2092{
2093 struct mlx5_core_dev *dev = devlink_priv(devlink);
2094 struct mlx5_eswitch *esw = dev->priv.eswitch;
9d1cef19 2095 int err;
bffaa916 2096
9d1cef19
OG
2097 err = mlx5_devlink_eswitch_check(devlink);
2098 if (err)
2099 return err;
bffaa916 2100
bffaa916
RD
2101 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
2102}
2103
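/* Report the effective min-inline mode: taken from the device capability
 * when it is fixed, otherwise queried per VF vport and required to be
 * identical across all of them.
 */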
2104int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
2105{
c415f704 2106 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
bffaa916
RD
2107 struct mlx5_core_dev *dev = esw->dev;
2108 int vport;
bffaa916
RD
2109
2110 if (!MLX5_CAP_GEN(dev, vport_group_manager))
2111 return -EOPNOTSUPP;
2112
2113 if (esw->mode == SRIOV_NONE)
2114 return -EOPNOTSUPP;
2115
c415f704
OG
2116 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2117 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2118 mlx5_mode = MLX5_INLINE_MODE_NONE;
2119 goto out;
2120 case MLX5_CAP_INLINE_MODE_L2:
2121 mlx5_mode = MLX5_INLINE_MODE_L2;
2122 goto out;
2123 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2124 goto query_vports;
2125 }
bffaa916 2126
c415f704 2127query_vports:
bffaa916
RD
2128 for (vport = 1; vport <= nvfs; vport++) {
2129 mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
2130 if (vport > 1 && prev_mlx5_mode != mlx5_mode)
2131 return -EINVAL;
2132 prev_mlx5_mode = mlx5_mode;
2133 }
2134
c415f704 2135out:
bffaa916
RD
2136 *mode = mlx5_mode;
2137 return 0;
2138}
2139
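/* devlink callback for basic encap offload, e.g. (illustrative command):
 *
 *	devlink dev eswitch set pci/0000:03:00.0 encap enable
 *
 * In legacy mode the setting is only recorded; in switchdev mode the
 * fast-path FDB tables are re-created so it takes effect, and the change
 * is refused while offloaded flows exist.
 */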
db7ff19e
EB
2140int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
2141 struct netlink_ext_ack *extack)
7768d197
RD
2142{
2143 struct mlx5_core_dev *dev = devlink_priv(devlink);
2144 struct mlx5_eswitch *esw = dev->priv.eswitch;
2145 int err;
2146
9d1cef19
OG
2147 err = mlx5_devlink_eswitch_check(devlink);
2148 if (err)
2149 return err;
7768d197
RD
2150
2151 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
60786f09 2152 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
7768d197
RD
2153 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
2154 return -EOPNOTSUPP;
2155
2156 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
2157 return -EOPNOTSUPP;
2158
2159 if (esw->mode == SRIOV_LEGACY) {
2160 esw->offloads.encap = encap;
2161 return 0;
2162 }
2163
2164 if (esw->offloads.encap == encap)
2165 return 0;
2166
2167 if (esw->offloads.num_flows > 0) {
8c98ee77
EB
2168 NL_SET_ERR_MSG_MOD(extack,
2169 "Can't set encapsulation when flows are configured");
7768d197
RD
2170 return -EOPNOTSUPP;
2171 }
2172
e52c2802 2173 esw_destroy_offloads_fdb_tables(esw);
7768d197
RD
2174
2175 esw->offloads.encap = encap;
e52c2802
PB
2176
2177 err = esw_create_offloads_fdb_tables(esw, esw->nvports);
2178
7768d197 2179 if (err) {
8c98ee77
EB
2180 NL_SET_ERR_MSG_MOD(extack,
2181 "Failed re-creating fast FDB table");
7768d197 2182 esw->offloads.encap = !encap;
e52c2802 2183 (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
7768d197 2184 }
e52c2802 2185
7768d197
RD
2186 return err;
2187}
2188
2189int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
2190{
2191 struct mlx5_core_dev *dev = devlink_priv(devlink);
2192 struct mlx5_eswitch *esw = dev->priv.eswitch;
9d1cef19 2193 int err;
7768d197 2194
9d1cef19
OG
2195 err = mlx5_devlink_eswitch_check(devlink);
2196 if (err)
2197 return err;
7768d197
RD
2198
2199 *encap = esw->offloads.encap;
2200 return 0;
2201}
2202
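/* Representor registration API, exported for the representor drivers:
 * copy the load/unload/get_proto_dev callbacks into every rep of the given
 * type and mark them REP_REGISTERED. A caller would do something along
 * these lines (illustrative only; my_rep_* are hypothetical callbacks,
 * rep_type is one of the rep type constants such as REP_ETH):
 *
 *	struct mlx5_eswitch_rep_if rep_if = {
 *		.load		= my_rep_load,
 *		.unload		= my_rep_unload,
 *		.get_proto_dev	= my_rep_get_proto_dev,
 *	};
 *	mlx5_eswitch_register_vport_reps(esw, &rep_if, rep_type);
 */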
f8e8fa02
BW
2203void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
2204 struct mlx5_eswitch_rep_if *__rep_if,
2205 u8 rep_type)
127ea380 2206{
a4b97ab4 2207 struct mlx5_eswitch_rep_if *rep_if;
f8e8fa02
BW
2208 struct mlx5_eswitch_rep *rep;
2209 int i;
9deb2241 2210
f8e8fa02
BW
2211 mlx5_esw_for_all_reps(esw, i, rep) {
2212 rep_if = &rep->rep_if[rep_type];
2213 rep_if->load = __rep_if->load;
2214 rep_if->unload = __rep_if->unload;
2215 rep_if->get_proto_dev = __rep_if->get_proto_dev;
2216 rep_if->priv = __rep_if->priv;
127ea380 2217
6f4e0219 2218 atomic_set(&rep_if->state, REP_REGISTERED);
f8e8fa02 2219 }
127ea380 2220}
f8e8fa02 2221EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
127ea380 2222
f8e8fa02 2223void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
127ea380 2224{
f8e8fa02 2225 u16 max_vf = mlx5_core_max_vfs(esw->dev);
cb67b832 2226 struct mlx5_eswitch_rep *rep;
f8e8fa02 2227 int i;
cb67b832 2228
f8e8fa02
BW
2229 if (esw->mode == SRIOV_OFFLOADS)
2230 __unload_reps_all_vport(esw, max_vf, rep_type);
127ea380 2231
f8e8fa02 2232 mlx5_esw_for_all_reps(esw, i, rep)
6f4e0219 2233 atomic_set(&rep->rep_if[rep_type].state, REP_UNREGISTERED);
127ea380 2234}
f8e8fa02 2235EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
726293f1 2236
a4b97ab4 2237void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
726293f1 2238{
726293f1
HHZ
2239 struct mlx5_eswitch_rep *rep;
2240
879c8f84 2241 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
a4b97ab4 2242 return rep->rep_if[rep_type].priv;
726293f1 2243}
22215908
MB
2244
2245void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
2246 int vport,
2247 u8 rep_type)
2248{
22215908
MB
2249 struct mlx5_eswitch_rep *rep;
2250
879c8f84 2251 rep = mlx5_eswitch_get_rep(esw, vport);
22215908 2252
6f4e0219 2253 if (atomic_read(&rep->rep_if[rep_type].state) == REP_LOADED &&
22215908
MB
2254 rep->rep_if[rep_type].get_proto_dev)
2255 return rep->rep_if[rep_type].get_proto_dev(rep);
2256 return NULL;
2257}
57cbd893 2258EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
22215908
MB
2259
2260void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
2261{
879c8f84 2262 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
22215908 2263}
57cbd893
MB
2264EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
2265
2266struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
2267 int vport)
2268{
879c8f84 2269 return mlx5_eswitch_get_rep(esw, vport);
57cbd893
MB
2270}
2271EXPORT_SYMBOL(mlx5_eswitch_vport_rep);