net/mlx5: Relocate vport macros to the vport header file
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);

bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
{
	return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
}

u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_CHAIN;

	return 0;
}

u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_PRIO;

	return 1;
}

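/* Add an offloaded flow rule on behalf of a representor. The rule is
 * built from the flow spec plus the e-switch flow attributes: forward
 * destinations (vports or a goto-chain table), an optional flow counter,
 * emulated vlan push/pop, and a source-port (plus, on merged-eswitch
 * setups, source owner vhca_id) match. The destination FDB table is
 * taken with esw_get_prio_table() and released again on any failure.
 */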
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;
	void *misc;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		if (attr->dest_chain) {
			struct mlx5_flow_table *ft;

			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.reformat_id = attr->dests[j].encap_id;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.reformat_id =
						attr->dests[j].encap_id;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
	if (attr->dest_chain)
		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

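/* Add the forward half of a split rule: in the level-0 table, forward
 * to the first attr->split_count vport destinations and then to the
 * level-1 table of the same chain/prio, where the second half of the
 * rule is expected to be installed.
 */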
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i;

	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.reformat_id = attr->dests[i].encap_id;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	esw->offloads.num_flows++;

	return rule;
add_err:
	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

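/* Common teardown for both rule flavors: delete the hardware rule,
 * decrement the offloaded-flows counter and release the prio table
 * references taken when the rule was added.
 */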
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);

	mlx5_del_flow_rules(rule);
	esw->offloads.num_flows--;

	if (fwd_rule) {
		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
	} else {
		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
		if (attr->dest_chain)
			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->rep_if[REP_ETH].valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

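/* Emulate per-flow vlan push/pop through per-vport vlan configuration:
 * the first vlan rule applies a global vlan-strip policy to all VF
 * vports, and vlan push is realized by a vport vlan insert on the
 * sending VF. Refcounts let the emulation be shared between flows.
 */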
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		 !attr->dest_chain);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

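/* Install a slow-path rule steering traffic of a given SQ (matched by
 * source_sqn with source vport 0) to the given vport, so packets sent
 * on a representor's send queues reach the VF it represents.
 */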
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

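/* Peer miss rules (merged e-switch): traffic arriving from a peer
 * e-switch vport that misses here is matched by {source port, source
 * owner vhca_id} and forwarded to vport 0 (the PF) of the peer device,
 * one rule per peer vport, so missed traffic reaches the peer's host.
 */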
static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				  misc_parameters);

	MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
		 MLX5_CAP_GEN(peer_dev, vhca_id));

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id);

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = 0;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	for (i = 1; i < nvports; i++) {
		MLX5_SET(fte_match_set_misc, misc, source_port, i);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
			goto add_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_flow_err:
	for (i--; i > 0; i--)
		mlx5_del_flow_rules(flows[i]);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	for (i = 1; i < esw->total_vports; i++)
		mlx5_del_flow_rules(flows[i]);

	kvfree(flows);
}

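/* Slow-path miss rules: anything not matching an offloaded rule is
 * forwarded to vport 0 (the PF). Two rules are used, one for unicast
 * and one for multicast dmac (the multicast bit is the only match).
 */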
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently supports 4 pools of 4 sizes (ESW_POOLS), and a
 * virtual memory region of 16M (ESW_SIZE), which is duplicated for each
 * flow table pool. We can allocate up to 16M of each pool, and we keep
 * track of how much we use via get_sz_from_pool/put_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOLS is expected to be sorted from large to small.
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				    64 * 1024, 4 * 1024 };

static int
get_sz_from_pool(struct mlx5_eswitch *esw)
{
	int sz = 0, i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (esw->fdb_table.offloads.fdb_left[i]) {
			--esw->fdb_table.offloads.fdb_left[i];
			sz = ESW_POOLS[i];
			break;
		}
	}

	return sz;
}

static void
put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (sz >= ESW_POOLS[i]) {
			++esw->fdb_table.offloads.fdb_left[i];
			break;
		}
	}
}

static struct mlx5_flow_table *
create_next_size_table(struct mlx5_eswitch *esw,
		       struct mlx5_flow_namespace *ns,
		       u16 table_prio,
		       int level,
		       u32 flags)
{
	struct mlx5_flow_table *fdb;
	int sz;

	sz = get_sz_from_pool(esw);
	if (!sz)
		return ERR_PTR(-ENOSPC);

	fdb = mlx5_create_auto_grouped_flow_table(ns,
						  table_prio,
						  sz,
						  ESW_OFFLOADS_NUM_GROUPS,
						  level,
						  flags);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
			 (int)PTR_ERR(fdb), table_prio, level, sz);
		put_sz_to_pool(esw, sz);
	}

	return fdb;
}

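/* Look up (or lazily create) the FDB table for a given chain/prio/level
 * under fdb_prio_lock. Tables are refcounted per rule; when a level is
 * created, all lower levels of the same chain/prio are created as well
 * so that fs_core can connect the tables in order.
 */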
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_namespace *ns;
	int table_prio, l = 0;
	u32 flags = 0;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return esw->fdb_table.offloads.slow_fdb;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
	if (fdb) {
		/* take ref on earlier levels as well */
		while (level >= 0)
			fdb_prio_table(esw, chain, prio, level--).num_rules++;
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return fdb;
	}

	ns = mlx5_get_fdb_sub_ns(dev, chain);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB sub namespace\n");
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	table_prio = (chain * FDB_MAX_PRIO) + prio - 1;

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables
	 */
	for (l = 0; l <= level; l++) {
		if (fdb_prio_table(esw, chain, prio, l).fdb) {
			fdb_prio_table(esw, chain, prio, l).num_rules++;
			continue;
		}

		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
		if (IS_ERR(fdb)) {
			l--;
			goto err_create_fdb;
		}

		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	return fdb;

err_create_fdb:
	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	if (l >= 0)
		esw_put_prio_table(esw, chain, prio, l);

	return fdb;
}

static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	int l;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	for (l = level; l >= 0; l--) {
		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
			continue;

		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}

static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
	/* If lazy creation isn't supported, deref the fast path tables */
	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
		esw_put_prio_table(esw, 0, 1, 1);
		esw_put_prio_table(esw, 0, 1, 0);
	}
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

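/* Create the slow-path FDB table and its flow groups: a send-to-vport
 * group keyed on source SQN and port, a peer-eswitch miss group keyed
 * on {source port, owner vhca_id}, and the unicast/multicast miss
 * group, then install the miss rules. When the firmware can't create
 * encap-enabled tables lazily, the fast-path tables are opened here too.
 */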
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	u32 *flow_group_in, max_flow_counter;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0, i;
	struct mlx5_flow_group *g;
	u32 flags = 0, fdb_max;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
		  fdb_max);

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
		esw->fdb_table.offloads.fdb_left[i] =
			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2 +
		esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* If lazy creation isn't supported, open the fast path tables now */
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
		esw_get_prio_table(esw, 0, 1, 0);
		esw_get_prio_table(esw, 0, 1, 1);
	} else {
		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
	}

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_port);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_eswitch_owner_vhca_id);

	MLX5_SET(create_flow_group_in, flow_group_in,
		 source_eswitch_owner_vhca_id_valid, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_destroy_offloads_fast_fdb_tables(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_tables(esw);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

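/* The offloads table classifies packets headed to the host by the
 * source vport they arrived from; the single rx group spans all
 * vports, and per-vport rx rules forward to whatever destination the
 * representor passes in (see mlx5_eswitch_create_vport_rx_rule below).
 */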
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_esw_offload *offloads;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN];
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vfs,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	offloads = &esw->offloads;
	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	for (vport = 0; vport < total_vfs; vport++) {
		rep = &offloads->vport_reps[vport];

		rep->vport = vport;
		ether_addr_copy(rep->hw_id, hw_id);
	}

	offloads->vport_reps[0].vport = MLX5_VPORT_UPLINK;

	return 0;
}

static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
					  u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = nvports - 1; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		rep->rep_if[rep_type].unload(rep);
	}
}

static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
}

static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
				       u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		err = rep->rep_if[rep_type].load(esw->dev, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	esw_offloads_unload_reps_type(esw, vport, rep_type);
	return err;
}

static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
	return err;
}

#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
	mlx5e_tc_clean_fdb_peer_flows(esw);
	esw_del_fdb_peer_miss_rules(esw);
}

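/* devcom pairing: when two e-switches on a merged-eswitch device see a
 * PAIR event, the peer miss rules are installed in both directions and
 * the pair is marked; an UNPAIR event tears both sides down again.
 */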
static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_eswitch *peer_esw = event_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_out;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);

err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}

static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_offloads_load_reps(esw, nvports);
	if (err)
		goto err_reps;

	esw_offloads_devcom_init(esw);
	return 0;

err_reps:
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	esw_offloads_devcom_cleanup(esw);
	esw_offloads_unload_reps(esw, nvports);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return 0;
}

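/* devlink entry point for switching the e-switch between legacy and
 * switchdev modes. From userspace this is driven by the devlink tool,
 * e.g. (the PCI address below is only an illustration):
 *
 *   devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 *   devlink dev eswitch show pci/0000:03:00.0
 */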
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

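/* Toggling encap support requires recreating the FDB tables, since the
 * slow path table is created with (or without) reformat/decap enabled;
 * the change is therefore refused while offloaded flows exist.
 */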
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep_if *__rep_if,
				     u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep_if *rep_if;

	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];

	rep_if->load = __rep_if->load;
	rep_if->unload = __rep_if->unload;
	rep_if->get_proto_dev = __rep_if->get_proto_dev;
	rep_if->priv = __rep_if->priv;

	rep_if->valid = true;
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index, u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->rep_if[rep_type].unload(rep);

	rep->rep_if[rep_type].valid = false;
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->rep_if[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 int vport,
				 u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	if (vport == MLX5_VPORT_UPLINK)
		vport = UPLINK_REP_INDEX;

	rep = &offloads->vport_reps[vport];

	if (rep->rep_if[rep_type].valid &&
	    rep->rep_if[rep_type].get_proto_dev)
		return rep->rep_if[rep_type].get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						int vport)
{
	return &esw->offloads.vport_reps[vport];
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);