net/mlx5e: Split offloaded eswitch TC rules for port mirroring
linux-2.6-block.git: drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

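/* The FDB namespace is split into two priorities: FDB_FAST_PATH holds the
 * auto-grouped table(s) used for offloaded flows, while FDB_SLOW_PATH holds
 * the table with the send-to-vport rules and the miss rules created below.
 */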
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_table *ft = NULL;
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	int j, i = 0;
	void *misc;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	if (attr->mirror_count)
		ft = esw->fdb_table.offloads.fwd_fdb;
	else
		ft = esw->fdb_table.offloads.fast_fdb;

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan.ethtype = ntohs(attr->vlan_proto);
		flow_act.vlan.vid = attr->vlan_vid;
		flow_act.vlan.prio = attr->vlan_prio;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		for (j = attr->mirror_count; j < attr->out_count; j++) {
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
			dest[i].vport.num = attr->out_rep[j]->vport;
			dest[i].vport.vhca_id =
				MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
			dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
			i++;
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter)) {
			rule = ERR_CAST(counter);
			goto err_counter_alloc;
		}
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		flow_act.encap_id = attr->encap_id;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	mlx5_fc_destroy(esw->dev, counter);
err_counter_alloc:
	return rule;
}

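/* For port mirroring, an offloaded rule is split across the two fast path
 * tables: mlx5_eswitch_add_fwd_rule() installs a rule in the fast FDB that
 * forwards to the mirror destinations (out_rep[0..mirror_count-1]) and then
 * chains to the fwd FDB, where the rule built by
 * mlx5_eswitch_add_offloaded_rule() forwards to the remaining destinations
 * and attaches the flow counter.
 */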
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *rule;
	void *misc;
	int i;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->mirror_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->out_rep[i]->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
		dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = esw->fdb_table.offloads.fwd_fdb,
	i++;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i);

	if (!IS_ERR(rule))
		esw->offloads.num_flows++;

	return rule;
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);
	mlx5_del_flow_rules(rule);
	mlx5_fc_destroy(esw->dev, counter);
	esw->offloads.num_flows--;
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->rep_if[REP_ETH].valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep[0];

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

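/* When per-flow vlan push/pop is not supported by the device
 * (mlx5_eswitch_vlan_actions_supported() is false), the actions are emulated
 * by configuring vlan insertion/stripping on the vports themselves; the
 * helpers below validate and refcount those per-vport settings.
 */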
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep[0];

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

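/* Send-to-vport rules match on the source SQ number and source vport 0, so
 * traffic transmitted by a representor's send queues is forwarded directly
 * to the VF vport it represents.
 */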
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

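/* The miss rules below catch packets that hit no offloaded rule: the match
 * masks only the multicast bit of the destination MAC, giving one rule for
 * unicast and one for multicast traffic, both forwarded to vport 0.
 */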
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

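/* The fast path consists of an auto-grouped FDB table; when the device
 * supports a forwarding FDB (mlx5_esw_has_fwd_fdb()), a second, level-1
 * table is created for the forwarding half of mirrored rules and the FTE
 * budget is split evenly between the two tables.
 */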
#define ESW_OFFLOADS_NUM_GROUPS 4

static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int esw_size, err = 0;
	u32 flags = 0;
	u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto out_namespace;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (mlx5_esw_has_fwd_fdb(dev))
		esw_size >>= 1;

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto out_namespace;
	}
	esw->fdb_table.offloads.fast_fdb = fdb;

	if (!mlx5_esw_has_fwd_fdb(dev))
		goto out_namespace;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 1,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create fwd table err %d\n", err);
		goto out_ft;
	}
	esw->fdb_table.offloads.fwd_fdb = fdb;

	return err;

out_ft:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
out_namespace:
	return err;
}

static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_has_fwd_fdb(esw->dev))
		mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb);
}

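/* The slow path FDB created below reserves MAX_SQ_NVPORTS send-to-vport
 * entries per vport plus MAX_PF_SQ for the PF, and two extra entries for
 * the unicast and multicast miss rules.
 */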
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	err = esw_create_offloads_fast_fdb_table(esw);
	if (err)
		goto fast_fdb_err;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;

	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	esw_destroy_offloads_fast_fdb_table(esw);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.fast_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_table(esw);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

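/* The vport rx rule matches the source_port metadata of packets reaching the
 * offloads table and forwards them to the given TIR; callers are expected to
 * pass the TIR of the corresponding representor (an assumption based on how
 * the rule is parameterized, not something enforced in this file).
 */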
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

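/* esw_offloads_start()/esw_offloads_stop() implement the devlink mode change:
 * the SRIOV eswitch is torn down and re-enabled in the requested mode,
 * falling back to the previous mode if that fails.
 */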
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			esw_warn(esw->dev, "Inline mode is different between vports\n");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_esw_offload *offloads;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN];
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vfs,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	offloads = &esw->offloads;
	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	for (vport = 0; vport < total_vfs; vport++) {
		rep = &offloads->vport_reps[vport];

		rep->vport = vport;
		ether_addr_copy(rep->hw_id, hw_id);
	}

	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;

	return 0;
}

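/* Representors are loaded and unloaded per representor type: for each
 * rep_type every vport with a valid rep_if is walked, and a failure while
 * loading unwinds the vports already loaded for that type.
 */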
static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
					  u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = nvports - 1; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		rep->rep_if[rep_type].unload(rep);
	}
}

static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
}

static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
				       u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		err = rep->rep_if[rep_type].load(esw->dev, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	esw_offloads_unload_reps_type(esw, vport, rep_type);
	return err;
}

static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
	return err;
}

int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_offloads_load_reps(esw, nvports);
	if (err)
		goto err_reps;

	return 0;

err_reps:
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
	}

	/* enable back PF RoCE */
	mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	esw_offloads_unload_reps(esw, nvports);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		esw_warn(dev, "Inline mode can't be set\n");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set inline mode when flows are configured\n");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			esw_warn(dev, "Failed to set min inline on vport %d\n",
				 vport);
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

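/* Changing the encap mode requires re-creating the fast path FDB, since
 * MLX5_FLOW_TABLE_TUNNEL_EN is a table creation flag; this is only allowed
 * while no offloaded flows are installed.
 */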
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set encapsulation when flows are configured\n");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fast_fdb_table(esw);

	esw->offloads.encap = encap;
	err = esw_create_offloads_fast_fdb_table(esw);
	if (err) {
		esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fast_fdb_table(esw);
	}
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep_if *__rep_if,
				     u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep_if *rep_if;

	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];

	rep_if->load = __rep_if->load;
	rep_if->unload = __rep_if->unload;
	rep_if->get_proto_dev = __rep_if->get_proto_dev;
	rep_if->priv = __rep_if->priv;

	rep_if->valid = true;
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index, u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->rep_if[rep_type].unload(rep);

	rep->rep_if[rep_type].valid = false;
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->rep_if[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 int vport,
				 u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	if (vport == FDB_UPLINK_VPORT)
		vport = UPLINK_REP_INDEX;

	rep = &offloads->vport_reps[vport];

	if (rep->rep_if[rep_type].valid &&
	    rep->rep_if[rep_type].get_proto_dev)
		return rep->rep_if[rep_type].get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						int vport)
{
	return &esw->offloads.vport_reps[vport];
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);