net/mlx5e: Add offloading of NIC TC pedit (header re-write) actions
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

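/*
 * SR-IOV e-switch offloads mode: offloaded (fast path) FDB flow rules,
 * emulated per-flow vlan push/pop, representor send-to-vport (SQ re-inject)
 * rules, slow path miss handling, and the devlink knobs that move the
 * e-switch between legacy and offloads (switchdev) modes and set the
 * per-vport min-inline policy.
 */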
enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

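/*
 * Add an offloaded flow rule to the fast path FDB table. On top of the
 * caller's spec, the rule matches on the in_rep source vport (misc
 * parameters) and forwards to the out_rep vport and/or a newly allocated
 * flow counter. Per-flow vlan push/pop is emulated via per-vport vlan
 * config, so those action bits are masked out before reaching firmware.
 */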
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i = 0;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	/* per flow vlan pop/push is emulated, don't set that into the firmware */
	flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
					   MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport_num = attr->out_rep->vport;
		i++;
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter)) {
			rule = ERR_CAST(counter);
			goto err_counter_alloc;
		}
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (attr->encap)
		flow_act.encap_id = attr->encap->encap_id;

	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
				   spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	mlx5_fc_destroy(esw->dev, counter);
err_counter_alloc:
	return rule;
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);
	mlx5_del_flow_rules(rule);
	mlx5_fc_destroy(esw->dev, counter);
	esw->offloads.num_flows--;
}

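/*
 * Per-flow vlan push/pop is emulated (see mlx5_eswitch_add_offloaded_rule):
 * push is realized as a vlan-insert setting on the sending VF vport, pop as
 * a global vlan-strip policy applied to all VF vports. The refcounts used
 * below make sure these per-vport settings are only installed with the
 * first rule that needs them and removed with the last one.
 */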
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without vlans (attr->vlan = 0) && with vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

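/*
 * Account for an offloaded rule's vlan push/pop actions and apply the
 * required per-vport vlan configuration. Sets attr->vlan_handled so the
 * matching del call knows whether there is anything to undo.
 */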
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

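/*
 * Slow path rule that re-injects traffic sent by a representor: match on
 * the representor's SQ number with source vport 0 (the PF, where the
 * representor netdevices live) and forward straight to the represented
 * vport, bypassing fast path FDB matching.
 */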
static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}

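/*
 * Install (start) or remove (stop) one send-to-vport rule per SQ that the
 * representor netdevice opened, so its transmitted packets reach the
 * represented vport.
 */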
void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5_esw_sq *esw_sq, *tmp;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
		list_del(&esw_sq->list);
		kfree(esw_sq);
	}
}

int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u16 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_esw_sq *esw_sq;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	for (i = 0; i < sqns_num; i++) {
		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
		if (!esw_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(esw_sq);
			goto out_err;
		}
		esw_sq->send_to_vport_rule = flow_rule;
		list_add(&esw_sq->list, &rep->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5_eswitch_sqs2vport_stop(esw, rep);
	return err;
}

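/*
 * FDB miss rule: an empty match specification matches every packet that
 * reached the slow path table without hitting an earlier rule. Such
 * packets are forwarded to vport 0 (the PF), from where the per-vport rx
 * rules in the offloads table deliver them to the right representor.
 */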
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule = flow_rule;
out:
	kvfree(spec);
	return err;
}

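/*
 * The offloads FDB is split in two: an auto-grouped fast path table sized
 * as min(2^log_max_ft_size, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS)
 * that holds the offloaded flows, and a small slow path table holding the
 * send-to-vport group (nvports + MAX_PF_SQ entries) plus the miss group.
 */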
#define MAX_PF_SQ 256
#define ESW_OFFLOADS_NUM_GROUPS  4

static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int table_size, ix, esw_size, err = 0;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	void *match_criteria;
	u32 flags = 0;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto fast_fdb_err;
	}
	esw->fdb_table.fdb = fdb;

	table_size = nvports + MAX_PF_SQ + 1;
	fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Table\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_core_dev *dev = esw->dev;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

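/*
 * The offloads (NIC rx) table holds one steering entry per source vport
 * (num_vfs + 2; the extra two slots presumably cover the PF and uplink
 * vports). The single flow group below matches solely on the misc
 * source_port field.
 */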
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

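/*
 * Steer packets arriving from a given source vport (i.e. packets a VF sent
 * that missed the FDB fast path) to the TIR of that vport's representor.
 */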
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

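/*
 * Switch the e-switch from legacy to offloads mode: SRIOV is torn down and
 * re-enabled in SRIOV_OFFLOADS mode, falling back to legacy if that fails.
 * If no explicit min-inline policy was set, adopt the one the vports
 * currently agree on, defaulting to L2 when they differ.
 */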
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			esw_warn(esw->dev, "Inline mode is different between vports\n");
		}
	}
	return err;
}

int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	/* disable PF RoCE so missed packets don't go through RoCE steering */
	mlx5_dev_list_lock();
	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	err = esw_create_offloads_fdb_table(esw, nvports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;

		err = rep->load(esw, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	for (vport--; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_table(esw);

create_fdb_err:
	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}

	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_table(esw);
}

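/* Translate between the devlink eswitch/inline mode constants and the
 * mlx5-internal ones; unknown values are rejected with -EINVAL.
 */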
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev;
	u16 cur_mlx5_mode, mlx5_mode = 0;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (cur_mlx5_mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

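/*
 * Set the min-inline policy on every VF vport. Only supported when the
 * device reports per-vport-context inline mode; refused while offloaded
 * flows exist, and rolled back to the previous mode on partial failure.
 */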
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int num_vports = esw->enabled_vports;
	int err;
	int vport;
	u8 mlx5_mode;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return -EOPNOTSUPP;

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set inline mode when flows are configured\n");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < num_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			esw_warn(dev, "Failed to set min inline on vport %d\n",
				 vport);
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return -EOPNOTSUPP;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	struct mlx5_core_dev *dev = esw->dev;
	int vport;
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return -EOPNOTSUPP;

	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

	*mode = mlx5_mode;
	return 0;
}

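/*
 * Register/unregister the representor callbacks and identity for a vport.
 * Registration only records the callbacks; the rep is loaded when the
 * e-switch enters offloads mode (and unloaded on unregister if that vport
 * is currently enabled in offloads mode).
 */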
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep *__rep)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	memset(rep, 0, sizeof(*rep));

	rep->load = __rep->load;
	rep->unload = __rep->unload;
	rep->vport = __rep->vport;
	rep->netdev = __rep->netdev;
	ether_addr_copy(rep->hw_id, __rep->hw_id);

	INIT_LIST_HEAD(&rep->vport_sqs_list);
	rep->valid = true;
}

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->unload(esw, rep);

	rep->valid = false;
}

struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->netdev;
}