/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

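/* The offloads FDB is split in two levels: FDB_FAST_PATH holds the
 * offloaded flow rules, FDB_SLOW_PATH holds the send-to-vport and miss
 * rules (see esw_create_offloads_fdb_table() below).
 */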
enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

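/* Add an offloaded rule to the fast path FDB table. The rule matches on the
 * caller supplied spec plus the source vport of the flow (taken from
 * attr->in_rep) and, per the requested actions, forwards to the vport of
 * attr->out_rep and/or to a newly allocated flow counter. VLAN push/pop
 * actions are masked out here since they are emulated through per-vport
 * configuration, see mlx5_eswitch_add_vlan_action() below.
 */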
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i = 0;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	/* per flow vlan pop/push is emulated, don't set that into the firmware */
	flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport_num = attr->out_rep->vport;
		i++;
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter)) {
			rule = ERR_CAST(counter);
			goto err_counter_alloc;
		}
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		flow_act.encap_id = attr->encap->encap_id;

	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
				   spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	mlx5_fc_destroy(esw->dev, counter);
err_counter_alloc:
	return rule;
}

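/* Tear down a rule created by mlx5_eswitch_add_offloaded_rule(): delete the
 * rule itself, free its flow counter (if one was attached) and update the
 * offloaded flows accounting.
 */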
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);
	mlx5_del_flow_rules(rule);
	mlx5_fc_destroy(esw->dev, counter);
	esw->offloads.num_flows--;
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

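/* Emulate a per-flow vlan push/pop through per-vport configuration: the
 * first vlan rule turns on the global vlan stripping policy for all VF
 * vports, and a push rule programs vlan insertion on the ingress vport.
 * Refcounts (vlan_refcount, vlan_push_pop_refcount) let multiple flows
 * share the same vport configuration.
 */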
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

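/* Undo the vport vlan configuration done in mlx5_eswitch_add_vlan_action().
 * Once the last push/pop rule goes away, the global vlan pop policy is
 * turned off again.
 */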
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

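/* Install a "send to vport" re-inject rule in the slow path FDB table:
 * packets sent by the eswitch manager (source vport 0) from the given SQ
 * are steered straight to the representor's vport.
 */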
static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}

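/* mlx5_eswitch_sqs2vport_{start,stop}() maintain the set of send-to-vport
 * rules for a representor: start() adds one re-inject rule per SQ in
 * sqns_array, stop() deletes them and frees the tracking list entries.
 */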
void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5_esw_sq *esw_sq, *tmp;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
		list_del(&esw_sq->list);
		kfree(esw_sq);
	}
}

int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u16 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_esw_sq *esw_sq;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	for (i = 0; i < sqns_num; i++) {
		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
		if (!esw_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(esw_sq);
			goto out_err;
		}
		esw_sq->send_to_vport_rule = flow_rule;
		list_add(&esw_sq->list, &rep->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5_eswitch_sqs2vport_stop(esw, rep);
	return err;
}

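/* Add the FDB miss rule: an empty (match-all) rule that forwards packets
 * which hit no other FDB entry to vport 0, the eswitch manager, so they
 * can be handled in software.
 */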
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule = flow_rule;
out:
	kvfree(spec);
	return err;
}

#define MAX_PF_SQ 256
#define ESW_OFFLOADS_NUM_GROUPS  4

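/* Create the two-level offloads FDB: an auto-grouped fast path table sized
 * by min(max flow counters * ESW_OFFLOADS_NUM_GROUPS, max FDB table size),
 * and a slow path table holding a send-to-vport group (nvports + MAX_PF_SQ
 * entries) plus a two-entry miss group, into which the miss rule is added.
 */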
static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int table_size, ix, esw_size, err = 0;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	void *match_criteria;
	u32 flags = 0;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto fast_fdb_err;
	}
	esw->fdb_table.fdb = fdb;

	/* room for the send-to-vport group plus the two-entry miss group */
	table_size = nvports + MAX_PF_SQ + 2;
	fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Table\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
}

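/* The offloads table lives in the NIC RX offloads namespace and holds the
 * per-vport rx rules that steer packets to the matching representor TIR;
 * it is sized for one rule per VF vport plus two extra entries.
 */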
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_core_dev *dev = esw->dev;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

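/* A single flow group covering the whole offloads table; it matches only
 * on the misc parameter source_port, which is all the vport rx rules need.
 */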
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in); /* mlx5_vzalloc() may vmalloc, so free with kvfree() */
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

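/* Add an rx rule that matches packets whose eswitch source port is the
 * given vport and forwards them to the caller supplied TIR (tirn).
 */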
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

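/* Switch the eswitch from legacy SRIOV to offloads (switchdev) mode by
 * re-initializing SRIOV in the new mode; on failure, fall back to legacy.
 * If no explicit inline mode was set, query a common minimal inline mode
 * across the vports, defaulting to L2 if they disagree.
 */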
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			esw_warn(esw->dev, "Inline mode is different between vports\n");
		}
	}
	return err;
}

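/* Set up the offloads mode datapath: the two-level FDB, the rx offloads
 * table with its vport rx group, and then load the registered vport
 * representors. PF RoCE is removed first so that packets missing in the
 * FDB don't get consumed by the RoCE steering tables, and is added back
 * on the error path.
 */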
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	/* disable PF RoCE so missed packets don't go through RoCE steering */
	mlx5_dev_list_lock();
	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	err = esw_create_offloads_fdb_table(esw, nvports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;

		err = rep->load(esw, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	for (vport--; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_table(esw);

create_fdb_err:
	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

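/* Switch back from offloads to legacy SRIOV mode and restore PF RoCE;
 * mirrors esw_offloads_start(), including the fallback on failure.
 */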
static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}

	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_table(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

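/* devlink entry points for switching the eswitch mode, e.g. (illustrative
 * PCI address):
 *
 *   devlink dev eswitch set pci/0000:06:00.0 mode switchdev
 *   devlink dev eswitch show pci/0000:06:00.0
 *
 * Only valid on an eswitch manager (vport_group_manager) with SRIOV enabled.
 */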
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev;
	u16 cur_mlx5_mode, mlx5_mode = 0;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (cur_mlx5_mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

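/* devlink entry points for the eswitch minimum inline mode, e.g.
 * "devlink dev eswitch set pci/... inline-mode transport" (illustrative
 * device address). Changing the mode is only allowed when the device
 * honours per-vport-context inline configuration and no offloaded flows
 * are installed; the new mode is applied to every VF vport and rolled
 * back on failure.
 */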
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int num_vports = esw->enabled_vports;
	int err;
	int vport;
	u8 mlx5_mode;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return -EOPNOTSUPP;

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set inline mode when flows are configured\n");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < num_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			esw_warn(dev, "Failed to set min inline on vport %d\n",
				 vport);
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return -EOPNOTSUPP;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	struct mlx5_core_dev *dev = esw->dev;
	int vport;
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return -EOPNOTSUPP;

	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

	*mode = mlx5_mode;
	return 0;
}

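/* Register / unregister a vport representor. The rep's load()/unload()
 * callbacks are invoked when offloads mode is initialized and torn down
 * (and immediately on unregister if the vport is enabled in offloads
 * mode). Slot 0 holds the uplink representor, whose netdev is exposed via
 * mlx5_eswitch_get_uplink_netdev().
 */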
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep *__rep)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	memset(rep, 0, sizeof(*rep));

	rep->load = __rep->load;
	rep->unload = __rep->unload;
	rep->vport = __rep->vport;
	rep->netdev = __rep->netdev;
	ether_addr_copy(rep->hw_id, __rep->hw_id);

	INIT_LIST_HEAD(&rep->vport_sqs_list);
	rep->valid = true;
}

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->unload(esw, rep);

	rep->valid = false;
}

struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->netdev;
}