net/mlx5: Put elements related to offloaded TC rule in one struct
[linux-2.6-block.git] / drivers / net / ethernet / mellanox / mlx5 / core / eswitch_offloads.c
CommitLineData
69697b6e
OG
1/*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/etherdevice.h>
34#include <linux/mlx5/driver.h>
35#include <linux/mlx5/mlx5_ifc.h>
36#include <linux/mlx5/vport.h>
37#include <linux/mlx5/fs.h>
38#include "mlx5_core.h"
39#include "eswitch.h"
40
/* FDB table levels: the fast path table holds the offloaded rules,
 * the slow path table holds the send-to-vport and miss entries.
 */
enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};
3d80d1a2
OG
46struct mlx5_flow_rule *
47mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
48 struct mlx5_flow_spec *spec,
776b12b6 49 struct mlx5_esw_flow_attr *attr)
3d80d1a2
OG
50{
51 struct mlx5_flow_destination dest = { 0 };
52 struct mlx5_fc *counter = NULL;
53 struct mlx5_flow_rule *rule;
54 void *misc;
776b12b6 55 int action;
3d80d1a2
OG
56
57 if (esw->mode != SRIOV_OFFLOADS)
58 return ERR_PTR(-EOPNOTSUPP);
59
776b12b6
OG
60 action = attr->action;
61
3d80d1a2
OG
62 if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
63 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
776b12b6 64 dest.vport_num = attr->out_rep->vport;
3d80d1a2
OG
65 action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
66 } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
67 counter = mlx5_fc_create(esw->dev, true);
68 if (IS_ERR(counter))
69 return ERR_CAST(counter);
70 dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
71 dest.counter = counter;
72 }
73
74 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
776b12b6 75 MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
3d80d1a2
OG
76
77 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
78 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
79
80 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
81 MLX5_MATCH_MISC_PARAMETERS;
82
83 rule = mlx5_add_flow_rule((struct mlx5_flow_table *)esw->fdb_table.fdb,
84 spec, action, 0, &dest);
85
86 if (IS_ERR(rule))
87 mlx5_fc_destroy(esw->dev, counter);
88
89 return rule;
90}
91
cb67b832 92static struct mlx5_flow_rule *
ab22be9b
OG
93mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
94{
95 struct mlx5_flow_destination dest;
96 struct mlx5_flow_rule *flow_rule;
c5bb1730 97 struct mlx5_flow_spec *spec;
ab22be9b
OG
98 void *misc;
99
c5bb1730
MG
100 spec = mlx5_vzalloc(sizeof(*spec));
101 if (!spec) {
ab22be9b
OG
102 esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
103 flow_rule = ERR_PTR(-ENOMEM);
104 goto out;
105 }
106
c5bb1730 107 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
ab22be9b
OG
108 MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
109 MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */
110
c5bb1730 111 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
ab22be9b
OG
112 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
113 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
114
c5bb1730 115 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
ab22be9b
OG
116 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
117 dest.vport_num = vport;
118
1a8ee6f2 119 flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
c5bb1730 120 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
ab22be9b
OG
121 0, &dest);
122 if (IS_ERR(flow_rule))
123 esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
124out:
c5bb1730 125 kvfree(spec);
ab22be9b
OG
126 return flow_rule;
127}
128
cb67b832
HHZ
129void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
130 struct mlx5_eswitch_rep *rep)
131{
132 struct mlx5_esw_sq *esw_sq, *tmp;
133
134 if (esw->mode != SRIOV_OFFLOADS)
135 return;
136
137 list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
138 mlx5_del_flow_rule(esw_sq->send_to_vport_rule);
139 list_del(&esw_sq->list);
140 kfree(esw_sq);
141 }
142}
143
144int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
145 struct mlx5_eswitch_rep *rep,
146 u16 *sqns_array, int sqns_num)
147{
148 struct mlx5_flow_rule *flow_rule;
149 struct mlx5_esw_sq *esw_sq;
cb67b832
HHZ
150 int err;
151 int i;
152
153 if (esw->mode != SRIOV_OFFLOADS)
154 return 0;
155
cb67b832
HHZ
156 for (i = 0; i < sqns_num; i++) {
157 esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
158 if (!esw_sq) {
159 err = -ENOMEM;
160 goto out_err;
161 }
162
163 /* Add re-inject rule to the PF/representor sqs */
164 flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
9deb2241 165 rep->vport,
cb67b832
HHZ
166 sqns_array[i]);
167 if (IS_ERR(flow_rule)) {
168 err = PTR_ERR(flow_rule);
169 kfree(esw_sq);
170 goto out_err;
171 }
172 esw_sq->send_to_vport_rule = flow_rule;
173 list_add(&esw_sq->list, &rep->vport_sqs_list);
174 }
175 return 0;
176
177out_err:
178 mlx5_eswitch_sqs2vport_stop(esw, rep);
179 return err;
180}
181
3aa33572
OG
182static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
183{
184 struct mlx5_flow_destination dest;
185 struct mlx5_flow_rule *flow_rule = NULL;
c5bb1730 186 struct mlx5_flow_spec *spec;
3aa33572
OG
187 int err = 0;
188
c5bb1730
MG
189 spec = mlx5_vzalloc(sizeof(*spec));
190 if (!spec) {
3aa33572
OG
191 esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
192 err = -ENOMEM;
193 goto out;
194 }
195
196 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
197 dest.vport_num = 0;
198
1033665e 199 flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
c5bb1730
MG
200 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
201 0, &dest);
3aa33572
OG
202 if (IS_ERR(flow_rule)) {
203 err = PTR_ERR(flow_rule);
204 esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
205 goto out;
206 }
207
208 esw->fdb_table.offloads.miss_rule = flow_rule;
209out:
c5bb1730 210 kvfree(spec);
3aa33572
OG
211 return err;
212}
213
69697b6e 214#define MAX_PF_SQ 256
1033665e
OG
215#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
216#define ESW_OFFLOADS_NUM_GROUPS 4
69697b6e 217
c930a3ad 218static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
69697b6e
OG
219{
220 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
221 struct mlx5_core_dev *dev = esw->dev;
222 struct mlx5_flow_namespace *root_ns;
223 struct mlx5_flow_table *fdb = NULL;
224 struct mlx5_flow_group *g;
225 u32 *flow_group_in;
226 void *match_criteria;
227 int table_size, ix, err = 0;
228
229 flow_group_in = mlx5_vzalloc(inlen);
230 if (!flow_group_in)
231 return -ENOMEM;
232
233 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
234 if (!root_ns) {
235 esw_warn(dev, "Failed to get FDB flow namespace\n");
236 goto ns_err;
237 }
238
239 esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
240 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
241
1033665e
OG
242 fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
243 ESW_OFFLOADS_NUM_ENTRIES,
244 ESW_OFFLOADS_NUM_GROUPS, 0);
69697b6e
OG
245 if (IS_ERR(fdb)) {
246 err = PTR_ERR(fdb);
1033665e
OG
247 esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
248 goto fast_fdb_err;
69697b6e
OG
249 }
250 esw->fdb_table.fdb = fdb;
251
1033665e
OG
252 table_size = nvports + MAX_PF_SQ + 1;
253 fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0);
254 if (IS_ERR(fdb)) {
255 err = PTR_ERR(fdb);
256 esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
257 goto slow_fdb_err;
258 }
259 esw->fdb_table.offloads.fdb = fdb;
260
69697b6e
OG
261 /* create send-to-vport group */
262 memset(flow_group_in, 0, inlen);
263 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
264 MLX5_MATCH_MISC_PARAMETERS);
265
266 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
267
268 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
269 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
270
271 ix = nvports + MAX_PF_SQ;
272 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
273 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
274
275 g = mlx5_create_flow_group(fdb, flow_group_in);
276 if (IS_ERR(g)) {
277 err = PTR_ERR(g);
278 esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
279 goto send_vport_err;
280 }
281 esw->fdb_table.offloads.send_to_vport_grp = g;
282
283 /* create miss group */
284 memset(flow_group_in, 0, inlen);
285 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);
286
287 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
288 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);
289
290 g = mlx5_create_flow_group(fdb, flow_group_in);
291 if (IS_ERR(g)) {
292 err = PTR_ERR(g);
293 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
294 goto miss_err;
295 }
296 esw->fdb_table.offloads.miss_grp = g;
297
3aa33572
OG
298 err = esw_add_fdb_miss_rule(esw);
299 if (err)
300 goto miss_rule_err;
301
69697b6e
OG
302 return 0;
303
3aa33572
OG
304miss_rule_err:
305 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
69697b6e
OG
306miss_err:
307 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
308send_vport_err:
1033665e
OG
309 mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
310slow_fdb_err:
311 mlx5_destroy_flow_table(esw->fdb_table.fdb);
312fast_fdb_err:
69697b6e
OG
313ns_err:
314 kvfree(flow_group_in);
315 return err;
316}
317
c930a3ad 318static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
69697b6e
OG
319{
320 if (!esw->fdb_table.fdb)
321 return;
322
323 esw_debug(esw->dev, "Destroy offloads FDB Table\n");
3aa33572 324 mlx5_del_flow_rule(esw->fdb_table.offloads.miss_rule);
69697b6e
OG
325 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
326 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
327
1033665e 328 mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
69697b6e
OG
329 mlx5_destroy_flow_table(esw->fdb_table.fdb);
330}
c116c6ee
OG
331
332static int esw_create_offloads_table(struct mlx5_eswitch *esw)
333{
334 struct mlx5_flow_namespace *ns;
335 struct mlx5_flow_table *ft_offloads;
336 struct mlx5_core_dev *dev = esw->dev;
337 int err = 0;
338
339 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
340 if (!ns) {
341 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
342 return -ENOMEM;
343 }
344
345 ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0);
346 if (IS_ERR(ft_offloads)) {
347 err = PTR_ERR(ft_offloads);
348 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
349 return err;
350 }
351
352 esw->offloads.ft_offloads = ft_offloads;
353 return 0;
354}
355
356static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
357{
358 struct mlx5_esw_offload *offloads = &esw->offloads;
359
360 mlx5_destroy_flow_table(offloads->ft_offloads);
361}
fed9ce22
OG
362
363static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
364{
365 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
366 struct mlx5_flow_group *g;
367 struct mlx5_priv *priv = &esw->dev->priv;
368 u32 *flow_group_in;
369 void *match_criteria, *misc;
370 int err = 0;
371 int nvports = priv->sriov.num_vfs + 2;
372
373 flow_group_in = mlx5_vzalloc(inlen);
374 if (!flow_group_in)
375 return -ENOMEM;
376
377 /* create vport rx group */
378 memset(flow_group_in, 0, inlen);
379 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
380 MLX5_MATCH_MISC_PARAMETERS);
381
382 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
383 misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
384 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
385
386 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
387 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
388
389 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
390
391 if (IS_ERR(g)) {
392 err = PTR_ERR(g);
393 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
394 goto out;
395 }
396
397 esw->offloads.vport_rx_group = g;
398out:
399 kfree(flow_group_in);
400 return err;
401}
402
403static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
404{
405 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
406}
407
408struct mlx5_flow_rule *
409mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
410{
411 struct mlx5_flow_destination dest;
412 struct mlx5_flow_rule *flow_rule;
c5bb1730 413 struct mlx5_flow_spec *spec;
fed9ce22
OG
414 void *misc;
415
c5bb1730
MG
416 spec = mlx5_vzalloc(sizeof(*spec));
417 if (!spec) {
fed9ce22
OG
418 esw_warn(esw->dev, "Failed to alloc match parameters\n");
419 flow_rule = ERR_PTR(-ENOMEM);
420 goto out;
421 }
422
c5bb1730 423 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
fed9ce22
OG
424 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
425
c5bb1730 426 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
fed9ce22
OG
427 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
428
c5bb1730 429 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
fed9ce22
OG
430 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
431 dest.tir_num = tirn;
432
c5bb1730
MG
433 flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, spec,
434 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
fed9ce22
OG
435 0, &dest);
436 if (IS_ERR(flow_rule)) {
437 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
438 goto out;
439 }
440
441out:
c5bb1730 442 kvfree(spec);
fed9ce22
OG
443 return flow_rule;
444}
feae9087 445
c930a3ad
OG
446static int esw_offloads_start(struct mlx5_eswitch *esw)
447{
6c419ba8 448 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
c930a3ad
OG
449
450 if (esw->mode != SRIOV_LEGACY) {
451 esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
452 return -EINVAL;
453 }
454
455 mlx5_eswitch_disable_sriov(esw);
456 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
6c419ba8
OG
457 if (err) {
458 esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
459 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
460 if (err1)
461 esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
462 }
c930a3ad
OG
463 return err;
464}
465
466int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
467{
cb67b832
HHZ
468 struct mlx5_eswitch_rep *rep;
469 int vport;
c930a3ad
OG
470 int err;
471
472 err = esw_create_offloads_fdb_table(esw, nvports);
473 if (err)
474 return err;
475
476 err = esw_create_offloads_table(esw);
477 if (err)
478 goto create_ft_err;
479
480 err = esw_create_vport_rx_group(esw);
481 if (err)
482 goto create_fg_err;
483
cb67b832
HHZ
484 for (vport = 0; vport < nvports; vport++) {
485 rep = &esw->offloads.vport_reps[vport];
486 if (!rep->valid)
487 continue;
488
489 err = rep->load(esw, rep);
490 if (err)
491 goto err_reps;
492 }
c930a3ad
OG
493 return 0;
494
cb67b832
HHZ
495err_reps:
496 for (vport--; vport >= 0; vport--) {
497 rep = &esw->offloads.vport_reps[vport];
498 if (!rep->valid)
499 continue;
500 rep->unload(esw, rep);
501 }
502 esw_destroy_vport_rx_group(esw);
503
c930a3ad
OG
504create_fg_err:
505 esw_destroy_offloads_table(esw);
506
507create_ft_err:
508 esw_destroy_offloads_fdb_table(esw);
509 return err;
510}
511
512static int esw_offloads_stop(struct mlx5_eswitch *esw)
513{
6c419ba8 514 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
c930a3ad
OG
515
516 mlx5_eswitch_disable_sriov(esw);
517 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
6c419ba8
OG
518 if (err) {
519 esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
520 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
521 if (err1)
522 esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
523 }
c930a3ad
OG
524
525 return err;
526}
527
528void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
529{
cb67b832
HHZ
530 struct mlx5_eswitch_rep *rep;
531 int vport;
532
533 for (vport = 0; vport < nvports; vport++) {
534 rep = &esw->offloads.vport_reps[vport];
535 if (!rep->valid)
536 continue;
537 rep->unload(esw, rep);
538 }
539
c930a3ad
OG
540 esw_destroy_vport_rx_group(esw);
541 esw_destroy_offloads_table(esw);
542 esw_destroy_offloads_fdb_table(esw);
543}
544
ef78618b 545static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
c930a3ad
OG
546{
547 switch (mode) {
548 case DEVLINK_ESWITCH_MODE_LEGACY:
549 *mlx5_mode = SRIOV_LEGACY;
550 break;
551 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
552 *mlx5_mode = SRIOV_OFFLOADS;
553 break;
554 default:
555 return -EINVAL;
556 }
557
558 return 0;
559}
560
ef78618b
OG
561static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
562{
563 switch (mlx5_mode) {
564 case SRIOV_LEGACY:
565 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
566 break;
567 case SRIOV_OFFLOADS:
568 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
569 break;
570 default:
571 return -EINVAL;
572 }
573
574 return 0;
575}
576
feae9087
OG
577int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
578{
c930a3ad
OG
579 struct mlx5_core_dev *dev;
580 u16 cur_mlx5_mode, mlx5_mode = 0;
581
582 dev = devlink_priv(devlink);
583
584 if (!MLX5_CAP_GEN(dev, vport_group_manager))
585 return -EOPNOTSUPP;
586
587 cur_mlx5_mode = dev->priv.eswitch->mode;
588
589 if (cur_mlx5_mode == SRIOV_NONE)
590 return -EOPNOTSUPP;
591
ef78618b 592 if (esw_mode_from_devlink(mode, &mlx5_mode))
c930a3ad
OG
593 return -EINVAL;
594
595 if (cur_mlx5_mode == mlx5_mode)
596 return 0;
597
598 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
599 return esw_offloads_start(dev->priv.eswitch);
600 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
601 return esw_offloads_stop(dev->priv.eswitch);
602 else
603 return -EINVAL;
feae9087
OG
604}
605
606int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
607{
c930a3ad
OG
608 struct mlx5_core_dev *dev;
609
610 dev = devlink_priv(devlink);
611
612 if (!MLX5_CAP_GEN(dev, vport_group_manager))
613 return -EOPNOTSUPP;
614
615 if (dev->priv.eswitch->mode == SRIOV_NONE)
616 return -EOPNOTSUPP;
617
ef78618b 618 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
feae9087 619}
127ea380
HHZ
620
621void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
9deb2241
OG
622 int vport_index,
623 struct mlx5_eswitch_rep *__rep)
127ea380
HHZ
624{
625 struct mlx5_esw_offload *offloads = &esw->offloads;
9deb2241
OG
626 struct mlx5_eswitch_rep *rep;
627
628 rep = &offloads->vport_reps[vport_index];
127ea380 629
bac9b6aa
OG
630 memset(rep, 0, sizeof(*rep));
631
632 rep->load = __rep->load;
633 rep->unload = __rep->unload;
634 rep->vport = __rep->vport;
635 rep->priv_data = __rep->priv_data;
636 ether_addr_copy(rep->hw_id, __rep->hw_id);
127ea380 637
9deb2241
OG
638 INIT_LIST_HEAD(&rep->vport_sqs_list);
639 rep->valid = true;
127ea380
HHZ
640}
641
642void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
9deb2241 643 int vport_index)
127ea380
HHZ
644{
645 struct mlx5_esw_offload *offloads = &esw->offloads;
cb67b832
HHZ
646 struct mlx5_eswitch_rep *rep;
647
9deb2241 648 rep = &offloads->vport_reps[vport_index];
cb67b832 649
9deb2241 650 if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
cb67b832 651 rep->unload(esw, rep);
127ea380 652
9deb2241 653 rep->valid = false;
127ea380 654}