Commit | Line | Data |
---|---|---|
69697b6e OG |
1 | /* |
2 | * Copyright (c) 2016, Mellanox Technologies. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | ||
33 | #include <linux/etherdevice.h> | |
34 | #include <linux/mlx5/driver.h> | |
35 | #include <linux/mlx5/mlx5_ifc.h> | |
36 | #include <linux/mlx5/vport.h> | |
37 | #include <linux/mlx5/fs.h> | |
38 | #include "mlx5_core.h" | |
39 | #include "eswitch.h" | |
40 | ||
1033665e OG |
/* Chaining levels of the FDB flow namespace: offloaded (fast path) flows
 * are looked up before the slow path table that holds the send-to-vport
 * and miss rules.
 */
enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};
45 | ||
/* Add an offloaded rule to the fast-path FDB table.
 *
 * The rule matches @spec's outer headers plus the source vport taken from
 * attr->in_rep, and either forwards to attr->out_rep's vport or attaches a
 * flow counter, per attr->action.
 *
 * Returns the flow handle, or ERR_PTR() on failure (including when the
 * eswitch is not in offloads mode).
 */
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	void *misc;
	int action;

	/* offloaded rules only exist in switchdev (offloads) mode */
	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	action = attr->action;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest.vport_num = attr->out_rep->vport;
		/* FWD takes precedence over any other requested action bits */
		action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);
		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	/* match on the source vport of the sending representor */
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;

	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
				   spec, action, 0, &dest, 1);

	/* a counter (if any) is only owned by the rule on success */
	if (IS_ERR(rule))
		mlx5_fc_destroy(esw->dev, counter);

	return rule;
}
91 | ||
f5f82476 OG |
92 | static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) |
93 | { | |
94 | struct mlx5_eswitch_rep *rep; | |
95 | int vf_vport, err = 0; | |
96 | ||
97 | esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none"); | |
98 | for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) { | |
99 | rep = &esw->offloads.vport_reps[vf_vport]; | |
100 | if (!rep->valid) | |
101 | continue; | |
102 | ||
103 | err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val); | |
104 | if (err) | |
105 | goto out; | |
106 | } | |
107 | ||
108 | out: | |
109 | return err; | |
110 | } | |
111 | ||
112 | static struct mlx5_eswitch_rep * | |
113 | esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop) | |
114 | { | |
115 | struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL; | |
116 | ||
117 | in_rep = attr->in_rep; | |
118 | out_rep = attr->out_rep; | |
119 | ||
120 | if (push) | |
121 | vport = in_rep; | |
122 | else if (pop) | |
123 | vport = out_rep; | |
124 | else | |
125 | vport = in_rep; | |
126 | ||
127 | return vport; | |
128 | } | |
129 | ||
130 | static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr, | |
131 | bool push, bool pop, bool fwd) | |
132 | { | |
133 | struct mlx5_eswitch_rep *in_rep, *out_rep; | |
134 | ||
135 | if ((push || pop) && !fwd) | |
136 | goto out_notsupp; | |
137 | ||
138 | in_rep = attr->in_rep; | |
139 | out_rep = attr->out_rep; | |
140 | ||
141 | if (push && in_rep->vport == FDB_UPLINK_VPORT) | |
142 | goto out_notsupp; | |
143 | ||
144 | if (pop && out_rep->vport == FDB_UPLINK_VPORT) | |
145 | goto out_notsupp; | |
146 | ||
147 | /* vport has vlan push configured, can't offload VF --> wire rules w.o it */ | |
148 | if (!push && !pop && fwd) | |
149 | if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT) | |
150 | goto out_notsupp; | |
151 | ||
152 | /* protects against (1) setting rules with different vlans to push and | |
153 | * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0) | |
154 | */ | |
155 | if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan)) | |
156 | goto out_notsupp; | |
157 | ||
158 | return 0; | |
159 | ||
160 | out_notsupp: | |
161 | return -ENOTSUPP; | |
162 | } | |
163 | ||
/* Account for and apply the vlan push/pop side effects of offloading the
 * rule described by @attr: reference the global pop policy, configure the
 * vport's vlan insert when this is the first push rule for it, and track
 * VF -> wire rules that depend on the vport's push config.
 *
 * Sets attr->vlan_handled when the accounting took effect so that
 * mlx5_eswitch_del_vlan_action() knows to undo it.
 * Returns 0 or a negative errno.
 */
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;
	/* NOTE(review): if the push config below fails,
	 * vlan_push_pop_refcount is not rolled back on the error path —
	 * verify callers do not leave a stale reference, or fix here.
	 */

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}
222 | ||
/* Undo the vlan push/pop accounting done by mlx5_eswitch_add_vlan_action():
 * drop the per-vport and global references, clear the vport's vlan insert
 * config when its last push rule goes away, and stop the global pop policy
 * when the last vlan rule is removed.
 * Returns 0 or a negative errno.
 */
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nothing to undo if add_vlan_action never took effect */
	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		/* last push rule on this vport - clear its vlan insert config */
		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}
271 | ||
/* Install an FDB "send-to-vport" re-inject rule: traffic sent by the PF
 * (source vport 0) from send queue @sqn is forwarded to @vport, letting a
 * representor netdev transmit directly to the vport it represents.
 * Returns the rule handle or ERR_PTR() on failure.
 */
static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* match on the originating SQ number and the PF vport */
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					0, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
308 | ||
cb67b832 HHZ |
/* Remove all send-to-vport rules installed for @rep's send queues and
 * free the tracking entries. No-op unless in offloads mode.
 */
void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5_esw_sq *esw_sq, *tmp;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	/* _safe variant: entries are deleted while walking the list */
	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
		list_del(&esw_sq->list);
		kfree(esw_sq);
	}
}
323 | ||
/* Install a send-to-vport re-inject rule for each of @rep's send queues
 * (@sqns_array, @sqns_num entries) and track them on rep->vport_sqs_list.
 * On any failure all rules installed so far are removed.
 * No-op (returns 0) unless in offloads mode; otherwise 0 or negative errno.
 */
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u16 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_esw_sq *esw_sq;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	for (i = 0; i < sqns_num; i++) {
		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
		if (!esw_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(esw_sq);
			goto out_err;
		}
		esw_sq->send_to_vport_rule = flow_rule;
		list_add(&esw_sq->list, &rep->vport_sqs_list);
	}
	return 0;

out_err:
	/* unwinds every rule added so far */
	mlx5_eswitch_sqs2vport_stop(esw, rep);
	return err;
}
361 | ||
3aa33572 OG |
/* Install the FDB miss rule: an empty match spec in the miss group, so
 * traffic that hits no other rule is forwarded to vport 0 (the PF).
 * The handle is stored in esw->fdb_table.offloads.miss_rule.
 * Returns 0 or negative errno.
 */
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* spec stays all-zero: matches everything */
	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					0, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule = flow_rule;
out:
	kvfree(spec);
	return err;
}
393 | ||
/* slow path sizing: max PF send queues needing re-inject rules */
#define MAX_PF_SQ 256
/* fast path sizing: auto-grouped table of 8K entries split into 4 groups */
#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
#define ESW_OFFLOADS_NUM_GROUPS 4
69697b6e | 397 | |
c930a3ad | 398 | static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) |
69697b6e OG |
399 | { |
400 | int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); | |
401 | struct mlx5_core_dev *dev = esw->dev; | |
402 | struct mlx5_flow_namespace *root_ns; | |
403 | struct mlx5_flow_table *fdb = NULL; | |
404 | struct mlx5_flow_group *g; | |
405 | u32 *flow_group_in; | |
406 | void *match_criteria; | |
407 | int table_size, ix, err = 0; | |
408 | ||
409 | flow_group_in = mlx5_vzalloc(inlen); | |
410 | if (!flow_group_in) | |
411 | return -ENOMEM; | |
412 | ||
413 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); | |
414 | if (!root_ns) { | |
415 | esw_warn(dev, "Failed to get FDB flow namespace\n"); | |
416 | goto ns_err; | |
417 | } | |
418 | ||
419 | esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n", | |
420 | MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); | |
421 | ||
1033665e OG |
422 | fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, |
423 | ESW_OFFLOADS_NUM_ENTRIES, | |
424 | ESW_OFFLOADS_NUM_GROUPS, 0); | |
69697b6e OG |
425 | if (IS_ERR(fdb)) { |
426 | err = PTR_ERR(fdb); | |
1033665e OG |
427 | esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err); |
428 | goto fast_fdb_err; | |
69697b6e OG |
429 | } |
430 | esw->fdb_table.fdb = fdb; | |
431 | ||
1033665e OG |
432 | table_size = nvports + MAX_PF_SQ + 1; |
433 | fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0); | |
434 | if (IS_ERR(fdb)) { | |
435 | err = PTR_ERR(fdb); | |
436 | esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err); | |
437 | goto slow_fdb_err; | |
438 | } | |
439 | esw->fdb_table.offloads.fdb = fdb; | |
440 | ||
69697b6e OG |
441 | /* create send-to-vport group */ |
442 | memset(flow_group_in, 0, inlen); | |
443 | MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, | |
444 | MLX5_MATCH_MISC_PARAMETERS); | |
445 | ||
446 | match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); | |
447 | ||
448 | MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn); | |
449 | MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); | |
450 | ||
451 | ix = nvports + MAX_PF_SQ; | |
452 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); | |
453 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1); | |
454 | ||
455 | g = mlx5_create_flow_group(fdb, flow_group_in); | |
456 | if (IS_ERR(g)) { | |
457 | err = PTR_ERR(g); | |
458 | esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err); | |
459 | goto send_vport_err; | |
460 | } | |
461 | esw->fdb_table.offloads.send_to_vport_grp = g; | |
462 | ||
463 | /* create miss group */ | |
464 | memset(flow_group_in, 0, inlen); | |
465 | MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0); | |
466 | ||
467 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); | |
468 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1); | |
469 | ||
470 | g = mlx5_create_flow_group(fdb, flow_group_in); | |
471 | if (IS_ERR(g)) { | |
472 | err = PTR_ERR(g); | |
473 | esw_warn(dev, "Failed to create miss flow group err(%d)\n", err); | |
474 | goto miss_err; | |
475 | } | |
476 | esw->fdb_table.offloads.miss_grp = g; | |
477 | ||
3aa33572 OG |
478 | err = esw_add_fdb_miss_rule(esw); |
479 | if (err) | |
480 | goto miss_rule_err; | |
481 | ||
69697b6e OG |
482 | return 0; |
483 | ||
3aa33572 OG |
484 | miss_rule_err: |
485 | mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); | |
69697b6e OG |
486 | miss_err: |
487 | mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); | |
488 | send_vport_err: | |
1033665e OG |
489 | mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb); |
490 | slow_fdb_err: | |
491 | mlx5_destroy_flow_table(esw->fdb_table.fdb); | |
492 | fast_fdb_err: | |
69697b6e OG |
493 | ns_err: |
494 | kvfree(flow_group_in); | |
495 | return err; | |
496 | } | |
497 | ||
/* Destroy everything created by esw_create_offloads_fdb_table(); safe to
 * call when nothing was created (fast-path fdb pointer is NULL).
 */
static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Table\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);	/* slow path */
	mlx5_destroy_flow_table(esw->fdb_table.fdb);		/* fast path */
}
c116c6ee OG |
511 | |
512 | static int esw_create_offloads_table(struct mlx5_eswitch *esw) | |
513 | { | |
514 | struct mlx5_flow_namespace *ns; | |
515 | struct mlx5_flow_table *ft_offloads; | |
516 | struct mlx5_core_dev *dev = esw->dev; | |
517 | int err = 0; | |
518 | ||
519 | ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); | |
520 | if (!ns) { | |
521 | esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); | |
522 | return -ENOMEM; | |
523 | } | |
524 | ||
525 | ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0); | |
526 | if (IS_ERR(ft_offloads)) { | |
527 | err = PTR_ERR(ft_offloads); | |
528 | esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err); | |
529 | return err; | |
530 | } | |
531 | ||
532 | esw->offloads.ft_offloads = ft_offloads; | |
533 | return 0; | |
534 | } | |
535 | ||
536 | static void esw_destroy_offloads_table(struct mlx5_eswitch *esw) | |
537 | { | |
538 | struct mlx5_esw_offload *offloads = &esw->offloads; | |
539 | ||
540 | mlx5_destroy_flow_table(offloads->ft_offloads); | |
541 | } | |
fed9ce22 OG |
542 | |
543 | static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) | |
544 | { | |
545 | int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); | |
546 | struct mlx5_flow_group *g; | |
547 | struct mlx5_priv *priv = &esw->dev->priv; | |
548 | u32 *flow_group_in; | |
549 | void *match_criteria, *misc; | |
550 | int err = 0; | |
551 | int nvports = priv->sriov.num_vfs + 2; | |
552 | ||
553 | flow_group_in = mlx5_vzalloc(inlen); | |
554 | if (!flow_group_in) | |
555 | return -ENOMEM; | |
556 | ||
557 | /* create vport rx group */ | |
558 | memset(flow_group_in, 0, inlen); | |
559 | MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, | |
560 | MLX5_MATCH_MISC_PARAMETERS); | |
561 | ||
562 | match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); | |
563 | misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters); | |
564 | MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); | |
565 | ||
566 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); | |
567 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1); | |
568 | ||
569 | g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in); | |
570 | ||
571 | if (IS_ERR(g)) { | |
572 | err = PTR_ERR(g); | |
573 | mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err); | |
574 | goto out; | |
575 | } | |
576 | ||
577 | esw->offloads.vport_rx_group = g; | |
578 | out: | |
579 | kfree(flow_group_in); | |
580 | return err; | |
581 | } | |
582 | ||
/* Destroy the vport rx match group created by esw_create_vport_rx_group(). */
static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
587 | ||
/* Add a rule to the offloads RX table steering traffic whose source vport
 * is @vport to TIR @tirn (the representor's receive queue).
 * Returns the rule handle or ERR_PTR() on failure.
 */
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* match only on the originating vport */
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					0, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}
feae9087 | 625 | |
c930a3ad OG |
626 | static int esw_offloads_start(struct mlx5_eswitch *esw) |
627 | { | |
6c419ba8 | 628 | int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; |
c930a3ad OG |
629 | |
630 | if (esw->mode != SRIOV_LEGACY) { | |
631 | esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n"); | |
632 | return -EINVAL; | |
633 | } | |
634 | ||
635 | mlx5_eswitch_disable_sriov(esw); | |
636 | err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); | |
6c419ba8 OG |
637 | if (err) { |
638 | esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err); | |
639 | err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); | |
640 | if (err1) | |
641 | esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err); | |
642 | } | |
c930a3ad OG |
643 | return err; |
644 | } | |
645 | ||
/* Bring up offloads-mode steering (FDB tables, RX table, vport rx group)
 * and load every valid registered vport representor.
 * Fully unwinds on any failure. Returns 0 or negative errno.
 */
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	err = esw_create_offloads_fdb_table(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;

		err = rep->load(esw, rep);
		if (err)
			goto err_reps;
	}
	return 0;

err_reps:
	/* unload only the reps loaded so far, in reverse order */
	for (vport--; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_table(esw);
	return err;
}
691 | ||
692 | static int esw_offloads_stop(struct mlx5_eswitch *esw) | |
693 | { | |
6c419ba8 | 694 | int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; |
c930a3ad OG |
695 | |
696 | mlx5_eswitch_disable_sriov(esw); | |
697 | err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); | |
6c419ba8 OG |
698 | if (err) { |
699 | esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err); | |
700 | err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); | |
701 | if (err1) | |
702 | esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err); | |
703 | } | |
c930a3ad OG |
704 | |
705 | return err; | |
706 | } | |
707 | ||
708 | void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports) | |
709 | { | |
cb67b832 HHZ |
710 | struct mlx5_eswitch_rep *rep; |
711 | int vport; | |
712 | ||
713 | for (vport = 0; vport < nvports; vport++) { | |
714 | rep = &esw->offloads.vport_reps[vport]; | |
715 | if (!rep->valid) | |
716 | continue; | |
717 | rep->unload(esw, rep); | |
718 | } | |
719 | ||
c930a3ad OG |
720 | esw_destroy_vport_rx_group(esw); |
721 | esw_destroy_offloads_table(esw); | |
722 | esw_destroy_offloads_fdb_table(esw); | |
723 | } | |
724 | ||
ef78618b | 725 | static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) |
c930a3ad OG |
726 | { |
727 | switch (mode) { | |
728 | case DEVLINK_ESWITCH_MODE_LEGACY: | |
729 | *mlx5_mode = SRIOV_LEGACY; | |
730 | break; | |
731 | case DEVLINK_ESWITCH_MODE_SWITCHDEV: | |
732 | *mlx5_mode = SRIOV_OFFLOADS; | |
733 | break; | |
734 | default: | |
735 | return -EINVAL; | |
736 | } | |
737 | ||
738 | return 0; | |
739 | } | |
740 | ||
ef78618b OG |
741 | static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode) |
742 | { | |
743 | switch (mlx5_mode) { | |
744 | case SRIOV_LEGACY: | |
745 | *mode = DEVLINK_ESWITCH_MODE_LEGACY; | |
746 | break; | |
747 | case SRIOV_OFFLOADS: | |
748 | *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; | |
749 | break; | |
750 | default: | |
751 | return -EINVAL; | |
752 | } | |
753 | ||
754 | return 0; | |
755 | } | |
756 | ||
feae9087 OG |
/* devlink op: switch the device eswitch between legacy and switchdev
 * modes. Rejected (-EOPNOTSUPP) when the device is not a vport group
 * manager or SR-IOV is disabled (mode == SRIOV_NONE).
 * Returns 0 or negative errno.
 */
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev;
	u16 cur_mlx5_mode, mlx5_mode = 0;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (cur_mlx5_mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	/* validates the requested mode */
	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	/* already in the requested mode - nothing to do */
	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}
785 | ||
786 | int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) | |
787 | { | |
c930a3ad OG |
788 | struct mlx5_core_dev *dev; |
789 | ||
790 | dev = devlink_priv(devlink); | |
791 | ||
792 | if (!MLX5_CAP_GEN(dev, vport_group_manager)) | |
793 | return -EOPNOTSUPP; | |
794 | ||
795 | if (dev->priv.eswitch->mode == SRIOV_NONE) | |
796 | return -EOPNOTSUPP; | |
797 | ||
ef78618b | 798 | return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); |
feae9087 | 799 | } |
127ea380 HHZ |
800 | |
801 | void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, | |
9deb2241 OG |
802 | int vport_index, |
803 | struct mlx5_eswitch_rep *__rep) | |
127ea380 HHZ |
804 | { |
805 | struct mlx5_esw_offload *offloads = &esw->offloads; | |
9deb2241 OG |
806 | struct mlx5_eswitch_rep *rep; |
807 | ||
808 | rep = &offloads->vport_reps[vport_index]; | |
127ea380 | 809 | |
bac9b6aa OG |
810 | memset(rep, 0, sizeof(*rep)); |
811 | ||
812 | rep->load = __rep->load; | |
813 | rep->unload = __rep->unload; | |
814 | rep->vport = __rep->vport; | |
815 | rep->priv_data = __rep->priv_data; | |
816 | ether_addr_copy(rep->hw_id, __rep->hw_id); | |
127ea380 | 817 | |
9deb2241 OG |
818 | INIT_LIST_HEAD(&rep->vport_sqs_list); |
819 | rep->valid = true; | |
127ea380 HHZ |
820 | } |
821 | ||
822 | void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, | |
9deb2241 | 823 | int vport_index) |
127ea380 HHZ |
824 | { |
825 | struct mlx5_esw_offload *offloads = &esw->offloads; | |
cb67b832 HHZ |
826 | struct mlx5_eswitch_rep *rep; |
827 | ||
9deb2241 | 828 | rep = &offloads->vport_reps[vport_index]; |
cb67b832 | 829 | |
9deb2241 | 830 | if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled) |
cb67b832 | 831 | rep->unload(esw, rep); |
127ea380 | 832 | |
9deb2241 | 833 | rep->valid = false; |
127ea380 | 834 | } |