/* drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c */
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"
#include "devlink.h"
#include "lag/lag.h"
#include "en/tc/post_meter.h"

#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)

#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
	xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF)

#define mlx5_esw_for_each_vf_rep(esw, index, rep)	\
	mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \
				       rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF)

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4

#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)

static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
	.flags = 0,
};

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	return xa_load(&esw->offloads.vport_reps, vport_num);
}

static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)
		return;

	if (attr->int_port) {
		spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port);

		return;
	}

	spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}

/* Strictly, only the upper 16 bits of reg c0 need to be cleared, but the
 * lower 16 bits are not used later in this flow either, so clear the whole
 * register for simplicity.
 */
void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
{
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		void *misc2;

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
			spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
	}
}

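/* Set a source port match on the rule. With vport metadata matching the
 * source is encoded in metadata reg_c_0 (misc_parameters_2); otherwise the
 * rule matches on source_port, extended with the owner vhca_id on
 * merged-eswitch devices.
 */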
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_attr *attr,
				  struct mlx5_eswitch *src_esw,
				  u16 vport)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	u32 metadata;
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		if (mlx5_esw_indir_table_decap_vport(attr))
			vport = mlx5_esw_indir_table_decap_vport(attr);

		if (!attr->chain && esw_attr && esw_attr->int_port)
			metadata =
				mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
		else
			metadata =
				mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(src_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}

static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_table *ft;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	ft = mlx5_esw_indir_table_get(esw, attr,
				      mlx5_esw_indir_table_decap_vport(attr), true);
	return PTR_ERR_OR_ZERO(ft);
}

static void
esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
			struct mlx5_flow_attr *attr)
{
	if (mlx5_esw_indir_table_decap_vport(attr))
		mlx5_esw_indir_table_put(esw,
					 mlx5_esw_indir_table_decap_vport(attr),
					 true);
}

static int
esw_setup_mtu_dest(struct mlx5_flow_destination *dest,
		   struct mlx5e_meter_attr *meter,
		   int i)
{
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_RANGE;
	dest[i].range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
	dest[i].range.min = 0;
	dest[i].range.max = meter->params.mtu;
	dest[i].range.hit_ft = mlx5e_post_meter_get_mtu_true_ft(meter->post_meter);
	dest[i].range.miss_ft = mlx5e_post_meter_get_mtu_false_ft(meter->post_meter);

	return 0;
}

static int
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
		       struct mlx5_flow_act *flow_act,
		       u32 sampler_id,
		       int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
	dest[i].sampler_id = sampler_id;

	return 0;
}

static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
		  struct mlx5_flow_act *flow_act,
		  struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr,
		  int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = attr->dest_ft;

	if (mlx5_esw_indir_table_decap_vport(attr))
		return esw_setup_decap_indir(esw, attr);
	return 0;
}

static void
esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_fs_chains *chains, int i)
{
	if (mlx5_chains_ignore_flow_level_supported(chains))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}

static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
			 struct mlx5_eswitch *esw, int i)
{
	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_eswitch_get_slow_fdb(esw);
}

static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_fs_chains *chains,
		     u32 chain, u32 prio, u32 level,
		     int i)
{
	struct mlx5_flow_table *ft;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	ft = mlx5_chains_get_table(chains, chain, prio, level);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = ft;
	return 0;
}

static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
				     int from, int to)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int i;

	for (i = from; i < to; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			mlx5_chains_put_table(chains, 0, 1, 0);
		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						     esw_attr->dests[i].mdev))
			mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
						 false);
}

static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			return true;
	return false;
}

static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
				 int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int err;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	/* flow steering cannot handle more than one dest with the same ft
	 * in a single flow
	 */
	if (esw_attr->out_count - esw_attr->split_count > 1)
		return -EOPNOTSUPP;

	err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
	if (err)
		return err;

	if (esw_attr->dests[esw_attr->split_count].pkt_reformat) {
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat;
	}
	(*i)++;

	return 0;
}

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
					       struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
}

static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	bool result = false;
	int i;

	/* An indirect table is supported only for flows whose in_port is the
	 * uplink and whose destinations are vports on the same eswitch as the
	 * uplink; return false if at least one destination doesn't meet this
	 * criterion.
	 */
	for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
		if (esw_attr->dests[i].rep &&
		    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						esw_attr->dests[i].mdev)) {
			result = true;
		} else {
			result = false;
			break;
		}
	}
	return result;
}

static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
		      struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      bool ignore_flow_lvl,
		      int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		if (ignore_flow_lvl)
			flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
						       esw_attr->dests[j].rep->vport, false);
		if (IS_ERR(dest[*i].ft)) {
			err = PTR_ERR(dest[*i].ft);
			goto err_indir_tbl_get;
		}
	}

	if (mlx5_esw_indir_table_decap_vport(attr)) {
		err = esw_setup_decap_indir(esw, attr);
		if (err)
			goto err_indir_tbl_get;
	}

	return 0;

err_indir_tbl_get:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
	esw_cleanup_decap_indir(esw, attr);
}

static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
	mlx5_chains_put_table(chains, chain, prio, level);
}

static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		     int attr_idx, int dest_idx, bool pkt_reformat)
{
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		dest[dest_idx].vport.vhca_id =
			MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
		    mlx5_lag_mpesw_is_activated(esw->dev))
			dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	}
	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
		if (pkt_reformat) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
		}
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
	}
}

static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		      int i)
{
	int j;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
		esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
	return i;
}

static bool
esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
	       mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	       MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
}

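/* Build the destination array for a rule from its attributes. Slow path,
 * sampler, accept, MTU-range and indirect-table destinations are mutually
 * exclusive; in the default case the post-split vport destinations are
 * filled in first, optionally followed by a forward to a flow table or
 * chain.
 */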
static int
esw_setup_dests(struct mlx5_flow_destination *dest,
		struct mlx5_flow_act *flow_act,
		struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_spec *spec,
		int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int err = 0;

	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
	    esw_src_port_rewrite_supported(esw))
		attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
		esw_setup_slow_path_dest(dest, flow_act, esw, *i);
		(*i)++;
		goto out;
	}

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
		esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
		esw_setup_accept_dest(dest, flow_act, chains, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ATTR_FLAG_MTU) {
		err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
		(*i)++;
	} else if (esw_is_indir_table(esw, attr)) {
		err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
	} else {
		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);

		if (attr->dest_ft) {
			err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
			(*i)++;
		} else if (attr->dest_chain) {
			err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
						   1, 0, *i);
			(*i)++;
		}
	}

out:
	return err;
}

static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	if (attr->dest_ft) {
		esw_cleanup_decap_indir(esw, attr);
	} else if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
		if (attr->dest_chain)
			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
		else if (esw_is_indir_table(esw, attr))
			esw_cleanup_indir_table(esw, attr);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			esw_cleanup_chain_src_port_rewrite(esw, attr);
	}
}

static void
esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
{
	struct mlx5e_flow_meter_handle *meter;

	meter = attr->meter_attr.meter;
	flow_act->exe_aso.type = attr->exe_aso_type;
	flow_act->exe_aso.object_id = meter->obj_id;
	flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
	flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
	/* use metadata reg 5 for packet color */
	flow_act->exe_aso.return_reg_id = 5;
}

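/* Add an offloaded rule to the FDB. Split (mirror) rules are installed in a
 * per-vport table; otherwise the rule goes into the chain/prio table (or a
 * caller-provided table) and carries the source port match itself. A
 * termination table is interposed when mlx5_eswitch_termtbl_required()
 * indicates one is needed.
 */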
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = !!(esw_attr->split_count);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return ERR_PTR(-EOPNOTSUPP);

	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
	if (!dest)
		return ERR_PTR(-ENOMEM);

	flow_act.action = attr->action;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
		flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
		flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
			flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
			flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
		}
	}

	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int err;

		err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_goto_table;
		}
	}

	if (esw_attr->decap_pkt_reformat)
		flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
	    attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
		esw_setup_meter(attr, &flow_act);

	if (split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;

		fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_chains_get_table(chains, attr->chain,
						    attr->prio, 0);
		else
			fdb = attr->ft;

		if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr,
							  esw_attr->in_mdev->priv.eswitch,
							  esw_attr->in_rep->vport);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (!i) {
		kfree(dest);
		dest = NULL;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	kfree(dest);
	return rule;

err_add_rule:
	if (split)
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
	else if (attr->chain || attr->prio)
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
	esw_cleanup_dests(esw, attr);
err_create_goto_table:
	kfree(dest);
	return rule;
}

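/* Add the first stage of a mirror rule: match in the fast FDB, forward to
 * the mirror (split) destinations and then jump to the per-vport forward
 * table, where the second-stage rule added by
 * mlx5_eswitch_add_offloaded_rule() applies the remaining actions.
 */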
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i, err = 0;

	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
	if (!dest)
		return ERR_PTR(-ENOMEM);

	fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_attr.chain = attr->chain;
	fwd_attr.prio = attr->prio;
	fwd_attr.vport = esw_attr->in_rep->vport;
	fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < esw_attr->split_count; i++) {
		if (esw_is_indir_table(esw, attr))
			err = esw_setup_indir_table(dest, &flow_act, esw, attr, false, &i);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
							       &i);
		else
			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);

		if (err) {
			rule = ERR_PTR(err);
			goto err_chain_src_rewrite;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr,
					  esw_attr->in_mdev->priv.eswitch,
					  esw_attr->in_rep->vport);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule)) {
		i = esw_attr->split_count;
		goto err_chain_src_rewrite;
	}

	atomic64_inc(&esw->offloads.num_flows);

	kfree(dest);
	return rule;
err_chain_src_rewrite:
	esw_put_dest_tables_loop(esw, attr, 0, i);
	mlx5_esw_vporttbl_put(esw, &fwd_attr);
err_get_fwd:
	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
	kfree(dest);
	return rule;
}

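/* Common teardown for both rule flavors: delete the hardware rule, release
 * any termination tables and return the table references taken at add time
 * (per-vport forward table and/or chain table), mirroring the add paths
 * above.
 */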
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;
	int i;

	mlx5_del_flow_rules(rule);

	if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	}

	if (fwd_rule) {
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
	} else {
		if (split)
			mlx5_esw_vporttbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_cleanup_dests(esw, attr);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

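/* Send-to-vport rules live in the slow path FDB and match traffic that the
 * eswitch manager transmits on a representor SQ (source_sqn + source_port),
 * forwarding it directly to the represented vport.
 */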
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch *from_esw,
				    struct mlx5_eswitch_rep *rep,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, from_esw->manager_vport);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(from_esw->dev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = rep->vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	if (rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
			 PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
{
	if (rule)
		mlx5_del_flow_rules(rule);
}

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
	dest.vport.num = vport_num;

	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
			 vport_num, PTR_ERR(flow_rule));

	kvfree(spec);
	return flow_rule;
}

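/* reg_c_0 carries the source vport metadata and reg_c_1 the chain/tunnel
 * mapping. The helper below does a read-modify-write of the esw vport
 * context's fdb_to_vport_reg_c_id field so these registers survive the
 * FDB-to-vport transition, which metadata matching and reg_c_1 based
 * restore rely on.
 */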
static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

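/* On merged-eswitch setups, install one rule per peer PF/ECPF/VF vport so
 * traffic sourced from a peer vport that misses in this FDB is forwarded to
 * the peer's eswitch manager vport rather than dropped.
 */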
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_vport *vport;
	unsigned long i;
	void *misc;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[vport->index] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[vport->index] = flow;
	}

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, vport->vport);

		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[vport->index] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		if (!flows[vport->index])
			continue;
		mlx5_del_flow_rules(flows[vport->index]);
	}
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	struct mlx5_vport *vport;
	unsigned long i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[vport->index]);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
	kvfree(flows);
}

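/* FDB catch-all: two match-all miss flows keyed only on the multicast bit of
 * the destination MAC (see MLX5_ESW_MISS_FLOWS), one for unicast and one for
 * multicast, both forwarding to the eswitch manager vport.
 */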
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

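/* Restore rules map a reg_c_0 user-data tag back to software state: match
 * the tag, copy it out via the restore modify-header, expose it as the flow
 * tag and forward to the offloads table so the slow path can continue
 * processing the packet.
 */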
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		mlx5_esw_vporttbl_put(esw, &attr);
	}
}

static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		fdb = mlx5_esw_vporttbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

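/* The fs_chains layer backs TC chain/prio offload. The flag helper below
 * checks which capabilities allow real chains and priorities;
 * esw_chains_create() then opens the always-present tc end table, the
 * level-0 fast path table and, when prios aren't supported, the per-vport
 * tables used for split rules.
 */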
#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
		*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

	if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
		/* Disabled when ttl workaround is needed, e.g.
		 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
		 */
		esw_warn(dev,
			 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	} else {
		*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_info(dev, "Supported tc chains and prios offload\n");
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *nf_ft, *ft;
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	u32 fdb_max;
	int err;

	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_init_chains_offload_flags(esw, &attr.flags);
	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
	attr.max_ft_sz = fdb_max;
	attr.max_grp_num = esw->params.large_group_num;
	attr.default_ft = miss_fdb;
	attr.mapping = esw->offloads.reg_c0_obj_pool;

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains)) {
		err = PTR_ERR(chains);
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		return err;
	}

	esw->fdb_table.offloads.esw_chains_priv = chains;

	/* Create tc_end_ft which is the always created ft chain */
	nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
				      1, 0);
	if (IS_ERR(nf_ft)) {
		err = PTR_ERR(nf_ft);
		goto nf_ft_err;
	}

	/* Always open the root for fast path */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto level_0_err;
	}

	/* Open level 1 for split fdb rules now if prios isn't supported */
	if (!mlx5_chains_prios_supported(chains)) {
		err = esw_vport_tbl_get(esw);
		if (err)
			goto level_1_err;
	}

	mlx5_chains_set_end_ft(chains, nf_ft);

	return 0;

level_1_err:
	mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
	mlx5_chains_destroy(chains);
	esw->fdb_table.offloads.esw_chains_priv = NULL;

	return err;
}

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		esw_vport_tbl_put(esw);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);
}

#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif

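/* The following helpers carve the slow path FDB into its flow groups:
 * send-to-vport (SQN based), send-to-vport metadata, peer miss and the
 * dmac-based miss group. Each advances *ix, the running flow index, so the
 * group layout matches the table_size computed in
 * esw_create_offloads_fdb_tables().
 */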
static int
esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
			       u32 *flow_group_in,
			       int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int count, err = 0;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	/* See comment at table_size calculation */
	count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1);
	*ix += count;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

out:
	return err;
}

static int
esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
				    struct mlx5_flow_table *fdb,
				    u32 *flow_group_in,
				    int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;

	if (!esw_src_port_rewrite_supported(esw))
		return 0;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in,
		 end_flow_index, *ix + esw->total_vports - 1);
	*ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev,
			 "Failed to create send-to-vport meta flow group err(%d)\n", err);
		goto send_vport_meta_err;
	}
	esw->fdb_table.offloads.send_to_vport_meta_grp = g;

	return 0;

send_vport_meta_err:
	return err;
}

static int
esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
			       u32 *flow_group_in,
			       int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return 0;

	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 *ix + esw->total_vports - 1);
	*ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

out:
	return err;
}

1595static int
1596esw_create_miss_group(struct mlx5_eswitch *esw,
1597 struct mlx5_flow_table *fdb,
1598 u32 *flow_group_in,
1599 int *ix)
1600{
1601 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1602 struct mlx5_flow_group *g;
1603 void *match_criteria;
1604 int err = 0;
1605 u8 *dmac;
1606
1607 memset(flow_group_in, 0, inlen);
1608
1609 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1610 MLX5_MATCH_OUTER_HEADERS);
1611 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1612 match_criteria);
1613 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
1614 outer_headers.dmac_47_16);
1615 dmac[0] = 0x01;
1616
1617 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
1618 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
1619 *ix + MLX5_ESW_MISS_FLOWS);
1620
1621 g = mlx5_create_flow_group(fdb, flow_group_in);
1622 if (IS_ERR(g)) {
1623 err = PTR_ERR(g);
1624 esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);
1625 goto miss_err;
1626 }
1627 esw->fdb_table.offloads.miss_grp = g;
1628
1629 err = esw_add_fdb_miss_rule(esw);
1630 if (err)
1631 goto miss_rule_err;
1632
1633 return 0;
1634
1635miss_rule_err:
1636 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1637miss_err:
1638 return err;
1639}
1640
1641static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
1642{
1643 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1644 struct mlx5_flow_table_attr ft_attr = {};
1645 struct mlx5_core_dev *dev = esw->dev;
1646 struct mlx5_flow_namespace *root_ns;
1647 struct mlx5_flow_table *fdb = NULL;
1648 int table_size, ix = 0, err = 0;
1649 u32 flags = 0, *flow_group_in;
1650
1651 esw_debug(esw->dev, "Create offloads FDB Tables\n");
1652
1653 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1654 if (!flow_group_in)
1655 return -ENOMEM;
1656
1657 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
1658 if (!root_ns) {
1659 esw_warn(dev, "Failed to get FDB flow namespace\n");
1660 err = -EOPNOTSUPP;
1661 goto ns_err;
1662 }
1663 esw->fdb_table.offloads.ns = root_ns;
1664 err = mlx5_flow_namespace_set_mode(root_ns,
1665 esw->dev->priv.steering->mode);
1666 if (err) {
1667 esw_warn(dev, "Failed to set FDB namespace steering mode\n");
1668 goto ns_err;
1669 }
1670
1671 /* To be strictly correct:
1672 * MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ)
1673 * should be:
1674 * esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
1675 * peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ
1676 * but this is not possible since the peer device might not be in
1677 * switchdev mode. We rely on the fact that by default FW sets max
1678 * vfs and max sfs to the same value on both devices. If this changes
1679 * in the future, note that the peer miss group should also be sized
1680 * by the peer's total vports (currently it also uses esw->total_vports).
1681 */
1682 table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
1683 esw->total_vports * 2 + MLX5_ESW_MISS_FLOWS;
1684
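	/* Sizing note (comment added for clarity): besides the send-to-vport
	 * entries counted above, the table holds one send-to-vport meta group
	 * and one peer miss group of esw->total_vports entries each, plus the
	 * MLX5_ESW_MISS_FLOWS unicast/multicast miss entries.
	 */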
1685 /* create the slow path fdb with encap set, so further table instances
1686 * can be created at run time while VFs are probed if the FW allows that.
1687 */
1688 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
1689 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
1690 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
1691
1692 ft_attr.flags = flags;
1693 ft_attr.max_fte = table_size;
1694 ft_attr.prio = FDB_SLOW_PATH;
1695
1696 fdb = mlx5_create_flow_table(root_ns, &ft_attr);
1697 if (IS_ERR(fdb)) {
1698 err = PTR_ERR(fdb);
1699 esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
1700 goto slow_fdb_err;
1701 }
1702 esw->fdb_table.offloads.slow_fdb = fdb;
1703
1704 /* Create empty TC-miss managed table. This allows plugging in the
1705 * following priorities without directly exposing their level 0 table
1706 * to eswitch_offloads, passing it as miss_fdb to the following call
1707 * to esw_chains_create().
1708 */
1709 memset(&ft_attr, 0, sizeof(ft_attr));
1710 ft_attr.prio = FDB_TC_MISS;
1711 esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
1712 if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
1713 err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
1714 esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
1715 goto tc_miss_table_err;
1716 }
1717
1718 err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
1719 if (err) {
1720 esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
1721 goto fdb_chains_err;
1722 }
1723
1724 err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
1725 if (err)
1726 goto send_vport_err;
1727
1728 err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
1729 if (err)
1730 goto send_vport_meta_err;
1731
1732 err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);
1733 if (err)
1734 goto peer_miss_err;
1735
1736 err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);
1737 if (err)
1738 goto miss_err;
1739
1740 kvfree(flow_group_in);
1741 return 0;
1742
1743miss_err:
1744 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
1745 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1746peer_miss_err:
1747 if (esw->fdb_table.offloads.send_to_vport_meta_grp)
1748 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
1749send_vport_meta_err:
1750 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1751send_vport_err:
1752 esw_chains_destroy(esw, esw_chains(esw));
1753fdb_chains_err:
1754 mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
1755tc_miss_table_err:
1756 mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
1757slow_fdb_err:
1758 /* Holds true only as long as DMFS is the default */
1759 mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
1760ns_err:
1761 kvfree(flow_group_in);
1762 return err;
1763}
1764
1765static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
1766{
1767 if (!mlx5_eswitch_get_slow_fdb(esw))
1768 return;
1769
1770 esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
1771 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
1772 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
1773 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1774 if (esw->fdb_table.offloads.send_to_vport_meta_grp)
1775 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
1776 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
1777 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1778 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1779
1780 esw_chains_destroy(esw, esw_chains(esw));
1781
1782 mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
1783 mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
1784 /* Holds true only as long as DMFS is the default */
1785 mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
1786 MLX5_FLOW_STEERING_MODE_DMFS);
1787 atomic64_set(&esw->user_count, 0);
1788}
1789
1790static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
1791{
1792 int nvports;
1793
1794 nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
1795 if (mlx5e_tc_int_port_supported(esw))
1796 nvports += MLX5E_TC_MAX_INT_PORT_NUM;
1797
1798 return nvports;
1799}
1800
1801static int esw_create_offloads_table(struct mlx5_eswitch *esw)
1802{
1803 struct mlx5_flow_table_attr ft_attr = {};
1804 struct mlx5_core_dev *dev = esw->dev;
1805 struct mlx5_flow_table *ft_offloads;
1806 struct mlx5_flow_namespace *ns;
1807 int err = 0;
1808
1809 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1810 if (!ns) {
1811 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
1812 return -EOPNOTSUPP;
1813 }
1814
1815 ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
1816 MLX5_ESW_FT_OFFLOADS_DROP_RULE;
1817 ft_attr.prio = 1;
1818
1819 ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
1820 if (IS_ERR(ft_offloads)) {
1821 err = PTR_ERR(ft_offloads);
1822 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
1823 return err;
1824 }
1825
1826 esw->offloads.ft_offloads = ft_offloads;
1827 return 0;
1828}
1829
1830static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
1831{
1832 struct mlx5_esw_offload *offloads = &esw->offloads;
1833
1834 mlx5_destroy_flow_table(offloads->ft_offloads);
1835}
1836
1837static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
1838{
1839 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1840 struct mlx5_flow_group *g;
1841 u32 *flow_group_in;
1842 int nvports;
1843 int err = 0;
1844
1845 nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
1846 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1847 if (!flow_group_in)
1848 return -ENOMEM;
1849
1850 /* create vport rx group */
1851 esw_set_flow_group_source_port(esw, flow_group_in);
1852
1853 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1854 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
1855
1856 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
1857
1858 if (IS_ERR(g)) {
1859 err = PTR_ERR(g);
1860 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
1861 goto out;
1862 }
1863
1864 esw->offloads.vport_rx_group = g;
1865out:
1866 kvfree(flow_group_in);
1867 return err;
1868}
1869
1870static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
1871{
1872 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
1873}
1874
1875static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
1876{
1877 /* ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
1878 * for the drop rule, which is placed at the end of the table.
1879 * So return the total of vport and int_port as rule index.
1880 */
1881 return esw_get_nr_ft_offloads_steering_src_ports(esw);
1882}
1883
1884static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
1885{
1886 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1887 struct mlx5_flow_group *g;
1888 u32 *flow_group_in;
1889 int flow_index;
1890 int err = 0;
1891
1892 flow_index = esw_create_vport_rx_drop_rule_index(esw);
1893
1894 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1895 if (!flow_group_in)
1896 return -ENOMEM;
1897
1898 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
1899 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
1900
1901 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
1902
1903 if (IS_ERR(g)) {
1904 err = PTR_ERR(g);
1905 mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
1906 goto out;
1907 }
1908
1909 esw->offloads.vport_rx_drop_group = g;
1910out:
1911 kvfree(flow_group_in);
1912 return err;
1913}
1914
1915static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
1916{
1917 if (esw->offloads.vport_rx_drop_group)
1918 mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
1919}
1920
1921struct mlx5_flow_handle *
1922mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
1923 struct mlx5_flow_destination *dest)
1924{
1925 struct mlx5_flow_act flow_act = {0};
1926 struct mlx5_flow_handle *flow_rule;
1927 struct mlx5_flow_spec *spec;
1928 void *misc;
1929
1930 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1931 if (!spec) {
1932 flow_rule = ERR_PTR(-ENOMEM);
1933 goto out;
1934 }
1935
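	/* Comment added for clarity: the source vport is matched either by
	 * metadata reg_c_0 (when vport match metadata is enabled) or by the
	 * misc source_port field.
	 */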
1936 if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
1937 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
1938 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1939 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));
1940
1941 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
1942 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
1943 mlx5_eswitch_get_vport_metadata_mask());
1944
1945 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
1946 } else {
1947 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
1948 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
1949
1950 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
1951 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1952
1953 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
1954 }
1955
1956 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1957 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
1958 &flow_act, dest, 1);
1959 if (IS_ERR(flow_rule)) {
1960 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
1961 goto out;
1962 }
1963
1964out:
1965 kvfree(spec);
1966 return flow_rule;
1967}
1968
1969static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
1970{
1971 struct mlx5_flow_act flow_act = {};
1972 struct mlx5_flow_handle *flow_rule;
1973
1974 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
1975 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
1976 &flow_act, NULL, 0);
1977 if (IS_ERR(flow_rule)) {
1978 esw_warn(esw->dev,
1979 "fs offloads: Failed to add vport rx drop rule err %ld\n",
1980 PTR_ERR(flow_rule));
1981 return PTR_ERR(flow_rule);
1982 }
1983
1984 esw->offloads.vport_rx_drop_rule = flow_rule;
1985
1986 return 0;
1987}
1988
1989static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
1990{
1991 if (esw->offloads.vport_rx_drop_rule)
1992 mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
1993}
1994
1995static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
1996{
1997 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
1998 struct mlx5_core_dev *dev = esw->dev;
1999 struct mlx5_vport *vport;
2000 unsigned long i;
2001
2002 if (!MLX5_CAP_GEN(dev, vport_group_manager))
2003 return -EOPNOTSUPP;
2004
2005 if (!mlx5_esw_is_fdb_created(esw))
2006 return -EOPNOTSUPP;
2007
2008 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
2009 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
2010 mlx5_mode = MLX5_INLINE_MODE_NONE;
2011 goto out;
2012 case MLX5_CAP_INLINE_MODE_L2:
2013 mlx5_mode = MLX5_INLINE_MODE_L2;
2014 goto out;
2015 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
2016 goto query_vports;
2017 }
2018
2019query_vports:
2020 mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
2021 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
2022 mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
2023 if (prev_mlx5_mode != mlx5_mode)
2024 return -EINVAL;
2025 prev_mlx5_mode = mlx5_mode;
2026 }
2027
2028out:
2029 *mode = mlx5_mode;
2030 return 0;
2031}
2032
2033static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
2034{
2035 struct mlx5_esw_offload *offloads = &esw->offloads;
2036
2037 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
2038 return;
2039
2040 mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
2041 mlx5_destroy_flow_group(offloads->restore_group);
2042 mlx5_destroy_flow_table(offloads->ft_offloads_restore);
2043}
2044
2045static int esw_create_restore_table(struct mlx5_eswitch *esw)
2046{
2047 u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
2048 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2049 struct mlx5_flow_table_attr ft_attr = {};
2050 struct mlx5_core_dev *dev = esw->dev;
2051 struct mlx5_flow_namespace *ns;
2052 struct mlx5_modify_hdr *mod_hdr;
2053 void *match_criteria, *misc;
2054 struct mlx5_flow_table *ft;
2055 struct mlx5_flow_group *g;
2056 u32 *flow_group_in;
2057 int err = 0;
2058
2059 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
2060 return 0;
2061
2062 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
2063 if (!ns) {
2064 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
2065 return -EOPNOTSUPP;
2066 }
2067
2068 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2069 if (!flow_group_in) {
2070 err = -ENOMEM;
2071 goto out_free;
2072 }
2073
2074 ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
2075 ft = mlx5_create_flow_table(ns, &ft_attr);
2076 if (IS_ERR(ft)) {
2077 err = PTR_ERR(ft);
2078 esw_warn(esw->dev, "Failed to create restore table, err %d\n",
2079 err);
2080 goto out_free;
2081 }
2082
2083 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
2084 match_criteria);
2085 misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
2086 misc_parameters_2);
2087
2088 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
2089 ESW_REG_C0_USER_DATA_METADATA_MASK);
2090 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
2091 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
2092 ft_attr.max_fte - 1);
2093 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
2094 MLX5_MATCH_MISC_PARAMETERS_2);
2095 g = mlx5_create_flow_group(ft, flow_group_in);
2096 if (IS_ERR(g)) {
2097 err = PTR_ERR(g);
2098 esw_warn(dev, "Failed to create restore flow group, err: %d\n",
2099 err);
2100 goto err_group;
2101 }
2102
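	/* Comment added for clarity: the modify header below copies metadata
	 * reg_c_1 into reg_b for use by the restore rules.
	 */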
2103 MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
2104 MLX5_SET(copy_action_in, modact, src_field,
2105 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
2106 MLX5_SET(copy_action_in, modact, dst_field,
2107 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
2108 mod_hdr = mlx5_modify_header_alloc(esw->dev,
2109 MLX5_FLOW_NAMESPACE_KERNEL, 1,
2110 modact);
2111 if (IS_ERR(mod_hdr)) {
2112 err = PTR_ERR(mod_hdr);
2113 esw_warn(dev, "Failed to create restore mod header, err: %d\n",
2114 err);
2115 goto err_mod_hdr;
2116 }
2117
2118 esw->offloads.ft_offloads_restore = ft;
2119 esw->offloads.restore_group = g;
2120 esw->offloads.restore_copy_hdr_id = mod_hdr;
2121
2122 kvfree(flow_group_in);
2123
2124 return 0;
2125
2126err_mod_hdr:
2127 mlx5_destroy_flow_group(g);
2128err_group:
2129 mlx5_destroy_flow_table(ft);
2130out_free:
2131 kvfree(flow_group_in);
2132
2133 return err;
2134}
2135
2136static int esw_offloads_start(struct mlx5_eswitch *esw,
2137 struct netlink_ext_ack *extack)
2138{
2139 int err;
2140
2141 esw->mode = MLX5_ESWITCH_OFFLOADS;
2142 err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
2143 if (err) {
2144 NL_SET_ERR_MSG_MOD(extack,
2145 "Failed setting eswitch to offloads");
2146 esw->mode = MLX5_ESWITCH_LEGACY;
2147 mlx5_rescan_drivers(esw->dev);
2148 }
2149 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
2150 if (mlx5_eswitch_inline_mode_get(esw,
2151 &esw->offloads.inline_mode)) {
2152 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
2153 NL_SET_ERR_MSG_MOD(extack,
2154 "Inline mode is different between vports");
2155 }
2156 }
2157 return err;
2158}
2159
2160static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw,
2161 struct mlx5_eswitch_rep *rep,
2162 xa_mark_t mark)
2163{
2164 bool mark_set;
2165
2166 /* Copy the mark from vport to its rep */
2167 mark_set = xa_get_mark(&esw->vports, rep->vport, mark);
2168 if (mark_set)
2169 xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark);
2170}
2171
2172static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
2173{
2174 struct mlx5_eswitch_rep *rep;
2175 int rep_type;
2176 int err;
2177
2178 rep = kzalloc(sizeof(*rep), GFP_KERNEL);
2179 if (!rep)
2180 return -ENOMEM;
2181
2182 rep->vport = vport->vport;
2183 rep->vport_index = vport->index;
2184 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
2185 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
2186
2187 err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
2188 if (err)
2189 goto insert_err;
2190
2191 mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN);
2192 mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_VF);
2193 mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF);
2194 return 0;
2195
2196insert_err:
2197 kfree(rep);
2198 return err;
2199}
2200
2201static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
2202 struct mlx5_eswitch_rep *rep)
2203{
2204 xa_erase(&esw->offloads.vport_reps, rep->vport);
2205 kfree(rep);
2206}
2207
2208static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
2209{
2210 struct mlx5_eswitch_rep *rep;
2211 unsigned long i;
2212
2213 mlx5_esw_for_each_rep(esw, i, rep)
2214 mlx5_esw_offloads_rep_cleanup(esw, rep);
2215 xa_destroy(&esw->offloads.vport_reps);
2216}
2217
d2a651ef 2218static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
47dd7e60
PP
2219{
2220 struct mlx5_vport *vport;
2221 unsigned long i;
2222 int err;
2223
2224 xa_init(&esw->offloads.vport_reps);
2225
2226 mlx5_esw_for_each_vport(esw, i, vport) {
2227 err = mlx5_esw_offloads_rep_init(esw, vport);
2228 if (err)
2229 goto err;
2230 }
2231 return 0;
2232
2233err:
2234 esw_offloads_cleanup_reps(esw);
2235 return err;
2236}
2237
2238static int esw_port_metadata_set(struct devlink *devlink, u32 id,
2239 struct devlink_param_gset_ctx *ctx)
2240{
2241 struct mlx5_core_dev *dev = devlink_priv(devlink);
2242 struct mlx5_eswitch *esw = dev->priv.eswitch;
2243 int err = 0;
2244
2245 down_write(&esw->mode_lock);
2246 if (mlx5_esw_is_fdb_created(esw)) {
2247 err = -EBUSY;
2248 goto done;
2249 }
2250 if (!mlx5_esw_vport_match_metadata_supported(esw)) {
2251 err = -EOPNOTSUPP;
2252 goto done;
2253 }
2254 if (ctx->val.vbool)
2255 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
2256 else
2257 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
2258done:
2259 up_write(&esw->mode_lock);
2260 return err;
2261}
2262
2263static int esw_port_metadata_get(struct devlink *devlink, u32 id,
2264 struct devlink_param_gset_ctx *ctx)
2265{
2266 struct mlx5_core_dev *dev = devlink_priv(devlink);
2267
2268 ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
2269 return 0;
2270}
2271
2272static int esw_port_metadata_validate(struct devlink *devlink, u32 id,
2273 union devlink_param_value val,
2274 struct netlink_ext_ack *extack)
2275{
2276 struct mlx5_core_dev *dev = devlink_priv(devlink);
2277 u8 esw_mode;
2278
2279 esw_mode = mlx5_eswitch_mode(dev);
2280 if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
2281 NL_SET_ERR_MSG_MOD(extack,
2282 "E-Switch must either disabled or non switchdev mode");
2283 return -EBUSY;
2284 }
2285 return 0;
2286}
2287
2288static const struct devlink_param esw_devlink_params[] = {
2289 DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
2290 "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
2291 BIT(DEVLINK_PARAM_CMODE_RUNTIME),
2292 esw_port_metadata_get,
2293 esw_port_metadata_set,
2294 esw_port_metadata_validate),
2295};
2296
2297int esw_offloads_init(struct mlx5_eswitch *esw)
2298{
2299 int err;
2300
2301 err = esw_offloads_init_reps(esw);
2302 if (err)
2303 return err;
2304
2305 err = devl_params_register(priv_to_devlink(esw->dev),
2306 esw_devlink_params,
2307 ARRAY_SIZE(esw_devlink_params));
2308 if (err)
2309 goto err_params;
2310
2311 return 0;
2312
2313err_params:
2314 esw_offloads_cleanup_reps(esw);
2315 return err;
2316}
2317
2318void esw_offloads_cleanup(struct mlx5_eswitch *esw)
2319{
2320 devl_params_unregister(priv_to_devlink(esw->dev),
2321 esw_devlink_params,
2322 ARRAY_SIZE(esw_devlink_params));
2323 esw_offloads_cleanup_reps(esw);
2324}
2325
2326static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
2327 struct mlx5_eswitch_rep *rep, u8 rep_type)
2328{
2329 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
2330 REP_LOADED, REP_REGISTERED) == REP_LOADED)
2331 esw->offloads.rep_ops[rep_type]->unload(rep);
2332}
2333
2334static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
2335{
2336 struct mlx5_eswitch_rep *rep;
2337 unsigned long i;
2338
2339 mlx5_esw_for_each_sf_rep(esw, i, rep)
2340 __esw_offloads_unload_rep(esw, rep, rep_type);
2341}
2342
2343static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
2344{
2345 struct mlx5_eswitch_rep *rep;
2346 unsigned long i;
2347
2348 __unload_reps_sf_vport(esw, rep_type);
2349
2350 mlx5_esw_for_each_vf_rep(esw, i, rep)
2351 __esw_offloads_unload_rep(esw, rep, rep_type);
2352
2353 if (mlx5_ecpf_vport_exists(esw->dev)) {
2354 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
2355 __esw_offloads_unload_rep(esw, rep, rep_type);
2356 }
2357
2358 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
2359 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
2360 __esw_offloads_unload_rep(esw, rep, rep_type);
2361 }
2362
2363 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
2364 __esw_offloads_unload_rep(esw, rep, rep_type);
2365}
2366
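/* Comment added for clarity: each rep type is transitioned
 * REP_REGISTERED -> REP_LOADED via cmpxchg; on failure, the types loaded
 * so far are unloaded again in reverse order.
 */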
2367int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
2368{
2369 struct mlx5_eswitch_rep *rep;
2370 int rep_type;
2371 int err;
2372
2373 rep = mlx5_eswitch_get_rep(esw, vport_num);
2374 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
2375 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
2376 REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
2377 err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
2378 if (err)
2379 goto err_reps;
2380 }
2381
2382 return 0;
2383
2384err_reps:
2385 atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
2386 for (--rep_type; rep_type >= 0; rep_type--)
2387 __esw_offloads_unload_rep(esw, rep, rep_type);
2388 return err;
2389}
2390
2391void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
2392{
2393 struct mlx5_eswitch_rep *rep;
2394 int rep_type;
2395
2396 rep = mlx5_eswitch_get_rep(esw, vport_num);
2397 for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
2398 __esw_offloads_unload_rep(esw, rep, rep_type);
2399}
2400
2401int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
2402{
2403 int err;
2404
2405 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2406 return 0;
2407
2408 if (vport_num != MLX5_VPORT_UPLINK) {
2409 err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
2410 if (err)
2411 return err;
2412 }
2413
2414 err = mlx5_esw_offloads_rep_load(esw, vport_num);
2415 if (err)
2416 goto load_err;
2417 return err;
2418
2419load_err:
2420 if (vport_num != MLX5_VPORT_UPLINK)
2421 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
2422 return err;
2423}
2424
2425void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
2426{
2427 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2428 return;
2429
2430 mlx5_esw_offloads_rep_unload(esw, vport_num);
2431
2432 if (vport_num != MLX5_VPORT_UPLINK)
2433 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
2434}
2435
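/* Comment added for clarity: point the slave eswitch's FDB root at the
 * master's root flow table, or restore the slave's own root when master
 * is NULL.
 */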
2436static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
2437 struct mlx5_core_dev *slave)
2438{
2439 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
2440 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
2441 struct mlx5_flow_root_namespace *root;
2442 struct mlx5_flow_namespace *ns;
2443 int err;
2444
2445 MLX5_SET(set_flow_table_root_in, in, opcode,
2446 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
2447 MLX5_SET(set_flow_table_root_in, in, table_type,
2448 FS_FT_FDB);
2449
2450 if (master) {
2451 ns = mlx5_get_flow_namespace(master,
2452 MLX5_FLOW_NAMESPACE_FDB);
2453 root = find_root(&ns->node);
2454 mutex_lock(&root->chain_lock);
2455 MLX5_SET(set_flow_table_root_in, in,
2456 table_eswitch_owner_vhca_id_valid, 1);
2457 MLX5_SET(set_flow_table_root_in, in,
2458 table_eswitch_owner_vhca_id,
2459 MLX5_CAP_GEN(master, vhca_id));
2460 MLX5_SET(set_flow_table_root_in, in, table_id,
2461 root->root_ft->id);
2462 } else {
2463 ns = mlx5_get_flow_namespace(slave,
2464 MLX5_FLOW_NAMESPACE_FDB);
2465 root = find_root(&ns->node);
2466 mutex_lock(&root->chain_lock);
2467 MLX5_SET(set_flow_table_root_in, in, table_id,
2468 root->root_ft->id);
2469 }
2470
2471 err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
2472 mutex_unlock(&root->chain_lock);
2473
2474 return err;
2475}
2476
2477static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
2478 struct mlx5_core_dev *slave,
2479 struct mlx5_vport *vport,
2480 struct mlx5_flow_table *acl)
2481{
2482 struct mlx5_flow_handle *flow_rule = NULL;
2483 struct mlx5_flow_destination dest = {};
2484 struct mlx5_flow_act flow_act = {};
2485 struct mlx5_flow_spec *spec;
2486 int err = 0;
2487 void *misc;
2488
2489 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2490 if (!spec)
2491 return -ENOMEM;
2492
2493 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
2494 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2495 misc_parameters);
2496 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
2497 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
2498 MLX5_CAP_GEN(slave, vhca_id));
2499
2500 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
2501 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
2502 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
2503 source_eswitch_owner_vhca_id);
2504
2505 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2506 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
2507 dest.vport.num = slave->priv.eswitch->manager_vport;
2508 dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id);
2509 dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
2510
2511 flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act,
2512 &dest, 1);
2513 if (IS_ERR(flow_rule))
2514 err = PTR_ERR(flow_rule);
2515 else
2516 vport->egress.offloads.bounce_rule = flow_rule;
2517
2518 kvfree(spec);
2519 return err;
2520}
2521
2522static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
2523 struct mlx5_core_dev *slave)
2524{
2525 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2526 struct mlx5_eswitch *esw = master->priv.eswitch;
2527 struct mlx5_flow_table_attr ft_attr = {
2528 .max_fte = 1, .prio = 0, .level = 0,
2529 .flags = MLX5_FLOW_TABLE_OTHER_VPORT,
2530 };
2531 struct mlx5_flow_namespace *egress_ns;
2532 struct mlx5_flow_table *acl;
2533 struct mlx5_flow_group *g;
2534 struct mlx5_vport *vport;
2535 void *match_criteria;
2536 u32 *flow_group_in;
2537 int err;
2538
2539 vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
2540 if (IS_ERR(vport))
2541 return PTR_ERR(vport);
2542
2543 egress_ns = mlx5_get_flow_vport_acl_namespace(master,
2544 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
2545 vport->index);
2546 if (!egress_ns)
2547 return -EINVAL;
2548
2549 if (vport->egress.acl)
2550 return -EINVAL;
2551
2552 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2553 if (!flow_group_in)
2554 return -ENOMEM;
2555
2556 acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
2557 if (IS_ERR(acl)) {
2558 err = PTR_ERR(acl);
2559 goto out;
2560 }
2561
2562 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
2563 match_criteria);
2564 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
2565 misc_parameters.source_port);
2566 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
2567 misc_parameters.source_eswitch_owner_vhca_id);
2568 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
2569 MLX5_MATCH_MISC_PARAMETERS);
2570
2571 MLX5_SET(create_flow_group_in, flow_group_in,
2572 source_eswitch_owner_vhca_id_valid, 1);
2573 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
2574 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
2575
2576 g = mlx5_create_flow_group(acl, flow_group_in);
2577 if (IS_ERR(g)) {
2578 err = PTR_ERR(g);
2579 goto err_group;
2580 }
2581
2582 err = __esw_set_master_egress_rule(master, slave, vport, acl);
2583 if (err)
2584 goto err_rule;
2585
2586 vport->egress.acl = acl;
2587 vport->egress.offloads.bounce_grp = g;
2588
2589 kvfree(flow_group_in);
2590
2591 return 0;
2592
2593err_rule:
2594 mlx5_destroy_flow_group(g);
2595err_group:
2596 mlx5_destroy_flow_table(acl);
2597out:
2598 kvfree(flow_group_in);
2599 return err;
2600}
2601
2602static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev)
2603{
2604 struct mlx5_vport *vport;
2605
2606 vport = mlx5_eswitch_get_vport(dev->priv.eswitch,
2607 dev->priv.eswitch->manager_vport);
2608
2609 esw_acl_egress_ofld_cleanup(vport);
2610}
2611
2612int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
2613 struct mlx5_eswitch *slave_esw)
2614{
2615 int err;
2616
2617 err = esw_set_slave_root_fdb(master_esw->dev,
2618 slave_esw->dev);
2619 if (err)
2620 return err;
2621
2622 err = esw_set_master_egress_rule(master_esw->dev,
2623 slave_esw->dev);
2624 if (err)
2625 goto err_acl;
2626
2627 return err;
2628
2629err_acl:
2630 esw_set_slave_root_fdb(NULL, slave_esw->dev);
2631
2632 return err;
2633}
2634
2635void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
2636 struct mlx5_eswitch *slave_esw)
2637{
2638 esw_unset_master_egress_rule(master_esw->dev);
2639 esw_set_slave_root_fdb(NULL, slave_esw->dev);
2640}
2641
2642#define ESW_OFFLOADS_DEVCOM_PAIR (0)
2643#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
2644
2645static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw)
2646{
2647 const struct mlx5_eswitch_rep_ops *ops;
2648 struct mlx5_eswitch_rep *rep;
2649 unsigned long i;
2650 u8 rep_type;
2651
2652 mlx5_esw_for_each_rep(esw, i, rep) {
2653 rep_type = NUM_REP_TYPES;
2654 while (rep_type--) {
2655 ops = esw->offloads.rep_ops[rep_type];
2656 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2657 ops->event)
2658 ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, NULL);
2659 }
2660 }
2661}
2662
2663static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
2664{
2665#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
2666 mlx5e_tc_clean_fdb_peer_flows(esw);
2667#endif
2668 mlx5_esw_offloads_rep_event_unpair(esw);
2669 esw_del_fdb_peer_miss_rules(esw);
2670}
2671
2672static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
2673 struct mlx5_eswitch *peer_esw)
2674{
2675 const struct mlx5_eswitch_rep_ops *ops;
2676 struct mlx5_eswitch_rep *rep;
2677 unsigned long i;
2678 u8 rep_type;
2679 int err;
2680
2681 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
2682 if (err)
2683 return err;
2684
2685 mlx5_esw_for_each_rep(esw, i, rep) {
2686 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
2687 ops = esw->offloads.rep_ops[rep_type];
2688 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2689 ops->event) {
2690 err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw);
2691 if (err)
2692 goto err_out;
2693 }
2694 }
2695 }
2696
2697 return 0;
2698
2699err_out:
2700 mlx5_esw_offloads_unpair(esw);
2701 return err;
2702}
2703
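/* Comment added for clarity: set (or clear) the two FDB root namespaces
 * as each other's steering peer; the first assignment is rolled back if
 * the second fails.
 */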
2704static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
2705 struct mlx5_eswitch *peer_esw,
2706 bool pair)
2707{
2708 struct mlx5_flow_root_namespace *peer_ns;
2709 struct mlx5_flow_root_namespace *ns;
2710 int err;
2711
2712 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
2713 ns = esw->dev->priv.steering->fdb_root_ns;
2714
2715 if (pair) {
2716 err = mlx5_flow_namespace_set_peer(ns, peer_ns);
2717 if (err)
2718 return err;
2719
2720 err = mlx5_flow_namespace_set_peer(peer_ns, ns);
2721 if (err) {
2722 mlx5_flow_namespace_set_peer(ns, NULL);
2723 return err;
2724 }
2725 } else {
2726 mlx5_flow_namespace_set_peer(ns, NULL);
2727 mlx5_flow_namespace_set_peer(peer_ns, NULL);
2728 }
2729
2730 return 0;
2731}
2732
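/* Comment added for clarity: on PAIR, the FDB namespaces are peered and
 * peer miss rules are installed in both directions; on UNPAIR, the
 * pairing is torn down in reverse order.
 */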
2733static int mlx5_esw_offloads_devcom_event(int event,
2734 void *my_data,
2735 void *event_data)
2736{
2737 struct mlx5_eswitch *esw = my_data;
2738 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2739 struct mlx5_eswitch *peer_esw = event_data;
2740 int err;
2741
2742 switch (event) {
2743 case ESW_OFFLOADS_DEVCOM_PAIR:
2744 if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
2745 mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
2746 break;
2747
2748 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
2749 if (err)
2750 goto err_out;
2751 err = mlx5_esw_offloads_pair(esw, peer_esw);
2752 if (err)
2753 goto err_peer;
2754
2755 err = mlx5_esw_offloads_pair(peer_esw, esw);
2756 if (err)
2757 goto err_pair;
2758
2759 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
2760 break;
2761
2762 case ESW_OFFLOADS_DEVCOM_UNPAIR:
2763 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
2764 break;
2765
2766 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
2767 mlx5_esw_offloads_unpair(peer_esw);
2768 mlx5_esw_offloads_unpair(esw);
2769 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
2770 break;
2771 }
2772
2773 return 0;
2774
2775err_pair:
2776 mlx5_esw_offloads_unpair(esw);
2777err_peer:
2778 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
2779err_out:
2780 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
2781 event, err);
2782 return err;
2783}
2784
2785static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
2786{
2787 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2788
2789 INIT_LIST_HEAD(&esw->offloads.peer_flows);
2790 mutex_init(&esw->offloads.peer_mutex);
2791
2792 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
2793 return;
2794
2795 if (!mlx5_is_lag_supported(esw->dev))
2796 return;
2797
2798 mlx5_devcom_register_component(devcom,
2799 MLX5_DEVCOM_ESW_OFFLOADS,
2800 mlx5_esw_offloads_devcom_event,
2801 esw);
2802
2803 mlx5_devcom_send_event(devcom,
2804 MLX5_DEVCOM_ESW_OFFLOADS,
2805 ESW_OFFLOADS_DEVCOM_PAIR, esw);
2806}
2807
2808static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
2809{
2810 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2811
2812 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
2813 return;
2814
2815 if (!mlx5_is_lag_supported(esw->dev))
2816 return;
2817
2818 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
2819 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
2820
2821 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
2822}
2823
2824bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
2825{
2826 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
2827 return false;
2828
2829 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
2830 MLX5_FDB_TO_VPORT_REG_C_0))
2831 return false;
2832
2833 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
2834 return false;
2835
2836 return true;
2837}
2838
2839#define MLX5_ESW_METADATA_RSVD_UPLINK 1
2840
2841/* Share the same metadata for both uplinks. This is fine because:
2842 * (a) In shared FDB mode (LAG) both uplinks are treated the
2843 * same and tagged with the same metadata.
2844 * (b) In non-shared FDB mode, packets from physical port0
2845 * cannot hit the eswitch of PF1 and vice versa.
2846 */
2847static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
2848{
2849 return MLX5_ESW_METADATA_RSVD_UPLINK;
2850}
2851
2852u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
2853{
2854 u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
2855 /* Reserve 0xf for internal port offload */
2856 u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2;
2857 u32 pf_num;
2858 int id;
2859
2860 /* Only 4 bits of pf_num */
2861 pf_num = mlx5_get_dev_index(esw->dev);
2862 if (pf_num > max_pf_num)
2863 return 0;
2864
2865 /* Metadata is 4 bits of PFNUM and 12 bits of unique id */
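	/* Illustrative example: pf_num 1 with ida id 5 yields metadata
	 * (1 << ESW_VPORT_BITS) | 5 == 0x1005.
	 */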
2866 /* Use only non-zero vport_id (2-4095) for all PF's */
2867 id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
2868 MLX5_ESW_METADATA_RSVD_UPLINK + 1,
2869 vport_end_ida, GFP_KERNEL);
2870 if (id < 0)
2871 return 0;
2872 id = (pf_num << ESW_VPORT_BITS) | id;
2873 return id;
2874}
2875
2876void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
2877{
2878 u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;
2879
2880 /* Metadata contains only 12 bits of actual ida id */
2881 ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
2882}
2883
2884static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
2885 struct mlx5_vport *vport)
2886{
2887 if (vport->vport == MLX5_VPORT_UPLINK)
2888 vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
2889 else
2890 vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
2891
2892 vport->metadata = vport->default_metadata;
2893 return vport->metadata ? 0 : -ENOSPC;
2894}
2895
2896static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
2897 struct mlx5_vport *vport)
2898{
2899 if (!vport->default_metadata)
2900 return;
2901
2902 if (vport->vport == MLX5_VPORT_UPLINK)
2903 return;
2904
2905 WARN_ON(vport->metadata != vport->default_metadata);
2906 mlx5_esw_match_metadata_free(esw, vport->default_metadata);
2907}
2908
2909static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
2910{
2911 struct mlx5_vport *vport;
2912 unsigned long i;
2913
2914 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
2915 return;
2916
2917 mlx5_esw_for_each_vport(esw, i, vport)
2918 esw_offloads_vport_metadata_cleanup(esw, vport);
2919}
2920
2921static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
2922{
2923 struct mlx5_vport *vport;
2924 unsigned long i;
2925 int err;
2926
2927 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
2928 return 0;
2929
2930 mlx5_esw_for_each_vport(esw, i, vport) {
2931 err = esw_offloads_vport_metadata_setup(esw, vport);
2932 if (err)
2933 goto metadata_err;
2934 }
2935
2936 return 0;
2937
2938metadata_err:
2939 esw_offloads_metadata_uninit(esw);
2940 return err;
2941}
2942
2943int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable)
2944{
2945 int err = 0;
2946
2947 down_write(&esw->mode_lock);
2948 if (mlx5_esw_is_fdb_created(esw)) {
2949 err = -EBUSY;
2950 goto done;
2951 }
2952 if (!mlx5_esw_vport_match_metadata_supported(esw)) {
2953 err = -EOPNOTSUPP;
2954 goto done;
2955 }
2956 if (enable)
2957 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
2958 else
2959 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
2960done:
2961 up_write(&esw->mode_lock);
2962 return err;
2963}
2964
2965int
2966esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
2967 struct mlx5_vport *vport)
7445cfb1 2968{
7445cfb1
JL
2969 int err;
2970
07bab950 2971 err = esw_acl_ingress_ofld_setup(esw, vport);
89a0f1fb 2972 if (err)
fc99c3d6 2973 return err;
7445cfb1 2974
2c40db2f
PP
2975 err = esw_acl_egress_ofld_setup(esw, vport);
2976 if (err)
2977 goto egress_err;
07bab950
VP
2978
2979 return 0;
2980
2981egress_err:
2982 esw_acl_ingress_ofld_cleanup(esw, vport);
2983 return err;
2984}
2985
2986void
2987esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
2988 struct mlx5_vport *vport)
2989{
2990 esw_acl_egress_ofld_cleanup(vport);
2991 esw_acl_ingress_ofld_cleanup(esw, vport);
2992}
2993
2994static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
2995{
2996 struct mlx5_vport *vport;
2997
2998 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2999 if (IS_ERR(vport))
3000 return PTR_ERR(vport);
3001
3002 return esw_vport_create_offloads_acl_tables(esw, vport);
3003}
3004
3005static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
3006{
3007 struct mlx5_vport *vport;
3008
3009 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
3010 if (IS_ERR(vport))
3011 return;
3012
3013 esw_vport_destroy_offloads_acl_tables(esw, vport);
3014}
3015
3016int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
3017{
3018 struct mlx5_eswitch_rep *rep;
3019 unsigned long i;
3020 int ret;
3021
3022 if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)
3023 return 0;
3024
3025 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
3026 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
3027 return 0;
3028
3029 ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
3030 if (ret)
3031 return ret;
3032
3033 mlx5_esw_for_each_rep(esw, i, rep) {
3034 if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
3035 mlx5_esw_offloads_rep_load(esw, rep->vport);
3036 }
3037
3038 return 0;
3039}
3040
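/* Comment added for clarity: steering objects are created in order
 * (indirection table, uplink ACLs, offloads table, restore table, slow
 * path FDB tables, vport RX group, RX drop group and rule) and the
 * error path unwinds them in reverse.
 */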
3041static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
3042{
3043 struct mlx5_esw_indir_table *indir;
3044 int err;
3045
5c1d260e 3046 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
f8d1edda
PP
3047 mutex_init(&esw->fdb_table.offloads.vports.lock);
3048 hash_init(esw->fdb_table.offloads.vports.table);
7dc84de9 3049 atomic64_set(&esw->user_count, 0);
e52c2802 3050
34ca6535
VB
3051 indir = mlx5_esw_indir_table_init();
3052 if (IS_ERR(indir)) {
3053 err = PTR_ERR(indir);
3054 goto create_indir_err;
3055 }
3056 esw->fdb_table.offloads.indir = indir;
3057
748da30b 3058 err = esw_create_uplink_offloads_acl_tables(esw);
7445cfb1 3059 if (err)
f8d1edda 3060 goto create_acl_err;
18486737 3061
8d6bd3c3 3062 err = esw_create_offloads_table(esw);
c930a3ad 3063 if (err)
11b717d6 3064 goto create_offloads_err;
c930a3ad 3065
11b717d6 3066 err = esw_create_restore_table(esw);
c930a3ad 3067 if (err)
11b717d6
PB
3068 goto create_restore_err;
3069
0da3c12d 3070 err = esw_create_offloads_fdb_tables(esw);
11b717d6
PB
3071 if (err)
3072 goto create_fdb_err;
c930a3ad 3073
8d6bd3c3 3074 err = esw_create_vport_rx_group(esw);
c930a3ad
OG
3075 if (err)
3076 goto create_fg_err;
3077
8ea7bcf6
JL
3078 err = esw_create_vport_rx_drop_group(esw);
3079 if (err)
3080 goto create_rx_drop_fg_err;
3081
3082 err = esw_create_vport_rx_drop_rule(esw);
3083 if (err)
3084 goto create_rx_drop_rule_err;
3085
c930a3ad
OG
3086 return 0;
3087
8ea7bcf6
JL
3088create_rx_drop_rule_err:
3089 esw_destroy_vport_rx_drop_group(esw);
3090create_rx_drop_fg_err:
3091 esw_destroy_vport_rx_group(esw);
c930a3ad 3092create_fg_err:
1967ce6e 3093 esw_destroy_offloads_fdb_tables(esw);
7445cfb1 3094create_fdb_err:
11b717d6
PB
3095 esw_destroy_restore_table(esw);
3096create_restore_err:
3097 esw_destroy_offloads_table(esw);
3098create_offloads_err:
748da30b 3099 esw_destroy_uplink_offloads_acl_tables(esw);
f8d1edda 3100create_acl_err:
34ca6535
VB
3101 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
3102create_indir_err:
f8d1edda 3103 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
c930a3ad
OG
3104 return err;
3105}
3106
eca8cc38
BW
3107static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
3108{
8ea7bcf6
JL
3109 esw_destroy_vport_rx_drop_rule(esw);
3110 esw_destroy_vport_rx_drop_group(esw);
eca8cc38 3111 esw_destroy_vport_rx_group(esw);
eca8cc38 3112 esw_destroy_offloads_fdb_tables(esw);
11b717d6
PB
3113 esw_destroy_restore_table(esw);
3114 esw_destroy_offloads_table(esw);
748da30b 3115 esw_destroy_uplink_offloads_acl_tables(esw);
34ca6535 3116 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
f8d1edda 3117 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
eca8cc38
BW
3118}
3119
7e736f9a
PP
3120static void
3121esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
a3888f33 3122{
f1bc646c 3123 struct devlink *devlink;
5ccf2770 3124 bool host_pf_disabled;
7e736f9a 3125 u16 new_num_vfs;
a3888f33 3126
7e736f9a
PP
3127 new_num_vfs = MLX5_GET(query_esw_functions_out, out,
3128 host_params_context.host_num_of_vfs);
5ccf2770
BW
3129 host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
3130 host_params_context.host_pf_disabled);
a3888f33 3131
7e736f9a
PP
3132 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
3133 return;
a3888f33 3134
f1bc646c
MS
3135 devlink = priv_to_devlink(esw->dev);
3136 devl_lock(devlink);
a3888f33 3137 /* Number of VFs can only change from "0 to x" or "x to 0". */
cd56f929 3138 if (esw->esw_funcs.num_vfs > 0) {
23bb50cf 3139 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
a3888f33 3140 } else {
7e736f9a 3141 int err;
a3888f33 3142
23bb50cf
BW
3143 err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
3144 MLX5_VPORT_UC_ADDR_CHANGE);
b868c8fe
DC
3145 if (err) {
3146 devl_unlock(devlink);
3147 return;
3148 }
3149 }
3150 esw->esw_funcs.num_vfs = new_num_vfs;
3151 devl_unlock(devlink);
3152}
3153
3154static void esw_functions_changed_event_handler(struct work_struct *work)
3155{
3156 struct mlx5_host_work *host_work;
3157 struct mlx5_eswitch *esw;
3158 const u32 *out;
3159
3160 host_work = container_of(work, struct mlx5_host_work, work);
3161 esw = host_work->esw;
3162
3163 out = mlx5_esw_query_functions(esw->dev);
3164 if (IS_ERR(out))
3165 goto out;
3166
3167 esw_vfs_changed_event_handler(esw, out);
3168 kvfree(out);
3169out:
3170 kfree(host_work);
3171}
3172
3173int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
3174{
3175 struct mlx5_esw_functions *esw_funcs;
3176 struct mlx5_host_work *host_work;
3177 struct mlx5_eswitch *esw;
3178
3179 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
3180 if (!host_work)
3181 return NOTIFY_DONE;
3182
3183 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
3184 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
3185
3186 host_work->esw = esw;
3187
3188 INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
3189 queue_work(esw->work_queue, &host_work->work);
3190
3191 return NOTIFY_OK;
3192}
3193
3194static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
3195{
3196 const u32 *query_host_out;
3197
3198 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3199 return 0;
3200
3201 query_host_out = mlx5_esw_query_functions(esw->dev);
3202 if (IS_ERR(query_host_out))
3203 return PTR_ERR(query_host_out);
3204
3205 /* Mark non local controller with non zero controller number. */
3206 esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
3207 host_params_context.host_number);
3208 kvfree(query_host_out);
3209 return 0;
3210}
3211
3212bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
3213{
3214 /* Local controller is always valid */
3215 if (controller == 0)
3216 return true;
3217
3218 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3219 return false;
3220
3221 /* External host number starts with zero in device */
3222 return (controller == esw->offloads.host_number + 1);
3223}
3224
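/* Comment added for clarity: offloads mode is brought up in order: host
 * number and per-vport metadata, passing of vport metadata, the reg_c_0
 * object pool, steering tables, the uplink rep first, then the PF/VF
 * vports and devcom pairing.
 */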
3225int esw_offloads_enable(struct mlx5_eswitch *esw)
3226{
3227 struct mapping_ctx *reg_c0_obj_pool;
3228 struct mlx5_vport *vport;
3229 unsigned long i;
3230 u64 mapping_id;
3231 int err;
3232
3233 mutex_init(&esw->offloads.termtbl_mutex);
3234 mlx5_rdma_enable_roce(esw->dev);
3235
3236 err = mlx5_esw_host_number_init(esw);
3237 if (err)
3238 goto err_metadata;
3239
3240 err = esw_offloads_metadata_init(esw);
3241 if (err)
3242 goto err_metadata;
3243
3244 err = esw_set_passing_vport_metadata(esw, true);
3245 if (err)
3246 goto err_vport_metadata;
3247
3248 mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
3249
3250 reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
3251 sizeof(struct mlx5_mapped_obj),
3252 ESW_REG_C0_USER_DATA_METADATA_MASK,
3253 true);
3254
3255 if (IS_ERR(reg_c0_obj_pool)) {
3256 err = PTR_ERR(reg_c0_obj_pool);
3257 goto err_pool;
3258 }
3259 esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;
3260
3261 err = esw_offloads_steering_init(esw);
3262 if (err)
3263 goto err_steering_init;
3264
3265 /* Representor will control the vport link state */
3266 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
3267 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
3268
3269 /* Uplink vport rep must load first. */
3270 err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
3271 if (err)
3272 goto err_uplink;
3273
3274 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
3275 if (err)
3276 goto err_vports;
3277
3278 esw_offloads_devcom_init(esw);
3279
3280 return 0;
3281
3282err_vports:
3283 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
3284err_uplink:
3285 esw_offloads_steering_cleanup(esw);
3286err_steering_init:
3287 mapping_destroy(reg_c0_obj_pool);
3288err_pool:
3289 esw_set_passing_vport_metadata(esw, false);
3290err_vport_metadata:
3291 esw_offloads_metadata_uninit(esw);
3292err_metadata:
3293 mlx5_rdma_disable_roce(esw->dev);
3294 mutex_destroy(&esw->offloads.termtbl_mutex);
3295 return err;
3296}
3297
3298static int esw_offloads_stop(struct mlx5_eswitch *esw,
3299 struct netlink_ext_ack *extack)
3300{
3301 int err;
3302
3303 esw->mode = MLX5_ESWITCH_LEGACY;
3304
3305 /* If changing from switchdev to legacy mode without sriov enabled,
3306 * no need to create legacy fdb.
3307 */
3308 if (!mlx5_sriov_is_enabled(esw->dev))
3309 return 0;
3310
3311 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
3312 if (err)
3313 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
3314
3315 return err;
3316}
3317
5896b972 3318void esw_offloads_disable(struct mlx5_eswitch *esw)
c930a3ad 3319{
ac004b83 3320 esw_offloads_devcom_cleanup(esw);
5896b972 3321 mlx5_eswitch_disable_pf_vf_vports(esw);
c2d7712c 3322 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
332bd3a5 3323 esw_set_passing_vport_metadata(esw, false);
eca8cc38 3324 esw_offloads_steering_cleanup(esw);
c9355682 3325 mapping_destroy(esw->offloads.reg_c0_obj_pool);
fc99c3d6 3326 esw_offloads_metadata_uninit(esw);
8463daf1 3327 mlx5_rdma_disable_roce(esw->dev);
2bb72e7e 3328 mutex_destroy(&esw->offloads.termtbl_mutex);
c930a3ad
OG
3329}
3330
ef78618b 3331static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
c930a3ad
OG
3332{
3333 switch (mode) {
3334 case DEVLINK_ESWITCH_MODE_LEGACY:
f6455de0 3335 *mlx5_mode = MLX5_ESWITCH_LEGACY;
c930a3ad
OG
3336 break;
3337 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
f6455de0 3338 *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
c930a3ad
OG
3339 break;
3340 default:
3341 return -EINVAL;
3342 }
3343
3344 return 0;
3345}
3346
ef78618b
OG
3347static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
3348{
3349 switch (mlx5_mode) {
f6455de0 3350 case MLX5_ESWITCH_LEGACY:
ef78618b
OG
3351 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
3352 break;
f6455de0 3353 case MLX5_ESWITCH_OFFLOADS:
ef78618b
OG
3354 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
3355 break;
3356 default:
3357 return -EINVAL;
3358 }
3359
3360 return 0;
3361}
3362
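/*
 * Editorial example (userspace view): the two converters above back the
 * devlink eswitch mode knob, e.g.
 *
 *	$ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
 *	$ devlink dev eswitch show pci/0000:06:00.0
 *
 * The PCI address is hypothetical; "legacy" and "switchdev" are the only
 * values esw_mode_from_devlink() accepts.
 */
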
bffaa916
RD
3363static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
3364{
3365 switch (mode) {
3366 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
3367 *mlx5_mode = MLX5_INLINE_MODE_NONE;
3368 break;
3369 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
3370 *mlx5_mode = MLX5_INLINE_MODE_L2;
3371 break;
3372 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
3373 *mlx5_mode = MLX5_INLINE_MODE_IP;
3374 break;
3375 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
3376 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
3377 break;
3378 default:
3379 return -EINVAL;
3380 }
3381
3382 return 0;
3383}
3384
3385static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
3386{
3387 switch (mlx5_mode) {
3388 case MLX5_INLINE_MODE_NONE:
3389 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
3390 break;
3391 case MLX5_INLINE_MODE_L2:
3392 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
3393 break;
3394 case MLX5_INLINE_MODE_IP:
3395 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
3396 break;
3397 case MLX5_INLINE_MODE_TCP_UDP:
3398 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
3399 break;
3400 default:
3401 return -EINVAL;
3402 }
3403
3404 return 0;
3405}
3406
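/*
 * Editorial example (userspace view): the matching devlink knob, e.g.
 * requiring L3 headers to be inlined into the send WQE:
 *
 *	$ devlink dev eswitch set pci/0000:06:00.0 inline-mode network
 *
 * The PCI address is hypothetical; "none", "link", "network" and
 * "transport" map to the MLX5_INLINE_MODE_* values above.
 */
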
db7ff19e
EB
3407int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
3408 struct netlink_ext_ack *extack)
9d1cef19 3409{
9d1cef19 3410 u16 cur_mlx5_mode, mlx5_mode = 0;
bd939753 3411 struct mlx5_eswitch *esw;
ea2128fd 3412 int err = 0;
9d1cef19 3413
bd939753
PP
3414 esw = mlx5_devlink_eswitch_get(devlink);
3415 if (IS_ERR(esw))
3416 return PTR_ERR(esw);
9d1cef19 3417
ef78618b 3418 if (esw_mode_from_devlink(mode, &mlx5_mode))
c930a3ad
OG
3419 return -EINVAL;
3420
cac1eb2c 3421 mlx5_lag_disable_change(esw->dev);
7dc84de9
RD
3422 err = mlx5_esw_try_lock(esw);
3423 if (err < 0) {
3424 NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
cac1eb2c 3425 goto enable_lag;
7dc84de9
RD
3426 }
3427 cur_mlx5_mode = err;
3428 err = 0;
3429
c930a3ad 3430 if (cur_mlx5_mode == mlx5_mode)
8e0aa4bc 3431 goto unlock;
c930a3ad 3432
f019679e 3433 mlx5_eswitch_disable_locked(esw);
c85a6b8f
AL
3434 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
3435 if (mlx5_devlink_trap_get_num_active(esw->dev)) {
3436 NL_SET_ERR_MSG_MOD(extack,
3437 "Can't change mode while devlink traps are active");
3438 err = -EOPNOTSUPP;
3439 goto unlock;
3440 }
8e0aa4bc 3441 err = esw_offloads_start(esw, extack);
c85a6b8f 3442 } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
8e0aa4bc 3443 err = esw_offloads_stop(esw, extack);
f019679e 3444 mlx5_rescan_drivers(esw->dev);
c85a6b8f 3445 } else {
8e0aa4bc 3446 err = -EINVAL;
c85a6b8f 3447 }
8e0aa4bc
PP
3448
3449unlock:
7dc84de9 3450 mlx5_esw_unlock(esw);
cac1eb2c
MB
3451enable_lag:
3452 mlx5_lag_enable_change(esw->dev);
8e0aa4bc 3453 return err;
feae9087
OG
3454}
3455
3456int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3457{
bd939753 3458 struct mlx5_eswitch *esw;
9d1cef19 3459 int err;
c930a3ad 3460
bd939753
PP
3461 esw = mlx5_devlink_eswitch_get(devlink);
3462 if (IS_ERR(esw))
3463 return PTR_ERR(esw);
c930a3ad 3464
efb4879f 3465 down_read(&esw->mode_lock);
8e0aa4bc 3466 err = esw_mode_to_devlink(esw->mode, mode);
efb4879f 3467 up_read(&esw->mode_lock);
8e0aa4bc 3468 return err;
feae9087 3469}
127ea380 3470
47dd7e60
PP
3471static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
3472 struct netlink_ext_ack *extack)
3473{
3474 struct mlx5_core_dev *dev = esw->dev;
3475 struct mlx5_vport *vport;
3476 u16 err_vport_num = 0;
3477 unsigned long i;
3478 int err = 0;
3479
3480 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
3481 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
3482 if (err) {
3483 err_vport_num = vport->vport;
3484 NL_SET_ERR_MSG_MOD(extack,
3485 "Failed to set min inline on vport");
3486 goto revert_inline_mode;
3487 }
3488 }
3489 return 0;
3490
3491revert_inline_mode:
3492 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
3493 if (vport->vport == err_vport_num)
3494 break;
3495 mlx5_modify_nic_vport_min_inline(dev,
3496 vport->vport,
3497 esw->offloads.inline_mode);
3498 }
3499 return err;
3500}
3501
db7ff19e
EB
3502int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
3503 struct netlink_ext_ack *extack)
bffaa916
RD
3504{
3505 struct mlx5_core_dev *dev = devlink_priv(devlink);
bd939753 3506 struct mlx5_eswitch *esw;
bffaa916 3507 u8 mlx5_mode;
47dd7e60 3508 int err;
bffaa916 3509
bd939753
PP
3510 esw = mlx5_devlink_eswitch_get(devlink);
3511 if (IS_ERR(esw))
3512 return PTR_ERR(esw);
bffaa916 3513
367dfa12 3514 down_write(&esw->mode_lock);
ae24432c 3515
c415f704
OG
3516 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
3517 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
bcd68c04
JC
3518 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) {
3519 err = 0;
8e0aa4bc 3520 goto out;
bcd68c04
JC
3521 }
3522
c8b838d1 3523 fallthrough;
c415f704 3524 case MLX5_CAP_INLINE_MODE_L2:
8c98ee77 3525 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
8e0aa4bc
PP
3526 err = -EOPNOTSUPP;
3527 goto out;
c415f704
OG
3528 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
3529 break;
3530 }
bffaa916 3531
525e84be 3532 if (atomic64_read(&esw->offloads.num_flows) > 0) {
8c98ee77
EB
3533 NL_SET_ERR_MSG_MOD(extack,
3534 "Can't set inline mode when flows are configured");
8e0aa4bc
PP
3535 err = -EOPNOTSUPP;
3536 goto out;
375f51e2
RD
3537 }
3538
bffaa916
RD
3539 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
3540 if (err)
3541 goto out;
3542
47dd7e60
PP
3543 err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
3544 if (err)
3545 goto out;
bffaa916
RD
3546
3547 esw->offloads.inline_mode = mlx5_mode;
367dfa12 3548 up_write(&esw->mode_lock);
bffaa916
RD
3549 return 0;
3550
bffaa916 3551out:
367dfa12 3552 up_write(&esw->mode_lock);
bffaa916
RD
3553 return err;
3554}
3555
3556int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
3557{
bd939753 3558 struct mlx5_eswitch *esw;
9d1cef19 3559 int err;
bffaa916 3560
bd939753
PP
3561 esw = mlx5_devlink_eswitch_get(devlink);
3562 if (IS_ERR(esw))
3563 return PTR_ERR(esw);
bffaa916 3564
efb4879f 3565 down_read(&esw->mode_lock);
8e0aa4bc 3566 err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
efb4879f 3567 up_read(&esw->mode_lock);
8e0aa4bc 3568 return err;
bffaa916
RD
3569}
3570
98fdbea5
LR
3571int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
3572 enum devlink_eswitch_encap_mode encap,
db7ff19e 3573 struct netlink_ext_ack *extack)
7768d197
RD
3574{
3575 struct mlx5_core_dev *dev = devlink_priv(devlink);
bd939753 3576 struct mlx5_eswitch *esw;
f019679e 3577 int err = 0;
7768d197 3578
bd939753
PP
3579 esw = mlx5_devlink_eswitch_get(devlink);
3580 if (IS_ERR(esw))
3581 return PTR_ERR(esw);
7768d197 3582
367dfa12 3583 down_write(&esw->mode_lock);
ae24432c 3584
7768d197 3585 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
60786f09 3586 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
8e0aa4bc
PP
3587 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
3588 err = -EOPNOTSUPP;
3589 goto unlock;
3590 }
7768d197 3591
8e0aa4bc
PP
3592 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
3593 err = -EOPNOTSUPP;
3594 goto unlock;
3595 }
7768d197 3596
f6455de0 3597 if (esw->mode == MLX5_ESWITCH_LEGACY) {
7768d197 3598 esw->offloads.encap = encap;
8e0aa4bc 3599 goto unlock;
7768d197
RD
3600 }
3601
3602 if (esw->offloads.encap == encap)
8e0aa4bc 3603 goto unlock;
7768d197 3604
525e84be 3605 if (atomic64_read(&esw->offloads.num_flows) > 0) {
8c98ee77
EB
3606 NL_SET_ERR_MSG_MOD(extack,
3607 "Can't set encapsulation when flows are configured");
8e0aa4bc
PP
3608 err = -EOPNOTSUPP;
3609 goto unlock;
7768d197
RD
3610 }
3611
e52c2802 3612 esw_destroy_offloads_fdb_tables(esw);
7768d197
RD
3613
3614 esw->offloads.encap = encap;
e52c2802 3615
0da3c12d 3616 err = esw_create_offloads_fdb_tables(esw);
e52c2802 3617
7768d197 3618 if (err) {
8c98ee77
EB
3619 NL_SET_ERR_MSG_MOD(extack,
3620 "Failed re-creating fast FDB table");
7768d197 3621 esw->offloads.encap = !encap;
0da3c12d 3622 (void)esw_create_offloads_fdb_tables(esw);
7768d197 3623 }
e52c2802 3624
8e0aa4bc 3625unlock:
367dfa12 3626 up_write(&esw->mode_lock);
7768d197
RD
3627 return err;
3628}
3629
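/*
 * Editorial example (userspace view, recent iproute2 syntax): toggling
 * encap offload. The handler above rebuilds the fast-path FDB tables, so
 * the request is refused while offloaded flows exist:
 *
 *	$ devlink dev eswitch set pci/0000:06:00.0 encap-mode basic
 *	$ devlink dev eswitch set pci/0000:06:00.0 encap-mode none
 *
 * The PCI address is hypothetical.
 */
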
98fdbea5
LR
3630int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
3631 enum devlink_eswitch_encap_mode *encap)
7768d197 3632{
bd939753 3633 struct mlx5_eswitch *esw;
7768d197 3634
bd939753
PP
3635 esw = mlx5_devlink_eswitch_get(devlink);
3636 if (IS_ERR(esw))
3637 return PTR_ERR(esw);
3638
efb4879f 3639 down_read(&esw->mode_lock);
7768d197 3640 *encap = esw->offloads.encap;
efb4879f 3641 up_read(&esw->mode_lock);
f019679e 3642 return 0;
7768d197
RD
3643}
3644
c2d7712c
BW
3645static bool
3646mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
3647{
3648 /* Currently, only an ECPF-based device has a representor for the host PF. */
3649 if (vport_num == MLX5_VPORT_PF &&
3650 !mlx5_core_is_ecpf_esw_manager(esw->dev))
3651 return false;
3652
3653 if (vport_num == MLX5_VPORT_ECPF &&
3654 !mlx5_ecpf_vport_exists(esw->dev))
3655 return false;
3656
3657 return true;
3658}
3659
f8e8fa02 3660void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
8693115a 3661 const struct mlx5_eswitch_rep_ops *ops,
f8e8fa02 3662 u8 rep_type)
127ea380 3663{
8693115a 3664 struct mlx5_eswitch_rep_data *rep_data;
f8e8fa02 3665 struct mlx5_eswitch_rep *rep;
47dd7e60 3666 unsigned long i;
9deb2241 3667
8693115a 3668 esw->offloads.rep_ops[rep_type] = ops;
47dd7e60
PP
3669 mlx5_esw_for_each_rep(esw, i, rep) {
3670 if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
59c904c8 3671 rep->esw = esw;
c2d7712c
BW
3672 rep_data = &rep->rep_data[rep_type];
3673 atomic_set(&rep_data->state, REP_REGISTERED);
3674 }
f8e8fa02 3675 }
127ea380 3676}
f8e8fa02 3677EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
127ea380 3678
f8e8fa02 3679void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
127ea380 3680{
cb67b832 3681 struct mlx5_eswitch_rep *rep;
47dd7e60 3682 unsigned long i;
cb67b832 3683
f6455de0 3684 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
062f4bf4 3685 __unload_reps_all_vport(esw, rep_type);
127ea380 3686
47dd7e60 3687 mlx5_esw_for_each_rep(esw, i, rep)
8693115a 3688 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
127ea380 3689}
f8e8fa02 3690EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
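
/*
 * Editorial sketch (condensed from the mlx5e rep driver, not verbatim):
 * a rep consumer provides load/unload callbacks and pairs register with
 * unregister on teardown.
 *
 *	static const struct mlx5_eswitch_rep_ops rep_ops = {
 *		.load = mlx5e_vport_rep_load,
 *		.unload = mlx5e_vport_rep_unload,
 *		.get_proto_dev = mlx5e_vport_rep_get_proto_dev,
 *	};
 *
 *	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
 *	...
 *	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
 */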
726293f1 3691
a4b97ab4 3692void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
726293f1 3693{
726293f1
HHZ
3694 struct mlx5_eswitch_rep *rep;
3695
879c8f84 3696 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
8693115a 3697 return rep->rep_data[rep_type].priv;
726293f1 3698}
22215908
MB
3699
3700void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
02f3afd9 3701 u16 vport,
22215908
MB
3702 u8 rep_type)
3703{
22215908
MB
3704 struct mlx5_eswitch_rep *rep;
3705
879c8f84 3706 rep = mlx5_eswitch_get_rep(esw, vport);
22215908 3707
8693115a
PP
3708 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
3709 esw->offloads.rep_ops[rep_type]->get_proto_dev)
3710 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
22215908
MB
3711 return NULL;
3712}
57cbd893 3713EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
22215908
MB
3714
3715void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
3716{
879c8f84 3717 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
22215908 3718}
57cbd893
MB
3719EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
3720
3721struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
02f3afd9 3722 u16 vport)
57cbd893 3723{
879c8f84 3724 return mlx5_eswitch_get_rep(esw, vport);
57cbd893
MB
3725}
3726EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
91d6291c 3727
5b7cb745
PB
3728bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
3729{
3730 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
3731}
3732EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
3733
7445cfb1
JL
3734bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
3735{
3736 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
3737}
3738EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
3739
0f0d3827 3740u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
7445cfb1
JL
3741 u16 vport_num)
3742{
133dcfc5 3743 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
0f0d3827 3744
133dcfc5
VP
3745 if (WARN_ON_ONCE(IS_ERR(vport)))
3746 return 0;
0f0d3827 3747
133dcfc5 3748 return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
7445cfb1
JL
3749}
3750EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
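
/*
 * Editorial sketch (condensed from the source-port matching code in this
 * file): the returned value is matched against metadata register C0 in the
 * misc_parameters_2 section of an FDB rule, with the mask taken from
 * mlx5_eswitch_get_vport_metadata_mask().
 *
 *	misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 *			     misc_parameters_2);
 *	MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
 *		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
 *
 *	misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 *			     misc_parameters_2);
 *	MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
 *		 mlx5_eswitch_get_vport_metadata_mask());
 *
 *	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
 */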
d970812b
PP
3751
3752int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
f1b9acd3 3753 u16 vport_num, u32 controller, u32 sfnum)
d970812b
PP
3754{
3755 int err;
3756
3757 err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
3758 if (err)
3759 return err;
3760
f1b9acd3 3761 err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
d970812b
PP
3762 if (err)
3763 goto devlink_err;
3764
606e6a72 3765 mlx5_esw_vport_debugfs_create(esw, vport_num, true, sfnum);
d970812b
PP
3766 err = mlx5_esw_offloads_rep_load(esw, vport_num);
3767 if (err)
3768 goto rep_err;
3769 return 0;
3770
3771rep_err:
606e6a72 3772 mlx5_esw_vport_debugfs_destroy(esw, vport_num);
d970812b
PP
3773 mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
3774devlink_err:
3775 mlx5_esw_vport_disable(esw, vport_num);
3776 return err;
3777}
3778
3779void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
3780{
3781 mlx5_esw_offloads_rep_unload(esw, vport_num);
606e6a72 3782 mlx5_esw_vport_debugfs_destroy(esw, vport_num);
d970812b
PP
3783 mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
3784 mlx5_esw_vport_disable(esw, vport_num);
3785}
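
/*
 * Editorial sketch (condensed from the SF devlink caller): the pair above
 * is the SF port lifecycle; disable tears down in exact reverse order of
 * enable (rep, debugfs, devlink port, vport).
 *
 *	err = mlx5_esw_offloads_sf_vport_enable(esw, dl_port, vport_num,
 *						controller, sfnum);
 *	if (err)
 *		return err;
 *	...
 *	mlx5_esw_offloads_sf_vport_disable(esw, vport_num);
 */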
84ae9c1f
VB
3786
3787static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
3788{
3789 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
3790 void *query_ctx;
3791 void *hca_caps;
3792 int err;
3793
3794 *vhca_id = 0;
3795 if (mlx5_esw_is_manager_vport(esw, vport_num) ||
3796 !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
3797 return -EPERM;
3798
3799 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
3800 if (!query_ctx)
3801 return -ENOMEM;
3802
47d0c500 3803 err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
84ae9c1f
VB
3804 if (err)
3805 goto out_free;
3806
3807 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
3808 *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
3809
3810out_free:
3811 kfree(query_ctx);
3812 return err;
3813}
3814
3815int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
3816{
3817 u16 *old_entry, *vhca_map_entry, vhca_id;
3818 int err;
3819
3820 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
3821 if (err) {
3822 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
3823 vport_num, err);
3824 return err;
3825 }
3826
3827 vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
3828 if (!vhca_map_entry)
3829 return -ENOMEM;
3830
3831 *vhca_map_entry = vport_num;
3832 old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
3833 if (xa_is_err(old_entry)) {
3834 kfree(vhca_map_entry);
3835 return xa_err(old_entry);
3836 }
3837 kfree(old_entry);
3838 return 0;
3839}
3840
3841void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
3842{
3843 u16 *vhca_map_entry, vhca_id;
3844 int err;
3845
3846 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
3847 if (err)
3848 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
3849 vport_num, err);
3850
3851 vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
3852 kfree(vhca_map_entry);
3853}
3854
3855int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
3856{
3857 u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);
3858
3859 if (!res)
3860 return -ENOENT;
3861
3862 *vport_num = *res;
3863 return 0;
3864}
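
/*
 * Editorial example: _set()/_clear() maintain the vhca_id -> vport_num
 * xarray while a vport is in use, and the lookup above resolves a peer
 * function's vhca_id back to its vport number:
 *
 *	u16 vport_num;
 *
 *	if (!mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, &vport_num))
 *		esw_debug(esw->dev, "vhca_id %u is vport %u\n",
 *			  vhca_id, vport_num);
 */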
10742efc
VB
3865
3866u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
3867 u16 vport_num)
3868{
3869 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
3870
3871 if (WARN_ON_ONCE(IS_ERR(vport)))
3872 return 0;
3873
3874 return vport->metadata;
3875}
3876EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
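
/*
 * Editorial note: _for_set() returns the raw per-vport metadata value that
 * the ingress ACL programs into reg_c_0, while _for_match() above shifts
 * the same value into the upper ESW_SOURCE_PORT_METADATA_BITS of the
 * 32-bit register so that it lines up with the matcher layout.
 */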
e9d491a6
PP
3877
3878static bool
3879is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
3880{
3881 return vport_num == MLX5_VPORT_PF ||
3882 mlx5_eswitch_is_vf_vport(esw, vport_num) ||
3883 mlx5_esw_is_sf_vport(esw, vport_num);
3884}
3885
3886int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
3887 u8 *hw_addr, int *hw_addr_len,
3888 struct netlink_ext_ack *extack)
3889{
3890 struct mlx5_eswitch *esw;
3891 struct mlx5_vport *vport;
3892 u16 vport_num;
3893
3894 esw = mlx5_devlink_eswitch_get(port->devlink);
3895 if (IS_ERR(esw))
3896 return PTR_ERR(esw);
3897
3898 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
3899 if (!is_port_function_supported(esw, vport_num))
3900 return -EOPNOTSUPP;
3901
3902 vport = mlx5_eswitch_get_vport(esw, vport_num);
3903 if (IS_ERR(vport)) {
3904 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
3905 return PTR_ERR(vport);
3906 }
3907
3908 mutex_lock(&esw->state_lock);
3909 ether_addr_copy(hw_addr, vport->info.mac);
3910 *hw_addr_len = ETH_ALEN;
3911 mutex_unlock(&esw->state_lock);
3912 return 0;
3913}
3914
3915int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
3916 const u8 *hw_addr, int hw_addr_len,
3917 struct netlink_ext_ack *extack)
3918{
3919 struct mlx5_eswitch *esw;
3920 u16 vport_num;
3921
3922 esw = mlx5_devlink_eswitch_get(port->devlink);
3923 if (IS_ERR(esw)) {
3924 NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
3925 return PTR_ERR(esw);
3926 }
3927
3928 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
3929 if (!is_port_function_supported(esw, vport_num)) {
3930 NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
3931 return -EINVAL;
3932 }
3933
3934 return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
3935}
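
/*
 * Editorial example (userspace view): the two handlers above back the
 * "devlink port function" hardware address attribute, e.g.
 *
 *	$ devlink port function set pci/0000:06:00.0/1 hw_addr 00:00:00:00:88:88
 *	$ devlink port show pci/0000:06:00.0/1
 *
 * The PCI address, port index and MAC are hypothetical.
 */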
7db98396
YH
3936
3937static struct mlx5_vport *
3938mlx5_devlink_port_fn_get_vport(struct devlink_port *port, struct mlx5_eswitch *esw)
3939{
3940 u16 vport_num;
3941
3942 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
3943 return ERR_PTR(-EOPNOTSUPP);
3944
3945 vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
3946 if (!is_port_function_supported(esw, vport_num))
3947 return ERR_PTR(-EOPNOTSUPP);
3948
3949 return mlx5_eswitch_get_vport(esw, vport_num);
3950}
3951
e5b9642a
SD
3952int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
3953 struct netlink_ext_ack *extack)
3954{
3955 struct mlx5_eswitch *esw;
3956 struct mlx5_vport *vport;
3957 int err = -EOPNOTSUPP;
3958
3959 esw = mlx5_devlink_eswitch_get(port->devlink);
3960 if (IS_ERR(esw))
3961 return PTR_ERR(esw);
3962
3963 if (!MLX5_CAP_GEN(esw->dev, migration)) {
3964 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
3965 return err;
3966 }
3967
3968 vport = mlx5_devlink_port_fn_get_vport(port, esw);
3969 if (IS_ERR(vport)) {
3970 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
3971 return PTR_ERR(vport);
3972 }
3973
3974 mutex_lock(&esw->state_lock);
3975 if (vport->enabled) {
3976 *is_enabled = vport->info.mig_enabled;
3977 err = 0;
3978 }
3979 mutex_unlock(&esw->state_lock);
3980 return err;
3981}
3982
3983int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
3984 struct netlink_ext_ack *extack)
3985{
3986 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
3987 struct mlx5_eswitch *esw;
3988 struct mlx5_vport *vport;
3989 void *query_ctx;
3990 void *hca_caps;
3991 int err = -EOPNOTSUPP;
3992
3993 esw = mlx5_devlink_eswitch_get(port->devlink);
3994 if (IS_ERR(esw))
3995 return PTR_ERR(esw);
3996
3997 if (!MLX5_CAP_GEN(esw->dev, migration)) {
3998 NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
3999 return err;
4000 }
4001
4002 vport = mlx5_devlink_port_fn_get_vport(port, esw);
4003 if (IS_ERR(vport)) {
4004 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
4005 return PTR_ERR(vport);
4006 }
4007
4008 mutex_lock(&esw->state_lock);
4009 if (!vport->enabled) {
4010 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
4011 goto out;
4012 }
4013
4014 if (vport->info.mig_enabled == enable) {
4015 err = 0;
4016 goto out;
4017 }
4018
4019 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
4020 if (!query_ctx) {
4021 err = -ENOMEM;
4022 goto out;
4023 }
4024
4025 err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
4026 MLX5_CAP_GENERAL_2);
4027 if (err) {
4028 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
4029 goto out_free;
4030 }
4031
4032 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
e5b9642a
SD
4033 MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, 1);
4034
4035 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
4036 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
4037 if (err) {
4038 NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA migratable cap");
4039 goto out_free;
4040 }
4041
4042 vport->info.mig_enabled = enable;
4043
4044out_free:
4045 kfree(query_ctx);
4046out:
4047 mutex_unlock(&esw->state_lock);
4048 return err;
4049}
4050
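/*
 * Editorial example (userspace view): enabling the migratable capability
 * on a VF port function:
 *
 *	$ devlink port function set pci/0000:06:00.0/2 migratable enable
 *
 * The PCI address and port index are hypothetical.
 */
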
7db98396
YH
4051int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
4052 struct netlink_ext_ack *extack)
4053{
4054 struct mlx5_eswitch *esw;
4055 struct mlx5_vport *vport;
4056 int err = -EOPNOTSUPP;
4057
4058 esw = mlx5_devlink_eswitch_get(port->devlink);
4059 if (IS_ERR(esw))
4060 return PTR_ERR(esw);
4061
4062 vport = mlx5_devlink_port_fn_get_vport(port, esw);
4063 if (IS_ERR(vport)) {
4064 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
4065 return PTR_ERR(vport);
4066 }
4067
4068 mutex_lock(&esw->state_lock);
4069 if (vport->enabled) {
4070 *is_enabled = vport->info.roce_enabled;
4071 err = 0;
4072 }
4073 mutex_unlock(&esw->state_lock);
4074 return err;
4075}
4076
4077int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
4078 struct netlink_ext_ack *extack)
4079{
4080 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
4081 struct mlx5_eswitch *esw;
4082 struct mlx5_vport *vport;
4083 int err = -EOPNOTSUPP;
4084 void *query_ctx;
4085 void *hca_caps;
4086 u16 vport_num;
4087
4088 esw = mlx5_devlink_eswitch_get(port->devlink);
4089 if (IS_ERR(esw))
4090 return PTR_ERR(esw);
4091
4092 vport = mlx5_devlink_port_fn_get_vport(port, esw);
4093 if (IS_ERR(vport)) {
4094 NL_SET_ERR_MSG_MOD(extack, "Invalid port");
4095 return PTR_ERR(vport);
4096 }
4097 vport_num = vport->vport;
4098
4099 mutex_lock(&esw->state_lock);
4100 if (!vport->enabled) {
4101 NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
4102 goto out;
4103 }
4104
4105 if (vport->info.roce_enabled == enable) {
4106 err = 0;
4107 goto out;
4108 }
4109
4110 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
4111 if (!query_ctx) {
4112 err = -ENOMEM;
4113 goto out;
4114 }
4115
4116 err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
4117 MLX5_CAP_GENERAL);
4118 if (err) {
4119 NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
4120 goto out_free;
4121 }
4122
4123 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
7db98396
YH
4124 MLX5_SET(cmd_hca_cap, hca_caps, roce, enable);
4125
4126 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
4127 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
4128 if (err) {
4129 NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA roce cap");
4130 goto out_free;
4131 }
4132
4133 vport->info.roce_enabled = enable;
4134
4135out_free:
4136 kfree(query_ctx);
4137out:
4138 mutex_unlock(&esw->state_lock);
4139 return err;
4140}
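
/*
 * Editorial example (userspace view): disabling RDMA on one port function
 * while the rest of the device keeps RoCE enabled:
 *
 *	$ devlink port function set pci/0000:06:00.0/2 roce disable
 *
 * The PCI address and port index are hypothetical.
 */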