net/mlx5e: E-Switch, Add misc bit when misc fields changed for mirroring
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/acl/ofld.h"
#include "esw/chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

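/* The per vport tables below serve split (mirroring) rules: actions that
 * must run per source vport go into a small dedicated FDB table. Tables are
 * cached in a hash table keyed by {chain, prio, vport, vhca_id} and are
 * reference counted, so repeated gets for the same key share one table.
 */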
/* Per vport tables */

#define MLX5_ESW_VPORT_TABLE_SIZE 128

/* This struct is used as a key to the hash table and we need it to be packed
 * so the hash result is consistent
 */
struct mlx5_vport_key {
	u32 chain;
	u16 prio;
	u16 vport;
	u16 vhca_id;
} __packed;

struct mlx5_vport_table {
	struct hlist_node hlist;
	struct mlx5_flow_table *fdb;
	u32 num_rules;
	struct mlx5_vport_key key;
};

#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4

static struct mlx5_flow_table *
esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *fdb;

	ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
	ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
	ft_attr.prio = FDB_PER_VPORT;
	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
			 PTR_ERR(fdb));
	}

	return fdb;
}

static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
				  struct mlx5_esw_flow_attr *attr,
				  struct mlx5_vport_key *key)
{
	key->vport = attr->in_rep->vport;
	key->chain = attr->chain;
	key->prio = attr->prio;
	key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	return jhash(key, sizeof(*key), 0);
}

/* caller must hold vports.lock */
static struct mlx5_vport_table *
esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
{
	struct mlx5_vport_table *e;

	hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
		if (!memcmp(&e->key, skey, sizeof(*skey)))
			return e;

	return NULL;
}

static void
esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_vport_table *e;
	struct mlx5_vport_key key;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &key);
	e = esw_vport_tbl_lookup(esw, &key, hkey);
	if (!e || --e->num_rules)
		goto out;

	hash_del(&e->hlist);
	mlx5_destroy_flow_table(e->fdb);
	kfree(e);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
}

static struct mlx5_flow_table *
esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport_table *e;
	struct mlx5_vport_key skey;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &skey);
	e = esw_vport_tbl_lookup(esw, &skey, hkey);
	if (e) {
		e->num_rules++;
		goto out;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		fdb = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB namespace\n");
		fdb = ERR_PTR(-ENOENT);
		goto err_ns;
	}

	fdb = esw_vport_tbl_create(esw, ns);
	if (IS_ERR(fdb))
		goto err_ns;

	e->fdb = fdb;
	e->num_rules = 1;
	e->key = skey;
	hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return e->fdb;

err_ns:
	kfree(e);
err_alloc:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return fdb;
}

int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_flow_attr attr = {};
	struct mlx5_eswitch_rep rep = {};
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	int i;

	attr.prio = 1;
	attr.in_rep = &rep;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.in_rep->vport = vport->vport;
		fdb = esw_vport_tbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	mlx5_esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_flow_attr attr = {};
	struct mlx5_eswitch_rep rep = {};
	struct mlx5_vport *vport;
	int i;

	attr.prio = 1;
	attr.in_rep = &rep;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.in_rep->vport = vport->vport;
		esw_vport_tbl_put(esw, &attr);
	}
}

/* End: Per vport tables */

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

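/* Set the source vport match of a rule. With metadata matching enabled the
 * ingress vport is matched through metadata_reg_c_0; otherwise the rule
 * matches misc.source_port, plus the owner vhca_id on merged eswitch setups
 * where the source vport may belong to the peer device.
 */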
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr->in_rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}

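/* Build and install an offloaded FDB rule: gather the forward destinations
 * (goto chain, slow path table, or vports with optional encap) and an
 * optional counter, pick the target table (the per vport table for split
 * rules, a chains table otherwise) and add the rule, going through a
 * termination table when one is required.
 */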
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		struct mlx5_flow_table *ft;

		if (attr->dest_ft) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = attr->dest_ft;
			i++;
		} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
			i++;
		} else if (attr->dest_chain) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
						       1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.pkt_reformat =
						attr->dests[j].pkt_reformat;
				}
				i++;
			}
		}
	}

	if (attr->decap_pkt_reformat)
		flow_act.pkt_reformat = attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if (split) {
		if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
		    attr->in_rep->vport == MLX5_VPORT_UPLINK)
			spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
		fdb = esw_vport_tbl_get(esw, attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_esw_chains_get_table(esw, attr->chain,
							attr->prio, 0);
		else
			fdb = attr->fdb;

		if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	return rule;

err_add_rule:
	if (split)
		esw_vport_tbl_put(esw, attr);
	else if (attr->chain || attr->prio)
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_esw_get:
	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
		mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

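/* First level of the two-level scheme used for mirroring (split) rules:
 * the rule added here in the fast path table forwards to the first
 * destinations and chains into the per vport table, where the rule for the
 * remaining destination is installed by the split handling above.
 */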
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_vport_tbl_get(esw, attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
add_err:
	esw_vport_tbl_put(esw, attr);
err_get_fwd:
	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

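/* Common teardown for offloaded rules: delete the rule, release any
 * termination tables it referenced and drop the table references taken at
 * add time (per vport table and/or chain tables), mirroring the add paths
 * above.
 */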
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);
	int i;

	mlx5_del_flow_rules(rule);

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule) {
		esw_vport_tbl_put(esw, attr);
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
	} else {
		if (split)
			esw_vport_tbl_put(esw, attr);
		else if (attr->chain || attr->prio)
			mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
						  0);
		if (attr->dest_chain)
			mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

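/* VLAN push/pop emulation. On devices without native per flow vlan actions,
 * push is emulated by programming vport vlan insertion/stripping through
 * __mlx5_eswitch_set_vport_vlan(), with refcounts guarding against
 * conflicting settings; esw_add_vlan_action_check() rejects combinations
 * the emulation cannot express.
 */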
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		  !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

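/* Add a slow path rule matching packets that the eswitch manager sends on a
 * given SQ (source_sqn + source_port) and forward them to the destination
 * vport; this is how traffic transmitted on a representor reaches its
 * vport.
 */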
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

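/* reg_c_0 carries the source vport metadata and reg_c_1 the chain restore
 * tag. esw_set_passing_vport_metadata() toggles, through the esw vport
 * context, whether these registers are propagated from the FDB domain to
 * the NIC domain, when the firmware supports it.
 */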
static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}

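/* Peer miss rules, used with a merged eswitch: packets coming from the
 * vports of the peer device (matched by source port or source vport
 * metadata, plus the peer vhca_id) are forwarded to the peer's eswitch
 * manager vport instead of missing in the local FDB.
 */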
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

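/* Install the two match-all miss flows counted by MLX5_ESW_MISS_FLOWS. Both
 * match only the multicast bit of the destination mac (dmac_c[0] = 0x01),
 * so one rule catches missed unicast and the other missed multicast
 * traffic; both forward to the eswitch manager vport.
 */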
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

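/* Chain restore: match one chain tag value in reg_c_0, copy reg_c_1 into
 * reg_b using the modify header allocated in esw_create_restore_table(),
 * set the tag as the flow tag and forward to the offloads table, so the
 * tag is visible after the packet leaves the FDB domain.
 */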
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw)
{
	return ESW_CHAIN_TAG_METADATA_MASK;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

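/* The slow path FDB holds three flow groups, laid out in this order:
 * send-to-vport rules (nvports * MAX_SQ_NVPORTS + MAX_PF_SQ entries), peer
 * eswitch miss rules (one per vport) and the two match-all miss flows.
 */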
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	err = mlx5_esw_chains_create(esw);
	if (err) {
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_esw_chains_destroy(esw);
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_esw_chains_destroy(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

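/* NIC rx side of offloads mode: the offloads table holds one rule per vport
 * (plus the miss flows) that matches a packet's source vport and forwards
 * it to the rx destination supplied by the caller, typically the
 * representor's TIR.
 */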
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

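/* The restore table lives in the offloads namespace, sized to one entry per
 * possible chain tag (1 << ESW_CHAIN_TAG_METADATA_BITS), with a shared
 * modify header that copies reg_c_1 into reg_b. esw_add_restore_rule()
 * above populates it one tag at a time.
 */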
static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}

static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	memset(flow_group_in, 0, inlen);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	kvfree(flow_group_in);

	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);

	return err;
}

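/* devlink mode switch into offloads: disable the eswitch in legacy mode and
 * re-enable it in offloads mode, falling back to legacy if that fails, then
 * resolve the effective inline mode if none was set explicitly.
 */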
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	if (esw->mode != MLX5_ESWITCH_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
					 esw->dev->priv.sriov.num_vfs);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_eswitch_rep *rep;
	int vport_index;
	u8 rep_type;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}

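/* Representor state machine: each rep type moves between REP_UNREGISTERED,
 * REP_REGISTERED and REP_LOADED. The atomic_cmpxchg() transitions make
 * load/unload idempotent, so the rep_ops load/unload callbacks run exactly
 * once per transition even with repeated calls.
 */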
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
		__esw_offloads_unload_rep(esw, rep, rep_type);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
			if (err)
				goto err_reps;
		}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

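/* devcom events pair the eswitches of the two PFs of a merged-eswitch
 * device (typically dual-port): each side installs FDB peer miss rules
 * for the other eswitch's vports so cross-port traffic can be offloaded.
 */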
#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}

static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

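/* Matching on source-port metadata (instead of the vport number itself)
 * requires FW support for copying metadata into REG_C_0 and for the
 * flow_source hint; the checks below gate the feature on those caps.
 */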
static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

static bool
esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
{
	return mlx5_core_mp_enabled(esw->dev);
}

static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
{
	return esw_check_vport_match_metadata_mandatory(esw) &&
	       esw_check_vport_match_metadata_supported(esw);
}

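/* Match metadata layout: the (trimmed) vhca_id occupies the high
 * ESW_VHCA_ID_BITS and a per-vport id allocated from an IDA occupies
 * the low ESW_VPORT_BITS. Zero is reserved as the invalid value, so
 * for vhca_id 0 the allocation range starts at 1.
 */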
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
	u32 num_vports = GENMASK(ESW_VPORT_BITS - 1, 0) - 1;
	u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
	u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	u32 start;
	u32 end;
	int id;

	/* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */
	WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));

	/* Trim vhca_id to ESW_VHCA_ID_BITS */
	vhca_id &= vhca_id_mask;

	start = (vhca_id << ESW_VPORT_BITS);
	end = start + num_vports;
	if (!vhca_id)
		start += 1; /* zero is reserved/invalid metadata */
	id = ida_alloc_range(&esw->offloads.vport_metadata_ida, start, end, GFP_KERNEL);

	return (id < 0) ? 0 : id;
}

void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{
	ida_free(&esw->offloads.vport_metadata_ida, metadata);
}

static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	if (vport->vport == MLX5_VPORT_UPLINK)
		return 0;

	vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
	vport->metadata = vport->default_metadata;
	return vport->metadata ? 0 : -ENOSPC;
}

static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	if (vport->vport == MLX5_VPORT_UPLINK || !vport->default_metadata)
		return;

	WARN_ON(vport->metadata != vport->default_metadata);
	mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}

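/* Per-vport offloads ACL setup: allocate match metadata first, then the
 * ingress ACL (which, among other things, tags packets with the vport
 * metadata - see esw/acl/ofld.c), and for VF vports also the egress
 * ACL. The error path and the destroy helper unwind in reverse order.
 */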
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_offloads_vport_metadata_setup(esw, vport);
	if (err)
		goto metadata_err;

	err = esw_acl_ingress_ofld_setup(esw, vport);
	if (err)
		goto ingress_err;

	if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
		err = esw_acl_egress_ofld_setup(esw, vport);
		if (err)
			goto egress_err;
	}

	return 0;

egress_err:
	esw_acl_ingress_ofld_cleanup(esw, vport);
ingress_err:
	esw_offloads_vport_metadata_cleanup(esw, vport);
metadata_err:
	return err;
}

void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_cleanup(vport);
	esw_acl_ingress_ofld_cleanup(esw, vport);
	esw_offloads_vport_metadata_cleanup(esw, vport);
}

static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err;

	if (esw_use_vport_metadata(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	err = esw_vport_create_offloads_acl_tables(esw, vport);
	if (err)
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
	return err;
}

static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	esw_vport_destroy_offloads_acl_tables(esw, vport);
	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}

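/* Offloads steering is created in dependency order: uplink ACLs, the
 * offloads (slow path) table, the restore table, the FDB tables and the
 * vport rx group. The error labels unwind the same steps in reverse.
 */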
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	int num_vfs = esw->esw_funcs.num_vfs;
	int total_vports;
	int err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		total_vports = esw->total_vports;
	else
		total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		goto create_acl_err;

	err = esw_create_offloads_table(esw, total_vports);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw, total_vports);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw, total_vports);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}

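/* Handler for FW "esw functions changed" events: reload the VF vports
 * when the number of host VFs changes. The count is only expected to
 * move between zero and a non-zero value, never from x to y directly.
 */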
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	bool host_pf_disabled;
	u16 new_num_vfs;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		int err;

		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err)
			return;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
}

static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}

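/* Bring up offloads mode: pick the encap default from FW caps, enable
 * metadata passing and steering, force VF links down (their reps own
 * the link state from now on), load the uplink representor first, then
 * the PF/VF vports, and finally kick off devcom pairing.
 */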
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err, i;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	mutex_init(&esw->offloads.termtbl_mutex);
	mlx5_rdma_enable_roce(esw->dev);

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	esw_offloads_devcom_init(esw);

	return 0;

err_vports:
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
					 MLX5_ESWITCH_IGNORE_NUM_VFS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}

void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}

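/* Translation helpers between the generic devlink eswitch modes and the
 * mlx5 ones. From userspace the mode is driven through devlink, e.g.:
 *
 *   devlink dev eswitch set pci/0000:06:00.0 mode switchdev
 *
 * (the PCI address is illustrative; any bound mlx5 device works).
 */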
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	return 0;
}

static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
{
	/* devlink commands in NONE eswitch mode are currently supported only
	 * on ECPF.
	 */
	return (esw->mode == MLX5_ESWITCH_NONE &&
		!mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	cur_mlx5_mode = esw->mode;

	if (cur_mlx5_mode == mlx5_mode)
		goto unlock;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		err = esw_offloads_start(esw, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		err = esw_offloads_stop(esw, extack);
	else
		err = -EINVAL;

unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_mode_to_devlink(esw->mode, mode);
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport, num_vport;
	u8 mlx5_mode;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto out;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			goto out;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		err = -EOPNOTSUPP;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		err = -EOPNOTSUPP;
		goto out;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	mutex_unlock(&esw->mode_lock);
	return 0;

revert_inline_mode:
	num_vport = --vport;
	mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	mutex_unlock(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}

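/* Changing the encap mode requires re-creating the FDB tables, so it is
 * rejected while offloaded flows exist. In legacy mode the value is
 * only recorded and takes effect on the next switch to offloads mode.
 */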
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		goto unlock;
	}

	if (esw->offloads.encap == encap)
		goto unlock;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	*encap = esw->offloads.encap;
unlock:
	mutex_unlock(&esw->mode_lock);
	return 0;
}

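/* Representor ops are registered per rep type (e.g. REP_ETH, REP_IB) by
 * the consuming driver. Registration only records the ops and marks the
 * reps REP_REGISTERED; the actual load happens when vports are enabled.
 */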
static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
	/* Currently, only ECPF based device has representor for host PF. */
	if (vport_num == MLX5_VPORT_PF &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	if (vport_num == MLX5_VPORT_ECPF &&
	    !mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	int i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_all_reps(esw, i, rep) {
		if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
			rep_data = &rep->rep_data[rep_type];
			atomic_set(&rep_data->state, REP_REGISTERED);
		}
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num >= MLX5_VPORT_FIRST_VF &&
	       vport_num <= esw->dev->priv.sriov.max_vfs;
}

bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);

u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);