/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "eswitch_offloads_chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

/* Per vport tables */

#define MLX5_ESW_VPORT_TABLE_SIZE 128

/* This struct is used as a key to the hash table and we need it to be packed
 * so hash result is consistent
 */
struct mlx5_vport_key {
	int	chain;
	u16	prio;
	u16	vport;
	u16	vhca_id;
} __packed;

struct mlx5_vport_table {
	struct hlist_node hlist;
	struct mlx5_flow_table *fdb;
	u32 num_rules;
	struct mlx5_vport_key key;
};

#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4

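/* A per-vport table is created on first use and shared by all rules with the
 * same {chain, prio, vport, vhca_id} key; num_rules is the reference count
 * and the table is destroyed when the last rule referencing it is removed.
 */
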
static struct mlx5_flow_table *
esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *fdb;

	ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
	ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
	ft_attr.prio = FDB_PER_VPORT;
	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(fdb))
		esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
			 PTR_ERR(fdb));

	return fdb;
}

static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
				  struct mlx5_esw_flow_attr *attr,
				  struct mlx5_vport_key *key)
{
	key->vport = attr->in_rep->vport;
	key->chain = attr->chain;
	key->prio = attr->prio;
	key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	return jhash(key, sizeof(*key), 0);
}

/* caller must hold vports.lock */
static struct mlx5_vport_table *
esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
{
	struct mlx5_vport_table *e;

	hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
		if (!memcmp(&e->key, skey, sizeof(*skey)))
			return e;

	return NULL;
}

static void
esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_vport_table *e;
	struct mlx5_vport_key key;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &key);
	e = esw_vport_tbl_lookup(esw, &key, hkey);
	if (!e || --e->num_rules)
		goto out;

	hash_del(&e->hlist);
	mlx5_destroy_flow_table(e->fdb);
	kfree(e);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
}

static struct mlx5_flow_table *
esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport_table *e;
	struct mlx5_vport_key skey;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &skey);
	e = esw_vport_tbl_lookup(esw, &skey, hkey);
	if (e) {
		e->num_rules++;
		goto out;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		fdb = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB namespace\n");
		fdb = ERR_PTR(-ENOENT);
		goto err_ns;
	}

	fdb = esw_vport_tbl_create(esw, ns);
	if (IS_ERR(fdb))
		goto err_ns;

	e->fdb = fdb;
	e->num_rules = 1;
	e->key = skey;
	hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return e->fdb;

err_ns:
	kfree(e);
err_alloc:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return fdb;
}

int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_flow_attr attr = {};
	struct mlx5_eswitch_rep rep = {};
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	int i;

	attr.in_rep = &rep;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.in_rep->vport = vport->vport;
		fdb = esw_vport_tbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	mlx5_esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_flow_attr attr = {};
	struct mlx5_eswitch_rep rep = {};
	struct mlx5_vport *vport;
	int i;

	attr.in_rep = &rep;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.in_rep->vport = vport->vport;
		esw_vport_tbl_put(esw, &attr);
	}
}

/* End: Per vport tables */

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static bool
esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
				   const struct mlx5_vport *vport)
{
	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
		mlx5_eswitch_is_vf_vport(esw, vport->vport));
}

static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr->in_rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}

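/* With metadata matching the source is identified by the value written to
 * REG_C_0 at ingress (see esw_vport_add_ingress_acl_modify_metadata() below)
 * instead of the source_port field, which is ambiguous in dual-port RoCE
 * mode.
 */
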
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	bool hairpin = false;
	int j, i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		struct mlx5_flow_table *ft;

		if (attr->dest_ft) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = attr->dest_ft;
			i++;
		} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
			i++;
		} else if (attr->dest_chain) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
						       1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.pkt_reformat =
						attr->dests[j].pkt_reformat;
				}
				i++;
			}
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if (split) {
		fdb = esw_vport_tbl_get(esw, attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_esw_chains_get_table(esw, attr->chain,
							attr->prio, 0);
		else
			fdb = attr->fdb;

		if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec)) {
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     &flow_act, dest, i);
		hairpin = true;
	} else {
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	}
	if (IS_ERR(rule))
		goto err_add_rule;
	atomic64_inc(&esw->offloads.num_flows);

	if (hairpin)
		attr->flags |= MLX5_ESW_ATTR_FLAG_HAIRPIN;

	return rule;

err_add_rule:
	if (split)
		esw_vport_tbl_put(esw, attr);
	else if (attr->chain || attr->prio)
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_esw_get:
	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
		mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_vport_tbl_get(esw, attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
add_err:
	esw_vport_tbl_put(esw, attr);
err_get_fwd:
	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

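/* A fwd rule is the first stage of a split rule: it performs the match in
 * the chain/prio table, forwards copies to the first split_count vport
 * destinations and steers the packet into the source vport's per-vport
 * table, where the second-stage rule installed by
 * mlx5_eswitch_add_offloaded_rule() handles the remaining destinations.
 */
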
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);
	int i;

	mlx5_del_flow_rules(rule);

	if (attr->flags & MLX5_ESW_ATTR_FLAG_HAIRPIN) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule) {
		esw_vport_tbl_put(esw, attr);
		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
	} else {
		if (split)
			esw_vport_tbl_put(esw, attr);
		else if (attr->chain || attr->prio)
			mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
						  0);
		if (attr->dest_chain)
			mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		   !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

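/* When the HCA cannot do per-flow vlan push/pop in the FDB, the actions
 * above are emulated through the e-switch vport vlan configuration:
 * vlan_push_pop_refcount counts the offloaded rules relying on the
 * emulation, while the per-rep vlan_refcount guards the insert/strip state
 * programmed for each vport.
 */
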
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
						   out, sizeof(out));
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, in,
						    sizeof(in));

	if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
		esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	else
		esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;

	return err;
}

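/* Setting fdb_to_vport_reg_c_id makes REG_C_0 (and REG_C_1 when loopback is
 * supported) survive the FDB-to-vport transition, so the restore table and
 * the representors can read back the metadata written in the FDB.
 */
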
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

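/* Peer miss rules match packets whose source belongs to the peer e-switch of
 * a merged-eswitch (dual-port) device and forward them to the peer's manager
 * vport, one rule per peer vport.
 */
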
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

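/* A restore rule matches the chain tag left in REG_C_0, copies REG_C_1 into
 * REG_B so the value is still visible after the loopback, and attaches the
 * matched tag as the flow tag before forwarding to the offloads table.
 */
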
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw)
{
	return ESW_CHAIN_TAG_METADATA_MASK;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

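/* The slow path FDB holds three flow groups: send-to-vport rules sized at
 * nvports * MAX_SQ_NVPORTS + MAX_PF_SQ entries, one peer miss rule per
 * vport, and the two match-all miss flows.
 */
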
static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	err = mlx5_esw_chains_create(esw);
	if (err) {
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_esw_chains_destroy(esw);
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_esw_chains_destroy(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}

static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	memset(flow_group_in, 0, inlen);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		err = PTR_ERR(mod_hdr);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	kvfree(flow_group_in);

	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);

	return err;
}

static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	if (esw->mode != MLX5_ESWITCH_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable(esw, false);
	mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_eswitch_rep *rep;
	int vport_index;
	u8 rep_type;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}

static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
		__esw_offloads_unload_rep(esw, rep, rep_type);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
			if (err)
				goto err_reps;
		}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
	mlx5e_tc_clean_fdb_peer_flows(esw);
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}

static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only 1 FTE:
	 * 1) Untagged packets - push prio tag VLAN and modify metadata if
	 *    required, allow
	 * Unmatched traffic is allowed by default
	 */
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;

	if (vport->ingress.offloads.modify_metadata_rule) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	}

	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
	}

	kvfree(spec);
	return err;
}

static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						     struct mlx5_vport *vport)
{
	u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	int err = 0;
	u32 key;

	key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport);
	key >>= ESW_SOURCE_PORT_METADATA_OFFSET;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
	MLX5_SET(set_action_in, action, data, key);
	MLX5_SET(set_action_in, action, offset,
		 ESW_SOURCE_PORT_METADATA_OFFSET);
	MLX5_SET(set_action_in, action, length,
		 ESW_SOURCE_PORT_METADATA_BITS);

	vport->ingress.offloads.modify_metadata =
		mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
					 1, action);
	if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata);
		esw_warn(esw->dev,
			 "failed to alloc modify header for vport %d ingress acl (%d)\n",
			 vport->vport, err);
		return err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
	vport->ingress.offloads.modify_metadata_rule =
				mlx5_add_flow_rules(vport->ingress.acl,
						    NULL, &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
		err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
		esw_warn(esw->dev,
			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
			 vport->vport, err);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
	return err;
}

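/* The value programmed above occupies the field defined by
 * ESW_SOURCE_PORT_METADATA_OFFSET/BITS within REG_C_0; the set action writes
 * only that field, leaving the rest of the register (used for the chain tag)
 * untouched.
 */
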
static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						      struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.modify_metadata_rule) {
		mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);

		vport->ingress.offloads.modify_metadata_rule = NULL;
	}
}

static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;
	u32 flow_index = 0;
	int ret = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
		/* This group is to hold FTE to match untagged packets when prio_tag
		 * is enabled.
		 */
		memset(flow_group_in, 0, inlen);

		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in, match_criteria);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
				 vport->vport, ret);
			goto prio_tag_err;
		}
		vport->ingress.offloads.metadata_prio_tag_grp = g;
		flow_index++;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		/* This group holds an FTE with no matches for add metadata for
		 * tagged packets, if prio-tag is enabled (as a fallthrough),
		 * or all traffic in case prio-tag is disabled.
		 */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
		if (IS_ERR(g)) {
			ret = PTR_ERR(g);
			esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
				 vport->vport, ret);
			goto metadata_err;
		}
		vport->ingress.offloads.metadata_allmatch_grp = g;
	}

	kvfree(flow_group_in);
	return 0;

metadata_err:
	if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
prio_tag_err:
	kvfree(flow_group_in);
	return ret;
}

static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
{
	if (vport->ingress.offloads.metadata_allmatch_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
		vport->ingress.offloads.metadata_allmatch_grp = NULL;
	}

	if (vport->ingress.offloads.metadata_prio_tag_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
	}
}

static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	int num_ftes = 0;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    !esw_check_ingress_prio_tag_enabled(esw, vport))
		return 0;

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		num_ftes++;
	if (esw_check_ingress_prio_tag_enabled(esw, vport))
		num_ftes++;

	err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
	if (err) {
		esw_warn(esw->dev,
			 "failed to enable ingress acl (%d) on vport[%d]\n",
			 err, vport->vport);
		return err;
	}

	err = esw_vport_create_ingress_acl_group(esw, vport);
	if (err)
		goto group_err;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules\n", vport->vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
		if (err)
			goto metadata_err;
	}

	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
		err = esw_vport_ingress_prio_tag_config(esw, vport);
		if (err)
			goto prio_tag_err;
	}
	return 0;

prio_tag_err:
	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
metadata_err:
	esw_vport_destroy_ingress_acl_group(vport);
group_err:
	esw_vport_destroy_ingress_acl_table(vport);
	return err;
}

static int esw_vport_egress_config(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport)
{
	int err;

	if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
		return 0;

	esw_vport_cleanup_egress_rules(esw, vport);

	err = esw_vport_enable_egress_acl(esw, vport);
	if (err)
		return err;

	/* For prio tag mode, there is only 1 FTE:
	 * 1) prio tag packets - pop the prio tag VLAN, allow
	 * Unmatched traffic is allowed by default
	 */
	esw_debug(esw->dev,
		  "vport[%d] configure prio tag egress rules\n", vport->vport);

	/* prio tag vlan rule - pop it so VF receives untagged packets */
	err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
						    MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
						    MLX5_FLOW_CONTEXT_ACTION_ALLOW);
	if (err)
		esw_vport_disable_egress_acl(esw, vport);

	return err;
}

static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

static bool
esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
{
	return mlx5_core_mp_enabled(esw->dev);
}

static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
{
	return esw_check_vport_match_metadata_mandatory(esw) &&
	       esw_check_vport_match_metadata_supported(esw);
}

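/* Source-port metadata is turned on only when it is both mandatory
 * (multi-port devices, where source_port matching is ambiguous) and
 * supported by the firmware capabilities checked above.
 */
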
static int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_vport_ingress_config(esw, vport);
	if (err)
		return err;

	if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
		err = esw_vport_egress_config(esw, vport);
		if (err) {
			esw_vport_cleanup_ingress_rules(esw, vport);
			esw_vport_del_ingress_acl_modify_metadata(esw, vport);
			esw_vport_destroy_ingress_acl_group(vport);
			esw_vport_destroy_ingress_acl_table(vport);
		}
	}
	return err;
}

static void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_vport_disable_egress_acl(esw, vport);
	esw_vport_cleanup_ingress_rules(esw, vport);
	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
	esw_vport_destroy_ingress_acl_group(vport);
	esw_vport_destroy_ingress_acl_table(vport);
}

static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err;

	if (esw_use_vport_metadata(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	err = esw_vport_create_offloads_acl_tables(esw, vport);
	if (err)
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
	return err;
}

static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	esw_vport_destroy_offloads_acl_tables(esw, vport);
	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}

static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	int num_vfs = esw->esw_funcs.num_vfs;
	int total_vports;
	int err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		total_vports = esw->total_vports;
	else
		total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		return err;

	err = esw_create_offloads_table(esw, total_vports);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw, total_vports);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw, total_vports);
	if (err)
		goto create_fg_err;

	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);

	return 0;

create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_uplink_offloads_acl_tables(esw);

	return err;
}
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
}
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	bool host_pf_disabled;
	u16 new_num_vfs;
	int err;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	/* Number of VFs can only change from "0 to x" or "x to 0",
	 * e.g. 0 -> 4 loads four VF vports, 4 -> 0 unloads them all.
	 */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err)
			return;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
}
static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	/* This runs from the event notifier, possibly in atomic context, so
	 * the FW query is deferred to a workqueue and the allocation is atomic.
	 */
	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err, i;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	mutex_init(&esw->offloads.termtbl_mutex);
	mlx5_rdma_enable_roce(esw->dev);

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	esw_offloads_devcom_init(esw);

	return 0;

err_vports:
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	return err;
}
static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable(esw, false);
	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
		if (err1)
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
	}

	return err;
}
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
	    !mlx5_core_is_ecpf_esw_manager(dev))
		return -EOPNOTSUPP;

	return 0;
}
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);

	return -EINVAL;
}
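/* Usage sketch (not part of the driver): the callback above is reached via
 * the devlink API, typically from the iproute2 "devlink" tool. The PCI
 * address below is illustrative:
 *
 *   devlink dev eswitch set pci/0000:06:00.0 mode switchdev
 *   devlink dev eswitch show pci/0000:06:00.0
 */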
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport, num_vport;
	u8 mlx5_mode;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	num_vport = --vport;
	mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
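/* Usage sketch (not part of the driver): the inline-mode get/set callbacks
 * above map to devlink commands such as (PCI address illustrative):
 *
 *   devlink dev eswitch set pci/0000:06:00.0 inline-mode transport
 */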
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

	return err;
}
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}
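/* Usage sketch (not part of the driver; iproute2 syntax varies by version,
 * PCI address illustrative):
 *
 *   devlink dev eswitch set pci/0000:06:00.0 encap enable
 */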
static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
	/* Currently, only an ECPF based device has a representor for the
	 * host PF.
	 */
	if (vport_num == MLX5_VPORT_PF &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	if (vport_num == MLX5_VPORT_ECPF &&
	    !mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	int i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_all_reps(esw, i, rep) {
		if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
			rep_data = &rep->rep_data[rep_type];
			atomic_set(&rep_data->state, REP_REGISTERED);
		}
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}
void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num >= MLX5_VPORT_FIRST_VF &&
	       vport_num <= esw->dev->priv.sriov.max_vfs;
}
bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	u32 vport_num_mask = GENMASK(ESW_VPORT_BITS - 1, 0);
	u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
	u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	u32 val;

	/* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */
	WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));

	/* Trim vhca_id to ESW_VHCA_ID_BITS */
	vhca_id &= vhca_id_mask;

	/* Make sure pf and ecpf map to end of ESW_VPORT_BITS range so that,
	 * after trimming, they don't overlap with VF numbers or each other.
	 */
	WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) <
		     vport_num_mask - 1);
	WARN_ON_ONCE((MLX5_VPORT_ECPF & vport_num_mask) <
		     vport_num_mask - 1);
	WARN_ON_ONCE((MLX5_VPORT_UPLINK & vport_num_mask) ==
		     (MLX5_VPORT_ECPF & vport_num_mask));

	/* Make sure that the VF vport_num fits ESW_VPORT_BITS and doesn't
	 * overlap with pf and ecpf.
	 */
	if (vport_num != MLX5_VPORT_UPLINK &&
	    vport_num != MLX5_VPORT_ECPF)
		WARN_ON_ONCE(vport_num >= vport_num_mask - 1);

	/* We can now trim vport_num to ESW_VPORT_BITS */
	vport_num &= vport_num_mask;

	val = (vhca_id << ESW_VPORT_BITS) | vport_num;
	return val << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
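/* Worked example (illustrative, assuming the common reg_c_0 layout of
 * ESW_VHCA_ID_BITS == 8 and ESW_VPORT_BITS == 8, i.e.
 * ESW_SOURCE_PORT_METADATA_BITS == 16): for vhca_id 0x2 and VF vport 0x3,
 * val = (0x2 << 8) | 0x3 = 0x203, and the function returns
 * 0x203 << (32 - 16) = 0x02030000, i.e. the source-port tag occupies the
 * upper 16 bits of the 32-bit metadata register.
 */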