/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)

#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]
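/* Illustrative reading of the macro above: fdb_prio_table(esw, 0, 1, 0).fdb
 * is the cached level-0 flow table for chain 0, prio 1, and .num_rules is
 * its reference count; both are managed under fdb_prio_lock by
 * esw_get_prio_table()/esw_put_prio_table() below.
 */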
#define UPLINK_REP_INDEX 0
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);

bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
{
	return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
}

u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_CHAIN;

	return 0;
}

u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_PRIO;

	return 1;
}
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc2, metadata_reg_c_0);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr->in_rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		if (attr->dest_chain) {
			struct mlx5_flow_table *ft;

			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.reformat_id = attr->dests[j].encap_id;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.reformat_id =
						attr->dests[j].encap_id;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
		if (attr->tunnel_match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
		if (attr->match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
	} else if (attr->match_level != MLX5_MATCH_NONE) {
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
	if (attr->dest_chain)
		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.reformat_id = attr->dests[i].encap_id;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	if (attr->match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	esw->offloads.num_flows++;

	return rule;
add_err:
	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);
	int i;

	mlx5_del_flow_rules(rule);

	/* unref the term table */
	for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
		if (attr->dests[i].termtbl)
			mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
	}

	esw->offloads.num_flows--;

	if (fwd_rule) {
		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
	} else {
		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
		if (attr->dest_chain)
			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		  !attr->dest_chain);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}
static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 fdb_to_vport_reg_c_id;
	int err;

	err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
						   out, sizeof(out));
	if (err)
		return err;

	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
					 esw_vport_context.fdb_to_vport_reg_c_id);

	fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
						     in, sizeof(in));
}

static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 fdb_to_vport_reg_c_id;
	int err;

	err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
						   out, sizeof(out));
	if (err)
		return err;

	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
					 esw_vport_context.fdb_to_vport_reg_c_id);

	fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
						     in, sizeof(in));
}
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}
static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}
#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently has 4 pools of 4 sizes that it supports (ESW_POOLS),
 * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
 * for each flow table pool. We can allocate up to 16M of each pool,
 * and we keep track of how much we used via put/get_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOL is expected to be sorted from large to small.
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				    64 * 1024, 4 * 1024 };
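/* A minimal worked example of the accounting above (illustrative only):
 * with ESW_SIZE = 16M the per-pool budgets start out as
 *   fdb_left = { 16M/4M, 16M/1M, 16M/64K, 16M/4K } = { 4, 16, 256, 4096 },
 * capped by the FW max table size (see esw_create_offloads_fdb_tables()).
 * get_sz_from_pool() hands out the largest size that still has budget;
 * put_sz_to_pool() returns a freed table to the first pool it fits in.
 */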
static int
get_sz_from_pool(struct mlx5_eswitch *esw)
{
	int sz = 0, i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (esw->fdb_table.offloads.fdb_left[i]) {
			--esw->fdb_table.offloads.fdb_left[i];
			sz = ESW_POOLS[i];
			break;
		}
	}

	return sz;
}

static void
put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (sz >= ESW_POOLS[i]) {
			++esw->fdb_table.offloads.fdb_left[i];
			break;
		}
	}
}
static struct mlx5_flow_table *
create_next_size_table(struct mlx5_eswitch *esw,
		       struct mlx5_flow_namespace *ns,
		       u16 table_prio,
		       int level,
		       u32 flags)
{
	struct mlx5_flow_table *fdb;
	int sz;

	sz = get_sz_from_pool(esw);
	if (!sz)
		return ERR_PTR(-ENOSPC);

	fdb = mlx5_create_auto_grouped_flow_table(ns,
						  table_prio,
						  sz,
						  ESW_OFFLOADS_NUM_GROUPS,
						  level,
						  flags);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
			 (int)PTR_ERR(fdb), table_prio, level, sz);
		put_sz_to_pool(esw, sz);
	}

	return fdb;
}
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_namespace *ns;
	int table_prio, l = 0;
	u32 flags = 0;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return esw->fdb_table.offloads.slow_fdb;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
	if (fdb) {
		/* take ref on earlier levels as well */
		while (level >= 0)
			fdb_prio_table(esw, chain, prio, level--).num_rules++;
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return fdb;
	}

	ns = mlx5_get_fdb_sub_ns(dev, chain);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB sub namespace\n");
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	table_prio = (chain * FDB_MAX_PRIO) + prio - 1;

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables
	 */
	for (l = 0; l <= level; l++) {
		if (fdb_prio_table(esw, chain, prio, l).fdb) {
			fdb_prio_table(esw, chain, prio, l).num_rules++;
			continue;
		}

		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
		if (IS_ERR(fdb)) {
			l--;
			goto err_create_fdb;
		}

		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	return fdb;

err_create_fdb:
	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	if (l >= 0)
		esw_put_prio_table(esw, chain, prio, l);

	return fdb;
}
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	int l;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	for (l = level; l >= 0; l--) {
		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
			continue;

		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}
static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
	/* If lazy creation isn't supported, deref the fast path tables */
	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
		esw_put_prio_table(esw, 0, 1, 1);
		esw_put_prio_table(esw, 0, 1, 0);
	}
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters_2.metadata_reg_c_0);
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	u32 *flow_group_in, max_flow_counter;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0, i;
	struct mlx5_flow_group *g;
	u32 flags = 0, fdb_max;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
		  fdb_max);

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
		esw->fdb_table.offloads.fdb_left[i] =
			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;
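	/* Sizing sketch (illustrative): with nvports = 4 the slow path table
	 * reserves 4 * MAX_SQ_NVPORTS + MAX_PF_SQ = 384 send-to-vport
	 * entries, MLX5_ESW_MISS_FLOWS (2) miss entries and one peer-miss
	 * entry per vport; the flow groups carved out below follow the same
	 * layout.
	 */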
	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* If lazy creation isn't supported, open the fast path tables now */
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
		esw_get_prio_table(esw, 0, 1, 0);
		esw_get_prio_table(esw, 0, 1, 1);
	} else {
		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
	}

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_destroy_offloads_fast_fdb_tables(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_tables(esw);
}
static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	if (esw->mode != MLX5_ESWITCH_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable(esw);
	mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN], rep_type;
	int vport_index;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_query_mac_address(dev, hw_id);

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;
		ether_addr_copy(rep->hw_id, hw_id);

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				   u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	__unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);

	/* Special vports must be the last to unload. */
	__unload_reps_special_vport(esw, rep_type);
}

static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, rep_type);
}
static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
				   struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	int err = 0;

	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
		err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
		if (err)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_REGISTERED);
	}

	return err;
}

static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	err = __esw_offloads_load_rep(esw, rep, rep_type);
	if (err)
		return err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_pf;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_ecpf;
	}

	return 0;

err_ecpf:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

err_pf:
	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err, i;

	mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_vf;
	}

	return 0;

err_vf:
	__unload_reps_vf_vport(esw, --i, rep_type);
	return err;
}

static int __load_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	int err;

	/* Special vports must be loaded first, uplink rep creates mdev resource. */
	err = __load_reps_special_vport(esw, rep_type);
	if (err)
		return err;

	err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
	if (err)
		goto err_vfs;

	return 0;

err_vfs:
	__unload_reps_special_vport(esw, rep_type);
	return err;
}

static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_vf_vport(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
	return err;
}

static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_all_vport(esw, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, rep_type);
	return err;
}
#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
	mlx5e_tc_clean_fdb_peer_flows(esw);
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_eswitch *peer_esw = event_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_out;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);

err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}
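/* Pairing flow sketch (illustrative): when devcom delivers
 * ESW_OFFLOADS_DEVCOM_PAIR, each e-switch installs peer miss rules
 * pointing at the other device's manager vport, so traffic sourced
 * from a peer vport that misses the local FDB is handed back to the
 * peer e-switch instead of being dropped.
 */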
static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only one FTE:
	 * 1) Untagged packets - push prio tag VLAN and modify metadata if
	 *    required, allow
	 * Unmatched traffic is allowed by default
	 */

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;

	if (vport->ingress.modify_metadata_rule) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.modify_id = vport->ingress.modify_metadata_id;
	}

	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
	}

	kvfree(spec);
	if (err)
		esw_vport_cleanup_ingress_rules(esw, vport);
	return err;
}
static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						     struct mlx5_vport *vport)
{
	u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec spec = {};
	int err = 0;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
	MLX5_SET(set_action_in, action, data,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));

	err = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
				       1, action, &vport->ingress.modify_metadata_id);
	if (err) {
		esw_warn(esw->dev,
			 "failed to alloc modify header for vport %d ingress acl (%d)\n",
			 vport->vport, err);
		return err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.modify_id = vport->ingress.modify_metadata_id;
	vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl,
								  &spec, &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.modify_metadata_rule)) {
		err = PTR_ERR(vport->ingress.modify_metadata_rule);
		esw_warn(esw->dev,
			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
			 vport->vport, err);
		vport->ingress.modify_metadata_rule = NULL;
	}

	if (err)
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata_id);
	return err;
}

void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
					       struct mlx5_vport *vport)
{
	if (vport->ingress.modify_metadata_rule) {
		mlx5_del_flow_rules(vport->ingress.modify_metadata_rule);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata_id);

		vport->ingress.modify_metadata_rule = NULL;
	}
}
static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;

	if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
		return 0;

	/* For prio tag mode, there is only one FTE:
	 * 1) prio tag packets - pop the prio tag VLAN, allow
	 * Unmatched traffic is allowed by default
	 */

	esw_vport_cleanup_egress_rules(esw, vport);

	err = esw_vport_enable_egress_acl(esw, vport);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "failed to enable egress acl (%d) on vport[%d]\n",
			       err, vport->vport);
		return err;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure prio tag egress rules\n", vport->vport);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	/* prio tag vlan rule - pop it so VF receives untagged packets */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	vport->egress.allowed_vlan =
		mlx5_add_flow_rules(vport->egress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->egress.allowed_vlan)) {
		err = PTR_ERR(vport->egress.allowed_vlan);
		esw_warn(esw->dev,
			 "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
			 vport->vport, err);
		vport->egress.allowed_vlan = NULL;
	}

	kvfree(spec);
out:
	if (err)
		esw_vport_cleanup_egress_rules(esw, vport);
	return err;
}
static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    !MLX5_CAP_GEN(esw->dev, prio_tag_required))
		return 0;

	esw_vport_cleanup_ingress_rules(esw, vport);

	err = esw_vport_enable_ingress_acl(esw, vport);
	if (err) {
		esw_warn(esw->dev,
			 "failed to enable ingress acl (%d) on vport[%d]\n",
			 err, vport->vport);
		return err;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules\n", vport->vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
		if (err)
			goto out;
	}

	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
	    mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
		err = esw_vport_ingress_prio_tag_config(esw, vport);
		if (err)
			goto out;
	}

out:
	if (err)
		esw_vport_disable_ingress_acl(esw, vport);
	return err;
}
static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}
static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int i, j;
	int err;

	if (esw_check_vport_match_metadata_supported(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	mlx5_esw_for_all_vports(esw, i, vport) {
		err = esw_vport_ingress_common_config(esw, vport);
		if (err)
			goto err_ingress;

		if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
			err = esw_vport_egress_prio_tag_config(esw, vport);
			if (err)
				goto err_egress;
		}
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		esw_info(esw->dev, "Use metadata reg_c as source vport to match\n");

	return 0;

err_egress:
	esw_vport_disable_ingress_acl(esw, vport);
err_ingress:
	for (j = MLX5_VPORT_PF; j < i; j++) {
		vport = &esw->vports[j];
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
	}

	return err;
}

static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int i;

	mlx5_esw_for_all_vports(esw, i, vport) {
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
	}

	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	int num_vfs = esw->esw_funcs.num_vfs;
	int total_vports;
	int err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		total_vports = esw->total_vports;
	else
		total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);

	err = esw_create_offloads_acl_tables(esw);
	if (err)
		return err;

	err = esw_create_offloads_fdb_tables(esw, total_vports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw, total_vports);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw, total_vports);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

create_fdb_err:
	esw_destroy_offloads_acl_tables(esw);

	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_offloads_acl_tables(esw);
}
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	bool host_pf_disabled;
	u16 new_num_vfs;
	int err;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
	} else {
		err = esw_offloads_load_vf_reps(esw, new_num_vfs);
		if (err)
			return;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
}
static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}
int esw_offloads_init(struct mlx5_eswitch *esw)
{
	int err;

	err = esw_offloads_steering_init(esw);
	if (err)
		return err;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = mlx5_eswitch_enable_passing_vport_metadata(esw);
		if (err)
			goto err_vport_metadata;
	}

	err = esw_offloads_load_all_reps(esw);
	if (err)
		goto err_reps;

	esw_offloads_devcom_init(esw);
	mutex_init(&esw->offloads.termtbl_mutex);

	mlx5_rdma_enable_roce(esw->dev);

	return 0;

err_reps:
	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		mlx5_eswitch_disable_passing_vport_metadata(esw);
err_vport_metadata:
	esw_offloads_steering_cleanup(esw);
	return err;
}
static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable(esw);
	err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}
void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{
	mlx5_rdma_disable_roce(esw->dev);
	esw_offloads_devcom_cleanup(esw);
	esw_offloads_unload_all_reps(esw);
	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		mlx5_eswitch_disable_passing_vport_metadata(esw);
	esw_offloads_steering_cleanup(esw);
}
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
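/* Userspace example (illustrative) of the mode translation above:
 *   devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 * reaches mlx5_devlink_eswitch_mode_set() with
 * DEVLINK_ESWITCH_MODE_SWITCHDEV, which maps here to
 * MLX5_ESWITCH_OFFLOADS; "mode legacy" maps back the same way.
 */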
static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	if (dev->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
	    !mlx5_core_is_ecpf_esw_manager(dev))
		return -EOPNOTSUPP;

	return 0;
}
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);
	else
		return -EINVAL;
}
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport, num_vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	num_vport = --vport;
	mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

	return err;
}
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	int i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_all_reps(esw, i, rep) {
		rep_data = &rep->rep_data[rep_type];
		atomic_set(&rep_data->state, REP_REGISTERED);
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num >= MLX5_VPORT_FIRST_VF &&
	       vport_num <= esw->dev->priv.sriov.max_vfs;
}

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);

u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	return ((MLX5_CAP_GEN(esw->dev, vhca_id) & 0xffff) << 16) | vport_num;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
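/* Layout of the reg_c_0 match value built above (illustrative):
 *   bits 31..16 - vhca_id of the e-switch owner
 *   bits 15..0  - vport number
 * e.g. a hypothetical vhca_id 0x0002 and vport 5 yield 0x00020005,
 * which is the value mlx5_eswitch_set_rule_source_port() programs into
 * metadata_reg_c_0 when metadata matching is enabled.
 */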