/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
#include <linux/debugfs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "esw/qos.h"
#include "mlx5_core.h"
#include "lib/eq.h"
#include "eswitch.h"
#include "fs_core.h"
#include "devlink.h"
#include "ecpf.h"
#include "en/mod_hdr.h"

enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node     node;
	u8                     action;
	u16                    vport;
	struct mlx5_flow_handle *flow_rule;
	bool mpfs; /* UC MAC was added to MPFS */
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
};

static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EOPNOTSUPP;

	return 0;
}

struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return ERR_PTR(err);

	return dev->priv.eswitch;
}

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
		return ERR_PTR(-EPERM);

	vport = xa_load(&esw->vports, vport_num);
	if (!vport) {
		esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num);
		return ERR_PTR(-EINVAL);
	}
	return vport;
}

static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
	if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);

	return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in);
}

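/* Usage sketch (illustrative, not part of the original source): arming a
 * vport for all three event types mirrors what the change handler below
 * re-arms with vport->enabled_events:
 *
 *	err = arm_vport_context_events_cmd(dev, vport_num,
 *					   MLX5_VPORT_UC_ADDR_CHANGE |
 *					   MLX5_VPORT_MC_ADDR_CHANGE |
 *					   MLX5_VPORT_PROMISC_CHANGE);
 */
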
/* E-Switch vport context HW commands */
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in)
{
	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
	return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in);
}

static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -EOPNOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (set_flags & SET_VLAN_INSERT) {
		if (MLX5_CAP_ESW(dev, vport_cvlan_insert_always)) {
			/* insert whether or not a vlan already exists in the packet */
			MLX5_SET(modify_esw_vport_context_in, in,
				 esw_vport_context.vport_cvlan_insert,
				 MLX5_VPORT_CVLAN_INSERT_ALWAYS);
		} else {
			/* insert only if the packet carries no vlan */
			MLX5_SET(modify_esw_vport_context_in, in,
				 esw_vport_context.vport_cvlan_insert,
				 MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN);
		}
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in);
}

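/* Example (a sketch based on the callers below, not original code): VST
 * configuration of VLAN 100 / priority 3 on a vport would be
 *
 *	err = modify_esw_vport_cvlan(dev, vport_num, 100, 3,
 *				     SET_VLAN_STRIP | SET_VLAN_INSERT);
 *
 * while passing set_flags = 0 clears both strip and insert, since the
 * field_select bits are unconditionally set above.
 */
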
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_spec *spec;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       misc_parameters);
		mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	spec->match_criteria_enable = match_header;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}

	kvfree(spec);
	return flow_rule;
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}

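/* Summary of the three helpers above: esw_fdb_set_vport_rule() steers one
 * exact DMAC (full ff:ff:ff:ff:ff:ff mask) to a vport; the allmulti rule
 * masks only the multicast bit (mac_c[0] = mac_v[0] = 0x01) so it catches
 * every multicast DMAC; the promisc rule matches no DMAC at all and instead
 * matches misc source_port == MLX5_VPORT_UPLINK, steering all remaining
 * uplink traffic to the vport.
 */
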
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err;

	/* Skip mlx5_mpfs_add_mac for eswitch managers,
	 * it is already done by their netdev in mlx5e_execute_l2_action
	 */
	if (mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_add;

	err = mlx5_mpfs_add_mac(esw->dev, mac);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
			 mac, vport, err);
		return err;
	}
	vaddr->mpfs = true;

fdb_add:
	/* SRIOV is enabled: Forward UC MAC to vport */
	if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
		  vport, mac, vaddr->flow_rule);

	return 0;
}

static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err = 0;

	/* Skip mlx5_mpfs_del_mac for eswitch managers,
	 * it is already done by their netdev in mlx5e_execute_l2_action
	 */
	if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_del;

	err = mlx5_mpfs_del_mac(esw->dev, mac);
	if (err)
		esw_warn(esw->dev,
			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
	vaddr->mpfs = false;

fdb_del:
	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	return err;
}

static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	struct mlx5_vport *vport;
	unsigned long i;
	u16 vport_num;

	mlx5_esw_for_each_vport(esw, i, vport) {
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		vport_num = vport->vport;
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_num)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_num);
				continue;
			}
			iter_vaddr->vport = vport_num;
			iter_vaddr->flow_rule =
				esw_fdb_set_vport_rule(esw,
						       mac,
						       vport_num);
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}

static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rules(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}

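/* The UC/MC lists are kept in sync with the vport context using a
 * mark-and-sweep pattern: esw_update_vport_addr_list() first marks every
 * cached entry MLX5_ACTION_DEL, then flips entries still present in the
 * freshly queried list back to MLX5_ACTION_NONE (or MLX5_ACTION_ADD for new
 * ones); esw_apply_vport_addr_list() below then executes the pending actions
 * against the L2 table and FDB.
 */
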
/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}

/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	if (!vport->enabled)
		goto out;

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
					    mac_list, &size);
	if (err)
		goto out;
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport->vport, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, it's now converted to be an
			 * original vport mac.
			 */
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to find MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}

/* Sync vport MC list with the eswitch MC promiscuous table (mc_table).
 * Must be called after esw_update_vport_addr_list.
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;
	}
}

/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    bool promisc, bool mc_promisc)
{
	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;

	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
			esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								MLX5_VPORT_UPLINK);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rules(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule =
			esw_fdb_set_vport_promisc_rule(esw, vport->vport);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rules(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}

/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport->vport,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport->vport, promisc_all, promisc_mc);

	if (!vport->info.trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport, promisc_all,
				(promisc_all || promisc_mc));
}

void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport);
	}

	if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}

static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}

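/* Event flow, as implemented above and in eswitch_vport_event() below: the
 * NIC_VPORT_CHANGE EQE queues vport->vport_change_handler; the work item
 * takes esw->state_lock, syncs UC/MC/promisc state through
 * esw_vport_change_handle_locked(), and finally re-arms the vport context
 * change events with arm_vport_context_events_cmd().
 */
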
static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac)
{
	((u8 *)node_guid)[7] = mac[0];
	((u8 *)node_guid)[6] = mac[1];
	((u8 *)node_guid)[5] = mac[2];
	((u8 *)node_guid)[4] = 0xff;
	((u8 *)node_guid)[3] = 0xfe;
	((u8 *)node_guid)[2] = mac[3];
	((u8 *)node_guid)[1] = mac[4];
	((u8 *)node_guid)[0] = mac[5];
}

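/* Worked example (illustrative): on a little-endian host, MAC
 * 00:11:22:33:44:55 yields node_guid 0x001122fffe334455, i.e. the EUI-64
 * style expansion 00:11:22:ff:fe:33:44:55 with ff:fe spliced into the
 * middle of the MAC.
 */
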
static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport)
{
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		return esw_legacy_vport_acl_setup(esw, vport);
	else
		return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		esw_legacy_vport_acl_cleanup(esw, vport);
	else
		esw_vport_destroy_offloads_acl_tables(esw, vport);
}

static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		return 0;

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
					    MLX5_CAP_GENERAL);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	vport->info.roce_enabled = MLX5_GET(cmd_hca_cap, hca_caps, roce);

	memset(query_ctx, 0, query_out_sz);
	err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
					    MLX5_CAP_GENERAL_2);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	vport->info.mig_enabled = MLX5_GET(cmd_hca_cap_2, hca_caps, migratable);
out_free:
	kfree(query_ctx);
	return err;
}

static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	bool vst_mode_steering = esw_vst_mode_is_steering(esw);
	u16 vport_num = vport->vport;
	int flags;
	int err;

	err = esw_vport_setup_acl(esw, vport);
	if (err)
		return err;

	if (mlx5_esw_is_manager_vport(esw, vport_num))
		return 0;

	err = mlx5_esw_vport_caps_get(esw, vport);
	if (err)
		goto err_caps;

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      vport_num, 1,
				      vport->info.link_state);

	/* Host PF has its own mac/guid. */
	if (vport_num) {
		mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
						  vport->info.mac);
		mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
						vport->info.node_guid);
	}

	flags = (vport->info.vlan || vport->info.qos) ?
		SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
	if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering)
		modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
				       vport->info.qos, flags);

	return 0;

err_caps:
	esw_vport_cleanup_acl(esw, vport);
	return err;
}

/* Don't cleanup vport->info, it's needed to restore vport configuration */
static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;

	if (!mlx5_esw_is_manager_vport(esw, vport_num))
		mlx5_modify_vport_admin_state(esw->dev,
					      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					      vport_num, 1,
					      MLX5_VPORT_ADMIN_STATE_DOWN);

	mlx5_esw_qos_vport_disable(esw, vport);
	esw_vport_cleanup_acl(esw, vport);
}

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
			  enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	int ret;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	mutex_lock(&esw->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	ret = esw_vport_setup(esw, vport);
	if (ret)
		goto done;

	/* Sync with current vport context */
	vport->enabled_events = enabled_events;
	vport->enabled = true;

	/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
	 * in smartNIC as it's a vport group manager.
	 */
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    (!vport_num && mlx5_core_is_ecpf(esw->dev)))
		vport->info.trusted = true;

	if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
		ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
		if (ret)
			goto err_vhca_mapping;
	}

	/* External controller host PF has factory programmed MAC.
	 * Read it from the device.
	 */
	if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
		mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);

	esw_vport_change_handle_locked(vport);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
done:
	mutex_unlock(&esw->state_lock);
	return ret;

err_vhca_mapping:
	esw_vport_cleanup(esw, vport);
	mutex_unlock(&esw->state_lock);
	return ret;
}

void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return;

	mutex_lock(&esw->state_lock);
	if (!vport->enabled)
		goto done;

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	vport->enabled = false;

	/* Disable events from this vport */
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);

	if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		mlx5_esw_vport_vhca_id_clear(esw, vport_num);

	/* We don't assume VFs will cleanup after themselves.
	 * Calling vport change handler while vport is disabled will cleanup
	 * the vport resources.
	 */
	esw_vport_change_handle_locked(vport);
	vport->enabled_events = 0;
	esw_vport_cleanup(esw, vport);
	esw->enabled_vports--;

done:
	mutex_unlock(&esw->state_lock);
}

static int eswitch_vport_event(struct notifier_block *nb,
			       unsigned long type, void *data)
{
	struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
	struct mlx5_eqe *eqe = data;
	struct mlx5_vport *vport;
	u16 vport_num;

	vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (!IS_ERR(vport))
		queue_work(esw->work_queue, &vport->vport_change_handler);
	return NOTIFY_OK;
}

/**
 * mlx5_esw_query_functions - Returns raw output about functions state
 * @dev: Pointer to device to query
 *
 * mlx5_esw_query_functions() allocates and returns functions changed
 * raw output memory pointer from device on success. Otherwise returns ERR_PTR.
 * Caller must free the memory using kvfree() when valid pointer is returned.
 */
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
	u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(query_esw_functions_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
	if (!err)
		return out;

	kvfree(out);
	return ERR_PTR(err);
}

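/* Typical caller pattern (a sketch; see mlx5_eswitch_update_num_of_vfs()
 * below for the in-file user):
 *
 *	const u32 *out = mlx5_esw_query_functions(dev);
 *
 *	if (IS_ERR(out))
 *		return PTR_ERR(out);
 *	num_vfs = MLX5_GET(query_esw_functions_out, out,
 *			   host_params_context.host_num_of_vfs);
 *	kvfree(out);
 */
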
static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
{
	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
	mlx5_eq_notifier_register(esw->dev, &esw->nb);

	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
		MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
			     ESW_FUNCTIONS_CHANGED);
		mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
	}
}

static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
{
	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
		mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);

	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);

	flush_workqueue(esw->work_queue);
}

static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		memset(&vport->qos, 0, sizeof(vport->qos));
		memset(&vport->info, 0, sizeof(vport->info));
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	}
}

/* Public E-Switch API */
int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events)
{
	int err;

	err = mlx5_esw_vport_enable(esw, vport_num, enabled_events);
	if (err)
		return err;

	mlx5_esw_vport_debugfs_create(esw, vport_num, false, 0);
	err = esw_offloads_load_rep(esw, vport_num);
	if (err)
		goto err_rep;

	return err;

err_rep:
	mlx5_esw_vport_debugfs_destroy(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
	return err;
}

void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	esw_offloads_unload_rep(esw, vport_num);
	mlx5_esw_vport_debugfs_destroy(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
}

void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		if (!vport->enabled)
			continue;
		mlx5_eswitch_unload_vport(esw, vport->vport);
	}
}

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
		if (err)
			goto vf_err;
	}

	return 0;

vf_err:
	mlx5_eswitch_unload_vf_vports(esw, num_vfs);
	return err;
}

static int host_pf_enable_hca(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_ecpf(dev))
		return 0;

	/* Once vport and representor are ready, take the external host PF
	 * out of the initializing state. Enabling the HCA clears the
	 * iser->initializing bit and host PF driver loading can progress.
	 */
	return mlx5_cmd_host_pf_enable_hca(dev);
}

static void host_pf_disable_hca(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_ecpf(dev))
		return;

	mlx5_cmd_host_pf_disable_hca(dev);
}

/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs,
 * whichever are present on the eswitch.
 */
static int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events)
{
	int ret;

	/* Enable PF vport */
	ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
	if (ret)
		return ret;

	/* Enable external host PF HCA */
	ret = host_pf_enable_hca(esw->dev);
	if (ret)
		goto pf_hca_err;

	/* Enable ECPF vport */
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
		if (ret)
			goto ecpf_err;
	}

	/* Enable VF vports */
	ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs,
					  enabled_events);
	if (ret)
		goto vf_err;
	return 0;

vf_err:
	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
	host_pf_disable_hca(esw->dev);
pf_hca_err:
	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
	return ret;
}

/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs,
 * whichever were previously enabled on the eswitch.
 */
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
	host_pf_disable_hca(esw->dev);
	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
}

static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
{
	struct devlink *devlink = priv_to_devlink(esw->dev);
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(devlink,
						 MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
						 &val);
	if (!err) {
		esw->params.large_group_num = val.vu32;
	} else {
		esw_warn(esw->dev,
			 "Devlink can't get param fdb_large_groups, using default (%d).\n",
			 ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
		esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
	}
}

static void
mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
{
	const u32 *out;

	if (num_vfs < 0)
		return;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw->esw_funcs.num_vfs = num_vfs;
		return;
	}

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		return;

	esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
					  host_params_context.host_num_of_vfs);
	kvfree(out);
}

static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
{
	struct mlx5_esw_event_info info = {};

	info.new_mode = mode;

	blocking_notifier_call_chain(&esw->n_head, 0, &info);
}

static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	int total_vports;
	int err;

	if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED)
		return 0;

	total_vports = mlx5_eswitch_get_total_vports(dev);

	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
		err = mlx5_fs_egress_acls_init(dev, total_vports);
		if (err)
			return err;
	} else {
		esw_warn(dev, "egress ACL is not supported by FW\n");
	}

	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
		err = mlx5_fs_ingress_acls_init(dev, total_vports);
		if (err)
			goto err;
	} else {
		esw_warn(dev, "ingress ACL is not supported by FW\n");
	}
	esw->flags |= MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
	return 0;

err:
	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		mlx5_fs_egress_acls_cleanup(dev);
	return err;
}

static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;

	esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		mlx5_fs_ingress_acls_cleanup(dev);
	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		mlx5_fs_egress_acls_cleanup(dev);
}

/**
 * mlx5_eswitch_enable_locked - Enable eswitch
 * @esw:	Pointer to eswitch
 * @num_vfs:	Enable eswitch for given number of VFs. This is optional.
 *		Valid values are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
 *		Caller should pass num_vfs > 0 when enabling eswitch for
 *		vf vports. Caller should pass num_vfs = 0, when eswitch
 *		is enabled without sriov VFs or when caller
 *		is unaware of the sriov state of the host PF on ECPF based
 *		eswitch. Caller should pass < 0 when num_vfs should be
 *		completely ignored. This is typically the case when eswitch
 *		is enabled without sriov regardless of PF/ECPF system.
 *
 * mlx5_eswitch_enable_locked() enables the eswitch in either legacy or
 * offloads mode. If num_vfs >= 0 is provided, it sets up the VF related
 * eswitch vports. It returns 0 on success or an error code on failure.
 */
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
{
	int err;

	lockdep_assert_held(&esw->mode_lock);

	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
		esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
		return -EOPNOTSUPP;
	}

	mlx5_eswitch_get_devlink_param(esw);

	err = mlx5_esw_acls_ns_init(esw);
	if (err)
		return err;

	mlx5_eswitch_update_num_of_vfs(esw, num_vfs);

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		err = esw_legacy_enable(esw);
	} else {
		mlx5_rescan_drivers(esw->dev);
		err = esw_offloads_enable(esw);
	}

	if (err)
		goto abort;

	esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;

	mlx5_eswitch_event_handlers_register(esw);

	esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	mlx5_esw_mode_change_notify(esw, esw->mode);

	return 0;

abort:
	mlx5_esw_acls_ns_cleanup(esw);
	return err;
}

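/* Illustrative calls (not from the original file), matching the @num_vfs
 * contract documented above:
 *
 *	mlx5_eswitch_enable_locked(esw, 4);	// SR-IOV, 4 VFs known
 *	mlx5_eswitch_enable_locked(esw, 0);	// ECPF, host VF count unknown
 *	mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
 */
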
/**
 * mlx5_eswitch_enable - Enable eswitch
 * @esw:	Pointer to eswitch
 * @num_vfs:	Enable eswitch for given number of VFs.
 *		Caller must pass num_vfs > 0 when enabling eswitch for
 *		vf vports.
 *
 * mlx5_eswitch_enable() returns 0 on success or an error code on failure.
 */
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
{
	bool toggle_lag;
	int ret = 0;

	if (!mlx5_esw_allowed(esw))
		return 0;

	devl_assert_locked(priv_to_devlink(esw->dev));

	toggle_lag = !mlx5_esw_is_fdb_created(esw);

	if (toggle_lag)
		mlx5_lag_disable_change(esw->dev);

	down_write(&esw->mode_lock);
	if (!mlx5_esw_is_fdb_created(esw)) {
		ret = mlx5_eswitch_enable_locked(esw, num_vfs);
	} else {
		enum mlx5_eswitch_vport_event vport_events;

		vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ?
					MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE;
		ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
		if (!ret)
			esw->esw_funcs.num_vfs = num_vfs;
	}
	up_write(&esw->mode_lock);

	if (toggle_lag)
		mlx5_lag_enable_change(esw->dev);

	return ret;
}

/* When disabling sriov, free driver level resources. */
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
{
	if (!mlx5_esw_allowed(esw))
		return;

	devl_assert_locked(priv_to_devlink(esw->dev));
	down_write(&esw->mode_lock);
	/* If driver is unloaded, this function is called twice by remove_one()
	 * and mlx5_unload(). Prevent the second call.
	 */
	if (!esw->esw_funcs.num_vfs && !clear_vf)
		goto unlock;

	esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), active vports(%d)\n",
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	if (clear_vf)
		mlx5_eswitch_clear_vf_vports_info(esw);
	/* If disabling sriov in switchdev mode, free meta rules here
	 * because it depends on num_vfs.
	 */
	if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
		struct devlink *devlink = priv_to_devlink(esw->dev);

		devl_rate_nodes_destroy(devlink);
	}
	/* Destroy legacy fdb when disabling sriov in legacy mode. */
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		mlx5_eswitch_disable_locked(esw);

	esw->esw_funcs.num_vfs = 0;

unlock:
	up_write(&esw->mode_lock);
}

/* Free resources for the corresponding eswitch mode. It is called by devlink
 * when changing eswitch mode or by modprobe when unloading the driver.
 */
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
{
	struct devlink *devlink = priv_to_devlink(esw->dev);

	/* Notify eswitch users that it is exiting from the current mode,
	 * so that they can do the necessary cleanup before the eswitch is
	 * disabled.
	 */
	mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY);

	mlx5_eswitch_event_handlers_unregister(esw);

	esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	if (esw->fdb_table.flags & MLX5_ESW_FDB_CREATED) {
		esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
		if (esw->mode == MLX5_ESWITCH_OFFLOADS)
			esw_offloads_disable(esw);
		else if (esw->mode == MLX5_ESWITCH_LEGACY)
			esw_legacy_disable(esw);
		mlx5_esw_acls_ns_cleanup(esw);
	}

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		devl_rate_nodes_destroy(devlink);
}

void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
	if (!mlx5_esw_allowed(esw))
		return;

	devl_assert_locked(priv_to_devlink(esw->dev));
	mlx5_lag_disable_change(esw->dev);
	down_write(&esw->mode_lock);
	mlx5_eswitch_disable_locked(esw);
	up_write(&esw->mode_lock);
	mlx5_lag_enable_change(esw->dev);
}

static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out)
{
	u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	MLX5_SET(query_hca_cap_in, in, function_id, MLX5_VPORT_PF);
	MLX5_SET(query_hca_cap_in, in, other_function, true);
	return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}

int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	if (!mlx5_core_is_ecpf(dev)) {
		*max_sfs = 0;
		return 0;
	}

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_query_hca_cap_host_pf(dev, query_ctx);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	*max_sfs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_sf);
	*sf_base_id = MLX5_GET(cmd_hca_cap, hca_caps, sf_base_id);

out_free:
	kfree(query_ctx);
	return err;
}

static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, struct mlx5_core_dev *dev,
				int index, u16 vport_num)
{
	struct mlx5_vport *vport;
	int err;

	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	vport->dev = esw->dev;
	vport->vport = vport_num;
	vport->index = index;
	vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler);
	err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL);
	if (err)
		goto insert_err;

	esw->total_vports++;
	return 0;

insert_err:
	kfree(vport);
	return err;
}

static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	xa_erase(&esw->vports, vport->vport);
	kfree(vport);
}

static void mlx5_esw_vports_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vport(esw, i, vport)
		mlx5_esw_vport_free(esw, vport);
	xa_destroy(&esw->vports);
}

static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	u16 max_host_pf_sfs;
	u16 base_sf_num;
	int idx = 0;
	int err;
	int i;

	xa_init(&esw->vports);

	err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_PF);
	if (err)
		goto err;
	if (esw->first_host_vport == MLX5_VPORT_PF)
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
	idx++;

	for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, idx);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
		idx++;
	}
	base_sf_num = mlx5_sf_start_function_id(dev);
	for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
		idx++;
	}

	err = mlx5_esw_sf_max_hpf_functions(dev, &max_host_pf_sfs, &base_sf_num);
	if (err)
		goto err;
	for (i = 0; i < max_host_pf_sfs; i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
		idx++;
	}

	if (mlx5_ecpf_vport_exists(dev)) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_ECPF);
		if (err)
			goto err;
		idx++;
	}
	err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_UPLINK);
	if (err)
		goto err;
	return 0;

err:
	mlx5_esw_vports_cleanup(esw);
	return err;
}

int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;
	int err;

	if (!MLX5_VPORT_MANAGER(dev))
		return 0;

	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;
	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
	esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);

	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	err = mlx5_esw_vports_init(esw);
	if (err)
		goto abort;

	err = esw_offloads_init_reps(esw);
	if (err)
		goto reps_err;

	mutex_init(&esw->offloads.encap_tbl_lock);
	hash_init(esw->offloads.encap_tbl);
	mutex_init(&esw->offloads.decap_tbl_lock);
	hash_init(esw->offloads.decap_tbl);
	mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
	atomic64_set(&esw->offloads.num_flows, 0);
	ida_init(&esw->offloads.vport_metadata_ida);
	xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
	mutex_init(&esw->state_lock);
	init_rwsem(&esw->mode_lock);
	refcount_set(&esw->qos.refcnt, 0);

	esw->enabled_vports = 0;
	esw->mode = MLX5_ESWITCH_LEGACY;
	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
	if (MLX5_ESWITCH_MANAGER(dev) &&
	    mlx5_esw_vport_match_metadata_supported(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	dev->priv.eswitch = esw;
	BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);

	esw->dbgfs = debugfs_create_dir("esw", mlx5_debugfs_get_dev_root(esw->dev));
	esw_info(dev,
		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
		 esw->total_vports,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));
	return 0;

reps_err:
	mlx5_esw_vports_cleanup(esw);
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	kfree(esw);
	return err;
}

void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
		return;

	esw_info(esw->dev, "cleanup\n");

	debugfs_remove_recursive(esw->dbgfs);
	esw->dev->priv.eswitch = NULL;
	destroy_workqueue(esw->work_queue);
	WARN_ON(refcount_read(&esw->qos.refcnt));
	mutex_destroy(&esw->state_lock);
	WARN_ON(!xa_empty(&esw->offloads.vhca_map));
	xa_destroy(&esw->offloads.vhca_map);
	ida_destroy(&esw->offloads.vport_metadata_ida);
	mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
	mutex_destroy(&esw->offloads.encap_tbl_lock);
	mutex_destroy(&esw->offloads.decap_tbl_lock);
	esw_offloads_cleanup_reps(esw);
	mlx5_esw_vports_cleanup(esw);
	kfree(esw);
}

/* Vport Administration */
static int
mlx5_esw_set_vport_mac_locked(struct mlx5_eswitch *esw,
			      struct mlx5_vport *evport, const u8 *mac)
{
	u16 vport_num = evport->vport;
	u64 node_guid;
	int err = 0;

	if (is_multicast_ether_addr(mac))
		return -EINVAL;

	if (evport->info.spoofchk && !is_valid_ether_addr(mac))
		mlx5_core_warn(esw->dev,
			       "Set invalid MAC while spoofchk is on, vport(%d)\n",
			       vport_num);

	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
			       vport_num, err);
		return err;
	}

	node_guid_gen_from_mac(&node_guid, mac);
	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid);
	if (err)
		mlx5_core_warn(esw->dev,
			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
			       vport_num, err);

	ether_addr_copy(evport->info.mac, mac);
	evport->info.node_guid = node_guid;
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
		err = esw_acl_ingress_lgcy_setup(esw, evport);

	return err;
}

int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err = 0;

	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	err = mlx5_esw_set_vport_mac_locked(esw, evport, mac);
	mutex_unlock(&esw->state_lock);
	return err;
}

static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return false;

	return xa_get_mark(&esw->vports, vport_num, mark);
}

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF);
}

bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
}

int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
	int other_vport = 1;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	if (vport == MLX5_VPORT_UPLINK) {
		opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
		other_vport = 0;
		vport = 0;
	}
	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
	if (err) {
		mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
			       vport, opmod, err);
		goto unlock;
	}

	evport->info.link_state = link_state;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);

	if (IS_ERR(evport))
		return PTR_ERR(evport);

	memset(ivi, 0, sizeof(*ivi));
	ivi->vf = vport - 1;

	mutex_lock(&esw->state_lock);
	ether_addr_copy(ivi->mac, evport->info.mac);
	ivi->linkstate = evport->info.link_state;
	ivi->vlan = evport->info.vlan;
	ivi->qos = evport->info.qos;
	ivi->spoofchk = evport->info.spoofchk;
	ivi->trusted = evport->info.trusted;
	if (evport->qos.enabled) {
		ivi->min_tx_rate = evport->qos.min_rate;
		ivi->max_tx_rate = evport->qos.max_rate;
	}
	mutex_unlock(&esw->state_lock);

	return 0;
}

int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	bool vst_mode_steering = esw_vst_mode_is_steering(esw);
	int err = 0;

	if (IS_ERR(evport))
		return PTR_ERR(evport);
	if (vlan > 4095 || qos > 7)
		return -EINVAL;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering) {
		err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
		if (err)
			return err;
	}

	evport->info.vlan = vlan;
	evport->info.qos = qos;
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
		err = esw_acl_ingress_lgcy_setup(esw, evport);
		if (err)
			return err;
		err = esw_acl_egress_lgcy_setup(esw, evport);
	}

	return err;
}

int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport_num,
				 struct ifla_vf_stats *vf_stats)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_vport_drop_stats stats = {};
	int err = 0;
	u32 *out;

	if (IS_ERR(vport))
		return PTR_ERR(vport);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
	MLX5_SET(query_vport_counter_in, in, other_vport, 1);

	err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out);
	if (err)
		goto free_out;

#define MLX5_GET_CTR(p, x) \
	MLX5_GET64(query_vport_counter_out, p, x)

	memset(vf_stats, 0, sizeof(*vf_stats));
	vf_stats->rx_packets =
		MLX5_GET_CTR(out, received_eth_unicast.packets) +
		MLX5_GET_CTR(out, received_ib_unicast.packets) +
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	vf_stats->rx_bytes =
		MLX5_GET_CTR(out, received_eth_unicast.octets) +
		MLX5_GET_CTR(out, received_ib_unicast.octets) +
		MLX5_GET_CTR(out, received_eth_multicast.octets) +
		MLX5_GET_CTR(out, received_ib_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_broadcast.octets);

	vf_stats->tx_packets =
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

	vf_stats->tx_bytes =
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	vf_stats->multicast =
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets);

	vf_stats->broadcast =
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	err = mlx5_esw_query_vport_drop_stats(esw->dev, vport, &stats);
	if (err)
		goto free_out;
	vf_stats->rx_dropped = stats.rx_dropped;
	vf_stats->tx_dropped = stats.tx_dropped;

free_out:
	kvfree(out);
	return err;
}

u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;

	return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_LEGACY;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);

enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;

	esw = dev->priv.eswitch;
	return (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) ? esw->offloads.encap :
		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1)
{
	return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
		dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&esw->n_head, nb);
}

void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&esw->n_head, nb);
}

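/* Registration sketch (illustrative; the handler name and body are
 * hypothetical, only struct mlx5_esw_event_info and its new_mode field are
 * from this file):
 *
 *	static int my_esw_event(struct notifier_block *nb,
 *				unsigned long event, void *data)
 *	{
 *		struct mlx5_esw_event_info *info = data;
 *
 *		// react to the eswitch mode in info->new_mode
 *		return NOTIFY_OK;
 *	}
 *
 *	mlx5_esw_event_notifier_register(esw, &my_nb);
 */
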
/**
 * mlx5_esw_hold() - Try to take a read lock on esw mode lock.
 * @mdev: mlx5 core device.
 *
 * Should be called by esw resources callers.
 *
 * Return: true on success or false.
 */
bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	/* e.g. VF doesn't have eswitch so nothing to do */
	if (!mlx5_esw_allowed(esw))
		return true;

	if (down_read_trylock(&esw->mode_lock) != 0)
		return true;

	return false;
}

/**
 * mlx5_esw_release() - Release a read lock on esw mode lock.
 * @mdev: mlx5 core device.
 */
void mlx5_esw_release(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	if (mlx5_esw_allowed(esw))
		up_read(&esw->mode_lock);
}

/**
 * mlx5_esw_get() - Increase esw user count.
 * @mdev: mlx5 core device.
 */
void mlx5_esw_get(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	if (mlx5_esw_allowed(esw))
		atomic64_inc(&esw->user_count);
}

/**
 * mlx5_esw_put() - Decrease esw user count.
 * @mdev: mlx5 core device.
 */
void mlx5_esw_put(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	if (mlx5_esw_allowed(esw))
		atomic64_dec_if_positive(&esw->user_count);
}

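/* Caller pattern for the four helpers above (a sketch, assuming an eswitch
 * resource such as an SF port is being created):
 *
 *	if (!mlx5_esw_hold(mdev))
 *		return -EBUSY;
 *	mlx5_esw_get(mdev);		// pin the current mode
 *	mlx5_esw_release(mdev);		// drop the read lock, keep the pin
 *	// ...create and use the resource...
 *	mlx5_esw_put(mdev);		// unpin when done
 *
 * The user count pinned here is what makes mlx5_esw_try_lock() below fail
 * with -EBUSY while resources are in flight.
 */
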
/**
 * mlx5_esw_try_lock() - Take a write lock on esw mode lock.
 * @esw: eswitch device.
 *
 * Should be called by esw mode change routine.
 *
 * Return:
 * * 0 - esw mode if successfully locked and refcount is 0.
 * * -EBUSY - refcount is not 0.
 * * -EINVAL - In the middle of switching mode or lock is already held.
 */
int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
{
	if (down_write_trylock(&esw->mode_lock) == 0)
		return -EINVAL;

	if (atomic64_read(&esw->user_count) > 0) {
		up_write(&esw->mode_lock);
		return -EBUSY;
	}

	return esw->mode;
}

/**
 * mlx5_esw_unlock() - Release write lock on esw mode lock
 * @esw: eswitch device.
 */
void mlx5_esw_unlock(struct mlx5_eswitch *esw)
{
	up_write(&esw->mode_lock);
}

/**
 * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
 *
 * @dev: Pointer to core device
 *
 * mlx5_eswitch_get_total_vports returns total number of eswitch vports.
 */
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;

	esw = dev->priv.eswitch;
	return mlx5_esw_allowed(esw) ? esw->total_vports : 0;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);

/**
 * mlx5_eswitch_get_core_dev - Get the mdev device
 * @esw : eswitch device.
 *
 * Return the mellanox core device which manages the eswitch.
 */
struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
{
	return mlx5_esw_allowed(esw) ? esw->dev : NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_core_dev);