2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/etherdevice.h>
34 #include <linux/debugfs.h>
35 #include <linux/mlx5/driver.h>
36 #include <linux/mlx5/mlx5_ifc.h>
37 #include <linux/mlx5/vport.h>
38 #include <linux/mlx5/fs.h>
39 #include <linux/mlx5/mpfs.h>
40 #include "esw/acl/lgcy.h"
41 #include "esw/legacy.h"
43 #include "mlx5_core.h"
50 #include "en/mod_hdr.h"
58 /* Vport UC/MC hash node */
60 struct l2addr_node node;
63 struct mlx5_flow_handle *flow_rule;
64 bool mpfs; /* UC MAC was added to MPFS */
65 /* A flag indicating that the MAC was added due to an mc promiscuous vport */
69 static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
71 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
74 if (!MLX5_ESWITCH_MANAGER(dev))
80 struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
82 struct mlx5_core_dev *dev = devlink_priv(devlink);
85 err = mlx5_eswitch_check(dev);
89 return dev->priv.eswitch;
92 struct mlx5_vport *__must_check
93 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
95 struct mlx5_vport *vport;
98 return ERR_PTR(-EPERM);
100 vport = xa_load(&esw->vports, vport_num);
102 esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num);
103 return ERR_PTR(-EINVAL);
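/* Illustrative caller-side sketch (not part of this file): the return value
 * of mlx5_eswitch_get_vport() is ERR_PTR-encoded, so callers are expected to
 * check it before use:
 *
 *	struct mlx5_vport *vport;
 *
 *	vport = mlx5_eswitch_get_vport(esw, vport_num);
 *	if (IS_ERR(vport))
 *		return PTR_ERR(vport);
 */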
108 static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
111 u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
114 MLX5_SET(modify_nic_vport_context_in, in,
115 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
116 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
117 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
118 if (vport || mlx5_core_is_ecpf(dev))
119 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
120 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
121 in, nic_vport_context);
123 MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
125 if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
126 MLX5_SET(nic_vport_context, nic_vport_ctx,
127 event_on_uc_address_change, 1);
128 if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
129 MLX5_SET(nic_vport_context, nic_vport_ctx,
130 event_on_mc_address_change, 1);
131 if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
132 MLX5_SET(nic_vport_context, nic_vport_ctx,
133 event_on_promisc_change, 1);
135 return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in);
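/* Illustrative sketch: arming a vport for all three change event types
 * handled above. The mask shown is a caller's choice, not a value mandated
 * by this file:
 *
 *	err = arm_vport_context_events_cmd(dev, vport_num,
 *					   MLX5_VPORT_UC_ADDR_CHANGE |
 *					   MLX5_VPORT_MC_ADDR_CHANGE |
 *					   MLX5_VPORT_PROMISC_CHANGE);
 */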
138 /* E-Switch vport context HW commands */
139 int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
140 bool other_vport, void *in)
142 MLX5_SET(modify_esw_vport_context_in, in, opcode,
143 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
144 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
145 MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
146 return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in);
149 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
150 u16 vlan, u8 qos, u8 set_flags)
152 u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
154 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
155 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
158 esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
159 vport, vlan, qos, set_flags);
161 if (set_flags & SET_VLAN_STRIP)
162 MLX5_SET(modify_esw_vport_context_in, in,
163 esw_vport_context.vport_cvlan_strip, 1);
165 if (set_flags & SET_VLAN_INSERT) {
166 if (MLX5_CAP_ESW(dev, vport_cvlan_insert_always)) {
167 /* insert whether or not a vlan exists in the packet */
168 MLX5_SET(modify_esw_vport_context_in, in,
169 esw_vport_context.vport_cvlan_insert,
170 MLX5_VPORT_CVLAN_INSERT_ALWAYS);
172 /* insert only if there is no vlan in the packet */
173 MLX5_SET(modify_esw_vport_context_in, in,
174 esw_vport_context.vport_cvlan_insert,
175 MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN);
177 MLX5_SET(modify_esw_vport_context_in, in,
178 esw_vport_context.cvlan_pcp, qos);
179 MLX5_SET(modify_esw_vport_context_in, in,
180 esw_vport_context.cvlan_id, vlan);
183 MLX5_SET(modify_esw_vport_context_in, in,
184 field_select.vport_cvlan_strip, 1);
185 MLX5_SET(modify_esw_vport_context_in, in,
186 field_select.vport_cvlan_insert, 1);
188 return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in);
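/* Illustrative VST sketch (example values only): strip and insert C-VLAN 100
 * with PCP 3 on a vport. vlan must be <= 4095 and qos <= 7, as enforced in
 * __mlx5_eswitch_set_vport_vlan() further below:
 *
 *	err = modify_esw_vport_cvlan(dev, vport_num, 100, 3,
 *				     SET_VLAN_STRIP | SET_VLAN_INSERT);
 */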
192 static struct mlx5_flow_handle *
193 __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
194 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
196 int match_header = (is_zero_ether_addr(mac_c) ? 0 :
197 MLX5_MATCH_OUTER_HEADERS);
198 struct mlx5_flow_handle *flow_rule = NULL;
199 struct mlx5_flow_act flow_act = {0};
200 struct mlx5_flow_destination dest = {};
201 struct mlx5_flow_spec *spec;
202 void *mv_misc = NULL;
203 void *mc_misc = NULL;
208 match_header |= MLX5_MATCH_MISC_PARAMETERS;
210 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
214 dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
215 outer_headers.dmac_47_16);
216 dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
217 outer_headers.dmac_47_16);
219 if (match_header & MLX5_MATCH_OUTER_HEADERS) {
220 ether_addr_copy(dmac_v, mac_v);
221 ether_addr_copy(dmac_c, mac_c);
224 if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
225 mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
227 mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
229 MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
230 MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
233 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
234 dest.vport.num = vport;
237 "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
238 dmac_v, dmac_c, vport);
239 spec->match_criteria_enable = match_header;
240 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
242 mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
243 &flow_act, &dest, 1);
244 if (IS_ERR(flow_rule)) {
246 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
247 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
255 static struct mlx5_flow_handle *
256 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
260 eth_broadcast_addr(mac_c);
261 return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
264 static struct mlx5_flow_handle *
265 esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
270 eth_zero_addr(mac_c);
271 eth_zero_addr(mac_v);
274 return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
277 static struct mlx5_flow_handle *
278 esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
283 eth_zero_addr(mac_c);
284 eth_zero_addr(mac_v);
285 return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
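/* Informational summary of the three wrappers above:
 * esw_fdb_set_vport_rule() installs an exact-DMAC forwarding rule,
 * esw_fdb_set_vport_allmulti_rule() a multicast catch-all rule, and
 * esw_fdb_set_vport_promisc_rule() an rx-side rule (rx_rule=true adds the
 * uplink source_port match built in __esw_fdb_set_vport_rule()).
 */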
288 /* E-Switch vport UC/MC list management */
289 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
290 struct vport_addr *vaddr);
292 static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
294 u8 *mac = vaddr->node.addr;
295 u16 vport = vaddr->vport;
298 /* Skip mlx5_mpfs_add_mac for eswitch managers,
299 * it is already done by their netdev in mlx5e_execute_l2_action
301 if (mlx5_esw_is_manager_vport(esw, vport))
304 err = mlx5_mpfs_add_mac(esw->dev, mac);
307 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
314 /* SRIOV is enabled: Forward UC MAC to vport */
315 if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY) {
316 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
318 esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
319 vport, mac, vaddr->flow_rule);
325 static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
327 u8 *mac = vaddr->node.addr;
328 u16 vport = vaddr->vport;
331 /* Skip mlx5_mpfs_del_mac for eswitch managers,
332 * it is already done by their netdev in mlx5e_execute_l2_action
334 if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
337 err = mlx5_mpfs_del_mac(esw->dev, mac);
340 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
345 if (vaddr->flow_rule)
346 mlx5_del_flow_rules(vaddr->flow_rule);
347 vaddr->flow_rule = NULL;
352 static void update_allmulti_vports(struct mlx5_eswitch *esw,
353 struct vport_addr *vaddr,
354 struct esw_mc_addr *esw_mc)
356 u8 *mac = vaddr->node.addr;
357 struct mlx5_vport *vport;
361 mlx5_esw_for_each_vport(esw, i, vport) {
362 struct hlist_head *vport_hash = vport->mc_list;
363 struct vport_addr *iter_vaddr =
364 l2addr_hash_find(vport_hash,
367 vport_num = vport->vport;
368 if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
369 vaddr->vport == vport_num)
371 switch (vaddr->action) {
372 case MLX5_ACTION_ADD:
375 iter_vaddr = l2addr_hash_add(vport_hash, mac,
380 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
384 iter_vaddr->vport = vport_num;
385 iter_vaddr->flow_rule =
386 esw_fdb_set_vport_rule(esw,
389 iter_vaddr->mc_promisc = true;
391 case MLX5_ACTION_DEL:
394 mlx5_del_flow_rules(iter_vaddr->flow_rule);
395 l2addr_hash_del(iter_vaddr);
401 static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
403 struct hlist_head *hash = esw->mc_table;
404 struct esw_mc_addr *esw_mc;
405 u8 *mac = vaddr->node.addr;
406 u16 vport = vaddr->vport;
408 if (!esw->fdb_table.legacy.fdb)
411 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
415 esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
419 esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
420 esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);
422 /* Add this multicast mac to all the mc promiscuous vports */
423 update_allmulti_vports(esw, vaddr, esw_mc);
426 /* If the multicast MAC was added as a result of an mc promiscuous vport,
427 * don't increment the multicast ref count
429 if (!vaddr->mc_promisc)
432 /* Forward MC MAC to vport */
433 vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
435 "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
436 vport, mac, vaddr->flow_rule,
437 esw_mc->refcnt, esw_mc->uplink_rule);
441 static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
443 struct hlist_head *hash = esw->mc_table;
444 struct esw_mc_addr *esw_mc;
445 u8 *mac = vaddr->node.addr;
446 u16 vport = vaddr->vport;
448 if (!esw->fdb_table.legacy.fdb)
451 esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
454 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
459 "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
460 vport, mac, vaddr->flow_rule, esw_mc->refcnt,
461 esw_mc->uplink_rule);
463 if (vaddr->flow_rule)
464 mlx5_del_flow_rules(vaddr->flow_rule);
465 vaddr->flow_rule = NULL;
467 /* If the multicast MAC was added as a result of an mc promiscuous vport,
468 * don't decrement the multicast ref count.
470 if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
473 /* Remove this multicast mac from all the mc promiscuous vports */
474 update_allmulti_vports(esw, vaddr, esw_mc);
476 if (esw_mc->uplink_rule)
477 mlx5_del_flow_rules(esw_mc->uplink_rule);
479 l2addr_hash_del(esw_mc);
483 /* Apply vport UC/MC list to HW l2 table and FDB table */
484 static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
485 struct mlx5_vport *vport, int list_type)
487 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
488 vport_addr_action vport_addr_add;
489 vport_addr_action vport_addr_del;
490 struct vport_addr *addr;
491 struct l2addr_node *node;
492 struct hlist_head *hash;
493 struct hlist_node *tmp;
496 vport_addr_add = is_uc ? esw_add_uc_addr :
498 vport_addr_del = is_uc ? esw_del_uc_addr :
501 hash = is_uc ? vport->uc_list : vport->mc_list;
502 for_each_l2hash_node(node, tmp, hash, hi) {
503 addr = container_of(node, struct vport_addr, node);
504 switch (addr->action) {
505 case MLX5_ACTION_ADD:
506 vport_addr_add(esw, addr);
507 addr->action = MLX5_ACTION_NONE;
509 case MLX5_ACTION_DEL:
510 vport_addr_del(esw, addr);
511 l2addr_hash_del(addr);
517 /* Sync vport UC/MC list from vport context */
518 static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
519 struct mlx5_vport *vport, int list_type)
521 bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
522 u8 (*mac_list)[ETH_ALEN];
523 struct l2addr_node *node;
524 struct vport_addr *addr;
525 struct hlist_head *hash;
526 struct hlist_node *tmp;
532 size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
533 MLX5_MAX_MC_PER_VPORT(esw->dev);
535 mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
539 hash = is_uc ? vport->uc_list : vport->mc_list;
541 for_each_l2hash_node(node, tmp, hash, hi) {
542 addr = container_of(node, struct vport_addr, node);
543 addr->action = MLX5_ACTION_DEL;
549 err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
553 esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
554 vport->vport, is_uc ? "UC" : "MC", size);
556 for (i = 0; i < size; i++) {
557 if (is_uc && !is_valid_ether_addr(mac_list[i]))
560 if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
563 addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
565 addr->action = MLX5_ACTION_NONE;
566 /* If this MAC was previously added because of allmulti
567 * promiscuous rx mode, it is now converted back to the original
570 if (addr->mc_promisc) {
571 struct esw_mc_addr *esw_mc =
572 l2addr_hash_find(esw->mc_table,
577 "Failed to MAC(%pM) in mcast DB\n",
582 addr->mc_promisc = false;
587 addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
591 "Failed to add MAC(%pM) to vport[%d] DB\n",
592 mac_list[i], vport->vport);
595 addr->vport = vport->vport;
596 addr->action = MLX5_ACTION_ADD;
602 /* Sync the vport mc promisc list with the eswitch mc table.
603 * Must be called after esw_update_vport_addr_list.
605 static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
606 struct mlx5_vport *vport)
608 struct l2addr_node *node;
609 struct vport_addr *addr;
610 struct hlist_head *hash;
611 struct hlist_node *tmp;
614 hash = vport->mc_list;
616 for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
617 u8 *mac = node->addr;
619 addr = l2addr_hash_find(hash, mac, struct vport_addr);
621 if (addr->action == MLX5_ACTION_DEL)
622 addr->action = MLX5_ACTION_NONE;
625 addr = l2addr_hash_add(hash, mac, struct vport_addr,
629 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
633 addr->vport = vport->vport;
634 addr->action = MLX5_ACTION_ADD;
635 addr->mc_promisc = true;
639 /* Apply vport rx mode to HW FDB table */
640 static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
641 struct mlx5_vport *vport,
642 bool promisc, bool mc_promisc)
644 struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
646 if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
650 vport->allmulti_rule =
651 esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
652 if (!allmulti_addr->uplink_rule)
653 allmulti_addr->uplink_rule =
654 esw_fdb_set_vport_allmulti_rule(esw,
656 allmulti_addr->refcnt++;
657 } else if (vport->allmulti_rule) {
658 mlx5_del_flow_rules(vport->allmulti_rule);
659 vport->allmulti_rule = NULL;
661 if (--allmulti_addr->refcnt > 0)
664 if (allmulti_addr->uplink_rule)
665 mlx5_del_flow_rules(allmulti_addr->uplink_rule);
666 allmulti_addr->uplink_rule = NULL;
670 if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
674 vport->promisc_rule =
675 esw_fdb_set_vport_promisc_rule(esw, vport->vport);
676 } else if (vport->promisc_rule) {
677 mlx5_del_flow_rules(vport->promisc_rule);
678 vport->promisc_rule = NULL;
682 /* Sync vport rx mode from vport context */
683 static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
684 struct mlx5_vport *vport)
691 err = mlx5_query_nic_vport_promisc(esw->dev,
698 esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
699 vport->vport, promisc_all, promisc_mc);
701 if (!vport->info.trusted || !vport->enabled) {
707 esw_apply_vport_rx_mode(esw, vport, promisc_all,
708 (promisc_all || promisc_mc));
711 void esw_vport_change_handle_locked(struct mlx5_vport *vport)
713 struct mlx5_core_dev *dev = vport->dev;
714 struct mlx5_eswitch *esw = dev->priv.eswitch;
717 if (!MLX5_CAP_GEN(dev, log_max_l2_table))
720 mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
721 esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
724 if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
725 esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
726 esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
729 if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
730 esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
732 if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
733 esw_update_vport_rx_mode(esw, vport);
734 if (!IS_ERR_OR_NULL(vport->allmulti_rule))
735 esw_update_vport_mc_promisc(esw, vport);
738 if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
739 esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
741 esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
743 arm_vport_context_events_cmd(dev, vport->vport,
744 vport->enabled_events);
747 static void esw_vport_change_handler(struct work_struct *work)
749 struct mlx5_vport *vport =
750 container_of(work, struct mlx5_vport, vport_change_handler);
751 struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
753 mutex_lock(&esw->state_lock);
754 esw_vport_change_handle_locked(vport);
755 mutex_unlock(&esw->state_lock);
758 static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac)
760 ((u8 *)node_guid)[7] = mac[0];
761 ((u8 *)node_guid)[6] = mac[1];
762 ((u8 *)node_guid)[5] = mac[2];
763 ((u8 *)node_guid)[4] = 0xff;
764 ((u8 *)node_guid)[3] = 0xfe;
765 ((u8 *)node_guid)[2] = mac[3];
766 ((u8 *)node_guid)[1] = mac[4];
767 ((u8 *)node_guid)[0] = mac[5];
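/* Worked example (little-endian host assumed): MAC 00:11:22:33:44:55 yields
 * the EUI-64-style node GUID 00:11:22:ff:fe:33:44:55, i.e. ff:fe is stuffed
 * into the middle of the MAC. Note the universal/local bit of mac[0] is left
 * unchanged here.
 */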
770 static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
771 struct mlx5_vport *vport)
773 if (esw->mode == MLX5_ESWITCH_LEGACY)
774 return esw_legacy_vport_acl_setup(esw, vport);
776 return esw_vport_create_offloads_acl_tables(esw, vport);
779 static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
780 struct mlx5_vport *vport)
782 if (esw->mode == MLX5_ESWITCH_LEGACY)
783 esw_legacy_vport_acl_cleanup(esw, vport);
785 esw_vport_destroy_offloads_acl_tables(esw, vport);
788 static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
790 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
795 if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
798 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
802 err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
807 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
808 vport->info.roce_enabled = MLX5_GET(cmd_hca_cap, hca_caps, roce);
810 memset(query_ctx, 0, query_out_sz);
811 err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
816 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
817 vport->info.mig_enabled = MLX5_GET(cmd_hca_cap_2, hca_caps, migratable);
823 static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
825 bool vst_mode_steering = esw_vst_mode_is_steering(esw);
826 u16 vport_num = vport->vport;
830 err = esw_vport_setup_acl(esw, vport);
834 if (mlx5_esw_is_manager_vport(esw, vport_num))
837 err = mlx5_esw_vport_caps_get(esw, vport);
841 mlx5_modify_vport_admin_state(esw->dev,
842 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
844 vport->info.link_state);
846 /* Host PF has its own mac/guid. */
848 mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
850 mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
851 vport->info.node_guid);
854 flags = (vport->info.vlan || vport->info.qos) ?
855 SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
856 if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering)
857 modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
858 vport->info.qos, flags);
863 esw_vport_cleanup_acl(esw, vport);
867 /* Don't clean up vport->info, it's needed to restore the vport configuration */
868 static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
870 u16 vport_num = vport->vport;
872 if (!mlx5_esw_is_manager_vport(esw, vport_num))
873 mlx5_modify_vport_admin_state(esw->dev,
874 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
876 MLX5_VPORT_ADMIN_STATE_DOWN);
878 mlx5_esw_qos_vport_disable(esw, vport);
879 esw_vport_cleanup_acl(esw, vport);
882 int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
883 enum mlx5_eswitch_vport_event enabled_events)
885 struct mlx5_vport *vport;
888 vport = mlx5_eswitch_get_vport(esw, vport_num);
890 return PTR_ERR(vport);
892 mutex_lock(&esw->state_lock);
893 WARN_ON(vport->enabled);
895 esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
897 ret = esw_vport_setup(esw, vport);
901 /* Sync with current vport context */
902 vport->enabled_events = enabled_events;
903 vport->enabled = true;
905 /* The esw manager is trusted by default. The host PF (vport 0) is trusted as well
906 * on a smartNIC, as it is a vport group manager.
908 if (mlx5_esw_is_manager_vport(esw, vport_num) ||
909 (!vport_num && mlx5_core_is_ecpf(esw->dev)))
910 vport->info.trusted = true;
912 if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
913 MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
914 ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
916 goto err_vhca_mapping;
919 /* External controller host PF has a factory-programmed MAC.
920 * Read it from the device.
922 if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
923 mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
925 esw_vport_change_handle_locked(vport);
927 esw->enabled_vports++;
928 esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
930 mutex_unlock(&esw->state_lock);
934 esw_vport_cleanup(esw, vport);
935 mutex_unlock(&esw->state_lock);
939 void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
941 struct mlx5_vport *vport;
943 vport = mlx5_eswitch_get_vport(esw, vport_num);
947 mutex_lock(&esw->state_lock);
951 esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
952 /* Mark this vport as disabled to discard new events */
953 vport->enabled = false;
955 /* Disable events from this vport */
956 if (MLX5_CAP_GEN(esw->dev, log_max_l2_table))
957 arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
959 if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
960 MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
961 mlx5_esw_vport_vhca_id_clear(esw, vport_num);
963 /* We don't assume VFs will clean up after themselves.
964 * Calling the vport change handler while the vport is disabled will clean up
965 * the vport resources.
967 esw_vport_change_handle_locked(vport);
968 vport->enabled_events = 0;
969 esw_apply_vport_rx_mode(esw, vport, false, false);
970 esw_vport_cleanup(esw, vport);
971 esw->enabled_vports--;
974 mutex_unlock(&esw->state_lock);
977 static int eswitch_vport_event(struct notifier_block *nb,
978 unsigned long type, void *data)
980 struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
981 struct mlx5_eqe *eqe = data;
982 struct mlx5_vport *vport;
985 vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
986 vport = mlx5_eswitch_get_vport(esw, vport_num);
988 queue_work(esw->work_queue, &vport->vport_change_handler);
993 * mlx5_esw_query_functions - Returns raw output about function state
994 * @dev: Pointer to device to query
996 * mlx5_esw_query_functions() allocates a buffer, queries the device and
997 * returns a pointer to the raw functions-changed output on success. Otherwise it returns an ERR_PTR.
998 * Caller must free the memory using kvfree() when a valid pointer is returned.
1000 const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
1002 int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
1003 u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
1007 out = kvzalloc(outlen, GFP_KERNEL);
1009 return ERR_PTR(-ENOMEM);
1011 MLX5_SET(query_esw_functions_in, in, opcode,
1012 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
1014 err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
1019 return ERR_PTR(err);
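/* Illustrative usage sketch following the kvfree() contract documented above:
 *
 *	const u32 *out = mlx5_esw_query_functions(dev);
 *	u16 num_vfs;
 *
 *	if (IS_ERR(out))
 *		return PTR_ERR(out);
 *	num_vfs = MLX5_GET(query_esw_functions_out, out,
 *			   host_params_context.host_num_of_vfs);
 *	kvfree(out);
 */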
1022 static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
1024 MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
1025 mlx5_eq_notifier_register(esw->dev, &esw->nb);
1027 if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
1028 MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
1029 ESW_FUNCTIONS_CHANGED);
1030 mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
1034 static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
1036 if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
1037 mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
1039 mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
1041 flush_workqueue(esw->work_queue);
1044 static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
1046 struct mlx5_vport *vport;
1049 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
1050 memset(&vport->qos, 0, sizeof(vport->qos));
1051 memset(&vport->info, 0, sizeof(vport->info));
1052 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
1056 static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw)
1058 struct mlx5_vport *vport;
1061 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
1062 memset(&vport->qos, 0, sizeof(vport->qos));
1063 memset(&vport->info, 0, sizeof(vport->info));
1064 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
1068 /* Public E-Switch API */
1069 int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
1070 enum mlx5_eswitch_vport_event enabled_events)
1074 err = mlx5_esw_vport_enable(esw, vport_num, enabled_events);
1078 err = esw_offloads_load_rep(esw, vport_num);
1085 mlx5_esw_vport_disable(esw, vport_num);
1089 void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
1091 esw_offloads_unload_rep(esw, vport_num);
1092 mlx5_esw_vport_disable(esw, vport_num);
1095 void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
1097 struct mlx5_vport *vport;
1100 mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
1101 if (!vport->enabled)
1103 mlx5_eswitch_unload_vport(esw, vport->vport);
1107 static void mlx5_eswitch_unload_ec_vf_vports(struct mlx5_eswitch *esw,
1110 struct mlx5_vport *vport;
1113 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) {
1114 if (!vport->enabled)
1116 mlx5_eswitch_unload_vport(esw, vport->vport);
1120 int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
1121 enum mlx5_eswitch_vport_event enabled_events)
1123 struct mlx5_vport *vport;
1127 mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
1128 err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
1136 mlx5_eswitch_unload_vf_vports(esw, num_vfs);
1140 static int mlx5_eswitch_load_ec_vf_vports(struct mlx5_eswitch *esw, u16 num_ec_vfs,
1141 enum mlx5_eswitch_vport_event enabled_events)
1143 struct mlx5_vport *vport;
1147 mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) {
1148 err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
1156 mlx5_eswitch_unload_ec_vf_vports(esw, num_ec_vfs);
1160 static int host_pf_enable_hca(struct mlx5_core_dev *dev)
1162 if (!mlx5_core_is_ecpf(dev))
1165 /* Once the vport and representor are ready, take the external host PF
1166 * out of the initializing state. Enabling the HCA clears the iser->initializing
1167 * bit and host PF driver loading can progress.
1169 return mlx5_cmd_host_pf_enable_hca(dev);
1172 static void host_pf_disable_hca(struct mlx5_core_dev *dev)
1174 if (!mlx5_core_is_ecpf(dev))
1177 mlx5_cmd_host_pf_disable_hca(dev);
1180 /* mlx5_eswitch_enable_pf_vf_vports() enables the PF, ECPF and VF vports,
1181 * whichever are present on the eswitch.
1184 mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
1185 enum mlx5_eswitch_vport_event enabled_events)
1189 /* Enable PF vport */
1190 ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
1194 /* Enable external host PF HCA */
1195 ret = host_pf_enable_hca(esw->dev);
1199 /* Enable ECPF vport */
1200 if (mlx5_ecpf_vport_exists(esw->dev)) {
1201 ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
1204 if (mlx5_core_ec_sriov_enabled(esw->dev)) {
1205 ret = mlx5_eswitch_load_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs,
1212 /* Enable VF vports */
1213 ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs,
1220 if (mlx5_core_ec_sriov_enabled(esw->dev))
1221 mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
1223 if (mlx5_ecpf_vport_exists(esw->dev))
1224 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
1226 host_pf_disable_hca(esw->dev);
1228 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
1232 /* mlx5_eswitch_disable_pf_vf_vports() disables the PF, ECPF and VF vports,
1233 * whichever were previously enabled on the eswitch.
1235 void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
1237 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
1239 if (mlx5_ecpf_vport_exists(esw->dev)) {
1240 if (mlx5_core_ec_sriov_enabled(esw->dev))
1241 mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_vfs);
1242 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
1245 host_pf_disable_hca(esw->dev);
1246 mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
1249 static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
1251 struct devlink *devlink = priv_to_devlink(esw->dev);
1252 union devlink_param_value val;
1255 err = devl_param_driverinit_value_get(devlink,
1256 MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
1259 esw->params.large_group_num = val.vu32;
1262 "Devlink can't get param fdb_large_groups, uses default (%d).\n",
1263 ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
1264 esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
1269 mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
1276 if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
1277 esw->esw_funcs.num_vfs = num_vfs;
1281 out = mlx5_esw_query_functions(esw->dev);
1285 esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
1286 host_params_context.host_num_of_vfs);
1287 if (mlx5_core_ec_sriov_enabled(esw->dev))
1288 esw->esw_funcs.num_ec_vfs = num_vfs;
1293 static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
1295 struct mlx5_esw_event_info info = {};
1297 info.new_mode = mode;
1299 blocking_notifier_call_chain(&esw->n_head, 0, &info);
1302 static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
1304 struct mlx5_core_dev *dev = esw->dev;
1308 if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED)
1311 total_vports = mlx5_eswitch_get_total_vports(dev);
1313 if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
1314 err = mlx5_fs_egress_acls_init(dev, total_vports);
1318 esw_warn(dev, "egress ACL is not supported by FW\n");
1321 if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
1322 err = mlx5_fs_ingress_acls_init(dev, total_vports);
1326 esw_warn(dev, "ingress ACL is not supported by FW\n");
1328 esw->flags |= MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
1332 if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
1333 mlx5_fs_egress_acls_cleanup(dev);
1337 static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
1339 struct mlx5_core_dev *dev = esw->dev;
1341 esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
1342 if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
1343 mlx5_fs_ingress_acls_cleanup(dev);
1344 if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
1345 mlx5_fs_egress_acls_cleanup(dev);
1349 * mlx5_eswitch_enable_locked - Enable eswitch
1350 * @esw: Pointer to eswitch
1351 * @num_vfs: Enable eswitch for the given number of VFs. This is optional.
1352 * Valid values are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
1353 * The caller should pass num_vfs > 0 when enabling the eswitch for
1354 * VF vports, and num_vfs = 0 when the eswitch
1355 * is enabled without SR-IOV VFs or when the caller
1356 * is unaware of the SR-IOV state of the host PF on an ECPF-based
1357 * eswitch. The caller should pass num_vfs < 0 when it should be
1358 * completely ignored; this is typically the case when the eswitch
1359 * is enabled without SR-IOV regardless of the PF/ECPF system.
1360 * mlx5_eswitch_enable_locked() enables the eswitch in either legacy or offloads
1361 * mode. If num_vfs >= 0 is provided, it sets up the VF-related eswitch vports.
1362 * It returns 0 on success or an error code on failure.
1364 int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
1368 lockdep_assert_held(&esw->mode_lock);
1370 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
1371 esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
1375 mlx5_eswitch_get_devlink_param(esw);
1377 err = mlx5_esw_acls_ns_init(esw);
1381 mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
1383 if (esw->mode == MLX5_ESWITCH_LEGACY) {
1384 err = esw_legacy_enable(esw);
1386 mlx5_rescan_drivers(esw->dev);
1387 err = esw_offloads_enable(esw);
1393 esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
1395 mlx5_eswitch_event_handlers_register(esw);
1397 esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
1398 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
1399 esw->esw_funcs.num_vfs, esw->esw_funcs.num_ec_vfs, esw->enabled_vports);
1401 mlx5_esw_mode_change_notify(esw, esw->mode);
1406 mlx5_esw_acls_ns_cleanup(esw);
1411 * mlx5_eswitch_enable - Enable eswitch
1412 * @esw: Pointer to eswitch
1413 * @num_vfs: Enable the eswitch for the given number of VFs.
1414 * Caller must pass num_vfs > 0 when enabling the eswitch for
1416 * mlx5_eswitch_enable() returns 0 on success or an error code on failure.
1418 int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
1423 if (!mlx5_esw_allowed(esw))
1426 devl_assert_locked(priv_to_devlink(esw->dev));
1428 toggle_lag = !mlx5_esw_is_fdb_created(esw);
1431 mlx5_lag_disable_change(esw->dev);
1433 down_write(&esw->mode_lock);
1434 if (!mlx5_esw_is_fdb_created(esw)) {
1435 ret = mlx5_eswitch_enable_locked(esw, num_vfs);
1437 enum mlx5_eswitch_vport_event vport_events;
1439 vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ?
1440 MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE;
1441 /* If this is the ECPF, the number of host VFs is managed via the
1442 * eswitch function change event handler, and any num_vfs provided
1443 * here are intended to be EC VFs.
1445 if (!mlx5_core_is_ecpf(esw->dev)) {
1446 ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
1448 esw->esw_funcs.num_vfs = num_vfs;
1449 } else if (mlx5_core_ec_sriov_enabled(esw->dev)) {
1450 ret = mlx5_eswitch_load_ec_vf_vports(esw, num_vfs, vport_events);
1452 esw->esw_funcs.num_ec_vfs = num_vfs;
1456 up_write(&esw->mode_lock);
1459 mlx5_lag_enable_change(esw->dev);
1464 /* When disabling SR-IOV, free driver-level resources. */
1465 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
1467 if (!mlx5_esw_allowed(esw))
1470 devl_assert_locked(priv_to_devlink(esw->dev));
1471 down_write(&esw->mode_lock);
1472 /* If the driver is unloaded, this function is called twice by remove_one()
1473 * and mlx5_unload(). Prevent the second call.
1475 if (!esw->esw_funcs.num_vfs && !esw->esw_funcs.num_ec_vfs && !clear_vf)
1478 esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
1479 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
1480 esw->esw_funcs.num_vfs, esw->esw_funcs.num_ec_vfs, esw->enabled_vports);
1482 if (!mlx5_core_is_ecpf(esw->dev)) {
1483 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
1485 mlx5_eswitch_clear_vf_vports_info(esw);
1486 } else if (mlx5_core_ec_sriov_enabled(esw->dev)) {
1487 mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
1489 mlx5_eswitch_clear_ec_vf_vports_info(esw);
1492 if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
1493 struct devlink *devlink = priv_to_devlink(esw->dev);
1495 devl_rate_nodes_destroy(devlink);
1497 /* Destroy the legacy FDB when disabling SR-IOV in legacy mode. */
1498 if (esw->mode == MLX5_ESWITCH_LEGACY)
1499 mlx5_eswitch_disable_locked(esw);
1501 if (!mlx5_core_is_ecpf(esw->dev))
1502 esw->esw_funcs.num_vfs = 0;
1504 esw->esw_funcs.num_ec_vfs = 0;
1507 up_write(&esw->mode_lock);
1510 /* Free resources for the corresponding eswitch mode. It is called by devlink
1511 * when changing eswitch mode or at module unload when the driver is removed.
1513 void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
1515 struct devlink *devlink = priv_to_devlink(esw->dev);
1517 /* Notify eswitch users that it is exiting from the current mode,
1518 * so that they can do the necessary cleanup before the eswitch is disabled.
1520 mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY);
1522 mlx5_eswitch_event_handlers_unregister(esw);
1524 esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
1525 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
1526 esw->esw_funcs.num_vfs, esw->esw_funcs.num_ec_vfs, esw->enabled_vports);
1528 if (esw->fdb_table.flags & MLX5_ESW_FDB_CREATED) {
1529 esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
1530 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
1531 esw_offloads_disable(esw);
1532 else if (esw->mode == MLX5_ESWITCH_LEGACY)
1533 esw_legacy_disable(esw);
1534 mlx5_esw_acls_ns_cleanup(esw);
1537 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
1538 devl_rate_nodes_destroy(devlink);
1541 void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
1543 if (!mlx5_esw_allowed(esw))
1546 devl_assert_locked(priv_to_devlink(esw->dev));
1547 mlx5_lag_disable_change(esw->dev);
1548 down_write(&esw->mode_lock);
1549 mlx5_eswitch_disable_locked(esw);
1550 esw->mode = MLX5_ESWITCH_LEGACY;
1551 up_write(&esw->mode_lock);
1552 mlx5_lag_enable_change(esw->dev);
1555 static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out)
1557 u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
1558 u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
1560 MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
1561 MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
1562 MLX5_SET(query_hca_cap_in, in, function_id, MLX5_VPORT_PF);
1563 MLX5_SET(query_hca_cap_in, in, other_function, true);
1564 return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
1567 int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id)
1570 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
1575 if (!mlx5_core_is_ecpf(dev)) {
1580 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
1584 err = mlx5_query_hca_cap_host_pf(dev, query_ctx);
1588 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
1589 *max_sfs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_sf);
1590 *sf_base_id = MLX5_GET(cmd_hca_cap, hca_caps, sf_base_id);
1597 static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw,
1598 int index, u16 vport_num)
1600 struct mlx5_vport *vport;
1603 vport = kzalloc(sizeof(*vport), GFP_KERNEL);
1607 vport->dev = esw->dev;
1608 vport->vport = vport_num;
1609 vport->index = index;
1610 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
1611 INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler);
1612 err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL);
1616 esw->total_vports++;
1624 static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
1626 xa_erase(&esw->vports, vport->vport);
1630 static void mlx5_esw_vports_cleanup(struct mlx5_eswitch *esw)
1632 struct mlx5_vport *vport;
1635 mlx5_esw_for_each_vport(esw, i, vport)
1636 mlx5_esw_vport_free(esw, vport);
1637 xa_destroy(&esw->vports);
1640 static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
1642 struct mlx5_core_dev *dev = esw->dev;
1643 u16 max_host_pf_sfs;
1649 xa_init(&esw->vports);
1651 err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF);
1654 if (esw->first_host_vport == MLX5_VPORT_PF)
1655 xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
1658 for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
1659 err = mlx5_esw_vport_alloc(esw, idx, idx);
1662 xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
1663 xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
1666 base_sf_num = mlx5_sf_start_function_id(dev);
1667 for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
1668 err = mlx5_esw_vport_alloc(esw, idx, base_sf_num + i);
1671 xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
1675 err = mlx5_esw_sf_max_hpf_functions(dev, &max_host_pf_sfs, &base_sf_num);
1678 for (i = 0; i < max_host_pf_sfs; i++) {
1679 err = mlx5_esw_vport_alloc(esw, idx, base_sf_num + i);
1682 xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
1686 if (mlx5_core_ec_sriov_enabled(esw->dev)) {
1687 int ec_vf_base_num = mlx5_core_ec_vf_vport_base(dev);
1689 for (i = 0; i < mlx5_core_max_ec_vfs(esw->dev); i++) {
1690 err = mlx5_esw_vport_alloc(esw, idx, ec_vf_base_num + i);
1697 if (mlx5_ecpf_vport_exists(dev) ||
1698 mlx5_core_is_ecpf_esw_manager(dev)) {
1699 err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_ECPF);
1704 err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_UPLINK);
1710 mlx5_esw_vports_cleanup(esw);
1714 static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id,
1715 struct devlink_param_gset_ctx *ctx)
1717 struct mlx5_core_dev *dev = devlink_priv(devlink);
1719 if (!MLX5_ESWITCH_MANAGER(dev))
1723 return mlx5_lag_mpesw_enable(dev);
1725 mlx5_lag_mpesw_disable(dev);
1729 static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id,
1730 struct devlink_param_gset_ctx *ctx)
1732 struct mlx5_core_dev *dev = devlink_priv(devlink);
1734 ctx->val.vbool = mlx5_lag_is_mpesw(dev);
1738 static const struct devlink_param mlx5_eswitch_params[] = {
1739 DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_MULTIPORT,
1740 "esw_multiport", DEVLINK_PARAM_TYPE_BOOL,
1741 BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1742 mlx5_devlink_esw_multiport_get,
1743 mlx5_devlink_esw_multiport_set, NULL),
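/* Illustrative userspace sketch: the "esw_multiport" runtime parameter
 * registered above maps to a devlink CLI toggle (the PCI address below is
 * hypothetical):
 *
 *	$ devlink dev param set pci/0000:08:00.0 \
 *		name esw_multiport value true cmode runtime
 */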
1746 int mlx5_eswitch_init(struct mlx5_core_dev *dev)
1748 struct mlx5_eswitch *esw;
1751 if (!MLX5_VPORT_MANAGER(dev) && !MLX5_ESWITCH_MANAGER(dev))
1754 esw = kzalloc(sizeof(*esw), GFP_KERNEL);
1758 err = devl_params_register(priv_to_devlink(dev), mlx5_eswitch_params,
1759 ARRAY_SIZE(mlx5_eswitch_params));
1764 esw->manager_vport = mlx5_eswitch_manager_vport(dev);
1765 esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
1767 esw->debugfs_root = debugfs_create_dir("esw", mlx5_debugfs_get_dev_root(dev));
1768 esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
1769 if (!esw->work_queue) {
1774 err = mlx5_esw_vports_init(esw);
1778 err = esw_offloads_init(esw);
1782 mutex_init(&esw->offloads.encap_tbl_lock);
1783 hash_init(esw->offloads.encap_tbl);
1784 mutex_init(&esw->offloads.decap_tbl_lock);
1785 hash_init(esw->offloads.decap_tbl);
1786 mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
1787 atomic64_set(&esw->offloads.num_flows, 0);
1788 ida_init(&esw->offloads.vport_metadata_ida);
1789 xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
1790 mutex_init(&esw->state_lock);
1791 init_rwsem(&esw->mode_lock);
1792 refcount_set(&esw->qos.refcnt, 0);
1794 esw->enabled_vports = 0;
1795 esw->mode = MLX5_ESWITCH_LEGACY;
1796 esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
1797 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
1798 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
1799 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
1801 esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
1802 if (MLX5_ESWITCH_MANAGER(dev) &&
1803 mlx5_esw_vport_match_metadata_supported(esw))
1804 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
1806 dev->priv.eswitch = esw;
1807 BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
1810 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
1812 MLX5_MAX_UC_PER_VPORT(dev),
1813 MLX5_MAX_MC_PER_VPORT(dev));
1817 mlx5_esw_vports_cleanup(esw);
1819 if (esw->work_queue)
1820 destroy_workqueue(esw->work_queue);
1821 debugfs_remove_recursive(esw->debugfs_root);
1822 devl_params_unregister(priv_to_devlink(dev), mlx5_eswitch_params,
1823 ARRAY_SIZE(mlx5_eswitch_params));
1829 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
1834 esw_info(esw->dev, "cleanup\n");
1836 esw->dev->priv.eswitch = NULL;
1837 destroy_workqueue(esw->work_queue);
1838 WARN_ON(refcount_read(&esw->qos.refcnt));
1839 mutex_destroy(&esw->state_lock);
1840 WARN_ON(!xa_empty(&esw->offloads.vhca_map));
1841 xa_destroy(&esw->offloads.vhca_map);
1842 ida_destroy(&esw->offloads.vport_metadata_ida);
1843 mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
1844 mutex_destroy(&esw->offloads.encap_tbl_lock);
1845 mutex_destroy(&esw->offloads.decap_tbl_lock);
1846 esw_offloads_cleanup(esw);
1847 mlx5_esw_vports_cleanup(esw);
1848 debugfs_remove_recursive(esw->debugfs_root);
1849 devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params,
1850 ARRAY_SIZE(mlx5_eswitch_params));
1854 /* Vport Administration */
1856 mlx5_esw_set_vport_mac_locked(struct mlx5_eswitch *esw,
1857 struct mlx5_vport *evport, const u8 *mac)
1859 u16 vport_num = evport->vport;
1863 if (is_multicast_ether_addr(mac))
1866 if (evport->info.spoofchk && !is_valid_ether_addr(mac))
1867 mlx5_core_warn(esw->dev,
1868 "Set invalid MAC while spoofchk is on, vport(%d)\n",
1871 err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac);
1873 mlx5_core_warn(esw->dev,
1874 "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
1879 node_guid_gen_from_mac(&node_guid, mac);
1880 err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid);
1882 mlx5_core_warn(esw->dev,
1883 "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
1886 ether_addr_copy(evport->info.mac, mac);
1887 evport->info.node_guid = node_guid;
1888 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
1889 err = esw_acl_ingress_lgcy_setup(esw, evport);
1894 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1895 u16 vport, const u8 *mac)
1897 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
1901 return PTR_ERR(evport);
1903 mutex_lock(&esw->state_lock);
1904 err = mlx5_esw_set_vport_mac_locked(esw, evport, mac);
1905 mutex_unlock(&esw->state_lock);
1909 static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark)
1911 return xa_get_mark(&esw->vports, vport_num, mark);
1914 bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
1916 return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF);
1919 bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
1921 return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
1924 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
1925 u16 vport, int link_state)
1927 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
1928 int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
1929 int other_vport = 1;
1932 if (!mlx5_esw_allowed(esw))
1935 return PTR_ERR(evport);
1937 if (vport == MLX5_VPORT_UPLINK) {
1938 opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
1942 mutex_lock(&esw->state_lock);
1943 if (esw->mode != MLX5_ESWITCH_LEGACY) {
1948 err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
1950 mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
1955 evport->info.link_state = link_state;
1958 mutex_unlock(&esw->state_lock);
1962 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
1963 u16 vport, struct ifla_vf_info *ivi)
1965 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
1968 return PTR_ERR(evport);
1970 memset(ivi, 0, sizeof(*ivi));
1971 ivi->vf = vport - 1;
1973 mutex_lock(&esw->state_lock);
1974 ether_addr_copy(ivi->mac, evport->info.mac);
1975 ivi->linkstate = evport->info.link_state;
1976 ivi->vlan = evport->info.vlan;
1977 ivi->qos = evport->info.qos;
1978 ivi->spoofchk = evport->info.spoofchk;
1979 ivi->trusted = evport->info.trusted;
1980 if (evport->qos.enabled) {
1981 ivi->min_tx_rate = evport->qos.min_rate;
1982 ivi->max_tx_rate = evport->qos.max_rate;
1984 mutex_unlock(&esw->state_lock);
1989 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
1990 u16 vport, u16 vlan, u8 qos, u8 set_flags)
1992 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
1993 bool vst_mode_steering = esw_vst_mode_is_steering(esw);
1997 return PTR_ERR(evport);
1998 if (vlan > 4095 || qos > 7)
2001 if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering) {
2002 err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
2007 evport->info.vlan = vlan;
2008 evport->info.qos = qos;
2009 if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
2010 err = esw_acl_ingress_lgcy_setup(esw, evport);
2013 err = esw_acl_egress_lgcy_setup(esw, evport);
2019 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
2021 struct ifla_vf_stats *vf_stats)
2023 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
2024 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
2025 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
2026 struct mlx5_vport_drop_stats stats = {};
2031 return PTR_ERR(vport);
2033 out = kvzalloc(outlen, GFP_KERNEL);
2037 MLX5_SET(query_vport_counter_in, in, opcode,
2038 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
2039 MLX5_SET(query_vport_counter_in, in, op_mod, 0);
2040 MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
2041 MLX5_SET(query_vport_counter_in, in, other_vport, 1);
2043 err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out);
2047 #define MLX5_GET_CTR(p, x) \
2048 MLX5_GET64(query_vport_counter_out, p, x)
2050 memset(vf_stats, 0, sizeof(*vf_stats));
2051 vf_stats->rx_packets =
2052 MLX5_GET_CTR(out, received_eth_unicast.packets) +
2053 MLX5_GET_CTR(out, received_ib_unicast.packets) +
2054 MLX5_GET_CTR(out, received_eth_multicast.packets) +
2055 MLX5_GET_CTR(out, received_ib_multicast.packets) +
2056 MLX5_GET_CTR(out, received_eth_broadcast.packets);
2058 vf_stats->rx_bytes =
2059 MLX5_GET_CTR(out, received_eth_unicast.octets) +
2060 MLX5_GET_CTR(out, received_ib_unicast.octets) +
2061 MLX5_GET_CTR(out, received_eth_multicast.octets) +
2062 MLX5_GET_CTR(out, received_ib_multicast.octets) +
2063 MLX5_GET_CTR(out, received_eth_broadcast.octets);
2065 vf_stats->tx_packets =
2066 MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
2067 MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
2068 MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
2069 MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
2070 MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
2072 vf_stats->tx_bytes =
2073 MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
2074 MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
2075 MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
2076 MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
2077 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
2079 vf_stats->multicast =
2080 MLX5_GET_CTR(out, received_eth_multicast.packets) +
2081 MLX5_GET_CTR(out, received_ib_multicast.packets);
2083 vf_stats->broadcast =
2084 MLX5_GET_CTR(out, received_eth_broadcast.packets);
2086 err = mlx5_esw_query_vport_drop_stats(esw->dev, vport, &stats);
2089 vf_stats->rx_dropped = stats.rx_dropped;
2090 vf_stats->tx_dropped = stats.tx_dropped;
2097 u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
2099 struct mlx5_eswitch *esw = dev->priv.eswitch;
2101 return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_LEGACY;
2103 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
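/* Illustrative caller-side sketch: callers typically branch on the reported
 * mode, e.g.:
 *
 *	if (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS)
 *		... take the switchdev/offloads path ...
 */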
2105 enum devlink_eswitch_encap_mode
2106 mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
2108 struct mlx5_eswitch *esw;
2110 esw = dev->priv.eswitch;
2111 return (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) ? esw->offloads.encap :
2112 DEVLINK_ESWITCH_ENCAP_MODE_NONE;
2114 EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
2116 bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
2117 struct mlx5_core_dev *dev1)
2119 return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
2120 dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
2123 int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb)
2125 return blocking_notifier_chain_register(&esw->n_head, nb);
2128 void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb)
2130 blocking_notifier_chain_unregister(&esw->n_head, nb);
2134 * mlx5_esw_hold() - Try to take a read lock on esw mode lock.
2135 * @mdev: mlx5 core device.
2137 * Should be called by users of esw resources.
2139 * Return: true on success, false otherwise.
2141 bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
2143 struct mlx5_eswitch *esw = mdev->priv.eswitch;
2145 /* e.g. a VF doesn't have an eswitch, so there is nothing to do */
2146 if (!mlx5_esw_allowed(esw))
2149 if (down_read_trylock(&esw->mode_lock) != 0)
2156 * mlx5_esw_release() - Release a read lock on esw mode lock.
2157 * @mdev: mlx5 core device.
2159 void mlx5_esw_release(struct mlx5_core_dev *mdev)
2161 struct mlx5_eswitch *esw = mdev->priv.eswitch;
2163 if (mlx5_esw_allowed(esw))
2164 up_read(&esw->mode_lock);
2168 * mlx5_esw_get() - Increase esw user count.
2169 * @mdev: mlx5 core device.
2171 void mlx5_esw_get(struct mlx5_core_dev *mdev)
2173 struct mlx5_eswitch *esw = mdev->priv.eswitch;
2175 if (mlx5_esw_allowed(esw))
2176 atomic64_inc(&esw->user_count);
2180 * mlx5_esw_put() - Decrease esw user count.
2181 * @mdev: mlx5 core device.
2183 void mlx5_esw_put(struct mlx5_core_dev *mdev)
2185 struct mlx5_eswitch *esw = mdev->priv.eswitch;
2187 if (mlx5_esw_allowed(esw))
2188 atomic64_dec_if_positive(&esw->user_count);
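/* Illustrative sketch of one plausible pairing of the four helpers above
 * (caller-side; the error code shown is the caller's choice).
 * mlx5_esw_hold() pairs with mlx5_esw_release(), and mlx5_esw_get() with
 * mlx5_esw_put():
 *
 *	if (!mlx5_esw_hold(mdev))
 *		return -EBUSY;
 *	mlx5_esw_get(mdev);
 *	mlx5_esw_release(mdev);
 *	...
 *	mlx5_esw_put(mdev);
 */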
2192 * mlx5_esw_try_lock() - Take a write lock on esw mode lock.
2193 * @esw: eswitch device.
2195 * Should be called by esw mode change routine.
2198 * * esw mode - if successfully locked and refcount is 0.
2199 * * -EBUSY - refcount is not 0.
2200 * * -EINVAL - In the middle of switching mode or lock is already held.
2202 int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
2204 if (down_write_trylock(&esw->mode_lock) == 0)
2207 if (atomic64_read(&esw->user_count) > 0) {
2208 up_write(&esw->mode_lock);
2216 * mlx5_esw_unlock() - Release write lock on esw mode lock
2217 * @esw: eswitch device.
2219 void mlx5_esw_unlock(struct mlx5_eswitch *esw)
2221 up_write(&esw->mode_lock);
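/* Illustrative mode-change sketch using the two helpers above; per the
 * documentation of mlx5_esw_try_lock(), a negative return is an error and a
 * non-negative return is the current esw mode:
 *
 *	int mode = mlx5_esw_try_lock(esw);
 *
 *	if (mode < 0)
 *		return mode;
 *	...
 *	mlx5_esw_unlock(esw);
 */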
2225 * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
2227 * @dev: Pointer to core device
2229 * mlx5_eswitch_get_total_vports returns the total number of eswitch vports.
2231 u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
2233 struct mlx5_eswitch *esw;
2235 esw = dev->priv.eswitch;
2236 return mlx5_esw_allowed(esw) ? esw->total_vports : 0;
2238 EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
2241 * mlx5_eswitch_get_core_dev - Get the mdev device
2242 * @esw: eswitch device.
2244 * Return the Mellanox core device which manages the eswitch.
2246 struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
2248 return mlx5_esw_allowed(esw) ? esw->dev : NULL;
2250 EXPORT_SYMBOL(mlx5_eswitch_get_core_dev);