/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/fs_chains.h"
#include "en/tc/sample.h"
enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
	MLX5_MAPPED_OBJ_INT_PORT_METADATA,
	MLX5_MAPPED_OBJ_ACT_MISS,
};

struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	u32 int_port_metadata;
};
#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))
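/* Illustrative example of the two macros above (the capability values are
 * made up, not read from any real device): a device reporting
 * log_max_current_uc_list == 7 allows MLX5_MAX_UC_PER_VPORT(dev) ==
 * 1 << 7 == 128 unicast L2 addresses per vport, and
 * log_max_current_mc_list == 4 allows 16 multicast addresses.
 */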
#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

enum {
	MAPPING_TYPE_TUNNEL_ENC_OPTS,
	MAPPING_TYPE_INT_PORT,
};
struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add default match-all FTE entry to tag ingress
		 * packet with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		/* Optional group to add a drop all rule */
		struct mlx5_flow_group *drop_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
		struct mlx5_flow_handle *drop_rule;
	} offloads;
};
enum vport_egress_acl_type {
	VPORT_EGRESS_ACL_TYPE_DEFAULT,
	VPORT_EGRESS_ACL_TYPE_SHARED_FDB,
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	enum vport_egress_acl_type type;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
			struct xarray bounce_rules;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8 mac[ETH_ALEN];
	u16 vlan;
	int link_state;
	u8 qos;
	u8 spoofchk: 1;
	u8 trusted: 1;
};
/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport {
	struct mlx5_core_dev *dev;
	struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct vport_change_handler;

	struct vport_ingress ingress;
	struct vport_egress egress;
	u32 default_metadata;

	struct mlx5_vport_info info;

	struct mlx5_esw_rate_group *group;

	enum mlx5_eswitch_vport_event enabled_events;
	struct devlink_port *dl_port;
};
struct mlx5_esw_indir_table;

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *tc_miss_table;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules[MLX5_MAX_PORTS];
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;
		} offloads;
	};
	u32 flags;
};
struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_flow_group *vport_rx_drop_group;
	struct mlx5_flow_handle *vport_rx_drop_rule;
	struct xarray vport_reps;
	struct list_head peer_flows[MLX5_MAX_PORTS];
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	atomic64_t num_flows;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};
/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32 refcnt;
};

struct mlx5_host_work {
	struct work_struct work;
	struct mlx5_eswitch *esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb nb;
	u16 num_vfs;
};
enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
	MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};

struct mlx5_esw_bridge_offloads;

enum {
	MLX5_ESW_FDB_CREATED = BIT(0),
};
struct mlx5_eswitch {
	struct mlx5_core_dev *dev;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	struct dentry *debugfs_root;
	struct workqueue_struct *work_queue;
	struct xarray vports;

	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	struct {
		struct mlx5_esw_rate_group *group0;
		struct list_head groups; /* Protected by esw->state_lock */

		/* Protected by esw->state_lock.
		 * Initially 0, meaning no QoS users and QoS is disabled.
		 */
		refcount_t refcnt;
	} qos;

	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_esw_offload offloads;
	int mode;
	u16 manager_vport;
	u16 first_host_vport;
	u8 num_peers;
	struct mlx5_esw_functions esw_funcs;
	struct blocking_notifier_head n_head;
	struct xarray paired;
	struct mlx5_devcom_comp_dev *devcom;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup(struct mlx5_eswitch *esw);
int esw_offloads_init(struct mlx5_eswitch *esw);

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule);

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);

int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    struct mlx5_esw_rate_group *group,
				    struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);
struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);
void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);
enum {
	SET_VLAN_STRIP = BIT(0),
	SET_VLAN_INSERT = BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE = MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2 = MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3 = MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4 = MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 32

enum {
	MLX5_ESW_DEST_ENCAP = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE = BIT(2),
};
struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev *in_mdev;
	struct mlx5_core_dev *counter_dev;
	struct mlx5e_tc_int_port *dest_int_port;
	struct mlx5e_tc_int_port *int_port;

	__be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
	struct {
		struct mlx5_eswitch_rep *rep;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
				     u8 *hw_addr, int *hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
				     const u8 *hw_addr, int hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
					struct netlink_ext_ack *extack);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);
static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw)
{
	return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) &&
		MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan));
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
	       MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...) \
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...) \
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...) \
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
{
	return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}

/* The returned number is valid only when the dev is eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}
static inline bool mlx5_esw_is_owner(struct mlx5_eswitch *esw, u16 vport_num,
				     u16 esw_owner_vhca_id)
{
	return esw_owner_vhca_id == MLX5_CAP_GEN(esw->dev, vhca_id) ||
		(vport_num == MLX5_VPORT_UPLINK && mlx5_lag_is_master(esw->dev));
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}
static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}
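/* Worked example of the port index encoding above (the numbers are made up
 * for illustration, not taken from any real device): a function with
 * vhca_id == 0x12 and vport_num == 5 maps to the devlink port index
 * (0x12 << 16) | 5 == 0x00120005, and
 * mlx5_esw_devlink_port_index_to_vport_num(0x00120005) recovers 5 by masking
 * off the vhca_id held in the upper 16 bits.
 */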
static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED;
}

/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
/* Each mark identifies eswitch vport type.
 * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
 * a single mark.
 * MLX5_ESW_VPT_VF identifies a SRIOV VF vport.
 * MLX5_ESW_VPT_SF identifies SF vport.
 */
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2

/* The vport iterator is valid only after vports are initialized in mlx5_eswitch_init.
 * Borrowed the idea from xa_for_each_marked() but with support for desired last element.
 */
#define mlx5_esw_for_each_vport(esw, index, vport) \
	xa_for_each(&((esw)->vports), index, vport)

#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter) \
	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter) \
	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

#define mlx5_esw_for_each_vf_vport(esw, index, vport, last) \
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last) \
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)
/* This macro should only be used if EC SRIOV is enabled.
 *
 * Because there were no more marks available on the xarray this uses a
 * for_each_range approach. The range is only valid when EC SRIOV is enabled.
 */
#define mlx5_esw_for_each_ec_vf_vport(esw, index, vport, last)		\
	xa_for_each_range(&((esw)->vports),				\
			  index,					\
			  vport,					\
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base),	\
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
			  (last) - 1)
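/* Illustrative sketch of the marked vport iterators above; the surrounding
 * loop body is hypothetical, only the iterator macro and the esw fields it
 * touches come from this header:
 *
 *	struct mlx5_vport *vport;
 *	unsigned long index;
 *
 *	mlx5_esw_for_each_vf_vport(esw, index, vport, esw->esw_funcs.num_vfs) {
 *		// vport walks every entry in esw->vports marked with
 *		// MLX5_ESW_VPT_VF, up to and including the requested last
 *		// element; index holds the vport number of each entry.
 *	}
 */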
struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);
struct esw_vport_tbl_namespace {
	int max_fte;
	int max_num_groups;
	u32 flags;
};

struct mlx5_vport_tbl_attr {
	u32 chain;
	u16 prio;
	u16 vport;
	struct esw_vport_tbl_namespace *vport_ns;
};

struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

void mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					 u32 *flow_group_in,
					 int match_params);
void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
				   u16 vport,
				   struct mlx5_flow_spec *spec);
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);
/**
 * mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};
int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
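/* Illustrative sketch of consuming the mode-change event above; the callback
 * name and its body are hypothetical, only mlx5_esw_event_info and the
 * register/unregister helpers come from this header:
 *
 *	static int my_esw_mode_event(struct notifier_block *nb,
 *				     unsigned long event, void *data)
 *	{
 *		struct mlx5_esw_event_info *info = data;
 *
 *		// react to info->new_mode (e.g. legacy vs. switchdev)
 *		return NOTIFY_OK;
 *	}
 *
 * The notifier_block wrapping my_esw_mode_event would be passed to
 * mlx5_esw_event_notifier_register() and later removed with
 * mlx5_esw_event_notifier_unregister().
 */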
bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);

void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					     struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);

bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->esw_funcs.num_vfs;

	return 0;
}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->num_peers;

	return 0;
}

static inline struct mlx5_flow_table *
mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.offloads.slow_fdb;
}
#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num) { return vport_num; }

static inline int
mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw, int max_slaves) { return 0; }
static inline void
mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw) {}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }

static inline int
mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw) { return 0; }

static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev) { return true; }

static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev) {}

#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */