net/mlx5e: Handle IPsec offload for RX datapath in switchdev mode
author Jianbo Liu <jianbol@nvidia.com>
Mon, 31 Jul 2023 11:28:17 +0000 (14:28 +0300)
committer Jakub Kicinski <kuba@kernel.org>
Thu, 3 Aug 2023 01:37:29 +0000 (18:37 -0700)
Reuse the tun opts bits in reg c1 to pass the IPsec obj id to the datapath.
As this is only for RX SAs and only 11 bits are available, an xarray is
used to map each IPsec obj id to an index in the range 1 to 0x7ff, and
that index replaces the obj id when written to reg c1.

Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Link: https://lore.kernel.org/r/43d60fbcc9cd672a97d7e2a2f7fe6a3d9e9a776d.1690802064.git.leon@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
include/linux/mlx5/eswitch.h

index b5c773ffc76325e180f6bd04ed229ad1436a9a4f..b12fe3c5a2585bc8dfec7bb0f04b4b7eb2f09e28 100644 (file)
@@ -715,9 +715,20 @@ void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
        uplink_priv = &uplink_rpriv->uplink_priv;
        ct_priv = uplink_priv->ct_priv;
 
-       if (!mlx5_ipsec_is_rx_flow(cqe) &&
-           !mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv, zone_restore_id, tunnel_id,
-                                &tc_priv))
+#ifdef CONFIG_MLX5_EN_IPSEC
+       if (!(tunnel_id >> ESW_TUN_OPTS_BITS)) {
+               u32 mapped_id;
+               u32 metadata;
+
+               mapped_id = tunnel_id & ESW_IPSEC_RX_MAPPED_ID_MASK;
+               if (mapped_id &&
+                   !mlx5_esw_ipsec_rx_make_metadata(priv, mapped_id, &metadata))
+                       mlx5e_ipsec_offload_handle_rx_skb(priv->netdev, skb, metadata);
+       }
+#endif
+
+       if (!mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv,
+                                zone_restore_id, tunnel_id, &tc_priv))
                goto free_skb;
 
 forward:
index 578af9d7aef3c0d69a0a2c8b5e2f15f2f0d804a8..168b9600bde31b0a7bc7881f5ac0b08715706716 100644 (file)
@@ -210,6 +210,7 @@ struct mlx5e_ipsec_rx {
        struct mlx5e_ipsec_fc *fc;
        struct mlx5_fs_chains *chains;
        u8 allow_tunnel_mode : 1;
+       struct xarray ipsec_obj_id_map;
 };
 
 struct mlx5e_ipsec {
@@ -256,6 +257,7 @@ struct mlx5e_ipsec_sa_entry {
        struct mlx5e_ipsec_work *work;
        struct mlx5e_ipsec_dwork *dwork;
        struct mlx5e_ipsec_limits limits;
+       u32 rx_mapped_id;
 };
 
 struct mlx5_accel_pol_xfrm_attrs {
index 2db16e49abc102590a3473e97e5448e2a53852d1..84322e85cd23ac54108c9e6a8b5031f192485fe7 100644 (file)
@@ -1153,6 +1153,9 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
                err = setup_modify_header(ipsec, attrs->type,
                                          sa_entry->ipsec_obj_id | BIT(31),
                                          XFRM_DEV_OFFLOAD_IN, &flow_act);
+       else
+               err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);
+
        if (err)
                goto err_mod_header;
 
@@ -1641,6 +1644,7 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
        }
 
        mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+       mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
        rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
 }
 
@@ -1693,6 +1697,8 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
        kfree(ipsec->rx_ipv6);
 
        if (ipsec->is_uplink_rep) {
+               xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map);
+
                mutex_destroy(&ipsec->tx_esw->ft.mutex);
                WARN_ON(ipsec->tx_esw->ft.refcnt);
                kfree(ipsec->tx_esw);
@@ -1753,6 +1759,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
                mutex_init(&ipsec->tx_esw->ft.mutex);
                mutex_init(&ipsec->rx_esw->ft.mutex);
                ipsec->tx_esw->ns = ns_esw;
+               xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
        } else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
                ipsec->roce = mlx5_ipsec_fs_roce_init(mdev);
        }
index 8d6379ac4574e81b2ee73cbd0e9ed1d46f238ea2..c00fe0d5ea11e931ca12a0f541144f5db155c48c 100644 (file)
@@ -37,6 +37,7 @@
 #include "ipsec.h"
 #include "ipsec_rxtx.h"
 #include "en.h"
+#include "esw/ipsec_fs.h"
 
 enum {
        MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
@@ -355,3 +356,24 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
                atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
        }
 }
+
+int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata)
+{
+       struct mlx5e_ipsec *ipsec = priv->ipsec;
+       u32 ipsec_obj_id;
+       int err;
+
+       if (!ipsec || !ipsec->is_uplink_rep)
+               return -EINVAL;
+
+       err = mlx5_esw_ipsec_rx_ipsec_obj_id_search(priv, id, &ipsec_obj_id);
+       if (err) {
+               atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+               return err;
+       }
+
+       *metadata = MLX5_IPSEC_METADATA_CREATE(ipsec_obj_id,
+                                              MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED);
+
+       return 0;
+}
index 436e9a8a32d30dcdeab61722423a6b81f7b30f41..9ee014a8ad24a8ce99491aeb9e8a566d12f81a03 100644 (file)
@@ -43,6 +43,7 @@
 #define MLX5_IPSEC_METADATA_MARKER(metadata)  (((metadata) >> 31) & 0x1)
 #define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
 #define MLX5_IPSEC_METADATA_HANDLE(metadata)  ((metadata) & GENMASK(23, 0))
+#define MLX5_IPSEC_METADATA_CREATE(id, syndrome) ((id) | ((syndrome) << 24))
 
 struct mlx5e_accel_tx_ipsec_state {
        struct xfrm_offload *xo;
@@ -67,6 +68,7 @@ void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
                                       struct sk_buff *skb,
                                       u32 ipsec_meta_data);
+int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata);
 static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
 {
        return ipsec_st->tailen;
index 7df7a8b0a6a0999bb823b2e7465a179d62c56d28..0675587c1a79fda8b6b44dc8064f16b072c1fa24 100644 (file)
@@ -182,3 +182,72 @@ int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
 
        return 0;
 }
+
+int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
+                                         struct mlx5_flow_act *flow_act)
+{
+       u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+       struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+       struct mlx5_core_dev *mdev = ipsec->mdev;
+       struct mlx5_modify_hdr *modify_hdr;
+       u32 mapped_id;
+       int err;
+
+       err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id,
+                         xa_mk_value(sa_entry->ipsec_obj_id),
+                         XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0);
+       if (err)
+               return err;
+
+       /* reuse tunnel bits for ipsec,
+        * tun_id is always 0 and tun_opts is mapped to ipsec_obj_id.
+        */
+       MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+       MLX5_SET(set_action_in, action, field,
+                MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
+       MLX5_SET(set_action_in, action, offset, ESW_ZONE_ID_BITS);
+       MLX5_SET(set_action_in, action, length,
+                ESW_TUN_ID_BITS + ESW_TUN_OPTS_BITS);
+       MLX5_SET(set_action_in, action, data, mapped_id);
+
+       modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
+                                             1, action);
+       if (IS_ERR(modify_hdr)) {
+               err = PTR_ERR(modify_hdr);
+               goto err_header_alloc;
+       }
+
+       sa_entry->rx_mapped_id = mapped_id;
+       flow_act->modify_hdr = modify_hdr;
+       flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+       return 0;
+
+err_header_alloc:
+       xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id);
+       return err;
+}
+
+void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+       struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+
+       if (sa_entry->rx_mapped_id)
+               xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map,
+                           sa_entry->rx_mapped_id);
+}
+
+int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
+                                         u32 *ipsec_obj_id)
+{
+       struct mlx5e_ipsec *ipsec = priv->ipsec;
+       void *val;
+
+       val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id);
+       if (!val)
+               return -ENOENT;
+
+       *ipsec_obj_id = xa_to_value(val);
+
+       return 0;
+}
index 1d6104648d32b215b39cf6d99c036ab541f94b64..44df34032d1ee6ad792815d15d653d70dbe115c6 100644 (file)
@@ -5,6 +5,7 @@
 #define __MLX5_ESW_IPSEC_FS_H__
 
 struct mlx5e_ipsec;
+struct mlx5e_ipsec_sa_entry;
 
 #ifdef CONFIG_MLX5_ESWITCH
 void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
@@ -16,6 +17,11 @@ void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
                                       struct mlx5e_ipsec_rx_create_attr *attr);
 int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
                                           struct mlx5_flow_destination *dest);
+int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
+                                         struct mlx5_flow_act *flow_act);
+void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry);
+int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
+                                         u32 *ipsec_obj_id);
 #else
 static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
                                                    struct mlx5e_ipsec_rx *rx) {}
@@ -35,5 +41,19 @@ static inline int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ips
 {
        return -EINVAL;
 }
+
+static inline int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
+                                                       struct mlx5_flow_act *flow_act)
+{
+       return -EINVAL;
+}
+
+static inline void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry) {}
+
+static inline int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
+                                                       u32 *ipsec_obj_id)
+{
+       return -EINVAL;
+}
 #endif /* CONFIG_MLX5_ESWITCH */
 #endif /* __MLX5_ESW_IPSEC_FS_H__ */
index e2701ed0200e2dd52b364c7702b1bcdc82f81d3b..950d2431a53c88d2d7951594233dd1605fa43d76 100644 (file)
@@ -144,6 +144,9 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
        GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
                ESW_TUN_OPTS_OFFSET + 1)
 
+/* reuse tun_opts for the mapped ipsec obj id when tun_id is 0 (invalid) */
+#define ESW_IPSEC_RX_MAPPED_ID_MASK GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
+
 u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev);
 u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
 struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);