1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
4 #include <linux/mlx5/device.h>
5 #include <linux/mlx5/mlx5_ifc.h>
6 #include <linux/xarray.h>
7 #include <linux/if_vlan.h>
11 #include "lib/crypto.h"
12 #include "en_accel/macsec.h"
14 #define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
15 #define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)
/* --- Type declarations (extract is gap-sampled; most members elided) --- */

/* ASO event-arm bits reported/armed via the MACsec ASO object. */
17 enum mlx5_macsec_aso_event_arm {
18 MLX5E_ASO_EPN_ARM = BIT(0),
22 MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
25 struct mlx5e_macsec_handle {
26 struct mlx5e_macsec *macsec;
/* Result of an ASO query (event_arm bits, mode_parameter) — see macsec_aso_query(). */
35 struct mlx5e_macsec_aso_out {
/* Input descriptor for an ASO WQE (obj_id, mode) — see macsec_aso_build_ctrl(). */
40 struct mlx5e_macsec_aso_in {
/* Extended-packet-number tracking state (epn_enabled/epn_msb/overlap). */
45 struct mlx5e_macsec_epn_state {
/* Deferred work item carrying the macsec context and mdev to a workqueue. */
51 struct mlx5e_macsec_async_work {
52 struct mlx5e_macsec *macsec;
53 struct mlx5_core_dev *mdev;
54 struct work_struct work;
/* Per-SA (Tx or Rx) software state: HW object id, steering rule, EPN state. */
58 struct mlx5e_macsec_sa {
68 union mlx5_macsec_rule *macsec_rule;
69 struct rcu_head rcu_head;
70 struct mlx5e_macsec_epn_state epn_state;
73 struct mlx5e_macsec_rx_sc;
/* xarray payload mapping an fs_id to its Rx SC (fs_id is the xarray index). */
74 struct mlx5e_macsec_rx_sc_xarray_element {
76 struct mlx5e_macsec_rx_sc *rx_sc;
/* Rx secure channel: per-AN SAs, list linkage, fs_id mapping, metadata dst. */
79 struct mlx5e_macsec_rx_sc {
82 struct mlx5e_macsec_sa *rx_sa[MACSEC_NUM_AN];
83 struct list_head rx_sc_list_element;
84 struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
85 struct metadata_dst *md_dst;
86 struct rcu_head rcu_head;
/* DMA-able scratch buffer for ASO reads; 64-byte aligned for the device. */
89 struct mlx5e_macsec_umr {
90 u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
/* ASO channel state: the mlx5_aso SQ, a lock serializing WQE post/poll, and
 * the UMR scratch region the device writes query results into.
 */
95 struct mlx5e_macsec_aso {
97 struct mlx5_aso *maso;
98 /* Protects macsec ASO */
99 struct mutex aso_lock;
101 struct mlx5e_macsec_umr *umr;
/* Per-offloaded-netdev context: Tx SAs, Rx SC list, cached MAC address. */
106 struct mlx5e_macsec_device {
107 const struct net_device *netdev;
108 struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];
109 struct list_head macsec_rx_sc_list_head;
110 unsigned char *dev_addr;
111 struct list_head macsec_device_list_element;
/* Top-level MACsec offload state hung off the mlx5e priv. */
114 struct mlx5e_macsec {
115 struct list_head macsec_device_list_head;
117 struct mutex lock; /* Protects mlx5e_macsec internal contexts */
119 /* Rx fs_id -> rx_sc mapping */
120 struct xarray sc_xarray;
122 struct mlx5_core_dev *mdev;
125 struct mlx5e_macsec_aso aso;
127 struct notifier_block nb;
128 struct workqueue_struct *wq;
/* Attributes used to create/modify the firmware MACsec offload object. */
131 struct mlx5_macsec_obj_attrs {
137 struct mlx5e_macsec_epn_state epn_state;
/* Parameters for building an ASO WQE control segment. */
144 struct mlx5_aso_ctrl_param {
146 u8 condition_0_operand;
147 u8 condition_1_operand;
148 u8 condition_0_offset;
149 u8 condition_1_offset;
151 u8 condition_operand;
152 u32 condition_0_data;
153 u32 condition_0_mask;
154 u32 condition_1_data;
155 u32 condition_1_mask;
/* Allocate the ASO scratch buffer, DMA-map it bidirectionally and create an
 * mkey over it so the device can DMA MACsec ASO state in and out.
 * NOTE(review): this extract is gap-sampled — allocation checks, error
 * returns and the success return between the visible lines are elided.
 */
160 static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
162 struct mlx5e_macsec_umr *umr;
163 struct device *dma_device;
167 umr = kzalloc(sizeof(*umr), GFP_KERNEL);
173 dma_device = mlx5_core_dma_dev(mdev);
174 dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
175 err = dma_mapping_error(dma_device, dma_addr);
177 mlx5_core_err(mdev, "Can't map dma device, err=%d\n", err);
181 err = mlx5e_create_mkey(mdev, aso->pdn, &umr->mkey);
183 mlx5_core_err(mdev, "Can't create mkey, err=%d\n", err);
/* error path: unwind the DMA mapping when mkey creation fails */
187 umr->dma_addr = dma_addr;
194 dma_unmap_single(dma_device, dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
/* Tear down the ASO scratch region in reverse order of mlx5e_macsec_aso_reg_mr():
 * destroy the mkey, then unmap the DMA buffer (the kfree of umr is elided in
 * this extract — presumably follows; confirm against full source).
 */
200 static void mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
202 struct mlx5e_macsec_umr *umr = aso->umr;
204 mlx5_core_destroy_mkey(mdev, umr->mkey);
205 dma_unmap_single(&mdev->pdev->dev, umr->dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
/* Program anti-replay into the ASO context: map the requested replay_window
 * (in bits) to the HW window-size encoding and switch the ASO mode to
 * replay protection. Early-out when replay protection is disabled; an
 * unsupported window size presumably returns an error (default case elided).
 */
209 static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, void *aso_ctx)
213 if (!attrs->replay_protect)
216 switch (attrs->replay_window) {
218 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
221 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
224 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
227 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
232 MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
233 MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
/* Create the firmware MACsec offload general object from @attrs and return
 * its id in *macsec_obj_id. Programs confidentiality, DEK, the ASO context
 * (next PN, validity, SN-increment mode, optional replay protection) and,
 * for EPN, the MSB/overlap state plus SSCI and salt.
 */
238 static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
239 struct mlx5_macsec_obj_attrs *attrs,
243 u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {};
244 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
249 obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object);
250 aso_ctx = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso);
252 MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt);
253 MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id);
254 MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5);
255 MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn);
256 MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);
/* EPN mode: arm the EPN event and use SSCI (not SCI) in the sci field;
 * the 12-byte salt is copied one 32-bit word at a time in reversed word
 * order (bytes[8..11] first) to match the device layout.
 */
259 if (attrs->epn_state.epn_enabled) {
263 MLX5_SET(macsec_aso, aso_ctx, epn_event_arm, 1);
264 MLX5_SET(macsec_offload_obj, obj, epn_en, 1);
265 MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
266 MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
267 MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)attrs->ssci);
268 salt_p = MLX5_ADDR_OF(macsec_offload_obj, obj, salt);
269 for (i = 0; i < 3 ; i++)
270 memcpy((u32 *)salt_p + i, &attrs->salt.bytes[4 * (2 - i)], 4);
272 MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
275 MLX5_SET(macsec_aso, aso_ctx, valid, 0x1);
277 MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN);
279 err = macsec_set_replay_protection(attrs, aso_ctx);
284 /* general object fields set */
285 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
286 MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
288 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
291 "MACsec offload: Failed to create MACsec object (err = %d)\n",
296 *macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
/* Destroy a previously created MACsec general object by id. Best-effort:
 * the command status is not checked (void return, destroy path).
 */
301 static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_obj_id)
303 u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
304 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
306 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
307 MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
308 MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_obj_id);
310 mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
/* Undo mlx5e_macsec_init_sa(): remove the steering rule (direction chosen by
 * @is_tx) and destroy the HW object, then clear macsec_rule so the SA reads
 * as "not offloaded". No-op when no rule was ever installed.
 */
313 static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
314 struct mlx5e_macsec_sa *sa,
315 bool is_tx, struct net_device *netdev, u32 fs_id)
317 int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
318 MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
320 if (!sa->macsec_rule)
323 mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev,
325 mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
326 sa->macsec_rule = NULL;
/* Bring an SA into hardware: create the MACsec object from the SA's software
 * state, then install the matching steering rule. On rule failure the object
 * is destroyed (goto unwind). @fs_id is NULL for Tx; for Rx it receives the
 * flow-steering id used to map traffic back to the Rx SC.
 */
329 static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
330 struct mlx5e_macsec_sa *sa,
331 bool encrypt, bool is_tx, u32 *fs_id)
333 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
334 struct mlx5e_macsec *macsec = priv->macsec;
335 struct mlx5_macsec_rule_attrs rule_attrs;
336 struct mlx5_core_dev *mdev = priv->mdev;
337 struct mlx5_macsec_obj_attrs obj_attrs;
338 union mlx5_macsec_rule *macsec_rule;
341 obj_attrs.next_pn = sa->next_pn;
342 obj_attrs.sci = cpu_to_be64((__force u64)sa->sci);
343 obj_attrs.enc_key_id = sa->enc_key_id;
344 obj_attrs.encrypt = encrypt;
345 obj_attrs.aso_pdn = macsec->aso.pdn;
346 obj_attrs.epn_state = sa->epn_state;
/* EPN uses SSCI + salt instead of the plain SCI */
348 if (sa->epn_state.epn_enabled) {
349 obj_attrs.ssci = cpu_to_be32((__force u32)sa->ssci);
350 memcpy(&obj_attrs.salt, &sa->salt, sizeof(sa->salt));
353 obj_attrs.replay_window = ctx->secy->replay_window;
354 obj_attrs.replay_protect = ctx->secy->replay_protect;
356 err = mlx5e_macsec_create_object(mdev, &obj_attrs, is_tx, &sa->macsec_obj_id);
360 rule_attrs.macsec_obj_id = sa->macsec_obj_id;
361 rule_attrs.sci = sa->sci;
362 rule_attrs.assoc_num = sa->assoc_num;
363 rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
364 MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
366 macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id);
369 goto destroy_macsec_object;
372 sa->macsec_rule = macsec_rule;
/* unwind: rule installation failed, drop the freshly created HW object */
376 destroy_macsec_object:
377 mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);
/* Linear RCU-safe lookup of an Rx SC by SCI in the device's SC list.
 * Returns the matching SC, or NULL when none matches (NULL return elided
 * in this extract).
 */
382 static struct mlx5e_macsec_rx_sc *
383 mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
385 struct mlx5e_macsec_rx_sc *iter;
387 list_for_each_entry_rcu(iter, list, rx_sc_list_element) {
388 if (iter->sci == sci)
/* Flip an Rx SA's active state and mirror the change in hardware: tearing
 * down the offload when deactivating, re-initializing it when activating.
 * Early-out when the state is unchanged; on init failure the SA is rolled
 * back to inactive.
 */
395 static int macsec_rx_sa_active_update(struct macsec_context *ctx,
396 struct mlx5e_macsec_sa *rx_sa,
397 bool active, u32 *fs_id)
399 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
400 struct mlx5e_macsec *macsec = priv->macsec;
403 if (rx_sa->active == active)
406 rx_sa->active = active;
408 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, *fs_id);
412 err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, fs_id);
414 rx_sa->active = false;
/* Gatekeeper for offloadability of a SecY. The HW path only supports:
 * strict frame validation, the default ICV length, protect_frames on,
 * and encryption enabled on the Tx SC. Returns false (with a netdev_err)
 * on any unsupported configuration.
 */
419 static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
421 const struct net_device *netdev = ctx->netdev;
422 const struct macsec_secy *secy = ctx->secy;
424 if (secy->validate_frames != MACSEC_VALIDATE_STRICT) {
426 "MACsec offload is supported only when validate_frame is in strict mode\n");
430 if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) {
431 netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n",
432 MACSEC_DEFAULT_ICV_LEN);
436 if (!secy->protect_frames) {
438 "MACsec offload is supported only when protect_frames is set\n");
442 if (!ctx->secy->tx_sc.encrypt) {
443 netdev_err(netdev, "MACsec offload: encrypt off isn't supported\n");
/* Find the per-netdev MACsec device context matching ctx->secy->netdev.
 * RCU list walk; callers hold macsec->lock. Returns NULL when the device
 * was never added (NULL return elided in this extract).
 */
450 static struct mlx5e_macsec_device *
451 mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
452 const struct macsec_context *ctx)
454 struct mlx5e_macsec_device *iter;
455 const struct list_head *list;
457 list = &macsec->macsec_device_list_head;
458 list_for_each_entry_rcu(iter, list, macsec_device_list_element) {
459 if (iter->netdev == ctx->secy->netdev)
/* Record EPN state on the SA: salt from the key, the upper PN half as
 * epn_msb, and the overlap flag set once the lower half crosses the
 * mid-scope threshold (MLX5_MACSEC_EPN_SCOPE_MID).
 */
466 static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
467 const pn_t *next_pn_halves, ssci_t ssci)
469 struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state;
472 sa->salt = key->salt;
473 epn_state->epn_enabled = 1;
474 epn_state->epn_msb = next_pn_halves->upper;
475 epn_state->overlap = next_pn_halves->lower < MLX5_MACSEC_EPN_SCOPE_MID ? 0 : 1;
/* macsec_ops.mdo_add_txsa: allocate SW state for a new Tx SA, create its
 * encryption key (DEK), and offload immediately only when this SA is the
 * operational encoding SA; otherwise offload is deferred. Runs under
 * macsec->lock; errors unwind key and SA slot.
 */
478 static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
480 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
481 const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
482 const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
483 const struct macsec_secy *secy = ctx->secy;
484 struct mlx5e_macsec_device *macsec_device;
485 struct mlx5_core_dev *mdev = priv->mdev;
486 u8 assoc_num = ctx->sa.assoc_num;
487 struct mlx5e_macsec_sa *tx_sa;
488 struct mlx5e_macsec *macsec;
491 mutex_lock(&priv->macsec->lock);
493 macsec = priv->macsec;
494 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
495 if (!macsec_device) {
496 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
/* reject duplicate SA for this association number */
501 if (macsec_device->tx_sa[assoc_num]) {
502 netdev_err(ctx->netdev, "MACsec offload tx_sa: %d already exist\n", assoc_num);
507 tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
513 tx_sa->active = ctx_tx_sa->active;
514 tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower;
515 tx_sa->sci = secy->sci;
516 tx_sa->assoc_num = assoc_num;
/* EPN-enabled SecY: capture salt/epn_msb/overlap (condition elided) */
519 update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves,
522 err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
523 MLX5_ACCEL_OBJ_MACSEC_KEY,
528 macsec_device->tx_sa[assoc_num] = tx_sa;
/* defer HW offload unless SecY is operational and this SA is the
 * active encoding SA (remaining conditions elided in this extract)
 */
529 if (!secy->operational ||
530 assoc_num != tx_sc->encoding_sa ||
534 err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
536 goto destroy_encryption_key;
538 mutex_unlock(&macsec->lock);
/* unwind: drop DEK and clear the SA slot on offload failure */
542 destroy_encryption_key:
543 macsec_device->tx_sa[assoc_num] = NULL;
544 mlx5_destroy_encryption_key(mdev, tx_sa->enc_key_id);
548 mutex_unlock(&macsec->lock);
/* macsec_ops.mdo_upd_txsa: only the active flag may change — PN rewrites
 * are rejected. When this SA is the current encoding SA, activation
 * installs the offload and deactivation removes it.
 */
553 static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
555 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
556 const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
557 const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
558 struct mlx5e_macsec_device *macsec_device;
559 u8 assoc_num = ctx->sa.assoc_num;
560 struct mlx5e_macsec_sa *tx_sa;
561 struct mlx5e_macsec *macsec;
562 struct net_device *netdev;
565 mutex_lock(&priv->macsec->lock);
567 macsec = priv->macsec;
568 netdev = ctx->netdev;
569 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
570 if (!macsec_device) {
571 netdev_err(netdev, "MACsec offload: Failed to find device context\n");
576 tx_sa = macsec_device->tx_sa[assoc_num];
578 netdev_err(netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
/* HW object PN can't be rewritten in place — reject PN updates */
583 if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
584 netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
590 if (tx_sa->active == ctx_tx_sa->active)
593 tx_sa->active = ctx_tx_sa->active;
/* non-encoding SAs carry no HW state; nothing more to do */
594 if (tx_sa->assoc_num != tx_sc->encoding_sa)
597 if (ctx_tx_sa->active) {
598 err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
/* deactivation of an SA that never got a rule is an error (elided check) */
602 if (!tx_sa->macsec_rule) {
607 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
610 mutex_unlock(&macsec->lock);
/* macsec_ops.mdo_del_txsa: tear down the Tx SA's offload, destroy its DEK,
 * free the SA via RCU grace period (kfree_rcu_mightsleep) and clear the slot.
 */
615 static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
617 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
618 struct mlx5e_macsec_device *macsec_device;
619 u8 assoc_num = ctx->sa.assoc_num;
620 struct mlx5e_macsec_sa *tx_sa;
621 struct mlx5e_macsec *macsec;
624 mutex_lock(&priv->macsec->lock);
625 macsec = priv->macsec;
626 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
627 if (!macsec_device) {
628 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
633 tx_sa = macsec_device->tx_sa[assoc_num];
635 netdev_err(ctx->netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
640 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
641 mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
642 kfree_rcu_mightsleep(tx_sa);
643 macsec_device->tx_sa[assoc_num] = NULL;
646 mutex_unlock(&macsec->lock);
/* macsec_ops.mdo_add_rxsc: create SW state for a new Rx SC. Allocates an
 * fs_id from the sc_xarray (bounded by MLX5_MACEC_RX_FS_ID_MAX, 0 reserved)
 * so the datapath can map steering metadata back to this SC, allocates the
 * MACsec metadata_dst carrying the SCI, and publishes the SC on the RCU list.
 * Error paths unwind in reverse order via the labeled gotos.
 */
651 static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
653 struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
654 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
655 const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
656 struct mlx5e_macsec_device *macsec_device;
657 struct mlx5e_macsec_rx_sc *rx_sc;
658 struct list_head *rx_sc_list;
659 struct mlx5e_macsec *macsec;
662 mutex_lock(&priv->macsec->lock);
663 macsec = priv->macsec;
664 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
665 if (!macsec_device) {
666 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
671 rx_sc_list = &macsec_device->macsec_rx_sc_list_head;
672 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(rx_sc_list, ctx_rx_sc->sci);
674 netdev_err(ctx->netdev, "MACsec offload: rx_sc (sci %lld) already exists\n",
680 rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
686 sc_xarray_element = kzalloc(sizeof(*sc_xarray_element), GFP_KERNEL);
687 if (!sc_xarray_element) {
692 sc_xarray_element->rx_sc = rx_sc;
693 err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
694 XA_LIMIT(1, MLX5_MACEC_RX_FS_ID_MAX), GFP_KERNEL);
697 netdev_err(ctx->netdev,
698 "MACsec offload: unable to create entry for RX SC (%d Rx SCs already allocated)\n",
699 MLX5_MACEC_RX_FS_ID_MAX);
700 goto destroy_sc_xarray_elemenet;
703 rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
704 if (!rx_sc->md_dst) {
709 rx_sc->sci = ctx_rx_sc->sci;
710 rx_sc->active = ctx_rx_sc->active;
711 list_add_rcu(&rx_sc->rx_sc_list_element, rx_sc_list);
713 rx_sc->sc_xarray_element = sc_xarray_element;
714 rx_sc->md_dst->u.macsec_info.sci = rx_sc->sci;
715 mutex_unlock(&macsec->lock);
/* unwind labels (md_dst failure path releases the xarray slot first) */
720 xa_erase(&macsec->sc_xarray, sc_xarray_element->fs_id);
721 destroy_sc_xarray_elemenet:
722 kfree(sc_xarray_element);
727 mutex_unlock(&macsec->lock);
/* macsec_ops.mdo_upd_rxsc: propagate a change of the SC's active flag to
 * every SA under it — an SA is offloaded only while both the SA and its SC
 * are active (rx_sa->active && ctx_rx_sc->active).
 */
732 static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
734 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
735 const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
736 struct mlx5e_macsec_device *macsec_device;
737 struct mlx5e_macsec_rx_sc *rx_sc;
738 struct mlx5e_macsec_sa *rx_sa;
739 struct mlx5e_macsec *macsec;
740 struct list_head *list;
744 mutex_lock(&priv->macsec->lock);
746 macsec = priv->macsec;
747 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
748 if (!macsec_device) {
749 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
754 list = &macsec_device->macsec_rx_sc_list_head;
755 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx_rx_sc->sci);
761 if (rx_sc->active == ctx_rx_sc->active)
764 rx_sc->active = ctx_rx_sc->active;
765 for (i = 0; i < MACSEC_NUM_AN; ++i) {
766 rx_sa = rx_sc->rx_sa[i];
770 err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active,
771 &rx_sc->sc_xarray_element->fs_id);
777 mutex_unlock(&macsec->lock);
/* Free an entire Rx SC: clean up and key-destroy every SA, unlink the SC
 * from the RCU list, erase its fs_id mapping (xa_erase synchronizes with
 * the RCU datapath), free the metadata_dst, and RCU-free the SC itself.
 * Caller holds macsec->lock.
 */
782 static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc,
783 struct net_device *netdev)
785 struct mlx5e_macsec_sa *rx_sa;
788 for (i = 0; i < MACSEC_NUM_AN; ++i) {
789 rx_sa = rx_sc->rx_sa[i];
793 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, netdev,
794 rx_sc->sc_xarray_element->fs_id);
795 mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
798 rx_sc->rx_sa[i] = NULL;
801 /* At this point the relevant MACsec offload Rx rule already removed at
802 * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current
803 * Rx related data propagating using xa_erase which uses rcu to sync,
804 * once fs_id is erased then this rx_sc is hidden from datapath.
806 list_del_rcu(&rx_sc->rx_sc_list_element);
807 xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
808 metadata_dst_free(rx_sc->md_dst);
809 kfree(rx_sc->sc_xarray_element);
810 kfree_rcu_mightsleep(rx_sc);
/* macsec_ops.mdo_del_rxsc: look up the SC by ctx->rx_sc->sci and delegate
 * the full teardown to macsec_del_rxsc_ctx().
 */
813 static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
815 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
816 struct mlx5e_macsec_device *macsec_device;
817 struct mlx5e_macsec_rx_sc *rx_sc;
818 struct mlx5e_macsec *macsec;
819 struct list_head *list;
822 mutex_lock(&priv->macsec->lock);
824 macsec = priv->macsec;
825 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
826 if (!macsec_device) {
827 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
832 list = &macsec_device->macsec_rx_sc_list_head;
833 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx->rx_sc->sci);
835 netdev_err(ctx->netdev,
836 "MACsec offload rx_sc sci %lld doesn't exist\n",
837 ctx->sa.rx_sa->sc->sci);
842 macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev);
844 mutex_unlock(&macsec->lock);
/* macsec_ops.mdo_add_rxsa: allocate SW state for a new Rx SA under an
 * existing SC, create its DEK, and offload it right away when active
 * (activation condition partially elided). Unwind drops DEK and slot.
 */
849 static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
851 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
852 const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
853 struct mlx5e_macsec_device *macsec_device;
854 struct mlx5_core_dev *mdev = priv->mdev;
855 u8 assoc_num = ctx->sa.assoc_num;
856 struct mlx5e_macsec_rx_sc *rx_sc;
857 sci_t sci = ctx_rx_sa->sc->sci;
858 struct mlx5e_macsec_sa *rx_sa;
859 struct mlx5e_macsec *macsec;
860 struct list_head *list;
863 mutex_lock(&priv->macsec->lock);
865 macsec = priv->macsec;
866 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
867 if (!macsec_device) {
868 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
873 list = &macsec_device->macsec_rx_sc_list_head;
874 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
876 netdev_err(ctx->netdev,
877 "MACsec offload rx_sc sci %lld doesn't exist\n",
878 ctx->sa.rx_sa->sc->sci);
883 if (rx_sc->rx_sa[assoc_num]) {
884 netdev_err(ctx->netdev,
885 "MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
891 rx_sa = kzalloc(sizeof(*rx_sa), GFP_KERNEL);
897 rx_sa->active = ctx_rx_sa->active;
898 rx_sa->next_pn = ctx_rx_sa->next_pn;
900 rx_sa->assoc_num = assoc_num;
/* EPN-enabled SecY: capture salt/epn_msb/overlap (condition elided) */
903 update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves,
906 err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
907 MLX5_ACCEL_OBJ_MACSEC_KEY,
912 rx_sc->rx_sa[assoc_num] = rx_sa;
916 //TODO - add support for both authentication and encryption flows
917 err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, &rx_sc->sc_xarray_element->fs_id);
919 goto destroy_encryption_key;
/* unwind: clear the slot before destroying the DEK */
923 destroy_encryption_key:
924 rx_sc->rx_sa[assoc_num] = NULL;
925 mlx5_destroy_encryption_key(mdev, rx_sa->enc_key_id);
929 mutex_unlock(&macsec->lock);
/* macsec_ops.mdo_upd_rxsa: like the Tx variant, only the active flag may
 * change (PN rewrite rejected); the actual HW toggle is delegated to
 * macsec_rx_sa_active_update().
 */
934 static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
936 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
937 const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
938 struct mlx5e_macsec_device *macsec_device;
939 u8 assoc_num = ctx->sa.assoc_num;
940 struct mlx5e_macsec_rx_sc *rx_sc;
941 sci_t sci = ctx_rx_sa->sc->sci;
942 struct mlx5e_macsec_sa *rx_sa;
943 struct mlx5e_macsec *macsec;
944 struct list_head *list;
947 mutex_lock(&priv->macsec->lock);
949 macsec = priv->macsec;
950 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
951 if (!macsec_device) {
952 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
957 list = &macsec_device->macsec_rx_sc_list_head;
958 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
960 netdev_err(ctx->netdev,
961 "MACsec offload rx_sc sci %lld doesn't exist\n",
962 ctx->sa.rx_sa->sc->sci);
967 rx_sa = rx_sc->rx_sa[assoc_num];
969 netdev_err(ctx->netdev,
970 "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
976 if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
977 netdev_err(ctx->netdev,
978 "MACsec offload update RX sa %d PN isn't supported\n",
984 err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active,
985 &rx_sc->sc_xarray_element->fs_id);
987 mutex_unlock(&macsec->lock);
/* macsec_ops.mdo_del_rxsa: tear down one Rx SA — remove the offload rule
 * and HW object, destroy the DEK, then clear the slot (RCU free of the SA
 * itself is elided in this extract).
 */
992 static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
994 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
995 struct mlx5e_macsec_device *macsec_device;
996 sci_t sci = ctx->sa.rx_sa->sc->sci;
997 struct mlx5e_macsec_rx_sc *rx_sc;
998 u8 assoc_num = ctx->sa.assoc_num;
999 struct mlx5e_macsec_sa *rx_sa;
1000 struct mlx5e_macsec *macsec;
1001 struct list_head *list;
1004 mutex_lock(&priv->macsec->lock);
1006 macsec = priv->macsec;
1007 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1008 if (!macsec_device) {
1009 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1014 list = &macsec_device->macsec_rx_sc_list_head;
1015 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
1017 netdev_err(ctx->netdev,
1018 "MACsec offload rx_sc sci %lld doesn't exist\n",
1019 ctx->sa.rx_sa->sc->sci);
1024 rx_sa = rx_sc->rx_sa[assoc_num];
1026 netdev_err(ctx->netdev,
1027 "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
1033 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
1034 rx_sc->sc_xarray_element->fs_id);
1035 mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
1037 rx_sc->rx_sa[assoc_num] = NULL;
1040 mutex_unlock(&macsec->lock);
/* macsec_ops.mdo_add_secy: register a new MACsec netdev with the offload.
 * Validates HW-supportable SecY features, rejects duplicates and enforces
 * the device-count cap, then allocates a per-device context (caching the
 * MAC address for later change detection) and publishes it on the RCU list.
 */
1045 static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
1047 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
1048 const struct net_device *dev = ctx->secy->netdev;
1049 const struct net_device *netdev = ctx->netdev;
1050 struct mlx5e_macsec_device *macsec_device;
1051 struct mlx5e_macsec *macsec;
1054 if (!mlx5e_macsec_secy_features_validate(ctx))
1057 mutex_lock(&priv->macsec->lock);
1058 macsec = priv->macsec;
1059 if (mlx5e_macsec_get_macsec_device_context(macsec, ctx)) {
1060 netdev_err(netdev, "MACsec offload: MACsec net_device already exist\n");
1064 if (macsec->num_of_devices >= MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES) {
1065 netdev_err(netdev, "Currently, only %d MACsec offload devices can be set\n",
1066 MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES);
1071 macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
1072 if (!macsec_device) {
/* cache the MAC address so upd_secy can tell address changes apart */
1077 macsec_device->dev_addr = kmemdup(dev->dev_addr, dev->addr_len, GFP_KERNEL);
1078 if (!macsec_device->dev_addr) {
1079 kfree(macsec_device);
1084 macsec_device->netdev = dev;
1086 INIT_LIST_HEAD_RCU(&macsec_device->macsec_rx_sc_list_head);
1087 list_add_rcu(&macsec_device->macsec_device_list_element, &macsec->macsec_device_list_head);
1089 ++macsec->num_of_devices;
1091 mutex_unlock(&macsec->lock);
/* Handle a MAC-address change on an offloaded SecY: first remove every
 * installed Rx rule, then re-create the rules for all still-active SAs
 * (two passes so no stale rule matches the old address), and finally
 * refresh the cached dev_addr.
 */
1096 static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
1097 struct mlx5e_macsec_device *macsec_device)
1099 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
1100 const struct net_device *dev = ctx->secy->netdev;
1101 struct mlx5e_macsec *macsec = priv->macsec;
1102 struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
1103 struct mlx5e_macsec_sa *rx_sa;
1104 struct list_head *list;
/* pass 1: tear down all currently offloaded Rx SAs */
1108 list = &macsec_device->macsec_rx_sc_list_head;
1109 list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
1110 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1111 rx_sa = rx_sc->rx_sa[i];
1112 if (!rx_sa || !rx_sa->macsec_rule)
1115 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
1116 rx_sc->sc_xarray_element->fs_id);
/* pass 2: re-install offload for every SA that is still active */
1120 list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
1121 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1122 rx_sa = rx_sc->rx_sa[i];
1126 if (rx_sa->active) {
1127 err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false,
1128 &rx_sc->sc_xarray_element->fs_id);
1135 memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len);
1140 /* this function is called from 2 macsec ops functions:
1141 * macsec_set_mac_address – MAC address was changed, therefore we need to destroy
1142 * and create new Tx contexts(macsec object + steering).
1143 * macsec_changelink – in this case the tx SC or SecY may be changed, therefore need to
1144 * destroy Tx and Rx contexts(macsec object + steering)
/* macsec_ops.mdo_upd_secy. Note the dev_addr comparison below: a *matching*
 * cached address means the MAC did not change, i.e. this call came via
 * macsec_changelink, and the Rx side is rebuilt via
 * macsec_upd_secy_hw_address() before the Tx SAs are recycled.
 */
1146 static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
1148 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
1149 const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
1150 const struct net_device *dev = ctx->secy->netdev;
1151 struct mlx5e_macsec_device *macsec_device;
1152 struct mlx5e_macsec_sa *tx_sa;
1153 struct mlx5e_macsec *macsec;
1156 if (!mlx5e_macsec_secy_features_validate(ctx))
1159 mutex_lock(&priv->macsec->lock);
1161 macsec = priv->macsec;
1162 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1163 if (!macsec_device) {
1164 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1169 /* if the dev_addr hasn't change, it mean the callback is from macsec_changelink */
1170 if (!memcmp(macsec_device->dev_addr, dev->dev_addr, dev->addr_len)) {
1171 err = macsec_upd_secy_hw_address(ctx, macsec_device);
/* pass 1: drop all Tx offload state */
1176 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1177 tx_sa = macsec_device->tx_sa[i];
1181 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
/* pass 2: re-offload only the active encoding SA */
1184 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1185 tx_sa = macsec_device->tx_sa[i];
1189 if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
1190 err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
1197 mutex_unlock(&macsec->lock);
/* macsec_ops.mdo_del_secy: full teardown of an offloaded netdev — every Tx
 * SA (rule, object, DEK), every Rx SC via macsec_del_rxsc_ctx(), the cached
 * MAC address, and finally the device context itself.
 */
1202 static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
1204 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
1205 struct mlx5e_macsec_device *macsec_device;
1206 struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
1207 struct mlx5e_macsec_sa *tx_sa;
1208 struct mlx5e_macsec *macsec;
1209 struct list_head *list;
1213 mutex_lock(&priv->macsec->lock);
1214 macsec = priv->macsec;
1215 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
1216 if (!macsec_device) {
1217 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
1223 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1224 tx_sa = macsec_device->tx_sa[i];
1228 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
1229 mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
1231 macsec_device->tx_sa[i] = NULL;
1234 list = &macsec_device->macsec_rx_sc_list_head;
1235 list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element)
1236 macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev);
1238 kfree(macsec_device->dev_addr);
1239 macsec_device->dev_addr = NULL;
1241 list_del_rcu(&macsec_device->macsec_device_list_element);
1242 --macsec->num_of_devices;
1243 kfree(macsec_device);
1246 mutex_unlock(&macsec->lock);
/* Copy the SA's EPN tracking state into the object attributes used by
 * mlx5e_macsec_modify_obj().
 */
1251 static void macsec_build_accel_attrs(struct mlx5e_macsec_sa *sa,
1252 struct mlx5_macsec_obj_attrs *attrs)
1254 attrs->epn_state.epn_msb = sa->epn_state.epn_msb;
1255 attrs->epn_state.overlap = sa->epn_state.overlap;
/* Fill an ASO WQE control segment. Always points the segment at the UMR
 * scratch buffer (with READ enabled) so the device lands its response
 * there; the condition/data fields are populated only when @param is
 * given (a NULL param yields a plain read/query — see macsec_aso_query()).
 */
1258 static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
1259 struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
1260 struct mlx5_aso_ctrl_param *param)
1262 struct mlx5e_macsec_umr *umr = macsec_aso->umr;
1264 memset(aso_ctrl, 0, sizeof(*aso_ctrl));
/* low address word also carries the READ_EN flag in its low bits;
 * NOTE(review): assumes dma_addr's low bits are clear thanks to the
 * 64-byte-aligned ctx buffer — confirm against full source.
 */
1265 aso_ctrl->va_l = cpu_to_be32(umr->dma_addr | ASO_CTRL_READ_EN);
1266 aso_ctrl->va_h = cpu_to_be32((u64)umr->dma_addr >> 32);
1267 aso_ctrl->l_key = cpu_to_be32(umr->mkey);
/* packed bitfields: operands/offsets share bytes, hence the shifts */
1272 aso_ctrl->data_mask_mode = param->data_mask_mode << 6;
1273 aso_ctrl->condition_1_0_operand = param->condition_1_operand |
1274 param->condition_0_operand << 4;
1275 aso_ctrl->condition_1_0_offset = param->condition_1_offset |
1276 param->condition_0_offset << 4;
1277 aso_ctrl->data_offset_condition_operand = param->data_offset |
1278 param->condition_operand << 6;
1279 aso_ctrl->condition_0_data = cpu_to_be32(param->condition_0_data);
1280 aso_ctrl->condition_0_mask = cpu_to_be32(param->condition_0_mask);
1281 aso_ctrl->condition_1_data = cpu_to_be32(param->condition_1_data);
1282 aso_ctrl->condition_1_mask = cpu_to_be32(param->condition_1_mask);
1283 aso_ctrl->bitwise_data = cpu_to_be64(param->bitwise_data);
1284 aso_ctrl->data_mask = cpu_to_be64(param->data_mask);
/* Update the EPN MSB/overlap fields of an existing MACsec object.
 * Query-first: reads the object's modify_field_select mask and bails out
 * (debug log) unless firmware reports both EPN fields as modifiable, then
 * issues MODIFY_GENERAL_OBJECT with only those two fields selected.
 */
1287 static int mlx5e_macsec_modify_obj(struct mlx5_core_dev *mdev, struct mlx5_macsec_obj_attrs *attrs,
1290 u32 in[MLX5_ST_SZ_DW(modify_macsec_obj_in)] = {};
1291 u32 out[MLX5_ST_SZ_DW(query_macsec_obj_out)];
1292 u64 modify_field_select = 0;
1296 /* General object fields set */
1297 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
1298 MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
1299 MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_id);
1300 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
1302 mlx5_core_err(mdev, "Query MACsec object failed (Object id %d), err = %d\n",
1307 obj = MLX5_ADDR_OF(query_macsec_obj_out, out, macsec_object);
1308 modify_field_select = MLX5_GET64(macsec_offload_obj, obj, modify_field_select);
1311 if (!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP) ||
1312 !(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB)) {
1313 mlx5_core_dbg(mdev, "MACsec object field is not modifiable (Object id %d)\n",
/* reuse `in`: overwrite the query command with the modify payload */
1318 obj = MLX5_ADDR_OF(modify_macsec_obj_in, in, macsec_object);
1319 MLX5_SET64(macsec_offload_obj, obj, modify_field_select,
1320 MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP | MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB);
1321 MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
1322 MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
1324 /* General object fields set */
1325 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
1327 return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
/* Build ASO control parameters for an arm/set operation and hand off to
 * macsec_aso_build_wqe_ctrl_seg(). For EPN mode, targets the
 * remove-flow/packet-count data offset and sets bit 54 (64-bit bitwise
 * mask mode) — presumably the epn_event_arm bit in the ASO layout;
 * confirm against the macsec_aso PRM definition.
 */
1330 static void macsec_aso_build_ctrl(struct mlx5e_macsec_aso *aso,
1331 struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
1332 struct mlx5e_macsec_aso_in *in)
1334 struct mlx5_aso_ctrl_param param = {};
1336 param.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT;
1337 param.condition_0_operand = MLX5_ASO_ALWAYS_TRUE;
1338 param.condition_1_operand = MLX5_ASO_ALWAYS_TRUE;
1339 if (in->mode == MLX5_MACSEC_EPN) {
1340 param.data_offset = MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
1341 param.bitwise_data = BIT_ULL(54);
1342 param.data_mask = param.bitwise_data;
1344 macsec_aso_build_wqe_ctrl_seg(aso, aso_ctrl, &param);
/* Post a MACsec ASO WQE that re-arms the EPN event for object @in->obj_id.
 *
 * Serializes on aso->aso_lock (one outstanding ASO WQE at a time), builds
 * the control segment via macsec_aso_build_ctrl(), posts without doorbell
 * batching and synchronously polls the CQ for completion.
 * Returns the mlx5_aso_poll_cq() result (0 on success).
 *
 * NOTE(review): the lines initializing aso/maso from @macsec and the final
 * return are elided in this excerpt; leading numbers are extraction artifacts.
 */
1347 static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
1348 				    struct mlx5e_macsec_aso_in *in)
1350 	struct mlx5e_macsec_aso *aso;
1351 	struct mlx5_aso_wqe *aso_wqe;
1352 	struct mlx5_aso *maso;
1358 	mutex_lock(&aso->aso_lock);
1359 	aso_wqe = mlx5_aso_get_wqe(maso);
1360 	mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
1361 			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
1362 	macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
1363 	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
1364 	err = mlx5_aso_poll_cq(maso, false);
1365 	mutex_unlock(&aso->aso_lock);
/* Query the MACsec ASO context of object @in->obj_id into @out.
 *
 * Posts an ASO WQE with a NULL ctrl param (plain read — see
 * macsec_aso_build_wqe_ctrl_seg(aso, ..., NULL)), then busy-polls the CQ
 * for up to 10ms, sleeping 2-10us between attempts.  On completion the
 * device has DMA'd the ASO context into the UMR-registered buffer
 * (aso->umr->ctx), from which the EPN arm flag and mode_parameter are
 * parsed into @out.
 *
 * NOTE(review): error handling after the poll loop is elided in this
 * excerpt; leading numbers are extraction artifacts.
 */
1370 static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
1371 			    struct mlx5e_macsec_aso_in *in, struct mlx5e_macsec_aso_out *out)
1373 	struct mlx5e_macsec_aso *aso;
1374 	struct mlx5_aso_wqe *aso_wqe;
1375 	struct mlx5_aso *maso;
1376 	unsigned long expires;
1382 	mutex_lock(&aso->aso_lock);
1384 	aso_wqe = mlx5_aso_get_wqe(maso);
1385 	mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
1386 			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
	/* NULL param => read-only access of the ASO context */
1387 	macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
1389 	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
1390 	expires = jiffies + msecs_to_jiffies(10);
1392 		err = mlx5_aso_poll_cq(maso, false);
1394 		usleep_range(2, 10);
1395 	} while (err && time_is_after_jiffies(expires));
	/* Parse the context the device wrote into the UMR buffer */
1400 	if (MLX5_GET(macsec_aso, aso->umr->ctx, epn_event_arm))
1401 		out->event_arm |= MLX5E_ASO_EPN_ARM;
1403 	out->mode_param = MLX5_GET(macsec_aso, aso->umr->ctx, mode_parameter);
1406 	mutex_unlock(&aso->aso_lock);
/* Find the active Tx SA whose HW object id matches @obj_id.
 *
 * Walks every MACsec device attached to this mlx5e instance and scans its
 * MACSEC_NUM_AN Tx SA slots; inactive or unallocated slots are skipped.
 * Returns the matching SA, or NULL if none (return statements elided in
 * this excerpt).  Caller must hold macsec->lock.
 */
1410 static struct mlx5e_macsec_sa *get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1413 	const struct list_head *device_list;
1414 	struct mlx5e_macsec_sa *macsec_sa;
1415 	struct mlx5e_macsec_device *iter;
1418 	device_list = &macsec->macsec_device_list_head;
1420 	list_for_each_entry(iter, device_list, macsec_device_list_element) {
1421 		for (i = 0; i < MACSEC_NUM_AN; ++i) {
1422 			macsec_sa = iter->tx_sa[i];
1423 			if (!macsec_sa || !macsec_sa->active)
1425 			if (macsec_sa->macsec_obj_id == obj_id)
/* Find the active Rx SA whose HW object id matches @obj_id.
 *
 * Same scheme as get_macsec_tx_sa_from_obj_id() with one extra level:
 * each device owns a list of Rx SCs, and each SC owns MACSEC_NUM_AN Rx SA
 * slots.  Returns the matching SA, or NULL if none (return statements
 * elided in this excerpt).  Caller must hold macsec->lock.
 */
1433 static struct mlx5e_macsec_sa *get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1436 	const struct list_head *device_list, *sc_list;
1437 	struct mlx5e_macsec_rx_sc *mlx5e_rx_sc;
1438 	struct mlx5e_macsec_sa *macsec_sa;
1439 	struct mlx5e_macsec_device *iter;
1442 	device_list = &macsec->macsec_device_list_head;
1444 	list_for_each_entry(iter, device_list, macsec_device_list_element) {
1445 		sc_list = &iter->macsec_rx_sc_list_head;
1446 		list_for_each_entry(mlx5e_rx_sc, sc_list, rx_sc_list_element) {
1447 			for (i = 0; i < MACSEC_NUM_AN; ++i) {
1448 				macsec_sa = mlx5e_rx_sc->rx_sa[i];
1449 				if (!macsec_sa || !macsec_sa->active)
1451 				if (macsec_sa->macsec_obj_id == obj_id)
/* Advance the SW-tracked EPN window state for @sa and push it to HW.
 *
 * Called from the async event handler when the device signalled that the
 * EPN arm event fired (@mode_param is the bottom of the replay window read
 * from the ASO context).  Updates epn_msb/overlap, writes them to the
 * device object via mlx5e_macsec_modify_obj() (return value ignored —
 * best-effort), then re-arms the EPN event.
 */
1460 static void macsec_epn_update(struct mlx5e_macsec *macsec, struct mlx5_core_dev *mdev,
1461 			      struct mlx5e_macsec_sa *sa, u32 obj_id, u32 mode_param)
1463 	struct mlx5_macsec_obj_attrs attrs = {};
1464 	struct mlx5e_macsec_aso_in in = {};
1466 	/* When the bottom of the replay protection window (mode_param) crosses 2^31 (half sequence
1467 	 * number wraparound) hence mode_param > MLX5_MACSEC_EPN_SCOPE_MID the SW should update the
1468 	 * epn_overlap to OLD (1).
1469 	 * When the bottom of the replay protection window (mode_param) crosses 2^32 (full sequence
1470 	 * number wraparound) hence mode_param < MLX5_MACSEC_EPN_SCOPE_MID since it did a
1471 	 * wraparound, the SW should update the epn_overlap to NEW (0), and increment the epn_msb.
1474 	if (mode_param < MLX5_MACSEC_EPN_SCOPE_MID) {
1475 		sa->epn_state.epn_msb++;
1476 		sa->epn_state.overlap = 0;
1478 		sa->epn_state.overlap = 1;
1481 	macsec_build_accel_attrs(sa, &attrs);
1482 	mlx5e_macsec_modify_obj(mdev, &attrs, obj_id);
1484 	/* Re-set EPN arm event */
1486 	in.mode = MLX5_MACSEC_EPN;
1487 	macsec_aso_set_arm_event(mdev, macsec, &in);
/* Workqueue handler for a MACsec object-change event.
 *
 * Runs on macsec->wq (ordered), queued by macsec_obj_change_event().
 * Under macsec->lock: resolves the SA from the event's object id (Tx SAs
 * first, then Rx), queries the ASO context, and if the SA uses EPN and the
 * arm bit was consumed (!MLX5E_ASO_EPN_ARM) advances the EPN window.
 *
 * NOTE(review): the kfree of async_work on the exit path is elided in this
 * excerpt — presumably after out_async_work; leading numbers are
 * extraction artifacts.
 */
1490 static void macsec_async_event(struct work_struct *work)
1492 	struct mlx5e_macsec_async_work *async_work;
1493 	struct mlx5e_macsec_aso_out out = {};
1494 	struct mlx5e_macsec_aso_in in = {};
1495 	struct mlx5e_macsec_sa *macsec_sa;
1496 	struct mlx5e_macsec *macsec;
1497 	struct mlx5_core_dev *mdev;
1500 	async_work = container_of(work, struct mlx5e_macsec_async_work, work);
1501 	macsec = async_work->macsec;
1502 	mutex_lock(&macsec->lock);
1504 	mdev = async_work->mdev;
1505 	obj_id = async_work->obj_id;
1506 	macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
1508 		macsec_sa = get_macsec_rx_sa_from_obj_id(macsec, obj_id);
1510 			mlx5_core_dbg(mdev, "MACsec SA is not found (SA object id %d)\n", obj_id);
1511 			goto out_async_work;
1515 	/* Query MACsec ASO context */
1517 	macsec_aso_query(mdev, macsec, &in, &out);
1519 	/* EPN arm bit cleared => the window advanced; update SW/HW state */
1520 	if (macsec_sa->epn_state.epn_enabled && !(out.event_arm & MLX5E_ASO_EPN_ARM))
1521 		macsec_epn_update(macsec, mdev, macsec_sa, obj_id, out.mode_param);
1525 	mutex_unlock(&macsec->lock);
/* mlx5 notifier callback for OBJECT_CHANGE EQEs.
 *
 * Filters for MACsec object-change events and defers the real work to the
 * ordered workqueue (macsec_async_event) because this callback runs in
 * atomic (EQ) context — hence GFP_ATOMIC for the work item.
 * Returns NOTIFY_* values (elided in this excerpt).
 */
1528 static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
1530 	struct mlx5e_macsec *macsec = container_of(nb, struct mlx5e_macsec, nb);
1531 	struct mlx5e_macsec_async_work *async_work;
1532 	struct mlx5_eqe_obj_change *obj_change;
1533 	struct mlx5_eqe *eqe = data;
1537 	if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
1540 	obj_change = &eqe->data.obj_change;
1541 	obj_type = be16_to_cpu(obj_change->obj_type);
1542 	obj_id = be32_to_cpu(obj_change->obj_id);
1544 	if (obj_type != MLX5_GENERAL_OBJECT_TYPES_MACSEC)
1547 	async_work = kzalloc(sizeof(*async_work), GFP_ATOMIC);
1551 	async_work->macsec = macsec;
1552 	async_work->mdev = macsec->mdev;
1553 	async_work->obj_id = obj_id;
1555 	INIT_WORK(&async_work->work, macsec_async_event);
	/* macsec->wq is ordered; a failed queue_work here would leak the event */
1557 	WARN_ON(!queue_work(macsec->wq, &async_work->work));
/* Set up the MACsec ASO resources: PD, ASO SQ/CQ object, UMR MR, lock.
 *
 * On failure unwinds in reverse order via goto labels (elided in this
 * excerpt): destroy the ASO, then dealloc the PD.
 * Returns 0 on success or a negative errno.
 */
1562 static int mlx5e_macsec_aso_init(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
1564 	struct mlx5_aso *maso;
1567 	err = mlx5_core_alloc_pd(mdev, &aso->pdn);
1570 			      "MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",
1575 	maso = mlx5_aso_create(mdev, aso->pdn);
1577 		err = PTR_ERR(maso);
1581 	err = mlx5e_macsec_aso_reg_mr(mdev, aso);
1585 	mutex_init(&aso->aso_lock);
1592 	mlx5_aso_destroy(maso);
1594 	mlx5_core_dealloc_pd(mdev, aso->pdn);
/* Tear down ASO resources in reverse order of mlx5e_macsec_aso_init():
 * deregister the UMR MR, destroy the ASO object, dealloc the PD.
 */
1598 static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
1603 	mlx5e_macsec_aso_dereg_mr(mdev, aso);
1605 	mlx5_aso_destroy(aso->maso);
1607 	mlx5_core_dealloc_pd(mdev, aso->pdn);
/* MACsec offload callbacks registered on netdev->macsec_ops; the MACsec
 * core invokes these when SecYs/SCs/SAs are added, updated or deleted on
 * a device with NETIF_F_HW_MACSEC.  (Closing brace elided in excerpt.)
 */
1610 static const struct macsec_ops macsec_offload_ops = {
1611 	.mdo_add_txsa = mlx5e_macsec_add_txsa,
1612 	.mdo_upd_txsa = mlx5e_macsec_upd_txsa,
1613 	.mdo_del_txsa = mlx5e_macsec_del_txsa,
1614 	.mdo_add_rxsc = mlx5e_macsec_add_rxsc,
1615 	.mdo_upd_rxsc = mlx5e_macsec_upd_rxsc,
1616 	.mdo_del_rxsc = mlx5e_macsec_del_rxsc,
1617 	.mdo_add_rxsa = mlx5e_macsec_add_rxsa,
1618 	.mdo_upd_rxsa = mlx5e_macsec_upd_rxsa,
1619 	.mdo_del_rxsa = mlx5e_macsec_del_rxsa,
1620 	.mdo_add_secy = mlx5e_macsec_add_secy,
1621 	.mdo_upd_secy = mlx5e_macsec_upd_secy,
1622 	.mdo_del_secy = mlx5e_macsec_del_secy,
/* Tx fast-path hook: decide whether @skb can be sent with MACsec offload.
 *
 * Resolves the flow-steering id from the SCI carried in the skb's metadata
 * dst.  On lookup failure the skb is dropped (dev_kfree_skb_any) —
 * the elided lines presumably check fs_id and return false there; confirm
 * against the full source.
 */
1625 bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
1627 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
1630 	fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs,
1631 							&md_dst->u.macsec_info.sci);
1638 	dev_kfree_skb_any(skb);
/* Tx WQE build hook: stamp the Ethernet segment with MACsec steering
 * metadata so the HW steers this packet into the MACsec Tx tables.
 * The fs_id (resolved from the skb's SCI) is shifted left by 2 and OR'd
 * with the MACsec flow-table-metadata marker bit.
 */
1642 void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
1643 				struct sk_buff *skb,
1644 				struct mlx5_wqe_eth_seg *eseg)
1646 	struct metadata_dst *md_dst = skb_metadata_dst(skb);
1649 	fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs,
1650 							&md_dst->u.macsec_info.sci);
1654 	eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
/* Rx path hook: attach the Rx SC's metadata dst to a HW-decrypted skb.
 *
 * The CQE's ft_metadata carries the fs_id the HW matched; it indexes the
 * sc_xarray (fs_id -> rx_sc mapping, see struct mlx5e_macsec) to recover
 * the SC, whose md_dst is ref-counted onto the skb so the MACsec core can
 * associate the packet with its SecY.
 * NOTE(review): metadata validity checks and RCU locking around xa_load
 * appear elided in this excerpt — confirm against the full source.
 */
1657 void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
1658 					struct sk_buff *skb,
1659 					struct mlx5_cqe64 *cqe)
1661 	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
1662 	u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata);
1663 	struct mlx5e_priv *priv = macsec_netdev_priv(netdev);
1664 	struct mlx5e_macsec_rx_sc *rx_sc;
1665 	struct mlx5e_macsec *macsec;
1668 	macsec = priv->macsec;
1672 	fs_id = MLX5_MACSEC_RX_METADAT_HANDLE(macsec_meta_data);
1675 	sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
1676 	rx_sc = sc_xarray_element->rx_sc;
1678 	dst_hold(&rx_sc->md_dst->dst);
1679 	skb_dst_set(skb, &rx_sc->md_dst->dst);
/* Advertise MACsec offload on the netdev if the device supports it:
 * install the offload ops, set NETIF_F_HW_MACSEC, and keep dst entries on
 * skbs (netif_keep_dst) since the Tx path reads the metadata dst.
 */
1685 void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv)
1687 	struct net_device *netdev = priv->netdev;
1689 	if (!mlx5e_is_macsec_device(priv->mdev))
1693 	mlx5_core_dbg(priv->mdev, "mlx5e: MACsec acceleration enabled\n");
1694 	netdev->macsec_ops = &macsec_offload_ops;
1695 	netdev->features |= NETIF_F_HW_MACSEC;
1696 	netif_keep_dst(netdev);
/* Initialize MACsec offload state for this netdev instance.
 *
 * Order: allocate the mlx5e_macsec context, init list/lock, bring up the
 * ASO, create the ordered event workqueue, init the fs_id xarray
 * (XA_FLAGS_ALLOC1 — ids start at 1, so 0 is never a valid fs_id),
 * init the steering tables, then register the object-change notifier.
 * Unwinds in reverse order on failure (goto labels elided in excerpt).
 * Returns 0 on success (also 0 when the device has no MACsec support —
 * offload is simply not enabled) or a negative errno.
 */
1699 int mlx5e_macsec_init(struct mlx5e_priv *priv)
1701 	struct mlx5_core_dev *mdev = priv->mdev;
1702 	struct mlx5e_macsec *macsec = NULL;
1703 	struct mlx5_macsec_fs *macsec_fs;
1706 	if (!mlx5e_is_macsec_device(priv->mdev)) {
1707 		mlx5_core_dbg(mdev, "Not a MACsec offload device\n");
1711 	macsec = kzalloc(sizeof(*macsec), GFP_KERNEL);
1715 	INIT_LIST_HEAD(&macsec->macsec_device_list_head);
1716 	mutex_init(&macsec->lock);
1718 	err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
1720 		mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
1724 	macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);
1730 	xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1);
1732 	priv->macsec = macsec;
1734 	macsec->mdev = mdev;
1736 	macsec_fs = mlx5_macsec_fs_init(mdev);
1742 	mdev->macsec_fs = macsec_fs;
1744 	macsec->nb.notifier_call = macsec_obj_change_event;
1745 	mlx5_notifier_register(mdev, &macsec->nb);
1747 	mlx5_core_dbg(mdev, "MACsec attached to netdevice\n");
	/* Error unwind labels (elided): destroy wq, cleanup aso, free macsec */
1752 	destroy_workqueue(macsec->wq);
1754 	mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
1757 	priv->macsec = NULL;
/* Tear down MACsec offload state in reverse order of mlx5e_macsec_init():
 * unregister the notifier first (no new events), cleanup steering, flush
 * and destroy the workqueue (pending async work completes), then ASO and
 * lock.  (The trailing kfree of @macsec falls past this excerpt.)
 */
1761 void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
1763 	struct mlx5e_macsec *macsec = priv->macsec;
1764 	struct mlx5_core_dev *mdev = priv->mdev;
1769 	mlx5_notifier_unregister(mdev, &macsec->nb);
1770 	mlx5_macsec_fs_cleanup(mdev->macsec_fs);
1771 	destroy_workqueue(macsec->wq);
1772 	mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
1773 	mutex_destroy(&macsec->lock);