Merge branch 'for-5.3/upstream-fixes' into for-linus
[linux-2.6-block.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_accel / ktls.h
1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2019 Mellanox Technologies. */
3
4 #ifndef __MLX5E_KTLS_H__
5 #define __MLX5E_KTLS_H__
6
7 #include "en.h"
8
9 #ifdef CONFIG_MLX5_EN_TLS
10 #include <net/tls.h>
11 #include "accel/tls.h"
12
/* Byte size of the UMR WQE that carries the TLS static parameters, and the
 * number of send-WQE basic blocks (WQEBBs) it occupies on the SQ.
 */
#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
	(sizeof(struct mlx5e_umr_wqe) + MLX5_ST_SZ_BYTES(tls_static_params))
#define MLX5E_KTLS_STATIC_WQEBBS \
	(DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB))

/* Byte size of the WQE that carries the TLS progress parameters, and the
 * number of WQEBBs it occupies on the SQ.
 */
#define MLX5E_KTLS_PROGRESS_WQE_SZ \
	(sizeof(struct mlx5e_tx_wqe) + MLX5_ST_SZ_BYTES(tls_progress_params))
#define MLX5E_KTLS_PROGRESS_WQEBBS \
	(DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
/* Upper bound on WQEBBs consumed by a single resync DUMP WQE
 * (presumably paired with mlx5e_ktls_tx_handle_resync_dump_comp() below —
 * confirm against the .c implementation).
 */
#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
23
/* Authentication-state values written into the TLS progress params.
 * NOTE(review): numeric values appear to mirror the device's
 * tls_progress_params layout — confirm against the HW spec (PRM).
 */
enum {
	MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD     = 0,
	MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_OFFLOAD        = 1,
	MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_AUTHENTICATION = 2,
};
29
/* Record-tracker state values for the TLS progress params.
 * NOTE(review): numeric values appear to mirror the device's
 * tls_progress_params layout — confirm against the HW spec (PRM).
 */
enum {
	MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START     = 0,
	MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 1,
	MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING  = 2,
};
35
/* Per-connection driver-side state for a kTLS TX offloaded socket. */
struct mlx5e_ktls_offload_context_tx {
	/* Back-pointer to the TLS stack's TX offload context
	 * (set in mlx5e_set_ktls_tx_priv_ctx()).
	 */
	struct tls_offload_context_tx *tx_ctx;
	/* Crypto parameters handed down by the TLS stack. */
	struct tls_crypto_info *crypto_info;
	/* Next TCP sequence number the HW context is synced to —
	 * presumably used to detect resync/OOO; confirm in the .c file.
	 */
	u32 expected_seq;
	/* HW TIS object number used for this connection's sends. */
	u32 tisn;
	/* HW crypto key identifier. */
	u32 key_id;
	/* True when static/progress params still need to be posted
	 * (see mlx5e_ktls_tx_offload_set_pending()).
	 */
	bool ctx_post_pending;
};
44
/* Overlay struct cast over the TLS stack's tls_offload_context_tx to stash
 * the driver's private context right after it; the accessors below
 * BUILD_BUG_ON that this fits within TLS_OFFLOAD_CONTEXT_SIZE_TX.
 */
struct mlx5e_ktls_offload_context_tx_shadow {
	struct tls_offload_context_tx         tx_ctx;
	struct mlx5e_ktls_offload_context_tx *priv_tx;
};
49
50 static inline void
51 mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
52                            struct mlx5e_ktls_offload_context_tx *priv_tx)
53 {
54         struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx);
55         struct mlx5e_ktls_offload_context_tx_shadow *shadow;
56
57         BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX);
58
59         shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx;
60
61         shadow->priv_tx = priv_tx;
62         priv_tx->tx_ctx = tx_ctx;
63 }
64
65 static inline struct mlx5e_ktls_offload_context_tx *
66 mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
67 {
68         struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx);
69         struct mlx5e_ktls_offload_context_tx_shadow *shadow;
70
71         BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX);
72
73         shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx;
74
75         return shadow->priv_tx;
76 }
77
/* Advertise kTLS TX device-offload capabilities on the netdev. */
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
/* Mark @priv_tx as needing its static/progress params (re)posted
 * (presumably sets ctx_post_pending — confirm in the .c file).
 */
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx);

/* TX datapath hook: prepare @skb for kTLS transmission on @sq; may adjust
 * the WQE/pi it returns through @wqe/@pi. Returns the skb to send, or NULL.
 */
struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
                                         struct mlx5e_txqsq *sq,
                                         struct sk_buff *skb,
                                         struct mlx5e_tx_wqe **wqe, u16 *pi);
/* Completion handler for resync DUMP WQEs posted on @sq. */
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
                                           struct mlx5e_tx_wqe_info *wi,
                                           struct mlx5e_sq_dma *dma);
88
89 #else
90
/* CONFIG_MLX5_EN_TLS disabled: kTLS netdev setup is a no-op. */
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
}
94
95 #endif
96
#endif /* __MLX5E_KTLS_H__ */