Commit | Line | Data |
---|---|---|
d2ead1f3 TT |
1 | // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB |
2 | // Copyright (c) 2019 Mellanox Technologies. | |
3 | ||
943aa7bd | 4 | #include "en_accel/ktls.h" |
7d0d0d86 TT |
5 | #include "en_accel/ktls_txrx.h" |
6 | #include "en_accel/ktls_utils.h" | |
d2ead1f3 | 7 | |
7d0d0d86 TT |
/* Layout of a hardware DUMP WQE: one control segment plus a single data
 * segment pointing at already-transmitted record bytes, re-posted so the
 * HW can resynchronize its TLS crypto state.
 */
struct mlx5e_dump_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_data_seg data;
};
7d0d0d86 TT |
/* Number of WQE basic blocks (WQEBBs) occupied by a single DUMP WQE. */
#define MLX5E_KTLS_DUMP_WQEBBS \
	(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
/* Upper bound on the number of DUMP WQEs needed to resync one TLS record:
 * one per source fragment plus one per HW-MTU-sized chunk of @sync_len
 * (each DUMP carries at most one MTU worth of a fragment).
 */
static u8
mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags,
			  unsigned int sync_len)
{
	/* Given the MTU and sync_len, calculates an upper bound for the
	 * number of DUMP WQEs needed for the TX resync of a record.
	 */
	return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu));
}
d2ead1f3 | 25 | |
/* Worst-case SQ "stop room" (in WQEBBs) that kTLS TX may consume for a
 * single skb: static + progress param WQEs plus the maximum number of
 * DUMP WQEs for resyncing a full-size record spread over MAX_SKB_FRAGS.
 * Returns 0 when TX kTLS offload is not supported on @mdev.
 */
u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	u16 num_dumps, stop_room = 0;

	if (!mlx5e_is_ktls_tx(mdev))
		return 0;

	/* Worst case: resync of a TLS_MAX_PAYLOAD_SIZE record. */
	num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);

	stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
	stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
	stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);

	return stop_room;
}
41 | ||
da6682fa TT |
/* Fill a TIS context for kTLS: enable TLS and bind the TIS to the device's
 * protection domain and transport domain.
 */
static void mlx5e_ktls_set_tisc(struct mlx5_core_dev *mdev, void *tisc)
{
	MLX5_SET(tisc, tisc, tls_en, 1);
	MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
}
48 | ||
7d0d0d86 TT |
/* Create a TLS-enabled TIS object; on success the TIS number is returned
 * through @tisn. Returns 0 or a negative errno from mlx5_core_create_tis().
 */
static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};

	mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));

	return mlx5_core_create_tis(mdev, in, tisn);
}
57 | ||
7d0d0d86 TT |
/* Per-connection TX kTLS offload state; a pointer to it is stored in the
 * tls_context driver-state area (see mlx5e_set_ktls_tx_priv_ctx()).
 */
struct mlx5e_ktls_offload_context_tx {
	struct tls_offload_context_tx *tx_ctx;	/* core TLS TX offload context */
	struct tls12_crypto_info_aes_gcm_128 crypto_info; /* copy of session crypto info */
	struct mlx5e_tls_sw_stats *sw_stats;	/* shared SW counters (ctx add/del) */
	u32 expected_seq;	/* next in-order TCP seq; mismatch triggers resync */
	u32 tisn;		/* HW TIS number for this connection */
	u32 key_id;		/* HW crypto key object id */
	bool ctx_post_pending;	/* param WQEs not yet posted (first xmit) */
};
67 | ||
/* Store @priv_tx in the TLS context's TX-direction driver-state area. */
static void
mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	/* Only a pointer is stored; it must fit the driver-state area. */
	BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);

	*ctx = priv_tx;
}
79 | ||
7d0d0d86 TT |
/* Retrieve the priv_tx pointer previously stored by
 * mlx5e_set_ktls_tx_priv_ctx() in the TLS context's TX driver-state area.
 */
static struct mlx5e_ktls_offload_context_tx *
mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	return *ctx;
}
88 | ||
7d0d0d86 TT |
/* TX-side tls_dev_add() callback: set up HW offload state for a new kTLS
 * connection — create the HW crypto key and a TLS-enabled TIS, and stash
 * the driver context in @sk's tls_context.
 * @start_offload_tcp_sn: first TCP sequence number covered by the offload.
 * Returns 0 on success or a negative errno; on failure all partially
 * created resources are released (goto-unwind below).
 */
int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct tls_context *tls_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;
	int err;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	mdev = priv->mdev;

	priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
	if (!priv_tx)
		return -ENOMEM;

	err = mlx5_ktls_create_key(mdev, crypto_info, &priv_tx->key_id);
	if (err)
		goto err_create_key;

	priv_tx->sw_stats = &priv->tls->sw_stats;
	priv_tx->expected_seq = start_offload_tcp_sn;
	/* Only AES-GCM-128 is handled here; the cast copies the full info. */
	priv_tx->crypto_info =
		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);

	mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);

	err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
	if (err)
		goto err_create_tis;

	/* Defer posting the static/progress param WQEs to the first xmit. */
	priv_tx->ctx_post_pending = true;
	atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);

	return 0;

err_create_tis:
	mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
err_create_key:
	kfree(priv_tx);
	return err;
}
133 | ||
/* TX-side tls_dev_del() callback: tear down the per-connection HW offload
 * state created by mlx5e_ktls_add_tx() — destroy the TIS and the crypto
 * key, then free the driver context.
 */
void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
	priv = netdev_priv(netdev);
	mdev = priv->mdev;

	atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
	mlx5e_destroy_tis(mdev, priv_tx->tisn);
	mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
	kfree(priv_tx);
}
149 | ||
/* Record bookkeeping for the WQE at producer index @pi so that completion
 * processing knows how many WQEBBs/bytes it covered.
 * @page: DUMP-fragment page to be released on completion, or NULL for
 *        WQEs that carry no resync page reference.
 */
static void tx_fill_wi(struct mlx5e_txqsq *sq,
		       u16 pi, u8 num_wqebbs, u32 num_bytes,
		       struct page *page)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	*wi = (struct mlx5e_tx_wqe_info) {
		.num_wqebbs = num_wqebbs,
		.num_bytes = num_bytes,
		.resync_dump_frag_page = page,
	};
}
162 | ||
d2ead1f3 TT |
163 | static bool |
164 | mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx) | |
165 | { | |
166 | bool ret = priv_tx->ctx_post_pending; | |
167 | ||
168 | priv_tx->ctx_post_pending = false; | |
169 | ||
170 | return ret; | |
171 | } | |
172 | ||
/* Post a SET_STATIC_PARAMS WQE carrying the connection's crypto material
 * (key id, crypto info) for TIS @priv_tx->tisn, and advance the SQ
 * producer counter accordingly.
 * @fence: fence this WQE against previously posted WQEs.
 */
static void
post_static_params(struct mlx5e_txqsq *sq,
		   struct mlx5e_ktls_offload_context_tx *priv_tx,
		   bool fence)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	/* Reserve contiguous room, wrapping with NOPs if needed. */
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
				       priv_tx->tisn, priv_tx->key_id, 0, fence,
				       TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}
190 | ||
/* Post a SET_PROGRESS_PARAMS WQE (re)initializing the HW record-tracking
 * state for TIS @priv_tx->tisn, and advance the SQ producer counter.
 * @fence: fence this WQE against previously posted WQEs.
 */
static void
post_progress_params(struct mlx5e_txqsq *sq,
		     struct mlx5e_ktls_offload_context_tx *priv_tx,
		     bool fence)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	/* Reserve contiguous room, wrapping with NOPs if needed. */
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0,
					 TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}
207 | ||
208 | static void | |
209 | mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq, | |
210 | struct mlx5e_ktls_offload_context_tx *priv_tx, | |
211 | bool skip_static_post, bool fence_first_post) | |
212 | { | |
213 | bool progress_fence = skip_static_post || !fence_first_post; | |
700ec497 | 214 | |
d2ead1f3 TT |
215 | if (!skip_static_post) |
216 | post_static_params(sq, priv_tx, fence_first_post); | |
217 | ||
218 | post_progress_params(sq, priv_tx, progress_fence); | |
219 | } | |
220 | ||
/* Result of tx_sync_info_get(): the record sequence number and the
 * already-transmitted part of the record to be re-posted as DUMP WQEs.
 */
struct tx_sync_info {
	u64 rcd_sn;		/* TLS record sequence number */
	u32 sync_len;		/* bytes from record start up to the resync point */
	int nr_frags;		/* number of valid entries in frags[] */
	skb_frag_t frags[MAX_SKB_FRAGS]; /* page-ref'd copies of record frags */
};
227 | ||
46a3ea98 TT |
/* Outcome of a TX resync attempt. */
enum mlx5e_ktls_sync_retval {
	MLX5E_KTLS_SYNC_DONE,		/* resync posted; proceed with xmit */
	MLX5E_KTLS_SYNC_FAIL,		/* cannot resync; drop the skb */
	MLX5E_KTLS_SYNC_SKIP_NO_DATA,	/* skb precedes offload start; send in clear */
};
233 | ||
/* Gather the info needed to resync the HW at @tcp_seq: the record sequence
 * number and the [record start, tcp_seq) byte range as a list of frags.
 * A page reference is taken on every frag copied into @info; the caller
 * owns those references (released on DUMP completion or on error unwind).
 * Called under the TX offload context's record lock.
 */
static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
		 u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
	enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
	struct tls_record_info *record;
	int remaining, i = 0;
	unsigned long flags;
	bool ends_before;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

	if (unlikely(!record)) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	/* There are the following cases:
	 * 1. packet ends before start marker: bypass offload.
	 * 2. packet starts before start marker and ends after it: drop,
	 *    not supported, breaks contract with kernel.
	 * 3. packet ends before tls record info starts: drop,
	 *    this packet was already acknowledged and its record info
	 *    was released.
	 */
	ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));

	if (unlikely(tls_record_is_start_marker(record))) {
		ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
		goto out;
	} else if (ends_before) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	info->sync_len = tcp_seq - tls_record_start_seq(record);
	remaining = info->sync_len;
	while (remaining > 0) {
		skb_frag_t *frag = &record->frags[i];

		/* Ref held until the DUMP WQE for this frag completes. */
		get_page(skb_frag_page(frag));
		remaining -= skb_frag_size(frag);
		info->frags[i++] = *frag;
	}
	/* reduce the part which will be sent with the original SKB */
	if (remaining < 0)
		skb_frag_size_add(&info->frags[i - 1], remaining);
	info->nr_frags = i;
out:
	spin_unlock_irqrestore(&tx_ctx->lock, flags);
	return ret;
}
288 | ||
/* Re-post the connection's param WQEs for a resync at record @rcd_sn.
 * The static params WQE is skipped when the cached record sequence already
 * matches @rcd_sn (nothing in the static context changed); otherwise the
 * cached copy is updated first. The first posted WQE is fenced.
 */
static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      u64 rcd_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
	__be64 rn_be = cpu_to_be64(rcd_sn);
	bool skip_static_post;
	u16 rec_seq_sz;
	char *rec_seq;

	rec_seq = info->rec_seq;
	rec_seq_sz = sizeof(info->rec_seq);

	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
	if (!skip_static_post)
		memcpy(rec_seq, &rn_be, rec_seq_sz);

	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}
309 | ||
/* Post a single DUMP WQE that re-sends one frag of a TLS record so the HW
 * can recompute its crypto state up to the resync point.
 * @first: fence this WQE against previously posted WQEs.
 * Returns 0 on success, or -ENOMEM on DMA-mapping failure (in which case
 * no WQE slot is consumed).
 */
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_dump_wqe *wqe;
	dma_addr_t dma_addr = 0;
	u16 ds_cnt;
	int fsz;
	u16 pi;

	/* A DUMP WQE fits a single WQEBB, so no wrap handling is needed. */
	BUILD_BUG_ON(MLX5E_KTLS_DUMP_WQEBBS != 1);
	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;

	cseg = &wqe->ctrl;
	dseg = &wqe->data;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->tis_tir_num = cpu_to_be32(tisn << 8);
	cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

	fsz = skb_frag_size(frag);
	dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
				    DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
		return -ENOMEM;

	dseg->addr = cpu_to_be64(dma_addr);
	dseg->lkey = sq->mkey_be;
	dseg->byte_count = cpu_to_be32(fsz);
	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

	/* Completion will unmap the DMA and put the frag page. */
	tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
	sq->pc += MLX5E_KTLS_DUMP_WQEBBS;

	return 0;
}
351 | ||
/* Completion handler for a DUMP WQE: unmap its DMA entry (advancing the
 * caller's DMA fifo consumer counter), release the page reference taken
 * when the DUMP was posted, and update the dump statistics.
 */
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
					   struct mlx5e_tx_wqe_info *wi,
					   u32 *dma_fifo_cc)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_sq_dma *dma;

	dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
	stats = sq->stats;

	mlx5e_tx_dma_unmap(sq->pdev, dma);
	/* Undo the get_page()/page_ref_add() taken when posting the DUMP. */
	put_page(wi->resync_dump_frag_page);
	stats->tls_dump_packets++;
	stats->tls_dump_bytes += wi->num_bytes;
}
367 | ||
/* Post a fenced NOP WQE so the data xmit that follows is ordered after
 * the previously posted param WQEs (used when no DUMP WQE was sent).
 */
static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	tx_fill_wi(sq, pi, 1, 0, NULL);

	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}
377 | ||
/* Handle an out-of-order skb (TCP seq != expected): resynchronize the HW
 * crypto state by re-posting the param WQEs and DUMPing the already-sent
 * part of the current record in HW-MTU-sized chunks.
 * Returns MLX5E_KTLS_SYNC_DONE when the skb can be transmitted,
 * MLX5E_KTLS_SYNC_SKIP_NO_DATA when it should bypass offload, or
 * MLX5E_KTLS_SYNC_FAIL when it must be dropped.
 */
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
			 struct mlx5e_txqsq *sq,
			 int datalen,
			 u32 seq)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	enum mlx5e_ktls_sync_retval ret;
	struct tx_sync_info info = {};
	int i = 0;

	ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
	if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
		if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
			stats->tls_skip_no_sync_data++;
			return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
		}
		/* We might get here if a retransmission reaches the driver
		 * after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		stats->tls_drop_no_sync_data++;
		goto err_out;
	}

	stats->tls_ooo++;

	tx_post_resync_params(sq, priv_tx, info.rcd_sn);

	/* If no dump WQE was sent, we need to have a fence NOP WQE before the
	 * actual data xmit.
	 */
	if (!info.nr_frags) {
		tx_post_fence_nop(sq);
		return MLX5E_KTLS_SYNC_DONE;
	}

	for (; i < info.nr_frags; i++) {
		unsigned int orig_fsz, frag_offset = 0, n = 0;
		skb_frag_t *f = &info.frags[i];

		orig_fsz = skb_frag_size(f);

		/* Split each frag into hw_mtu-sized DUMP WQEs; only the very
		 * first DUMP of the whole resync is fenced.
		 */
		do {
			bool fence = !(i || frag_offset);
			unsigned int fsz;

			n++;
			fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
			skb_frag_size_set(f, fsz);
			if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
				/* Account for the chunks already posted (each
				 * completion puts one ref; n-1 extra needed).
				 */
				page_ref_add(skb_frag_page(f), n - 1);
				goto err_out;
			}

			skb_frag_off_add(f, fsz);
			frag_offset += fsz;
		} while (frag_offset < orig_fsz);

		/* One ref per posted chunk beyond the one from tx_sync_info_get(). */
		page_ref_add(skb_frag_page(f), n - 1);
	}

	return MLX5E_KTLS_SYNC_DONE;

err_out:
	for (; i < info.nr_frags; i++)
		/* The put_page() here undoes the page ref obtained in tx_sync_info_get().
		 * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
		 * released only upon their completions (or in mlx5e_free_txqsq_descs,
		 * if channel closes).
		 */
		put_page(skb_frag_page(&info.frags[i]));

	return MLX5E_KTLS_SYNC_FAIL;
}
453 | ||
943aa7bd LR |
/* kTLS TX datapath entry: prepare the HW offload state for transmitting
 * @skb on @sq. Posts the initial param WQEs on the connection's first
 * xmit, triggers a resync on an out-of-order TCP sequence, and fills
 * @state with the TIS number for the data WQE.
 * Returns true when the caller should proceed with transmission (possibly
 * in clear text for pre-offload data), false when the skb was consumed
 * (dropped) here.
 */
bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
			      struct sk_buff *skb,
			      struct mlx5e_accel_tx_tls_state *state)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_sq_stats *stats = sq->stats;
	struct tls_context *tls_ctx;
	int datalen;
	u32 seq;

	/* Pure ACKs etc. carry no TLS payload — nothing to offload. */
	datalen = skb->len - skb_tcp_all_headers(skb);
	if (!datalen)
		return true;

	/* Param/DUMP WQEs below must not land inside an open MPWQE session. */
	mlx5e_tx_mpwqe_ensure_complete(sq);

	tls_ctx = tls_get_ctx(skb->sk);
	if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
		goto err_out;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

	/* First xmit on this connection: post static + progress params. */
	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
	}

	seq = ntohl(tcp_hdr(skb)->seq);
	if (unlikely(priv_tx->expected_seq != seq)) {
		enum mlx5e_ktls_sync_retval ret =
			mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

		switch (ret) {
		case MLX5E_KTLS_SYNC_DONE:
			break;
		case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
			/* Data before the offload start marker goes out in
			 * clear text; it must not be marked decrypted.
			 */
			if (likely(!skb->decrypted))
				goto out;
			WARN_ON_ONCE(1);
			fallthrough;
		case MLX5E_KTLS_SYNC_FAIL:
			goto err_out;
		}
	}

	priv_tx->expected_seq = seq + datalen;

	state->tls_tisn = priv_tx->tisn;

	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	stats->tls_encrypted_bytes += datalen;

out:
	return true;

err_out:
	dev_kfree_skb_any(skb);
	return false;
}