// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <linux/tls.h>
#include "en.h"
#include "en/txrx.h"
#include "en_accel/ktls.h"

enum {
	MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2 = 0x2,
};

enum {
	MLX5E_ENCRYPTION_STANDARD_TLS = 0x1,
};

#define EXTRACT_INFO_FIELDS do { \
	salt    = info->salt;    \
	rec_seq = info->rec_seq; \
	salt_sz    = sizeof(info->salt);    \
	rec_seq_sz = sizeof(info->rec_seq); \
} while (0)

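/* Fill the inline TLS static params context: copy the AES-GCM salt into the
 * GCM IV field and the record sequence into the initial record number, and
 * set the TLS version, encryption standard and DEK (key) index.
 */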
static void
fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
	char *initial_rn, *gcm_iv;
	u16 salt_sz, rec_seq_sz;
	char *salt, *rec_seq;
	u8 tls_version;

	EXTRACT_INFO_FIELDS;

	gcm_iv     = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
	initial_rn = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);

	memcpy(gcm_iv,     salt,    salt_sz);
	memcpy(initial_rn, rec_seq, rec_seq_sz);

	tls_version = MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2;

	MLX5_SET(tls_static_params, ctx, tls_version, tls_version);
	MLX5_SET(tls_static_params, ctx, const_1, 1);
	MLX5_SET(tls_static_params, ctx, const_2, 2);
	MLX5_SET(tls_static_params, ctx, encryption_standard,
		 MLX5E_ENCRYPTION_STANDARD_TLS);
	MLX5_SET(tls_static_params, ctx, dek_index, priv_tx->key_id);
}

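/* Build the UMR WQE that programs the TIS static TLS parameters
 * (MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS), carrying the params context inline.
 */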
static void
build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
		    struct mlx5e_ktls_offload_context_tx *priv_tx,
		    bool fence)
{
	struct mlx5_wqe_ctrl_seg     *cseg  = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;

#define STATIC_PARAMS_DS_CNT \
	DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_DS)

	cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR |
					     (MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS << 24));
	cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
					     STATIC_PARAMS_DS_CNT);
	cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
	cseg->tisn             = cpu_to_be32(priv_tx->tisn << 8);

	ucseg->flags = MLX5_UMR_INLINE;
	ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);

	fill_static_params_ctx(wqe->tls_static_params_ctx, priv_tx);
}

static void
fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn);
	MLX5_SET(tls_progress_params, ctx, record_tracker_state,
		 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
	MLX5_SET(tls_progress_params, ctx, auth_state,
		 MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD);
}

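/* Build the SET_PSV WQE that initializes the TIS progress parameters:
 * record tracker at start-of-record, authentication state set to no-offload.
 */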
static void
build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      bool fence)
{
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

#define PROGRESS_PARAMS_DS_CNT \
	DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_DS)

	cseg->opmod_idx_opcode =
		cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV |
			    (MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS << 24));
	cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
					     PROGRESS_PARAMS_DS_CNT);
	cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

	fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx);
}

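/* Record per-WQE bookkeeping in the SQ's wqe_info so the completion path can
 * account for the posted WQEBBs/bytes and release the DUMP page, if any.
 */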
static void tx_fill_wi(struct mlx5e_txqsq *sq,
		       u16 pi, u8 num_wqebbs, u32 num_bytes,
		       struct page *page)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	memset(wi, 0, sizeof(*wi));
	wi->num_wqebbs = num_wqebbs;
	wi->num_bytes  = num_bytes;
	wi->resync_dump_frag_page = page;
}

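/* Mark that the connection's static/progress params must still be posted to
 * the SQ before its first data transmission.
 */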
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	priv_tx->ctx_post_pending = true;
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	bool ret = priv_tx->ctx_post_pending;

	priv_tx->ctx_post_pending = false;

	return ret;
}

static void
post_static_params(struct mlx5e_txqsq *sq,
		   struct mlx5e_ktls_offload_context_tx *priv_tx,
		   bool fence)
{
	struct mlx5e_umr_wqe *umr_wqe;
	u16 pi;

	umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
	build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
	tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
	sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
		     struct mlx5e_ktls_offload_context_tx *priv_tx,
		     bool fence)
{
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
	build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
	tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
	sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
}

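/* Post the static (unless skipped) and progress params WQEs, making sure
 * there is enough contiguous room in the SQ for both beforehand.
 */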
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
			      struct mlx5e_ktls_offload_context_tx *priv_tx,
			      bool skip_static_post, bool fence_first_post)
{
	bool progress_fence = skip_static_post || !fence_first_post;
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 contig_wqebbs_room, pi;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs_room <
		     MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS))
		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);

	if (!skip_static_post)
		post_static_params(sq, priv_tx, fence_first_post);

	post_progress_params(sq, priv_tx, progress_fence);
}

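/* Resync metadata for an out-of-order packet: the record sequence number and
 * the record frags (sync_len bytes) that must be DUMPed to HW before it.
 */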
struct tx_sync_info {
	u64 rcd_sn;
	u32 sync_len;
	int nr_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

enum mlx5e_ktls_sync_retval {
	MLX5E_KTLS_SYNC_DONE,
	MLX5E_KTLS_SYNC_FAIL,
	MLX5E_KTLS_SYNC_SKIP_NO_DATA,
};

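/* Under the TLS TX context lock, find the record containing tcp_seq and
 * collect the frags from the record start up to tcp_seq, taking a page
 * reference on each. Returns whether the caller should proceed with the
 * resync (DONE), bypass offload for this packet (SKIP_NO_DATA), or drop
 * it (FAIL).
 */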
static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
		 u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
	enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
	struct tls_record_info *record;
	int remaining, i = 0;
	unsigned long flags;
	bool ends_before;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

	if (unlikely(!record)) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	/* There are the following cases:
	 * 1. packet ends before start marker: bypass offload.
	 * 2. packet starts before start marker and ends after it: drop,
	 *    not supported, breaks contract with kernel.
	 * 3. packet ends before tls record info starts: drop,
	 *    this packet was already acknowledged and its record info
	 *    was released.
	 */
	ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));

	if (unlikely(tls_record_is_start_marker(record))) {
		ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
		goto out;
	} else if (ends_before) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	info->sync_len = tcp_seq - tls_record_start_seq(record);
	remaining = info->sync_len;
	while (remaining > 0) {
		skb_frag_t *frag = &record->frags[i];

		get_page(skb_frag_page(frag));
		remaining -= skb_frag_size(frag);
		info->frags[i++] = *frag;
	}
	/* reduce the part which will be sent with the original SKB */
	if (remaining < 0)
		skb_frag_size_add(&info->frags[i - 1], remaining);
	info->nr_frags = i;
out:
	spin_unlock_irqrestore(&tx_ctx->lock, flags);
	return ret;
}

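/* Repost the connection params for a resync: if the record number changed,
 * update the cached rec_seq and repost the static params as well; the
 * progress params are always reposted.
 */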
static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      u64 rcd_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
	__be64 rn_be = cpu_to_be64(rcd_sn);
	bool skip_static_post;
	u16 rec_seq_sz;
	char *rec_seq;

	rec_seq = info->rec_seq;
	rec_seq_sz = sizeof(info->rec_seq);

	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
	if (!skip_static_post)
		memcpy(rec_seq, &rn_be, rec_seq_sz);

	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

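/* Post a DUMP WQE for a single frag: DMA-map it and send it on the given TIS
 * so the HW can advance its crypto state; the first DUMP of a resync is
 * fenced against the preceding params WQEs.
 */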
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_dump_wqe *wqe;
	dma_addr_t dma_addr = 0;
	u16 ds_cnt;
	int fsz;
	u16 pi;

	wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;

	cseg = &wqe->ctrl;
	dseg = &wqe->data;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8)  | MLX5_OPCODE_DUMP);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->tisn             = cpu_to_be32(tisn << 8);
	cseg->fm_ce_se         = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

	fsz = skb_frag_size(frag);
	dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
				    DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
		return -ENOMEM;

	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->lkey       = sq->mkey_be;
	dseg->byte_count = cpu_to_be32(fsz);
	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

	tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
	sq->pc += MLX5E_KTLS_DUMP_WQEBBS;

	return 0;
}

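/* Completion handler for DUMP WQEs: unmap the frag DMA, release the page
 * reference held by this DUMP and update the dump statistics.
 */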
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
					   struct mlx5e_tx_wqe_info *wi,
					   u32 *dma_fifo_cc)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_sq_dma *dma;

	if (!wi->resync_dump_frag_page)
		return;

	dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
	stats = sq->stats;

	mlx5e_tx_dma_unmap(sq->pdev, dma);
	put_page(wi->resync_dump_frag_page);
	stats->tls_dump_packets++;
	stats->tls_dump_bytes += wi->num_bytes;
}

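/* Post a fenced NOP so the subsequent data WQE does not start before the
 * previously posted params WQEs complete (used when no DUMP WQE was needed).
 */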
static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	tx_fill_wi(sq, pi, 1, 0, NULL);

	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

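/* Handle a packet that does not start at the expected TCP sequence: repost
 * the params with the record number of the record it belongs to, then DUMP
 * the record bytes preceding it (split to the SQ HW MTU) so the HW regains
 * its crypto state, or decide to bypass offload / drop the packet.
 */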
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
			 struct mlx5e_txqsq *sq,
			 int datalen,
			 u32 seq)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	struct mlx5_wq_cyc *wq = &sq->wq;
	enum mlx5e_ktls_sync_retval ret;
	struct tx_sync_info info = {};
	u16 contig_wqebbs_room, pi;
	u8 num_wqebbs;
	int i = 0;

	ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
	if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
		if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
			stats->tls_skip_no_sync_data++;
			return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
		}
		/* We might get here if a retransmission reaches the driver
		 * after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		stats->tls_drop_no_sync_data++;
		goto err_out;
	}

	stats->tls_ooo++;

	tx_post_resync_params(sq, priv_tx, info.rcd_sn);

	/* If no dump WQE was sent, we need to have a fence NOP WQE before the
	 * actual data xmit.
	 */
	if (!info.nr_frags) {
		tx_post_fence_nop(sq);
		return MLX5E_KTLS_SYNC_DONE;
	}

	num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);

	if (unlikely(contig_wqebbs_room < num_wqebbs))
		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);

	for (; i < info.nr_frags; i++) {
		unsigned int orig_fsz, frag_offset = 0, n = 0;
		skb_frag_t *f = &info.frags[i];

		orig_fsz = skb_frag_size(f);

		do {
			bool fence = !(i || frag_offset);
			unsigned int fsz;

			n++;
			fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
			skb_frag_size_set(f, fsz);
			if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
				page_ref_add(skb_frag_page(f), n - 1);
				goto err_out;
			}

			skb_frag_off_add(f, fsz);
			frag_offset += fsz;
		} while (frag_offset < orig_fsz);

		page_ref_add(skb_frag_page(f), n - 1);
	}

	return MLX5E_KTLS_SYNC_DONE;

err_out:
	for (; i < info.nr_frags; i++)
		/* The put_page() here undoes the page ref obtained in tx_sync_info_get().
		 * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
		 * released only upon their completions (or in mlx5e_free_txqsq_descs,
		 * if channel closes).
		 */
		put_page(skb_frag_page(&info.frags[i]));

	return MLX5E_KTLS_SYNC_FAIL;
}

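/* TX offload entry point: for skbs on a device-offloaded TLS socket, post the
 * connection params if still pending, resolve out-of-order packets, and point
 * the data WQE at the connection's TIS. Returns the skb to transmit, or NULL
 * if it was dropped.
 */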
struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
					 struct mlx5e_txqsq *sq,
					 struct sk_buff *skb,
					 struct mlx5e_tx_wqe **wqe, u16 *pi)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_sq_stats *stats = sq->stats;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct tls_context *tls_ctx;
	int datalen;
	u32 seq;

	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
		goto out;

	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	if (!datalen)
		goto out;

	tls_ctx = tls_get_ctx(skb->sk);
	if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
		goto err_out;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
		*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
		stats->tls_ctx++;
	}

	seq = ntohl(tcp_hdr(skb)->seq);
	if (unlikely(priv_tx->expected_seq != seq)) {
		enum mlx5e_ktls_sync_retval ret =
			mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

		switch (ret) {
		case MLX5E_KTLS_SYNC_DONE:
			*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
			break;
		case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
			if (likely(!skb->decrypted))
				goto out;
			WARN_ON_ONCE(1);
			/* fall-through */
		default: /* MLX5E_KTLS_SYNC_FAIL */
			goto err_out;
		}
	}

	priv_tx->expected_seq = seq + datalen;

	cseg = &(*wqe)->ctrl;
	cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);

	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	stats->tls_encrypted_bytes   += datalen;

out:
	return skb;

err_out:
	dev_kfree_skb_any(skb);
	return NULL;
}