// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"

struct mlx5e_dump_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_data_seg data;
};

#define MLX5E_KTLS_DUMP_WQEBBS \
	(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))

static u8
mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags,
			  unsigned int sync_len)
{
	/* Given the MTU and sync_len, calculates an upper bound for the
	 * number of DUMP WQEs needed for the TX resync of a record.
	 */
	return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu));
}

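/* Reserve room in the SQ for the worst-case kTLS transmission: the static
 * and progress params WQEs, the maximal number of DUMP WQEs a record
 * resync may require, and a trailing fence NOP.
 */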
u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	u16 num_dumps, stop_room = 0;

	if (!mlx5e_is_ktls_tx(mdev))
		return 0;

	num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);

	stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
	stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
	stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);
	stop_room += 1; /* fence nop */

	return stop_room;
}

static void mlx5e_ktls_set_tisc(struct mlx5_core_dev *mdev, void *tisc)
{
	MLX5_SET(tisc, tisc, tls_en, 1);
	MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
}

static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};

	mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));

	return mlx5_core_create_tis(mdev, in, tisn);
}

static int mlx5e_ktls_create_tis_cb(struct mlx5_core_dev *mdev,
				    struct mlx5_async_ctx *async_ctx,
				    u32 *out, int outlen,
				    mlx5_async_cbk_t callback,
				    struct mlx5_async_work *context)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};

	mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);

	return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
				out, outlen, callback, context);
}

static int mlx5e_ktls_destroy_tis_cb(struct mlx5_core_dev *mdev, u32 tisn,
				     struct mlx5_async_ctx *async_ctx,
				     u32 *out, int outlen,
				     mlx5_async_cbk_t callback,
				     struct mlx5_async_work *context)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);

	return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
				out, outlen, callback, context);
}

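/* Per-connection TX offload state. Objects are recycled through the TX pool
 * below (linked via list_node) rather than freed on connection teardown.
 */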
struct mlx5e_ktls_offload_context_tx {
	/* fast path */
	u32 expected_seq;
	u32 tisn;
	bool ctx_post_pending;
	/* control / resync */
	struct list_head list_node; /* member of the pool */
	struct tls12_crypto_info_aes_gcm_128 crypto_info;
	struct tls_offload_context_tx *tx_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_tls_sw_stats *sw_stats;
	u32 key_id;
	u8 create_err : 1;
};

static void
mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);

	*ctx = priv_tx;
}

static struct mlx5e_ktls_offload_context_tx *
mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	return *ctx;
}

/* struct for callback API management */
struct mlx5e_async_ctx {
	struct mlx5_async_work context;
	struct mlx5_async_ctx async_ctx;
	struct work_struct work;
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct completion complete;
	int err;
	union {
		u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
		u32 out_destroy[MLX5_ST_SZ_DW(destroy_tis_out)];
	};
};

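/* Allocate an array of n async command contexts, so that a bulk of TIS
 * create/destroy commands can be issued to the FW without blocking, and
 * waited for via the per-entry completion.
 */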
static struct mlx5e_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
{
	struct mlx5e_async_ctx *bulk_async;
	int i;

	bulk_async = kvcalloc(n, sizeof(struct mlx5e_async_ctx), GFP_KERNEL);
	if (!bulk_async)
		return NULL;

	for (i = 0; i < n; i++) {
		struct mlx5e_async_ctx *async = &bulk_async[i];

		mlx5_cmd_init_async_ctx(mdev, &async->async_ctx);
		init_completion(&async->complete);
	}

	return bulk_async;
}

static void mlx5e_bulk_async_cleanup(struct mlx5e_async_ctx *bulk_async, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		struct mlx5e_async_ctx *async = &bulk_async[i];

		mlx5_cmd_cleanup_async_ctx(&async->async_ctx);
	}
	kvfree(bulk_async);
}

static void create_tis_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5e_async_ctx *async =
		container_of(context, struct mlx5e_async_ctx, context);
	struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;

	if (status) {
		async->err = status;
		priv_tx->create_err = 1;
		goto out;
	}

	priv_tx->tisn = MLX5_GET(create_tis_out, async->out_create, tisn);
out:
	complete(&async->complete);
}

static void destroy_tis_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5e_async_ctx *async =
		container_of(context, struct mlx5e_async_ctx, context);
	struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;

	complete(&async->complete);
	kfree(priv_tx);
}

static struct mlx5e_ktls_offload_context_tx *
mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw_stats,
		       struct mlx5e_async_ctx *async)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	int err;

	priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
	if (!priv_tx)
		return ERR_PTR(-ENOMEM);

	priv_tx->mdev = mdev;
	priv_tx->sw_stats = sw_stats;

	if (!async) {
		err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
		if (err)
			goto err_out;
	} else {
		async->priv_tx = priv_tx;
		err = mlx5e_ktls_create_tis_cb(mdev, &async->async_ctx,
					       async->out_create, sizeof(async->out_create),
					       create_tis_callback, &async->context);
		if (err)
			goto err_out;
	}

	return priv_tx;

err_out:
	kfree(priv_tx);
	return ERR_PTR(err);
}

static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv_tx,
				      struct mlx5e_async_ctx *async)
{
	if (priv_tx->create_err) {
		complete(&async->complete);
		kfree(priv_tx);
		return;
	}
	async->priv_tx = priv_tx;
	mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
				  &async->async_ctx,
				  async->out_destroy, sizeof(async->out_destroy),
				  destroy_tis_callback, &async->context);
}

static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
					   struct list_head *list, int size)
{
	struct mlx5e_ktls_offload_context_tx *obj;
	struct mlx5e_async_ctx *bulk_async;
	int i;

	bulk_async = mlx5e_bulk_async_init(mdev, size);
	if (!bulk_async)
		return;

	i = 0;
	list_for_each_entry(obj, list, list_node) {
		mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
		i++;
	}

	for (i = 0; i < size; i++) {
		struct mlx5e_async_ctx *async = &bulk_async[i];

		wait_for_completion(&async->complete);
	}
	mlx5e_bulk_async_cleanup(bulk_async, size);
}

/* Recycling pool API */

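/* TIS objects are pre-allocated and recycled through this pool so that
 * connection add/del does not normally issue a blocking FW command. The pool
 * is refilled in bulks of MLX5E_TLS_TX_POOL_BULK when it drops to the LOW
 * watermark, and trimmed when it reaches the HIGH watermark.
 */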
#define MLX5E_TLS_TX_POOL_BULK (16)
#define MLX5E_TLS_TX_POOL_HIGH (4 * 1024)
#define MLX5E_TLS_TX_POOL_LOW (MLX5E_TLS_TX_POOL_HIGH / 4)

struct mlx5e_tls_tx_pool {
	struct mlx5_core_dev *mdev;
	struct mlx5e_tls_sw_stats *sw_stats;
	struct mutex lock; /* Protects access to the pool */
	struct list_head list;
	size_t size;

	struct workqueue_struct *wq;
	struct work_struct create_work;
	struct work_struct destroy_work;
};

static void create_work(struct work_struct *work)
{
	struct mlx5e_tls_tx_pool *pool =
		container_of(work, struct mlx5e_tls_tx_pool, create_work);
	struct mlx5e_ktls_offload_context_tx *obj;
	struct mlx5e_async_ctx *bulk_async;
	LIST_HEAD(local_list);
	int i, j, err = 0;

	bulk_async = mlx5e_bulk_async_init(pool->mdev, MLX5E_TLS_TX_POOL_BULK);
	if (!bulk_async)
		return;

	for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async[i]);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}
		list_add(&obj->list_node, &local_list);
	}

	for (j = 0; j < i; j++) {
		struct mlx5e_async_ctx *async = &bulk_async[j];

		wait_for_completion(&async->complete);
		if (!err && async->err)
			err = async->err;
	}
	atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
	mlx5e_bulk_async_cleanup(bulk_async, MLX5E_TLS_TX_POOL_BULK);
	if (err)
		goto err_out;

	mutex_lock(&pool->lock);
	if (pool->size + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH) {
		mutex_unlock(&pool->lock);
		goto err_out;
	}
	list_splice(&local_list, &pool->list);
	pool->size += MLX5E_TLS_TX_POOL_BULK;
	if (pool->size <= MLX5E_TLS_TX_POOL_LOW)
		queue_work(pool->wq, work);
	mutex_unlock(&pool->lock);
	return;

err_out:
	mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, i);
	atomic64_add(i, &pool->sw_stats->tx_tls_pool_free);
}

static void destroy_work(struct work_struct *work)
{
	struct mlx5e_tls_tx_pool *pool =
		container_of(work, struct mlx5e_tls_tx_pool, destroy_work);
	struct mlx5e_ktls_offload_context_tx *obj;
	LIST_HEAD(local_list);
	int i = 0;

	mutex_lock(&pool->lock);
	if (pool->size < MLX5E_TLS_TX_POOL_HIGH) {
		mutex_unlock(&pool->lock);
		return;
	}

	list_for_each_entry(obj, &pool->list, list_node)
		if (++i == MLX5E_TLS_TX_POOL_BULK)
			break;

	list_cut_position(&local_list, &pool->list, &obj->list_node);
	pool->size -= MLX5E_TLS_TX_POOL_BULK;
	if (pool->size >= MLX5E_TLS_TX_POOL_HIGH)
		queue_work(pool->wq, work);
	mutex_unlock(&pool->lock);

	mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
	atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
}

static struct mlx5e_tls_tx_pool *mlx5e_tls_tx_pool_init(struct mlx5_core_dev *mdev,
							struct mlx5e_tls_sw_stats *sw_stats)
{
	struct mlx5e_tls_tx_pool *pool;

	BUILD_BUG_ON(MLX5E_TLS_TX_POOL_LOW + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH);

	pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->wq = create_singlethread_workqueue("mlx5e_tls_tx_pool");
	if (!pool->wq)
		goto err_free;

	INIT_LIST_HEAD(&pool->list);
	mutex_init(&pool->lock);

	INIT_WORK(&pool->create_work, create_work);
	INIT_WORK(&pool->destroy_work, destroy_work);

	pool->mdev = mdev;
	pool->sw_stats = sw_stats;

	return pool;

err_free:
	kvfree(pool);
	return NULL;
}

static void mlx5e_tls_tx_pool_list_cleanup(struct mlx5e_tls_tx_pool *pool)
{
	while (pool->size > MLX5E_TLS_TX_POOL_BULK) {
		struct mlx5e_ktls_offload_context_tx *obj;
		LIST_HEAD(local_list);
		int i = 0;

		list_for_each_entry(obj, &pool->list, list_node)
			if (++i == MLX5E_TLS_TX_POOL_BULK)
				break;

		list_cut_position(&local_list, &pool->list, &obj->list_node);
		mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
		atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
		pool->size -= MLX5E_TLS_TX_POOL_BULK;
	}

	mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &pool->list, pool->size);
	atomic64_add(pool->size, &pool->sw_stats->tx_tls_pool_free);
}

static void mlx5e_tls_tx_pool_cleanup(struct mlx5e_tls_tx_pool *pool)
{
	mlx5e_tls_tx_pool_list_cleanup(pool);
	destroy_workqueue(pool->wq);
	kvfree(pool);
}

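/* pool_push() returns a context to the pool and schedules the destroy work
 * when the HIGH watermark is reached. pool_pop() takes a context out of the
 * pool, falling back to a blocking allocation when the pool is empty, and
 * schedules the refill work at the LOW watermark.
 */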
static void pool_push(struct mlx5e_tls_tx_pool *pool, struct mlx5e_ktls_offload_context_tx *obj)
{
	mutex_lock(&pool->lock);
	list_add(&obj->list_node, &pool->list);
	if (++pool->size == MLX5E_TLS_TX_POOL_HIGH)
		queue_work(pool->wq, &pool->destroy_work);
	mutex_unlock(&pool->lock);
}

static struct mlx5e_ktls_offload_context_tx *pool_pop(struct mlx5e_tls_tx_pool *pool)
{
	struct mlx5e_ktls_offload_context_tx *obj;

	mutex_lock(&pool->lock);
	if (unlikely(pool->size == 0)) {
		/* pool is empty:
		 * - trigger the populating work, and
		 * - serve the current context via the regular blocking api.
		 */
		queue_work(pool->wq, &pool->create_work);
		mutex_unlock(&pool->lock);
		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, NULL);
		if (!IS_ERR(obj))
			atomic64_inc(&pool->sw_stats->tx_tls_pool_alloc);
		return obj;
	}

	obj = list_first_entry(&pool->list, struct mlx5e_ktls_offload_context_tx,
			       list_node);
	list_del(&obj->list_node);
	if (--pool->size == MLX5E_TLS_TX_POOL_LOW)
		queue_work(pool->wq, &pool->create_work);
	mutex_unlock(&pool->lock);
	return obj;
}

/* End of pool API */

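/* Set up TX kTLS offload for a new connection: take a pre-created TIS from
 * the pool, program the crypto key, and mark the static/progress params
 * WQEs as pending so they are posted before the first data segment.
 */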
int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_tls_tx_pool *pool;
	struct tls_context *tls_ctx;
	struct mlx5e_priv *priv;
	int err;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	pool = priv->tls->tx_pool;

	priv_tx = pool_pop(pool);
	if (IS_ERR(priv_tx))
		return PTR_ERR(priv_tx);

	err = mlx5_ktls_create_key(pool->mdev, crypto_info, &priv_tx->key_id);
	if (err)
		goto err_create_key;

	priv_tx->expected_seq = start_offload_tcp_sn;
	priv_tx->crypto_info =
		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);

	mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);

	priv_tx->ctx_post_pending = true;
	atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);

	return 0;

err_create_key:
	pool_push(pool, priv_tx);
	return err;
}

void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_tls_tx_pool *pool;
	struct mlx5e_priv *priv;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
	priv = netdev_priv(netdev);
	pool = priv->tls->tx_pool;

	atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
	mlx5_ktls_destroy_key(priv_tx->mdev, priv_tx->key_id);
	pool_push(pool, priv_tx);
}

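/* Fill the WQE info entry of a WQE that carries no SKB (params, NOP and
 * DUMP WQEs), so the completion path can account for it correctly.
 */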
static void tx_fill_wi(struct mlx5e_txqsq *sq,
		       u16 pi, u8 num_wqebbs, u32 num_bytes,
		       struct page *page)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	*wi = (struct mlx5e_tx_wqe_info) {
		.num_wqebbs = num_wqebbs,
		.num_bytes = num_bytes,
		.resync_dump_frag_page = page,
	};
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	bool ret = priv_tx->ctx_post_pending;

	priv_tx->ctx_post_pending = false;

	return ret;
}

static void
post_static_params(struct mlx5e_txqsq *sq,
		   struct mlx5e_ktls_offload_context_tx *priv_tx,
		   bool fence)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
				       priv_tx->tisn, priv_tx->key_id, 0, fence,
				       TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
		     struct mlx5e_ktls_offload_context_tx *priv_tx,
		     bool fence)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0,
					 TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	tx_fill_wi(sq, pi, 1, 0, NULL);

	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

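/* Post the static and progress params WQEs that program the connection
 * state in the TIS, followed by a fence NOP. The static params WQE is
 * skipped when the caller indicates the crypto state is already current.
 */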
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
			      struct mlx5e_ktls_offload_context_tx *priv_tx,
			      bool skip_static_post, bool fence_first_post)
{
	bool progress_fence = skip_static_post || !fence_first_post;

	if (!skip_static_post)
		post_static_params(sq, priv_tx, fence_first_post);

	post_progress_params(sq, priv_tx, progress_fence);
	tx_post_fence_nop(sq);
}

struct tx_sync_info {
	u64 rcd_sn;
	u32 sync_len;
	int nr_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

enum mlx5e_ktls_sync_retval {
	MLX5E_KTLS_SYNC_DONE,
	MLX5E_KTLS_SYNC_FAIL,
	MLX5E_KTLS_SYNC_SKIP_NO_DATA,
};

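/* Find the TLS record that contains tcp_seq and collect the frags that must
 * be DUMPed to bring the HW crypto state in sync, taking a page reference on
 * each collected frag.
 */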
static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
		 u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
	enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
	struct tls_record_info *record;
	int remaining, i = 0;
	unsigned long flags;
	bool ends_before;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

	if (unlikely(!record)) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	/* There are the following cases:
	 * 1. packet ends before start marker: bypass offload.
	 * 2. packet starts before start marker and ends after it: drop,
	 *    not supported, breaks contract with kernel.
	 * 3. packet ends before tls record info starts: drop,
	 *    this packet was already acknowledged and its record info
	 *    was released.
	 */
	ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));

	if (unlikely(tls_record_is_start_marker(record))) {
		ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
		goto out;
	} else if (ends_before) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	info->sync_len = tcp_seq - tls_record_start_seq(record);
	remaining = info->sync_len;
	while (remaining > 0) {
		skb_frag_t *frag = &record->frags[i];

		get_page(skb_frag_page(frag));
		remaining -= skb_frag_size(frag);
		info->frags[i++] = *frag;
	}
	/* reduce the part which will be sent with the original SKB */
	if (remaining < 0)
		skb_frag_size_add(&info->frags[i - 1], remaining);
	info->nr_frags = i;
out:
	spin_unlock_irqrestore(&tx_ctx->lock, flags);
	return ret;
}

static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      u64 rcd_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
	__be64 rn_be = cpu_to_be64(rcd_sn);
	bool skip_static_post;
	u16 rec_seq_sz;
	char *rec_seq;

	rec_seq = info->rec_seq;
	rec_seq_sz = sizeof(info->rec_seq);

	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
	if (!skip_static_post)
		memcpy(rec_seq, &rn_be, rec_seq_sz);

	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

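/* Post a single DUMP WQE for one frag of resync data. The page reference
 * taken on the frag is released on WQE completion, in
 * mlx5e_ktls_tx_handle_resync_dump_comp().
 */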
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_dump_wqe *wqe;
	dma_addr_t dma_addr = 0;
	u16 ds_cnt;
	int fsz;
	u16 pi;

	BUILD_BUG_ON(MLX5E_KTLS_DUMP_WQEBBS != 1);
	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;

	cseg = &wqe->ctrl;
	dseg = &wqe->data;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->tis_tir_num      = cpu_to_be32(tisn << 8);

	fsz = skb_frag_size(frag);
	dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
				    DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
		return -ENOMEM;

	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->lkey       = sq->mkey_be;
	dseg->byte_count = cpu_to_be32(fsz);
	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

	tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
	sq->pc += MLX5E_KTLS_DUMP_WQEBBS;

	return 0;
}

void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
					   struct mlx5e_tx_wqe_info *wi,
					   u32 *dma_fifo_cc)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_sq_dma *dma;

	dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
	stats = sq->stats;

	mlx5e_tx_dma_unmap(sq->pdev, dma);
	put_page(wi->resync_dump_frag_page);
	stats->tls_dump_packets++;
	stats->tls_dump_bytes += wi->num_bytes;
}

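/* Handle a packet that does not start at the expected TCP sequence: re-post
 * the params WQEs and DUMP the record data that precedes the packet, so the
 * HW resumes encryption from a known state. Frags are split to the HW MTU.
 */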
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
			 struct mlx5e_txqsq *sq,
			 int datalen,
			 u32 seq)
{
	enum mlx5e_ktls_sync_retval ret;
	struct tx_sync_info info = {};
	int i;

	ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
	if (unlikely(ret != MLX5E_KTLS_SYNC_DONE))
		/* We might get here with ret == FAIL if a retransmission
		 * reaches the driver after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		return ret;

	tx_post_resync_params(sq, priv_tx, info.rcd_sn);

	for (i = 0; i < info.nr_frags; i++) {
		unsigned int orig_fsz, frag_offset = 0, n = 0;
		skb_frag_t *f = &info.frags[i];

		orig_fsz = skb_frag_size(f);

		do {
			unsigned int fsz;

			n++;
			fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
			skb_frag_size_set(f, fsz);
			if (tx_post_resync_dump(sq, f, priv_tx->tisn)) {
				page_ref_add(skb_frag_page(f), n - 1);
				goto err_out;
			}

			skb_frag_off_add(f, fsz);
			frag_offset += fsz;
		} while (frag_offset < orig_fsz);

		page_ref_add(skb_frag_page(f), n - 1);
	}

	return MLX5E_KTLS_SYNC_DONE;

err_out:
	for (; i < info.nr_frags; i++)
		/* The put_page() here undoes the page ref obtained in tx_sync_info_get().
		 * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
		 * released only upon their completions (or in mlx5e_free_txqsq_descs,
		 * if channel closes).
		 */
		put_page(skb_frag_page(&info.frags[i]));

	return MLX5E_KTLS_SYNC_FAIL;
}

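/* Main kTLS TX datapath hook: post pending params WQEs, handle out-of-order
 * (resync) packets, and report the TISN that the data WQE should target.
 */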
bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
			      struct sk_buff *skb,
			      struct mlx5e_accel_tx_tls_state *state)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_sq_stats *stats = sq->stats;
	struct net_device *tls_netdev;
	struct tls_context *tls_ctx;
	int datalen;
	u32 seq;

	datalen = skb->len - skb_tcp_all_headers(skb);
	if (!datalen)
		return true;

	mlx5e_tx_mpwqe_ensure_complete(sq);

	tls_ctx = tls_get_ctx(skb->sk);
	tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
	/* Don't WARN on NULL: if tls_device_down is running in parallel,
	 * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
	 * true. Rather continue processing this packet.
	 */
	if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev))
		goto err_out;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx)))
		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);

	seq = ntohl(tcp_hdr(skb)->seq);
	if (unlikely(priv_tx->expected_seq != seq)) {
		enum mlx5e_ktls_sync_retval ret =
			mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

		stats->tls_ooo++;

		switch (ret) {
		case MLX5E_KTLS_SYNC_DONE:
			break;
		case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
			stats->tls_skip_no_sync_data++;
			if (likely(!skb->decrypted))
				goto out;
			WARN_ON_ONCE(1);
			goto err_out;
		case MLX5E_KTLS_SYNC_FAIL:
			stats->tls_drop_no_sync_data++;
			goto err_out;
		}
	}

	priv_tx->expected_seq = seq + datalen;

	state->tls_tisn = priv_tx->tisn;

	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	stats->tls_encrypted_bytes += datalen;

out:
	return true;

err_out:
	dev_kfree_skb_any(skb);
	return false;
}

int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
{
	if (!mlx5e_is_ktls_tx(priv->mdev))
		return 0;

	priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats);
	if (!priv->tls->tx_pool)
		return -ENOMEM;

	return 0;
}

void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
{
	if (!mlx5e_is_ktls_tx(priv->mdev))
		return;

	mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
	priv->tls->tx_pool = NULL;
}