// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
//
// drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c

#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"

struct mlx5e_dump_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_data_seg data;
};

#define MLX5E_KTLS_DUMP_WQEBBS \
	(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
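
/* Note: struct mlx5e_dump_wqe is one 16B ctrl segment plus one 16B data
 * segment, so it fits in a single 64B WQE basic block (MLX5_SEND_WQE_BB)
 * and MLX5E_KTLS_DUMP_WQEBBS evaluates to 1; tx_post_resync_dump() below
 * BUILD_BUG_ONs on this.
 */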

static u8
mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags,
			  unsigned int sync_len)
{
	/* Given the MTU and sync_len, calculates an upper bound for the
	 * number of DUMP WQEs needed for the TX resync of a record.
	 */
	return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu));
}
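
/* Illustrative worst case (the MTU value is an assumption, not from this
 * file): with a ~1500B MTU, covering a full 16KB record (TLS_MAX_PAYLOAD_SIZE)
 * needs about 11 MTU-sized DUMP WQEs, plus one per page fragment (the nfrags
 * term above).
 */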

u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	u16 num_dumps, stop_room = 0;

	if (!mlx5e_is_ktls_tx(mdev))
		return 0;

	num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);

	stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
	stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
	stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);
	stop_room += 1; /* fence nop */

	return stop_room;
}
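
/* The stop room reserved above is the worst case this offload may post for a
 * single skb: static + progress params, a full record's worth of resync DUMP
 * WQEs, and the trailing fence NOP. The SQ stops accepting skbs while fewer
 * slots than this remain, so posting can never overrun the ring.
 */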

static void mlx5e_ktls_set_tisc(struct mlx5_core_dev *mdev, void *tisc)
{
	MLX5_SET(tisc, tisc, tls_en, 1);
	MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
}

static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};

	mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));

	return mlx5_core_create_tis(mdev, in, tisn);
}

static int mlx5e_ktls_create_tis_cb(struct mlx5_core_dev *mdev,
				    struct mlx5_async_ctx *async_ctx,
				    u32 *out, int outlen,
				    mlx5_async_cbk_t callback,
				    struct mlx5_async_work *context)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};

	mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);

	return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
				out, outlen, callback, context);
}

static int mlx5e_ktls_destroy_tis_cb(struct mlx5_core_dev *mdev, u32 tisn,
				     struct mlx5_async_ctx *async_ctx,
				     u32 *out, int outlen,
				     mlx5_async_cbk_t callback,
				     struct mlx5_async_work *context)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);

	return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
				out, outlen, callback, context);
}
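
/* The _cb variants above submit the FW command via mlx5_cmd_exec_cb() and
 * return immediately; completion is delivered to the callback. This is what
 * lets the pool code below create and destroy TISes in bulk without blocking
 * on each command round-trip.
 */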

struct mlx5e_ktls_offload_context_tx {
	/* fast path */
	u32 expected_seq;
	u32 tisn;
	bool ctx_post_pending;
	/* control / resync */
	struct list_head list_node; /* member of the pool */
	struct tls12_crypto_info_aes_gcm_128 crypto_info;
	struct tls_offload_context_tx *tx_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_tls_sw_stats *sw_stats;
	u32 key_id;
	u8 create_err : 1;
};
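
/* Only the fields above the "control / resync" marker are touched per packet
 * on the TX datapath; grouping them first presumably keeps the hot fields on
 * one cache line (an inference from the layout, not stated in the code).
 */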

static void
mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);

	*ctx = priv_tx;
}

static struct mlx5e_ktls_offload_context_tx *
mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	return *ctx;
}
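
/* The tls core reserves TLS_DRIVER_STATE_SIZE_TX bytes of driver-private
 * state per connection; only a pointer is stored there, which is what the
 * BUILD_BUG_ON on sizeof(priv_tx) (the pointer, not the struct) checks.
 */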

/* struct for callback API management */
struct mlx5e_async_ctx {
	struct mlx5_async_work context;
	struct mlx5_async_ctx async_ctx;
	struct work_struct work;
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct completion complete;
	int err;
	union {
		u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
		u32 out_destroy[MLX5_ST_SZ_DW(destroy_tis_out)];
	};
};

static struct mlx5e_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
{
	struct mlx5e_async_ctx *bulk_async;
	int i;

	bulk_async = kvcalloc(n, sizeof(struct mlx5e_async_ctx), GFP_KERNEL);
	if (!bulk_async)
		return NULL;

	for (i = 0; i < n; i++) {
		struct mlx5e_async_ctx *async = &bulk_async[i];

		mlx5_cmd_init_async_ctx(mdev, &async->async_ctx);
		init_completion(&async->complete);
	}

	return bulk_async;
}

static void mlx5e_bulk_async_cleanup(struct mlx5e_async_ctx *bulk_async, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		struct mlx5e_async_ctx *async = &bulk_async[i];

		mlx5_cmd_cleanup_async_ctx(&async->async_ctx);
	}
	kvfree(bulk_async);
}

static void create_tis_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5e_async_ctx *async =
		container_of(context, struct mlx5e_async_ctx, context);
	struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;

	if (status) {
		async->err = status;
		priv_tx->create_err = 1;
		goto out;
	}

	priv_tx->tisn = MLX5_GET(create_tis_out, async->out_create, tisn);
out:
	complete(&async->complete);
}

static void destroy_tis_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5e_async_ctx *async =
		container_of(context, struct mlx5e_async_ctx, context);
	struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;

	complete(&async->complete);
	kfree(priv_tx);
}
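
/* destroy_tis_callback() frees priv_tx itself, so a context handed to
 * mlx5e_ktls_destroy_tis_cb() must not be touched afterwards; waiters only
 * use the embedded completion, which is signalled before the kfree().
 */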

static struct mlx5e_ktls_offload_context_tx *
mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw_stats,
		       struct mlx5e_async_ctx *async)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	int err;

	priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
	if (!priv_tx)
		return ERR_PTR(-ENOMEM);

	priv_tx->mdev = mdev;
	priv_tx->sw_stats = sw_stats;

	if (!async) {
		err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
		if (err)
			goto err_out;
	} else {
		async->priv_tx = priv_tx;
		err = mlx5e_ktls_create_tis_cb(mdev, &async->async_ctx,
					       async->out_create, sizeof(async->out_create),
					       create_tis_callback, &async->context);
		if (err)
			goto err_out;
	}

	return priv_tx;

err_out:
	kfree(priv_tx);
	return ERR_PTR(err);
}

static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv_tx,
				      struct mlx5e_async_ctx *async)
{
	if (priv_tx->create_err) {
		complete(&async->complete);
		kfree(priv_tx);
		return;
	}
	async->priv_tx = priv_tx;
	mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
				  &async->async_ctx,
				  async->out_destroy, sizeof(async->out_destroy),
				  destroy_tis_callback, &async->context);
}

static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
					   struct list_head *list, int size)
{
	struct mlx5e_ktls_offload_context_tx *obj;
	struct mlx5e_async_ctx *bulk_async;
	int i;

	bulk_async = mlx5e_bulk_async_init(mdev, size);
	if (!bulk_async)
		return;

	i = 0;
	list_for_each_entry(obj, list, list_node) {
		mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
		i++;
	}

	for (i = 0; i < size; i++) {
		struct mlx5e_async_ctx *async = &bulk_async[i];

		wait_for_completion(&async->complete);
	}
	mlx5e_bulk_async_cleanup(bulk_async, size);
}

/* Recycling pool API */

#define MLX5E_TLS_TX_POOL_BULK		(16)
#define MLX5E_TLS_TX_POOL_HIGH		(4 * 1024)
#define MLX5E_TLS_TX_POOL_LOW		(MLX5E_TLS_TX_POOL_HIGH / 4)
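
/* Pool watermarks: TIS contexts are created and destroyed in batches of
 * MLX5E_TLS_TX_POOL_BULK. A background worker refills the pool when it
 * drains to LOW and trims it when it reaches HIGH, so connection add/del
 * normally just pops/pushes a ready TIS instead of waiting on FW commands.
 */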

struct mlx5e_tls_tx_pool {
	struct mlx5_core_dev *mdev;
	struct mlx5e_tls_sw_stats *sw_stats;
	struct mutex lock; /* Protects access to the pool */
	struct list_head list;
	size_t size;

	struct workqueue_struct *wq;
	struct work_struct create_work;
	struct work_struct destroy_work;
};

static void create_work(struct work_struct *work)
{
	struct mlx5e_tls_tx_pool *pool =
		container_of(work, struct mlx5e_tls_tx_pool, create_work);
	struct mlx5e_ktls_offload_context_tx *obj;
	struct mlx5e_async_ctx *bulk_async;
	LIST_HEAD(local_list);
	int i, j, err = 0;

	bulk_async = mlx5e_bulk_async_init(pool->mdev, MLX5E_TLS_TX_POOL_BULK);
	if (!bulk_async)
		return;

	for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async[i]);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}
		list_add(&obj->list_node, &local_list);
	}

	for (j = 0; j < i; j++) {
		struct mlx5e_async_ctx *async = &bulk_async[j];

		wait_for_completion(&async->complete);
		if (!err && async->err)
			err = async->err;
	}
	atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
	mlx5e_bulk_async_cleanup(bulk_async, MLX5E_TLS_TX_POOL_BULK);
	if (err)
		goto err_out;

	mutex_lock(&pool->lock);
	if (pool->size + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH) {
		mutex_unlock(&pool->lock);
		goto err_out;
	}
	list_splice(&local_list, &pool->list);
	pool->size += MLX5E_TLS_TX_POOL_BULK;
	if (pool->size <= MLX5E_TLS_TX_POOL_LOW)
		queue_work(pool->wq, work);
	mutex_unlock(&pool->lock);
	return;

err_out:
	mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, i);
	atomic64_add(i, &pool->sw_stats->tx_tls_pool_free);
}

static void destroy_work(struct work_struct *work)
{
	struct mlx5e_tls_tx_pool *pool =
		container_of(work, struct mlx5e_tls_tx_pool, destroy_work);
	struct mlx5e_ktls_offload_context_tx *obj;
	LIST_HEAD(local_list);
	int i = 0;

	mutex_lock(&pool->lock);
	if (pool->size < MLX5E_TLS_TX_POOL_HIGH) {
		mutex_unlock(&pool->lock);
		return;
	}

	list_for_each_entry(obj, &pool->list, list_node)
		if (++i == MLX5E_TLS_TX_POOL_BULK)
			break;

	list_cut_position(&local_list, &pool->list, &obj->list_node);
	pool->size -= MLX5E_TLS_TX_POOL_BULK;
	if (pool->size >= MLX5E_TLS_TX_POOL_HIGH)
		queue_work(pool->wq, work);
	mutex_unlock(&pool->lock);

	mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
	atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
}

static struct mlx5e_tls_tx_pool *mlx5e_tls_tx_pool_init(struct mlx5_core_dev *mdev,
							struct mlx5e_tls_sw_stats *sw_stats)
{
	struct mlx5e_tls_tx_pool *pool;

	BUILD_BUG_ON(MLX5E_TLS_TX_POOL_LOW + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH);

	pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->wq = create_singlethread_workqueue("mlx5e_tls_tx_pool");
	if (!pool->wq)
		goto err_free;

	INIT_LIST_HEAD(&pool->list);
	mutex_init(&pool->lock);

	INIT_WORK(&pool->create_work, create_work);
	INIT_WORK(&pool->destroy_work, destroy_work);

	pool->mdev = mdev;
	pool->sw_stats = sw_stats;

	return pool;

err_free:
	kvfree(pool);
	return NULL;
}

static void mlx5e_tls_tx_pool_list_cleanup(struct mlx5e_tls_tx_pool *pool)
{
	while (pool->size > MLX5E_TLS_TX_POOL_BULK) {
		struct mlx5e_ktls_offload_context_tx *obj;
		LIST_HEAD(local_list);
		int i = 0;

		list_for_each_entry(obj, &pool->list, list_node)
			if (++i == MLX5E_TLS_TX_POOL_BULK)
				break;

		list_cut_position(&local_list, &pool->list, &obj->list_node);
		mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
		atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
		pool->size -= MLX5E_TLS_TX_POOL_BULK;
	}
	if (pool->size) {
		mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &pool->list, pool->size);
		atomic64_add(pool->size, &pool->sw_stats->tx_tls_pool_free);
	}
}

static void mlx5e_tls_tx_pool_cleanup(struct mlx5e_tls_tx_pool *pool)
{
	mlx5e_tls_tx_pool_list_cleanup(pool);
	destroy_workqueue(pool->wq);
	kvfree(pool);
}

static void pool_push(struct mlx5e_tls_tx_pool *pool, struct mlx5e_ktls_offload_context_tx *obj)
{
	mutex_lock(&pool->lock);
	list_add(&obj->list_node, &pool->list);
	if (++pool->size == MLX5E_TLS_TX_POOL_HIGH)
		queue_work(pool->wq, &pool->destroy_work);
	mutex_unlock(&pool->lock);
}

static struct mlx5e_ktls_offload_context_tx *pool_pop(struct mlx5e_tls_tx_pool *pool)
{
	struct mlx5e_ktls_offload_context_tx *obj;

	mutex_lock(&pool->lock);
	if (unlikely(pool->size == 0)) {
		/* pool is empty:
		 * - trigger the populating work, and
		 * - serve the current context via the regular blocking api.
		 */
		queue_work(pool->wq, &pool->create_work);
		mutex_unlock(&pool->lock);
		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, NULL);
		if (!IS_ERR(obj))
			atomic64_inc(&pool->sw_stats->tx_tls_pool_alloc);
		return obj;
	}

	obj = list_first_entry(&pool->list, struct mlx5e_ktls_offload_context_tx,
			       list_node);
	list_del(&obj->list_node);
	if (--pool->size == MLX5E_TLS_TX_POOL_LOW)
		queue_work(pool->wq, &pool->create_work);
	mutex_unlock(&pool->lock);
	return obj;
}

/* End of pool API */
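
/* pool_pop() never fails just because the pool is empty: it kicks the refill
 * work and falls back to a synchronous TIS create. pool_push() recycles the
 * TIS as-is; its TLS state is reprogrammed by the params WQEs posted the next
 * time the context is used (ctx_post_pending in mlx5e_ktls_add_tx() below).
 */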

int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_tls_tx_pool *pool;
	struct tls_context *tls_ctx;
	struct mlx5e_priv *priv;
	int err;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	pool = priv->tls->tx_pool;

	priv_tx = pool_pop(pool);
	if (IS_ERR(priv_tx))
		return PTR_ERR(priv_tx);

	err = mlx5_ktls_create_key(pool->mdev, crypto_info, &priv_tx->key_id);
	if (err)
		goto err_create_key;

	priv_tx->expected_seq = start_offload_tcp_sn;
	priv_tx->crypto_info =
		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);

	mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);

	priv_tx->ctx_post_pending = true;
	atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);

	return 0;

err_create_key:
	pool_push(pool, priv_tx);
	return err;
}
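
/* Note the division of labor above: the per-connection key is the only FW
 * object created synchronously here (mlx5_ktls_create_key()); the TIS comes
 * pre-created from the pool, which is what keeps kTLS connection setup fast.
 */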

void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_tls_tx_pool *pool;
	struct mlx5e_priv *priv;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
	priv = netdev_priv(netdev);
	pool = priv->tls->tx_pool;

	atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
	mlx5_ktls_destroy_key(priv_tx->mdev, priv_tx->key_id);
	pool_push(pool, priv_tx);
}

static void tx_fill_wi(struct mlx5e_txqsq *sq,
		       u16 pi, u8 num_wqebbs, u32 num_bytes,
		       struct page *page)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	*wi = (struct mlx5e_tx_wqe_info) {
		.num_wqebbs = num_wqebbs,
		.num_bytes  = num_bytes,
		.resync_dump_frag_page = page,
	};
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	bool ret = priv_tx->ctx_post_pending;

	priv_tx->ctx_post_pending = false;

	return ret;
}

static void
post_static_params(struct mlx5e_txqsq *sq,
		   struct mlx5e_ktls_offload_context_tx *priv_tx,
		   bool fence)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
				       priv_tx->tisn, priv_tx->key_id, 0, fence,
				       TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
		     struct mlx5e_ktls_offload_context_tx *priv_tx,
		     bool fence)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0,
					 TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	tx_fill_wi(sq, pi, 1, 0, NULL);

	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
			      struct mlx5e_ktls_offload_context_tx *priv_tx,
			      bool skip_static_post, bool fence_first_post)
{
	bool progress_fence = skip_static_post || !fence_first_post;

	if (!skip_static_post)
		post_static_params(sq, priv_tx, fence_first_post);

	post_progress_params(sq, priv_tx, progress_fence);
	tx_post_fence_nop(sq);
}
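
/* Ordering sketch: the static params WQE programs the TIS with the key and
 * record state, the progress params WQE sets the expected TCP sequence, and
 * the fenced NOP ensures both are committed before subsequent data WQEs use
 * the TIS (inferred from the fencing flags; the file does not spell it out).
 */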

struct tx_sync_info {
	u64 rcd_sn;
	u32 sync_len;
	int nr_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

enum mlx5e_ktls_sync_retval {
	MLX5E_KTLS_SYNC_DONE,
	MLX5E_KTLS_SYNC_FAIL,
	MLX5E_KTLS_SYNC_SKIP_NO_DATA,
};

static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
		 u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
	enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
	struct tls_record_info *record;
	int remaining, i = 0;
	unsigned long flags;
	bool ends_before;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

	if (unlikely(!record)) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	/* There are the following cases:
	 * 1. packet ends before start marker: bypass offload.
	 * 2. packet starts before start marker and ends after it: drop,
	 *    not supported, breaks contract with kernel.
	 * 3. packet ends before tls record info starts: drop,
	 *    this packet was already acknowledged and its record info
	 *    was released.
	 */
	ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));

	if (unlikely(tls_record_is_start_marker(record))) {
		ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
		goto out;
	} else if (ends_before) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	info->sync_len = tcp_seq - tls_record_start_seq(record);
	remaining = info->sync_len;
	while (remaining > 0) {
		skb_frag_t *frag = &record->frags[i];

		get_page(skb_frag_page(frag));
		remaining -= skb_frag_size(frag);
		info->frags[i++] = *frag;
	}
	/* reduce the part which will be sent with the original SKB */
	if (remaining < 0)
		skb_frag_size_add(&info->frags[i - 1], remaining);
	info->nr_frags = i;
out:
	spin_unlock_irqrestore(&tx_ctx->lock, flags);
	return ret;
}
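
/* sync_len is the number of record bytes preceding this packet's payload;
 * those bytes are replayed to the device as DUMP WQEs so the AES-GCM engine
 * state reaches tcp_seq before the actual data is transmitted. The per-frag
 * get_page() keeps the record pages alive until the DUMP completions.
 */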

static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      u64 rcd_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
	__be64 rn_be = cpu_to_be64(rcd_sn);
	bool skip_static_post;
	u16 rec_seq_sz;
	char *rec_seq;

	rec_seq = info->rec_seq;
	rec_seq_sz = sizeof(info->rec_seq);

	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
	if (!skip_static_post)
		memcpy(rec_seq, &rn_be, rec_seq_sz);

	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_dump_wqe *wqe;
	dma_addr_t dma_addr = 0;
	u16 ds_cnt;
	int fsz;
	u16 pi;

	BUILD_BUG_ON(MLX5E_KTLS_DUMP_WQEBBS != 1);
	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;

	cseg = &wqe->ctrl;
	dseg = &wqe->data;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->tis_tir_num      = cpu_to_be32(tisn << 8);

	fsz = skb_frag_size(frag);
	dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
				    DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
		return -ENOMEM;

	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->lkey       = sq->mkey_be;
	dseg->byte_count = cpu_to_be32(fsz);
	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

	tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
	sq->pc += MLX5E_KTLS_DUMP_WQEBBS;

	return 0;
}

void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
					   struct mlx5e_tx_wqe_info *wi,
					   u32 *dma_fifo_cc)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_sq_dma *dma;

	dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
	stats = sq->stats;

	mlx5e_tx_dma_unmap(sq->pdev, dma);
	put_page(wi->resync_dump_frag_page);
	stats->tls_dump_packets++;
	stats->tls_dump_bytes += wi->num_bytes;
}

static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
			 struct mlx5e_txqsq *sq,
			 int datalen,
			 u32 seq)
{
	enum mlx5e_ktls_sync_retval ret;
	struct tx_sync_info info = {};
	int i;

	ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
	if (unlikely(ret != MLX5E_KTLS_SYNC_DONE))
		/* We might get here with ret == FAIL if a retransmission
		 * reaches the driver after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		return ret;

	tx_post_resync_params(sq, priv_tx, info.rcd_sn);

	for (i = 0; i < info.nr_frags; i++) {
		unsigned int orig_fsz, frag_offset = 0, n = 0;
		skb_frag_t *f = &info.frags[i];

		orig_fsz = skb_frag_size(f);

		do {
			unsigned int fsz;

			n++;
			fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
			skb_frag_size_set(f, fsz);
			if (tx_post_resync_dump(sq, f, priv_tx->tisn)) {
				page_ref_add(skb_frag_page(f), n - 1);
				goto err_out;
			}

			skb_frag_off_add(f, fsz);
			frag_offset += fsz;
		} while (frag_offset < orig_fsz);

		page_ref_add(skb_frag_page(f), n - 1);
	}

	return MLX5E_KTLS_SYNC_DONE;

err_out:
	for (; i < info.nr_frags; i++)
		/* The put_page() here undoes the page ref obtained in tx_sync_info_get().
		 * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
		 * released only upon their completions (or in mlx5e_free_txqsq_descs,
		 * if channel closes).
		 */
		put_page(skb_frag_page(&info.frags[i]));

	return MLX5E_KTLS_SYNC_FAIL;
}
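
/* Each frag is replayed in hw_mtu-sized chunks; n counts the DUMP WQEs
 * posted for a frag, and page_ref_add(n - 1) tops up the single reference
 * taken in tx_sync_info_get() so that every DUMP completion (see
 * mlx5e_ktls_tx_handle_resync_dump_comp()) can drop one.
 */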

bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
			      struct sk_buff *skb,
			      struct mlx5e_accel_tx_tls_state *state)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_sq_stats *stats = sq->stats;
	struct net_device *tls_netdev;
	struct tls_context *tls_ctx;
	int datalen;
	u32 seq;

	datalen = skb->len - skb_tcp_all_headers(skb);
	if (!datalen)
		return true;

	mlx5e_tx_mpwqe_ensure_complete(sq);

	tls_ctx = tls_get_ctx(skb->sk);
	tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
	/* Don't WARN on NULL: if tls_device_down is running in parallel,
	 * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
	 * true. Rather continue processing this packet.
	 */
	if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev))
		goto err_out;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx)))
		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);

	seq = ntohl(tcp_hdr(skb)->seq);
	if (unlikely(priv_tx->expected_seq != seq)) {
		enum mlx5e_ktls_sync_retval ret =
			mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

		stats->tls_ooo++;

		switch (ret) {
		case MLX5E_KTLS_SYNC_DONE:
			break;
		case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
			stats->tls_skip_no_sync_data++;
			if (likely(!skb->decrypted))
				goto out;
			WARN_ON_ONCE(1);
			goto err_out;
		case MLX5E_KTLS_SYNC_FAIL:
			stats->tls_drop_no_sync_data++;
			goto err_out;
		}
	}

	priv_tx->expected_seq = seq + datalen;

	state->tls_tisn = priv_tx->tisn;

	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	stats->tls_encrypted_bytes += datalen;

out:
	return true;

err_out:
	dev_kfree_skb_any(skb);
	return false;
}
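
/* Per the commit this listing reflects ("net/tls: Use RCU API to access
 * tls_ctx->netdev"), tls_ctx->netdev is read with rcu_dereference_bh(): the
 * xmit path runs in an RCU-bh read-side section, so a concurrent
 * tls_device_down() may clear the pointer safely, and a NULL result is
 * tolerated on purpose (see the comment in the function above).
 */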

int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
{
	if (!mlx5e_is_ktls_tx(priv->mdev))
		return 0;

	priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats);
	if (!priv->tls->tx_pool)
		return -ENOMEM;

	return 0;
}

void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
{
	if (!mlx5e_is_ktls_tx(priv->mdev))
		return;

	mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
	priv->tls->tx_pool = NULL;
}