drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <linux/debugfs.h>
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"

struct mlx5e_dump_wqe {
        struct mlx5_wqe_ctrl_seg ctrl;
        struct mlx5_wqe_data_seg data;
};

#define MLX5E_KTLS_DUMP_WQEBBS \
        (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))

static u8
mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags,
                          unsigned int sync_len)
{
        /* Given the MTU and sync_len, calculates an upper bound for the
         * number of DUMP WQEs needed for the TX resync of a record.
         */
        return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu));
}

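/* Worst-case SQ space (in WQEBBs) that must stay available so a single kTLS
 * TX resync always fits: the static and progress params WQEs, the maximum
 * number of DUMP WQEs for one record, and a trailing fence NOP.
 */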
u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
        u16 num_dumps, stop_room = 0;

        if (!mlx5e_is_ktls_tx(mdev))
                return 0;

        num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);

        stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
        stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
        stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);
        stop_room += 1; /* fence nop */

        return stop_room;
}

static void mlx5e_ktls_set_tisc(struct mlx5_core_dev *mdev, void *tisc)
{
        MLX5_SET(tisc, tisc, tls_en, 1);
        MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
        MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
}

static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
        u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};

        mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));

        return mlx5_core_create_tis(mdev, in, tisn);
}

static int mlx5e_ktls_create_tis_cb(struct mlx5_core_dev *mdev,
                                    struct mlx5_async_ctx *async_ctx,
                                    u32 *out, int outlen,
                                    mlx5_async_cbk_t callback,
                                    struct mlx5_async_work *context)
{
        u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};

        mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
        MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);

        return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
                                out, outlen, callback, context);
}

static int mlx5e_ktls_destroy_tis_cb(struct mlx5_core_dev *mdev, u32 tisn,
                                     struct mlx5_async_ctx *async_ctx,
                                     u32 *out, int outlen,
                                     mlx5_async_cbk_t callback,
                                     struct mlx5_async_work *context)
{
        u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};

        MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
        MLX5_SET(destroy_tis_in, in, tisn, tisn);

        return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
                                out, outlen, callback, context);
}

struct mlx5e_ktls_offload_context_tx {
        /* fast path */
        u32 expected_seq;
        u32 tisn;
        bool ctx_post_pending;
        /* control / resync */
        struct list_head list_node; /* member of the pool */
        union mlx5e_crypto_info crypto_info;
        struct tls_offload_context_tx *tx_ctx;
        struct mlx5_core_dev *mdev;
        struct mlx5e_tls_sw_stats *sw_stats;
        struct mlx5_crypto_dek *dek;
        u8 create_err : 1;
};

static void
mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
                           struct mlx5e_ktls_offload_context_tx *priv_tx)
{
        struct mlx5e_ktls_offload_context_tx **ctx =
                __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

        BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);

        *ctx = priv_tx;
}

static struct mlx5e_ktls_offload_context_tx *
mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
{
        struct mlx5e_ktls_offload_context_tx **ctx =
                __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

        return *ctx;
}

/* struct for callback API management */
struct mlx5e_async_ctx {
        struct mlx5_async_work context;
        struct mlx5_async_ctx *async_ctx;
        struct mlx5e_ktls_offload_context_tx *priv_tx;
        int err;
        union {
                u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
                u32 out_destroy[MLX5_ST_SZ_DW(destroy_tis_out)];
        };
};

struct mlx5e_bulk_async_ctx {
        struct mlx5_async_ctx async_ctx;
        DECLARE_FLEX_ARRAY(struct mlx5e_async_ctx, arr);
};

static struct mlx5e_bulk_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
{
        struct mlx5e_bulk_async_ctx *bulk_async;
        int sz;
        int i;

        sz = struct_size(bulk_async, arr, n);
        bulk_async = kvzalloc(sz, GFP_KERNEL);
        if (!bulk_async)
                return NULL;

        mlx5_cmd_init_async_ctx(mdev, &bulk_async->async_ctx);

        for (i = 0; i < n; i++)
                bulk_async->arr[i].async_ctx = &bulk_async->async_ctx;

        return bulk_async;
}

static void mlx5e_bulk_async_cleanup(struct mlx5e_bulk_async_ctx *bulk_async)
{
        mlx5_cmd_cleanup_async_ctx(&bulk_async->async_ctx);
        kvfree(bulk_async);
}

static void create_tis_callback(int status, struct mlx5_async_work *context)
{
        struct mlx5e_async_ctx *async =
                container_of(context, struct mlx5e_async_ctx, context);
        struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;

        if (status) {
                async->err = status;
                priv_tx->create_err = 1;
                return;
        }

        priv_tx->tisn = MLX5_GET(create_tis_out, async->out_create, tisn);
}

static void destroy_tis_callback(int status, struct mlx5_async_work *context)
{
        struct mlx5e_async_ctx *async =
                container_of(context, struct mlx5e_async_ctx, context);
        struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;

        kfree(priv_tx);
}

static struct mlx5e_ktls_offload_context_tx *
mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw_stats,
                       struct mlx5e_async_ctx *async)
{
        struct mlx5e_ktls_offload_context_tx *priv_tx;
        int err;

        priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
        if (!priv_tx)
                return ERR_PTR(-ENOMEM);

        priv_tx->mdev = mdev;
        priv_tx->sw_stats = sw_stats;

        if (!async) {
                err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
                if (err)
                        goto err_out;
        } else {
                async->priv_tx = priv_tx;
                err = mlx5e_ktls_create_tis_cb(mdev, async->async_ctx,
                                               async->out_create, sizeof(async->out_create),
                                               create_tis_callback, &async->context);
                if (err)
                        goto err_out;
        }

        return priv_tx;

err_out:
        kfree(priv_tx);
        return ERR_PTR(err);
}

static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv_tx,
                                      struct mlx5e_async_ctx *async)
{
        if (priv_tx->create_err) {
                kfree(priv_tx);
                return;
        }
        async->priv_tx = priv_tx;
        mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
                                  async->async_ctx,
                                  async->out_destroy, sizeof(async->out_destroy),
                                  destroy_tis_callback, &async->context);
}

static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
                                           struct list_head *list, int size)
{
        struct mlx5e_ktls_offload_context_tx *obj, *n;
        struct mlx5e_bulk_async_ctx *bulk_async;
        int i;

        bulk_async = mlx5e_bulk_async_init(mdev, size);
        if (!bulk_async)
                return;

        i = 0;
        list_for_each_entry_safe(obj, n, list, list_node) {
                mlx5e_tls_priv_tx_cleanup(obj, &bulk_async->arr[i]);
                i++;
        }

        mlx5e_bulk_async_cleanup(bulk_async);
}

/* Recycling pool API */

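/* The pool pre-creates TIS contexts in bulks of MLX5E_TLS_TX_POOL_BULK and
 * keeps their number between the LOW and HIGH watermarks, so that connection
 * add/del rarely has to create or destroy a TIS synchronously.
 */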
#define MLX5E_TLS_TX_POOL_BULK (16)
#define MLX5E_TLS_TX_POOL_HIGH (4 * 1024)
#define MLX5E_TLS_TX_POOL_LOW (MLX5E_TLS_TX_POOL_HIGH / 4)

struct mlx5e_tls_tx_pool {
        struct mlx5_core_dev *mdev;
        struct mlx5e_tls_sw_stats *sw_stats;
        struct mutex lock; /* Protects access to the pool */
        struct list_head list;
        size_t size;

        struct workqueue_struct *wq;
        struct work_struct create_work;
        struct work_struct destroy_work;
};

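/* Asynchronously creates a bulk of TIS contexts and splices them into the
 * pool. Re-arms itself while the pool is still at or below the low watermark.
 */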
static void create_work(struct work_struct *work)
{
        struct mlx5e_tls_tx_pool *pool =
                container_of(work, struct mlx5e_tls_tx_pool, create_work);
        struct mlx5e_ktls_offload_context_tx *obj;
        struct mlx5e_bulk_async_ctx *bulk_async;
        LIST_HEAD(local_list);
        int i, j, err = 0;

        bulk_async = mlx5e_bulk_async_init(pool->mdev, MLX5E_TLS_TX_POOL_BULK);
        if (!bulk_async)
                return;

        for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
                obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async->arr[i]);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        break;
                }
                list_add(&obj->list_node, &local_list);
        }

        for (j = 0; j < i; j++) {
                struct mlx5e_async_ctx *async = &bulk_async->arr[j];

                if (!err && async->err)
                        err = async->err;
        }
        atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
        mlx5e_bulk_async_cleanup(bulk_async);
        if (err)
                goto err_out;

        mutex_lock(&pool->lock);
        if (pool->size + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH) {
                mutex_unlock(&pool->lock);
                goto err_out;
        }
        list_splice(&local_list, &pool->list);
        pool->size += MLX5E_TLS_TX_POOL_BULK;
        if (pool->size <= MLX5E_TLS_TX_POOL_LOW)
                queue_work(pool->wq, work);
        mutex_unlock(&pool->lock);
        return;

err_out:
        mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, i);
        atomic64_add(i, &pool->sw_stats->tx_tls_pool_free);
}

static void destroy_work(struct work_struct *work)
{
        struct mlx5e_tls_tx_pool *pool =
                container_of(work, struct mlx5e_tls_tx_pool, destroy_work);
        struct mlx5e_ktls_offload_context_tx *obj;
        LIST_HEAD(local_list);
        int i = 0;

        mutex_lock(&pool->lock);
        if (pool->size < MLX5E_TLS_TX_POOL_HIGH) {
                mutex_unlock(&pool->lock);
                return;
        }

        list_for_each_entry(obj, &pool->list, list_node)
                if (++i == MLX5E_TLS_TX_POOL_BULK)
                        break;

        list_cut_position(&local_list, &pool->list, &obj->list_node);
        pool->size -= MLX5E_TLS_TX_POOL_BULK;
        if (pool->size >= MLX5E_TLS_TX_POOL_HIGH)
                queue_work(pool->wq, work);
        mutex_unlock(&pool->lock);

        mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
        atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
}

static struct mlx5e_tls_tx_pool *mlx5e_tls_tx_pool_init(struct mlx5_core_dev *mdev,
                                                        struct mlx5e_tls_sw_stats *sw_stats)
{
        struct mlx5e_tls_tx_pool *pool;

        BUILD_BUG_ON(MLX5E_TLS_TX_POOL_LOW + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH);

        pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return NULL;

        pool->wq = create_singlethread_workqueue("mlx5e_tls_tx_pool");
        if (!pool->wq)
                goto err_free;

        INIT_LIST_HEAD(&pool->list);
        mutex_init(&pool->lock);

        INIT_WORK(&pool->create_work, create_work);
        INIT_WORK(&pool->destroy_work, destroy_work);

        pool->mdev = mdev;
        pool->sw_stats = sw_stats;

        return pool;

err_free:
        kvfree(pool);
        return NULL;
}

static void mlx5e_tls_tx_pool_list_cleanup(struct mlx5e_tls_tx_pool *pool)
{
        while (pool->size > MLX5E_TLS_TX_POOL_BULK) {
                struct mlx5e_ktls_offload_context_tx *obj;
                LIST_HEAD(local_list);
                int i = 0;

                list_for_each_entry(obj, &pool->list, list_node)
                        if (++i == MLX5E_TLS_TX_POOL_BULK)
                                break;

                list_cut_position(&local_list, &pool->list, &obj->list_node);
                mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
                atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
                pool->size -= MLX5E_TLS_TX_POOL_BULK;
        }
        if (pool->size) {
                mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &pool->list, pool->size);
                atomic64_add(pool->size, &pool->sw_stats->tx_tls_pool_free);
        }
}

static void mlx5e_tls_tx_pool_cleanup(struct mlx5e_tls_tx_pool *pool)
{
        mlx5e_tls_tx_pool_list_cleanup(pool);
        destroy_workqueue(pool->wq);
        kvfree(pool);
}

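/* pool_push() returns a TX context to the pool and kicks the shrink work when
 * the high watermark is reached; pool_pop() hands one out, falling back to a
 * synchronous TIS creation when the pool is empty and kicking the refill work
 * when the pool drops to the low watermark.
 */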
static void pool_push(struct mlx5e_tls_tx_pool *pool, struct mlx5e_ktls_offload_context_tx *obj)
{
        mutex_lock(&pool->lock);
        list_add(&obj->list_node, &pool->list);
        if (++pool->size == MLX5E_TLS_TX_POOL_HIGH)
                queue_work(pool->wq, &pool->destroy_work);
        mutex_unlock(&pool->lock);
}

static struct mlx5e_ktls_offload_context_tx *pool_pop(struct mlx5e_tls_tx_pool *pool)
{
        struct mlx5e_ktls_offload_context_tx *obj;

        mutex_lock(&pool->lock);
        if (unlikely(pool->size == 0)) {
                /* pool is empty:
                 * - trigger the populating work, and
                 * - serve the current context via the regular blocking api.
                 */
                queue_work(pool->wq, &pool->create_work);
                mutex_unlock(&pool->lock);
                obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, NULL);
                if (!IS_ERR(obj))
                        atomic64_inc(&pool->sw_stats->tx_tls_pool_alloc);
                return obj;
        }

        obj = list_first_entry(&pool->list, struct mlx5e_ktls_offload_context_tx,
                               list_node);
        list_del(&obj->list_node);
        if (--pool->size == MLX5E_TLS_TX_POOL_LOW)
                queue_work(pool->wq, &pool->create_work);
        mutex_unlock(&pool->lock);
        return obj;
}

/* End of pool API */

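/* Set up TX offload state for a new kTLS connection: take a pre-created TIS
 * context from the pool, stash the crypto material, create a hardware DEK and
 * record the TCP sequence number from which offload starts.
 */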
int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
                      struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
{
        struct mlx5e_ktls_offload_context_tx *priv_tx;
        struct mlx5e_tls_tx_pool *pool;
        struct tls_context *tls_ctx;
        struct mlx5_crypto_dek *dek;
        struct mlx5e_priv *priv;
        int err;

        tls_ctx = tls_get_ctx(sk);
        priv = netdev_priv(netdev);
        pool = priv->tls->tx_pool;

        priv_tx = pool_pop(pool);
        if (IS_ERR(priv_tx))
                return PTR_ERR(priv_tx);

        switch (crypto_info->cipher_type) {
        case TLS_CIPHER_AES_GCM_128:
                priv_tx->crypto_info.crypto_info_128 =
                        *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
                break;
        case TLS_CIPHER_AES_GCM_256:
                priv_tx->crypto_info.crypto_info_256 =
                        *(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
                break;
        default:
                WARN_ONCE(1, "Unsupported cipher type %u\n",
                          crypto_info->cipher_type);
                err = -EOPNOTSUPP;
                goto err_pool_push;
        }

        dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
        if (IS_ERR(dek)) {
                err = PTR_ERR(dek);
                goto err_pool_push;
        }

        priv_tx->dek = dek;
        priv_tx->expected_seq = start_offload_tcp_sn;
        priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);

        mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);

        priv_tx->ctx_post_pending = true;
        atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);

        return 0;

err_pool_push:
        pool_push(pool, priv_tx);
        return err;
}

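/* Tear down a kTLS TX connection: destroy the hardware DEK and return the TIS
 * context to the pool for reuse.
 */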
void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
{
        struct mlx5e_ktls_offload_context_tx *priv_tx;
        struct mlx5e_tls_tx_pool *pool;
        struct mlx5e_priv *priv;

        priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
        priv = netdev_priv(netdev);
        pool = priv->tls->tx_pool;

        atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
        mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_tx->dek);
        pool_push(pool, priv_tx);
}

static void tx_fill_wi(struct mlx5e_txqsq *sq,
                       u16 pi, u8 num_wqebbs, u32 num_bytes,
                       struct page *page)
{
        struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

        *wi = (struct mlx5e_tx_wqe_info) {
                .num_wqebbs = num_wqebbs,
                .num_bytes = num_bytes,
                .resync_dump_frag_page = page,
        };
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
        bool ret = priv_tx->ctx_post_pending;

        priv_tx->ctx_post_pending = false;

        return ret;
}

static void
post_static_params(struct mlx5e_txqsq *sq,
                   struct mlx5e_ktls_offload_context_tx *priv_tx,
                   bool fence)
{
        struct mlx5e_set_tls_static_params_wqe *wqe;
        u16 pi, num_wqebbs;

        num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
        pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
        wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
        mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
                                       priv_tx->tisn,
                                       mlx5_crypto_dek_get_id(priv_tx->dek),
                                       0, fence, TLS_OFFLOAD_CTX_DIR_TX);
        tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
        sq->pc += num_wqebbs;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
                     struct mlx5e_ktls_offload_context_tx *priv_tx,
                     bool fence)
{
        struct mlx5e_set_tls_progress_params_wqe *wqe;
        u16 pi, num_wqebbs;

        num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
        pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
        wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
        mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0,
                                         TLS_OFFLOAD_CTX_DIR_TX);
        tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
        sq->pc += num_wqebbs;
}

static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

        tx_fill_wi(sq, pi, 1, 0, NULL);

        mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
                              struct mlx5e_ktls_offload_context_tx *priv_tx,
                              bool skip_static_post, bool fence_first_post)
{
        bool progress_fence = skip_static_post || !fence_first_post;

        if (!skip_static_post)
                post_static_params(sq, priv_tx, fence_first_post);

        post_progress_params(sq, priv_tx, progress_fence);
        tx_post_fence_nop(sq);
}

struct tx_sync_info {
        u64 rcd_sn;
        u32 sync_len;
        int nr_frags;
        skb_frag_t frags[MAX_SKB_FRAGS];
};

enum mlx5e_ktls_sync_retval {
        MLX5E_KTLS_SYNC_DONE,
        MLX5E_KTLS_SYNC_FAIL,
        MLX5E_KTLS_SYNC_SKIP_NO_DATA,
};

static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
                 u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
        struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
        enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
        struct tls_record_info *record;
        int remaining, i = 0;
        unsigned long flags;
        bool ends_before;

        spin_lock_irqsave(&tx_ctx->lock, flags);
        record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

        if (unlikely(!record)) {
                ret = MLX5E_KTLS_SYNC_FAIL;
                goto out;
        }

        /* There are the following cases:
         * 1. packet ends before start marker: bypass offload.
         * 2. packet starts before start marker and ends after it: drop,
         *    not supported, breaks contract with kernel.
         * 3. packet ends before tls record info starts: drop,
         *    this packet was already acknowledged and its record info
         *    was released.
         */
        ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));

        if (unlikely(tls_record_is_start_marker(record))) {
                ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
                goto out;
        } else if (ends_before) {
                ret = MLX5E_KTLS_SYNC_FAIL;
                goto out;
        }

        info->sync_len = tcp_seq - tls_record_start_seq(record);
        remaining = info->sync_len;
        while (remaining > 0) {
                skb_frag_t *frag = &record->frags[i];

                get_page(skb_frag_page(frag));
                remaining -= skb_frag_size(frag);
                info->frags[i++] = *frag;
        }
        /* reduce the part which will be sent with the original SKB */
        if (remaining < 0)
                skb_frag_size_add(&info->frags[i - 1], remaining);
        info->nr_frags = i;
out:
        spin_unlock_irqrestore(&tx_ctx->lock, flags);
        return ret;
}

static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
                      struct mlx5e_ktls_offload_context_tx *priv_tx,
                      u64 rcd_sn)
{
        __be64 rn_be = cpu_to_be64(rcd_sn);
        bool skip_static_post;
        u16 rec_seq_sz;
        char *rec_seq;

        switch (priv_tx->crypto_info.crypto_info.cipher_type) {
        case TLS_CIPHER_AES_GCM_128: {
                struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info.crypto_info_128;

                rec_seq = info->rec_seq;
                rec_seq_sz = sizeof(info->rec_seq);
                break;
        }
        case TLS_CIPHER_AES_GCM_256: {
                struct tls12_crypto_info_aes_gcm_256 *info = &priv_tx->crypto_info.crypto_info_256;

                rec_seq = info->rec_seq;
                rec_seq_sz = sizeof(info->rec_seq);
                break;
        }
        default:
                WARN_ONCE(1, "Unsupported cipher type %u\n",
                          priv_tx->crypto_info.crypto_info.cipher_type);
                return;
        }

        skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
        if (!skip_static_post)
                memcpy(rec_seq, &rn_be, rec_seq_sz);

        mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

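/* Post a single DUMP WQE pointing at one fragment of already-transmitted
 * record data, so the device can advance its crypto state up to the resync
 * point without retransmitting those bytes.
 */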
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn)
{
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_dump_wqe *wqe;
        dma_addr_t dma_addr = 0;
        u16 ds_cnt;
        int fsz;
        u16 pi;

        BUILD_BUG_ON(MLX5E_KTLS_DUMP_WQEBBS != 1);
        pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
        wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);

        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;

        cseg = &wqe->ctrl;
        dseg = &wqe->data;

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
        cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
        cseg->tis_tir_num = cpu_to_be32(tisn << 8);

        fsz = skb_frag_size(frag);
        dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                    DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                return -ENOMEM;

        dseg->addr = cpu_to_be64(dma_addr);
        dseg->lkey = sq->mkey_be;
        dseg->byte_count = cpu_to_be32(fsz);
        mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

        tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
        sq->pc += MLX5E_KTLS_DUMP_WQEBBS;

        return 0;
}

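/* Completion handler for a DUMP WQE: unmap its DMA, release the page
 * reference taken when the dump was posted, and update the dump counters.
 */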
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
                                           struct mlx5e_tx_wqe_info *wi,
                                           u32 *dma_fifo_cc)
{
        struct mlx5e_sq_stats *stats;
        struct mlx5e_sq_dma *dma;

        dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
        stats = sq->stats;

        mlx5e_tx_dma_unmap(sq->pdev, dma);
        put_page(wi->resync_dump_frag_page);
        stats->tls_dump_packets++;
        stats->tls_dump_bytes += wi->num_bytes;
}

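/* Handle an skb whose TCP sequence number does not match the expected one
 * (retransmission or out-of-order): re-post the params WQEs for the record
 * the skb belongs to and DUMP the part of that record preceding the skb, so
 * the device regains crypto sync before the skb is sent.
 */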
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
                         struct mlx5e_txqsq *sq,
                         int datalen,
                         u32 seq)
{
        enum mlx5e_ktls_sync_retval ret;
        struct tx_sync_info info = {};
        int i;

        ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
        if (unlikely(ret != MLX5E_KTLS_SYNC_DONE))
                /* We might get here with ret == FAIL if a retransmission
                 * reaches the driver after the relevant record is acked.
                 * It should be safe to drop the packet in this case
                 */
                return ret;

        tx_post_resync_params(sq, priv_tx, info.rcd_sn);

        for (i = 0; i < info.nr_frags; i++) {
                unsigned int orig_fsz, frag_offset = 0, n = 0;
                skb_frag_t *f = &info.frags[i];

                orig_fsz = skb_frag_size(f);

                do {
                        unsigned int fsz;

                        n++;
                        fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
                        skb_frag_size_set(f, fsz);
                        if (tx_post_resync_dump(sq, f, priv_tx->tisn)) {
                                page_ref_add(skb_frag_page(f), n - 1);
                                goto err_out;
                        }

                        skb_frag_off_add(f, fsz);
                        frag_offset += fsz;
                } while (frag_offset < orig_fsz);

                page_ref_add(skb_frag_page(f), n - 1);
        }

        return MLX5E_KTLS_SYNC_DONE;

err_out:
        for (; i < info.nr_frags; i++)
                /* The put_page() here undoes the page ref obtained in tx_sync_info_get().
                 * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
                 * released only upon their completions (or in mlx5e_free_txqsq_descs,
                 * if channel closes).
                 */
                put_page(skb_frag_page(&info.frags[i]));

        return MLX5E_KTLS_SYNC_FAIL;
}

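/* Per-skb TX hook for kTLS offload: post the connection's params WQEs on
 * first use, resync on an unexpected TCP sequence number, then hand the TIS
 * number back to the main TX path via @state so the skb is sent on it.
 */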
bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
                              struct sk_buff *skb,
                              struct mlx5e_accel_tx_tls_state *state)
{
        struct mlx5e_ktls_offload_context_tx *priv_tx;
        struct mlx5e_sq_stats *stats = sq->stats;
        struct net_device *tls_netdev;
        struct tls_context *tls_ctx;
        int datalen;
        u32 seq;

        datalen = skb->len - skb_tcp_all_headers(skb);
        if (!datalen)
                return true;

        mlx5e_tx_mpwqe_ensure_complete(sq);

        tls_ctx = tls_get_ctx(skb->sk);
        tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
        /* Don't WARN on NULL: if tls_device_down is running in parallel,
         * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was
         * true. Rather continue processing this packet.
         */
        if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev))
                goto err_out;

        priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

        if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx)))
                mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);

        seq = ntohl(tcp_hdr(skb)->seq);
        if (unlikely(priv_tx->expected_seq != seq)) {
                enum mlx5e_ktls_sync_retval ret =
                        mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

                stats->tls_ooo++;

                switch (ret) {
                case MLX5E_KTLS_SYNC_DONE:
                        break;
                case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
                        stats->tls_skip_no_sync_data++;
                        if (likely(!skb->decrypted))
                                goto out;
                        WARN_ON_ONCE(1);
                        goto err_out;
                case MLX5E_KTLS_SYNC_FAIL:
                        stats->tls_drop_no_sync_data++;
                        goto err_out;
                }
        }

        priv_tx->expected_seq = seq + datalen;

        state->tls_tisn = priv_tx->tisn;

        stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
        stats->tls_encrypted_bytes += datalen;

out:
        return true;

err_out:
        dev_kfree_skb_any(skb);
        return false;
}

static void mlx5e_tls_tx_debugfs_init(struct mlx5e_tls *tls,
                                      struct dentry *dfs_root)
{
        if (IS_ERR_OR_NULL(dfs_root))
                return;

        tls->debugfs.dfs_tx = debugfs_create_dir("tx", dfs_root);

        debugfs_create_size_t("pool_size", 0400, tls->debugfs.dfs_tx,
                              &tls->tx_pool->size);
}

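/* TX offload setup/teardown: the DEK pool is created whenever the device
 * supports kTLS (it may also serve RX), while the TIS recycling pool and its
 * debugfs entry exist only when TX offload is enabled.
 */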
int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
{
        struct mlx5_crypto_dek_pool *dek_pool;
        struct mlx5e_tls *tls = priv->tls;
        int err;

        if (!mlx5e_is_ktls_device(priv->mdev))
                return 0;

        /* DEK pool could be used by either or both of TX and RX. But we have to
         * put the creation here to avoid syndrome when doing devlink reload.
         */
        dek_pool = mlx5_crypto_dek_pool_create(priv->mdev, MLX5_ACCEL_OBJ_TLS_KEY);
        if (IS_ERR(dek_pool))
                return PTR_ERR(dek_pool);
        tls->dek_pool = dek_pool;

        if (!mlx5e_is_ktls_tx(priv->mdev))
                return 0;

        priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats);
        if (!priv->tls->tx_pool) {
                err = -ENOMEM;
                goto err_tx_pool_init;
        }

        mlx5e_tls_tx_debugfs_init(tls, tls->debugfs.dfs);

        return 0;

err_tx_pool_init:
        mlx5_crypto_dek_pool_destroy(dek_pool);
        return err;
}

void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
{
        if (!mlx5e_is_ktls_tx(priv->mdev))
                goto dek_pool_destroy;

        debugfs_remove_recursive(priv->tls->debugfs.dfs_tx);
        priv->tls->debugfs.dfs_tx = NULL;

        mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
        priv->tls->tx_pool = NULL;

dek_pool_destroy:
        if (mlx5e_is_ktls_device(priv->mdev))
                mlx5_crypto_dek_pool_destroy(priv->tls->dek_pool);
}