net/mlx5e: Return bool from TLS and IPSEC offloads
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c (linux-block.git)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <linux/tls.h>
#include "en.h"
#include "en/txrx.h"
#include "en_accel/ktls.h"

enum {
        MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2 = 0x2,
};

enum {
        MLX5E_ENCRYPTION_STANDARD_TLS = 0x1,
};

#define EXTRACT_INFO_FIELDS do { \
        salt    = info->salt;    \
        rec_seq = info->rec_seq; \
        salt_sz    = sizeof(info->salt);    \
        rec_seq_sz = sizeof(info->rec_seq); \
} while (0)

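/* Fill the TLS static params context consumed by the device for this TIS:
 * GCM IV (salt), initial record number, TLS version, encryption standard
 * and DEK index.
 */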
static void
fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
        struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
        char *initial_rn, *gcm_iv;
        u16 salt_sz, rec_seq_sz;
        char *salt, *rec_seq;
        u8 tls_version;

        EXTRACT_INFO_FIELDS;

        gcm_iv      = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
        initial_rn  = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);

        memcpy(gcm_iv,      salt,    salt_sz);
        memcpy(initial_rn,  rec_seq, rec_seq_sz);

        tls_version = MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2;

        MLX5_SET(tls_static_params, ctx, tls_version, tls_version);
        MLX5_SET(tls_static_params, ctx, const_1, 1);
        MLX5_SET(tls_static_params, ctx, const_2, 2);
        MLX5_SET(tls_static_params, ctx, encryption_standard,
                 MLX5E_ENCRYPTION_STANDARD_TLS);
        MLX5_SET(tls_static_params, ctx, dek_index, priv_tx->key_id);
}

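/* Build the UMR WQE that programs the TLS static params of the TIS.
 * The static params context is inlined in the WQE itself.
 */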
static void
build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
                    struct mlx5e_ktls_offload_context_tx *priv_tx,
                    bool fence)
{
        struct mlx5_wqe_ctrl_seg     *cseg  = &wqe->ctrl;
        struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;

#define STATIC_PARAMS_DS_CNT \
        DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_DS)

        cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR |
                                             (MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS << 24));
        cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
                                             STATIC_PARAMS_DS_CNT);
        cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
        cseg->tisn             = cpu_to_be32(priv_tx->tisn << 8);

        ucseg->flags = MLX5_UMR_INLINE;
        ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);

        fill_static_params_ctx(wqe->tls_static_params_ctx, priv_tx);
}

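/* The progress params context resets the device record tracker for the
 * TIS: record tracker in "start" state, authentication in "no offload"
 * state. It is delivered to the device with a SET_PSV WQE.
 */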
static void
fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
        MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn);
        MLX5_SET(tls_progress_params, ctx, record_tracker_state,
                 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
        MLX5_SET(tls_progress_params, ctx, auth_state,
                 MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD);
}

static void
build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
                      struct mlx5e_ktls_offload_context_tx *priv_tx,
                      bool fence)
{
        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

#define PROGRESS_PARAMS_DS_CNT \
        DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_DS)

        cseg->opmod_idx_opcode =
                cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV |
                            (MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS << 24));
        cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
                                             PROGRESS_PARAMS_DS_CNT);
        cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

        fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx);
}

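/* Record per-WQE bookkeeping info, used later by the completion handler. */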
static void tx_fill_wi(struct mlx5e_txqsq *sq,
                       u16 pi, u8 num_wqebbs, u32 num_bytes,
                       struct page *page)
{
        struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

        memset(wi, 0, sizeof(*wi));
        wi->num_wqebbs = num_wqebbs;
        wi->num_bytes  = num_bytes;
        wi->resync_dump_frag_page = page;
}

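/* The ctx_post_pending flag asks the xmit path to post the param WQEs
 * for this connection before sending its next packet.
 */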
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
        priv_tx->ctx_post_pending = true;
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
        bool ret = priv_tx->ctx_post_pending;

        priv_tx->ctx_post_pending = false;

        return ret;
}

static void
post_static_params(struct mlx5e_txqsq *sq,
                   struct mlx5e_ktls_offload_context_tx *priv_tx,
                   bool fence)
{
        struct mlx5e_umr_wqe *umr_wqe;
        u16 pi;

        pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
        umr_wqe = MLX5E_TLS_FETCH_UMR_WQE(sq, pi);
        build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
        tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
        sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
                     struct mlx5e_ktls_offload_context_tx *priv_tx,
                     bool fence)
{
        struct mlx5e_tx_wqe *wqe;
        u16 pi;

        pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
        wqe = MLX5E_TLS_FETCH_PROGRESS_WQE(sq, pi);
        build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
        tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
        sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
}

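/* Post the static and progress params WQEs for a connection. The static
 * params WQE may be skipped by the caller when the crypto context is
 * already up to date.
 */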
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
                              struct mlx5e_ktls_offload_context_tx *priv_tx,
                              bool skip_static_post, bool fence_first_post)
{
        bool progress_fence = skip_static_post || !fence_first_post;

        mlx5e_txqsq_get_next_pi(sq, MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS);

        if (!skip_static_post)
                post_static_params(sq, priv_tx, fence_first_post);

        post_progress_params(sq, priv_tx, progress_fence);
}

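/* Data needed to resynchronize the device: the record number, how many
 * bytes of the record precede the current packet (sync_len), and the
 * frags that hold those bytes.
 */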
struct tx_sync_info {
        u64 rcd_sn;
        u32 sync_len;
        int nr_frags;
        skb_frag_t frags[MAX_SKB_FRAGS];
};

enum mlx5e_ktls_sync_retval {
        MLX5E_KTLS_SYNC_DONE,
        MLX5E_KTLS_SYNC_FAIL,
        MLX5E_KTLS_SYNC_SKIP_NO_DATA,
};

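/* Look up the TLS record that the packet at tcp_seq belongs to and collect
 * the info needed to resync the device, taking a page reference on each
 * frag that will later be posted in DUMP WQEs.
 */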
static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
                 u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
        struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
        enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
        struct tls_record_info *record;
        int remaining, i = 0;
        unsigned long flags;
        bool ends_before;

        spin_lock_irqsave(&tx_ctx->lock, flags);
        record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

        if (unlikely(!record)) {
                ret = MLX5E_KTLS_SYNC_FAIL;
                goto out;
        }

        /* There are the following cases:
         * 1. packet ends before start marker: bypass offload.
         * 2. packet starts before start marker and ends after it: drop,
         *    not supported, breaks contract with kernel.
         * 3. packet ends before tls record info starts: drop,
         *    this packet was already acknowledged and its record info
         *    was released.
         */
        ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));

        if (unlikely(tls_record_is_start_marker(record))) {
                ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
                goto out;
        } else if (ends_before) {
                ret = MLX5E_KTLS_SYNC_FAIL;
                goto out;
        }

        info->sync_len = tcp_seq - tls_record_start_seq(record);
        remaining = info->sync_len;
        while (remaining > 0) {
                skb_frag_t *frag = &record->frags[i];

                get_page(skb_frag_page(frag));
                remaining -= skb_frag_size(frag);
                info->frags[i++] = *frag;
        }
        /* reduce the part which will be sent with the original SKB */
        if (remaining < 0)
                skb_frag_size_add(&info->frags[i - 1], remaining);
        info->nr_frags = i;
out:
        spin_unlock_irqrestore(&tx_ctx->lock, flags);
        return ret;
}

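/* Update the record number in the crypto context and repost the param
 * WQEs. The static params WQE is skipped if the record number did not
 * change.
 */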
static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
                      struct mlx5e_ktls_offload_context_tx *priv_tx,
                      u64 rcd_sn)
{
        struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
        __be64 rn_be = cpu_to_be64(rcd_sn);
        bool skip_static_post;
        u16 rec_seq_sz;
        char *rec_seq;

        rec_seq = info->rec_seq;
        rec_seq_sz = sizeof(info->rec_seq);

        skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
        if (!skip_static_post)
                memcpy(rec_seq, &rn_be, rec_seq_sz);

        mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

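/* Post a DUMP WQE for one frag of the partially-sent record, passing the
 * already-sent bytes to the device so it can compute the crypto state for
 * the current packet.
 */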
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
{
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_dump_wqe *wqe;
        dma_addr_t dma_addr = 0;
        u16 ds_cnt;
        int fsz;
        u16 pi;

        pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
        wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);

        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;

        cseg = &wqe->ctrl;
        dseg = &wqe->data;

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8)  | MLX5_OPCODE_DUMP);
        cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
        cseg->tisn             = cpu_to_be32(tisn << 8);
        cseg->fm_ce_se         = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

        fsz = skb_frag_size(frag);
        dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                    DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                return -ENOMEM;

        dseg->addr       = cpu_to_be64(dma_addr);
        dseg->lkey       = sq->mkey_be;
        dseg->byte_count = cpu_to_be32(fsz);
        mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

        tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
        sq->pc += MLX5E_KTLS_DUMP_WQEBBS;

        return 0;
}

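/* Completion handler for DUMP WQEs: unmap the DMA, release the page
 * reference taken when the DUMP was posted, and update stats.
 */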
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
                                           struct mlx5e_tx_wqe_info *wi,
                                           u32 *dma_fifo_cc)
{
        struct mlx5e_sq_stats *stats;
        struct mlx5e_sq_dma *dma;

        if (!wi->resync_dump_frag_page)
                return;

        dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
        stats = sq->stats;

        mlx5e_tx_dma_unmap(sq->pdev, dma);
        put_page(wi->resync_dump_frag_page);
        stats->tls_dump_packets++;
        stats->tls_dump_bytes += wi->num_bytes;
}

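/* Post a fenced NOP. Used when resync sent no DUMP WQEs, so that a fence
 * still separates the param WQEs from the data WQE that follows.
 */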
static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

        tx_fill_wi(sq, pi, 1, 0, NULL);

        mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

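/* Handle a packet whose TCP sequence does not match the expected sequence
 * (e.g. a retransmission): repost the params with the record number of the
 * record the packet belongs to, and DUMP the part of that record which
 * precedes the packet.
 */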
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
                         struct mlx5e_txqsq *sq,
                         int datalen,
                         u32 seq)
{
        struct mlx5e_sq_stats *stats = sq->stats;
        enum mlx5e_ktls_sync_retval ret;
        struct tx_sync_info info = {};
        u8 num_wqebbs;
        int i = 0;

        ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
        if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
                if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
                        stats->tls_skip_no_sync_data++;
                        return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
                }
                /* We might get here if a retransmission reaches the driver
                 * after the relevant record is acked.
                 * It should be safe to drop the packet in this case
                 */
                stats->tls_drop_no_sync_data++;
                goto err_out;
        }

        stats->tls_ooo++;

        tx_post_resync_params(sq, priv_tx, info.rcd_sn);

        /* If no dump WQE was sent, we need to have a fence NOP WQE before the
         * actual data xmit.
         */
        if (!info.nr_frags) {
                tx_post_fence_nop(sq);
                return MLX5E_KTLS_SYNC_DONE;
        }

        num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
        mlx5e_txqsq_get_next_pi(sq, num_wqebbs);

        for (; i < info.nr_frags; i++) {
                unsigned int orig_fsz, frag_offset = 0, n = 0;
                skb_frag_t *f = &info.frags[i];

                orig_fsz = skb_frag_size(f);

                do {
                        bool fence = !(i || frag_offset);
                        unsigned int fsz;

                        n++;
                        fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
                        skb_frag_size_set(f, fsz);
                        if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
                                page_ref_add(skb_frag_page(f), n - 1);
                                goto err_out;
                        }

                        skb_frag_off_add(f, fsz);
                        frag_offset += fsz;
                } while (frag_offset < orig_fsz);

                page_ref_add(skb_frag_page(f), n - 1);
        }

        return MLX5E_KTLS_SYNC_DONE;

err_out:
        for (; i < info.nr_frags; i++)
                /* The put_page() here undoes the page ref obtained in tx_sync_info_get().
                 * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
                 * released only upon their completions (or in mlx5e_free_txqsq_descs,
                 * if channel closes).
                 */
                put_page(skb_frag_page(&info.frags[i]));

        return MLX5E_KTLS_SYNC_FAIL;
}

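/* TX-path entry point for kTLS offload. Returns true when the caller should
 * go on and transmit the SKB (offloaded or not), and false when the SKB was
 * dropped; in that case it is freed here.
 */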
bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
                              struct sk_buff *skb, struct mlx5e_tx_wqe **wqe,
                              u16 *pi)
{
        struct mlx5e_ktls_offload_context_tx *priv_tx;
        struct mlx5e_sq_stats *stats = sq->stats;
        struct mlx5_wqe_ctrl_seg *cseg;
        struct tls_context *tls_ctx;
        int datalen;
        u32 seq;

        if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
                goto out;

        datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
        if (!datalen)
                goto out;

        tls_ctx = tls_get_ctx(skb->sk);
        if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
                goto err_out;

        priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

        if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
                mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
                *pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
                *wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
                stats->tls_ctx++;
        }

        seq = ntohl(tcp_hdr(skb)->seq);
        if (unlikely(priv_tx->expected_seq != seq)) {
                enum mlx5e_ktls_sync_retval ret =
                        mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

                switch (ret) {
                case MLX5E_KTLS_SYNC_DONE:
                        *pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
                        *wqe = MLX5E_TX_FETCH_WQE(sq, *pi);
                        break;
                case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
                        if (likely(!skb->decrypted))
                                goto out;
                        WARN_ON_ONCE(1);
                        /* fall-through */
                default: /* MLX5E_KTLS_SYNC_FAIL */
                        goto err_out;
                }
        }

        priv_tx->expected_seq = seq + datalen;

        cseg = &(*wqe)->ctrl;
        cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);

        stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
        stats->tls_encrypted_bytes   += datalen;

out:
        return true;

err_out:
        dev_kfree_skb_any(skb);
        return false;
}