net/tls: use RCU for the adder to the offload record list
[linux-block.git] / net / tls / tls_device.c
/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
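/* tls_device_lock protects both tls_device_list and tls_device_gc_list;
 * contexts are moved from the former to the latter by
 * tls_device_queue_ctx_destruction() and freed in tls_device_gc_task().
 */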
static DEFINE_SPINLOCK(tls_device_lock);

static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	tls_ctx_free(NULL, ctx);
}

static void tls_device_gc_task(struct work_struct *work)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(gc_list);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_splice_init(&tls_device_gc_list, &gc_list);
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
		struct net_device *netdev = ctx->netdev;

		if (netdev && ctx->tx_conf == TLS_HW) {
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
			dev_put(netdev);
			ctx->netdev = NULL;
		}

		list_del(&ctx->list);
		tls_device_free_ctx(ctx);
	}
}

static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&tls_device_lock, flags);
	list_move_tail(&ctx->list, &tls_device_gc_list);

	/* schedule_work inside the spinlock
	 * to make sure tls_device_down waits for that work.
	 */
	schedule_work(&tls_device_gc_work);

	spin_unlock_irqrestore(&tls_device_lock, flags);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = dst->dev;
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
	int i;

	for (i = 0; i < record->num_frags; i++)
		__skb_frag_unref(&record->frags[i]);
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

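/* Callback registered via clean_acked_data_enable(); invoked as TCP data is
 * acknowledged, to free TX records that are now fully acked.
 */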
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq))
		ctx->retransmit_hint = NULL;

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
static void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	if (refcount_dec_and_test(&tls_ctx->refcount))
		tls_device_queue_ctx_destruction(tls_ctx);
}

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

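/* Called from tls_push_record() when a TX resync is pending
 * (TLS_TX_SYNC_SCHED, typically set on a driver's behalf via
 * tls_offload_tx_resync_request()); hands the current TCP sequence and
 * record number to the driver so it can re-synchronize its TX state.
 */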
static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
				 u32 seq)
{
	struct net_device *netdev;
	struct sk_buff *skb;
	int err = 0;
	u8 *rcd_sn;

	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	rcd_sn = tls_ctx->tx.rec_seq;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (netdev)
		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
							 TLS_OFFLOAD_CTX_DIR_TX);
	up_read(&device_offload_lock);
	if (err)
		return;

	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}

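/* Append @size bytes at the current offset of @pfrag to the open record,
 * extending the record's last fragment when the new data is contiguous with
 * it and starting a new fragment (with its own page reference) otherwise.
 */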
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (skb_frag_page(frag) == pfrag->page &&
	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
		skb_frag_size_add(frag, size);
	} else {
		++frag;
		__skb_frag_set_page(frag, pfrag->page);
		skb_frag_off_set(frag, pfrag->offset);
		skb_frag_size_set(frag, size);
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   struct page_frag *pfrag,
			   int flags,
			   unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	struct page_frag dummy_tag_frag;
	skb_frag_t *frag;
	int i;

	/* fill prepend */
	frag = &record->frags[0];
	tls_fill_prepend(ctx,
			 skb_frag_address(frag),
			 record->len - prot->prepend_size,
			 record_type,
			 prot->version);

	/* HW doesn't care about the data in the tag, because it fills it. */
	dummy_tag_frag.page = skb_frag_page(frag);
	dummy_tag_frag.offset = 0;

	tls_append_frag(record, &dummy_tag_frag, prot->tag_size);
	record->end_seq = tp->write_seq + record->len;
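	/* All additions to the records list happen here, under the socket
	 * lock; concurrent readers in tls_get_record() walk the list under
	 * rcu_read_lock(), so the RCU list primitive is all that is needed
	 * for an append to race safely with a walk.
	 */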
	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
		sk_mem_charge(sk, skb_frag_size(frag));
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	skb_frag_off_set(frag, pfrag->offset);
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			sk->sk_prot->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

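/* Main TX path: copy data from @msg_iter into the open record's page frags
 * and close/push the record whenever it fills up, the fragment limit is
 * reached, or the caller's data ends without MSG_MORE.
 */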
static int tls_push_data(struct sock *sk,
			 struct iov_iter *msg_iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
	struct tls_record_info *record = ctx->open_record;
	int tls_push_record_flags;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	int copy, rc = 0;
	bool done = false;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	if (sk->sk_err)
		return -sk->sk_err;

	flags |= MSG_SENDPAGE_DECRYPTED;
	tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag,
				       prot->prepend_size);
		if (rc) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;
		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
		copy = min_t(size_t, copy, (max_open_record_len - record->len));

		if (copy_from_iter_nocache(page_address(pfrag->page) +
					   pfrag->offset,
					   copy, msg_iter) != copy) {
			rc = -EFAULT;
			goto handle_error;
		}
		tls_append_frag(record, pfrag, copy);

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (more) {
				tls_ctx->pending_open_record_frags =
						!!record->num_frags;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     pfrag,
					     tls_push_record_flags,
					     record_type);
			if (rc < 0)
				break;
		}
	} while (!done);

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int rc;

	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_proccess_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size,
			   msg->msg_flags, record_type);

out:
	release_sock(sk);
	return rc;
}

int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct iov_iter msg_iter;
	char *kaddr = kmap(page);
	struct kvec iov;
	int rc;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);

	if (flags & MSG_OOB) {
		rc = -ENOTSUPP;
		goto out;
	}

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
	rc = tls_push_data(sk, &msg_iter, size,
			   flags, TLS_RECORD_TYPE_DATA);
	kunmap(page);

out:
	release_sock(sk);
	return rc;
}

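/* Look up the record containing TCP sequence number @seq and return its
 * record sequence number through @p_record_sn. Exported so drivers can map
 * a retransmitted skb back to the TLS record it belongs to, roughly
 * (illustrative only, names are hypothetical):
 *
 *	spin_lock_irqsave(&tx_ctx->lock, flags);
 *	record = tls_get_record(tx_ctx, ntohl(tcp_hdr(skb)->seq), &rcd_sn);
 *	spin_unlock_irqrestore(&tx_ctx->lock, flags);
 *
 * Callers are expected to serialize against tls_icsk_clean_acked() freeing
 * records, e.g. by holding the offload context's lock as above.
 */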
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry_or_null(&context->records_list,
						struct tls_record_info, list);
		if (!info)
			return NULL;
		record_sn = context->unacked_record_sn;
	}

	/* We just need the _rcu for the READ_ONCE() */
	rcu_read_lock();
	list_for_each_entry_from_rcu(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			goto exit_rcu_unlock;
		}
		record_sn++;
	}
	info = NULL;

exit_rcu_unlock:
	rcu_read_unlock();
	return info;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter msg_iter;

	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx,
					MSG_DONTWAIT | MSG_NOSIGNAL |
					MSG_SENDPAGE_DECRYPTED);
		sk->sk_allocation = sk_allocation;
	}
}

static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u8 *rcd_sn)
{
	struct net_device *netdev;

	if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
		return;
	netdev = READ_ONCE(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
	clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
}

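/* Called for each new record's header. Two RX resync strategies exist:
 * TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ, where the device scans the stream and
 * asks the stack to confirm a record boundary it found, and
 * TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT, where the stack tells the device
 * where the next record starts once decryption failures are noticed.
 */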
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx;
	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
	struct tls_prot_info *prot;
	u32 is_req_pending;
	s64 resync_req;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;

	prot = &tls_ctx->prot_info;
	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);

	switch (rx_ctx->resync_type) {
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
		resync_req = atomic64_read(&rx_ctx->resync_req);
		req_seq = resync_req >> 32;
		seq += TLS_HEADER_SIZE - 1;
		is_req_pending = resync_req;

		if (likely(!is_req_pending) || req_seq != seq ||
		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
			return;
		break;
	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
		if (likely(!rx_ctx->resync_nh_do_now))
			return;

		/* head of next rec is already in, note that the sock_inq will
		 * include the currently parsed message when called from parser
		 */
		if (tcp_inq(sk) > rcd_len)
			return;

		rx_ctx->resync_nh_do_now = 0;
		seq += rcd_len;
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
		break;
	}

	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}

static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
					   struct tls_offload_context_rx *ctx,
					   struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm;

	/* device will request resyncs by itself based on stream scan */
	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
		return;
	/* already scheduled */
	if (ctx->resync_nh_do_now)
		return;
	/* seen decrypted fragments since last fully-failed record */
	if (ctx->resync_nh_reset) {
		ctx->resync_nh_reset = 0;
		ctx->resync_nh.decrypted_failed = 1;
		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
		return;
	}

	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
		return;

	/* doing resync, bump the next target in case it fails */
	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
		ctx->resync_nh.decrypted_tgt *= 2;
	else
		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;

	rxm = strp_msg(skb);

	/* head of next rec is already in, parser will sync for us */
	if (tcp_inq(sk) > rxm->full_len) {
		ctx->resync_nh_do_now = 1;
	} else {
		struct tls_prot_info *prot = &tls_ctx->prot_info;
		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];

		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);

		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
				     rcd_sn);
	}
}

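/* Software fallback for a partially decrypted record: decrypt the whole
 * record into a scratch buffer, then copy the plaintext back only over the
 * parts the device did not already decrypt.
 */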
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
	struct sk_buff *skb_iter, *unused;
	struct scatterlist sg[1];
	char *orig_buf, *buf;

	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		err = nsg;
		goto free_buf;
	}

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE +
		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
	err = skb_copy_bits(skb, offset, buf,
			    TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
	if (err)
		goto free_buf;

	/* We are interested only in the decrypted data not the auth */
	err = decrypt_skb(sk, skb, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted) {
			err = skb_store_bits(skb, offset, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

		/* Practically all frags must belong to msg if reencrypt
		 * is needed with current strparser and coalescing logic,
		 * but strparser may "get optimized", so let's be safe.
		 */
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted) {
			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}

int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	int is_decrypted = skb->decrypted;
	int is_encrypted = !is_decrypted;
	struct sk_buff *skb_iter;

	/* Check if all the data is decrypted already */
	skb_walk_frags(skb, skb_iter) {
		is_decrypted &= skb_iter->decrypted;
		is_encrypted &= !skb_iter->decrypted;
	}

	ctx->sw.decrypted |= is_decrypted;

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise handle reencrypt partially decrypted
	 * record.
	 */
	if (is_decrypted) {
		ctx->resync_nh_reset = 1;
		return 0;
	}
	if (is_encrypted) {
		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
		return 0;
	}

	ctx->resync_nh_reset = 1;
	return tls_device_reencrypt(sk, skb);
}

static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		ctx->netdev = netdev;
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		sk->sk_destruct = tls_device_sk_destruct;
	}
}

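/* Set up TX offload for @sk: validate the cipher, build the offload context
 * with a zero-length start marker record, initialize the SW crypto fallback,
 * then hand the connection to the device via tls_dev_add().
 */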
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	__be64 rcd_sn;
	int rc;

	if (!ctx)
		return -EINVAL;

	if (ctx->priv_ctx_tx)
		return -EEXIST;

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record)
		return -ENOMEM;

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	crypto_info = &ctx->crypto_send.info;
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -EOPNOTSUPP;
		goto free_offload_ctx;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	/* Sanity-check the rec_seq_size for stack allocations */
	if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size + prot->tag_size;
	prot->iv_size = iv_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto free_offload_ctx;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	prot->rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_rec_seq;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto disable_cad;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	if (rc)
		goto release_lock;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	/* following this assignment tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);

	return 0;

release_lock:
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
disable_cad:
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
	return rc;
}

int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
		return -EOPNOTSUPP;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_lock;
	}
	context->resync_nh_reset = 1;

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, ctx, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	dev_put(netdev);

	return 0;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_lock:
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
	return rc;
}

void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (!netdev)
		goto out;

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		tls_ctx->netdev = NULL;
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}

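/* NETDEV_DOWN handler: take the write lock to block new offload attempts,
 * unbind every context on @netdev from the device, wait for in-flight RX
 * resync calls to drain, then flush any pending context destruction work.
 */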
static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		if (ctx->netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);
		WRITE_ONCE(ctx->netdev, NULL);
		smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
		while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
			usleep_range(10, 200);
		dev_put(netdev);
		list_del_init(&ctx->list);

		if (refcount_dec_and_test(&ctx->refcount))
			tls_device_free_ctx(ctx);
	}

	up_write(&device_offload_lock);

	flush_work(&tls_device_gc_work);

	return NOTIFY_DONE;
}

1137
1138static int tls_dev_event(struct notifier_block *this, unsigned long event,
1139 void *ptr)
1140{
1141 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1142
c3f4a6c3
JK
1143 if (!dev->tlsdev_ops &&
1144 !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
e8f69799
IL
1145 return NOTIFY_DONE;
1146
1147 switch (event) {
1148 case NETDEV_REGISTER:
1149 case NETDEV_FEAT_CHANGE:
4799ac81 1150 if ((dev->features & NETIF_F_HW_TLS_RX) &&
eeb2efaf 1151 !dev->tlsdev_ops->tls_dev_resync)
4799ac81
BP
1152 return NOTIFY_BAD;
1153
e8f69799
IL
1154 if (dev->tlsdev_ops &&
1155 dev->tlsdev_ops->tls_dev_add &&
1156 dev->tlsdev_ops->tls_dev_del)
1157 return NOTIFY_DONE;
1158 else
1159 return NOTIFY_BAD;
1160 case NETDEV_DOWN:
1161 return tls_device_down(dev);
1162 }
1163 return NOTIFY_DONE;
1164}
1165
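/* For reference, a device advertising NETIF_F_HW_TLS_TX/RX is expected to
 * populate dev->tlsdev_ops roughly as below (a sketch; the driver names are
 * hypothetical, see include/net/tls.h for the real prototypes):
 *
 *	static const struct tlsdev_ops mydrv_tlsdev_ops = {
 *		.tls_dev_add	= mydrv_tls_add,
 *		.tls_dev_del	= mydrv_tls_del,
 *		.tls_dev_resync	= mydrv_tls_resync,
 *	};
 *
 * tls_dev_event() above enforces exactly this: add/del are mandatory, and
 * resync is mandatory when NETIF_F_HW_TLS_RX is set.
 */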
static struct notifier_block tls_dev_notifier = {
	.notifier_call = tls_dev_event,
};

void __init tls_device_init(void)
{
	register_netdevice_notifier(&tls_dev_notifier);
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	flush_work(&tls_device_gc_work);
	clean_acked_data_flush();
}