/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>
/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static DEFINE_SPINLOCK(tls_device_lock);
static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	tls_ctx_free(ctx);
}
static void tls_device_gc_task(struct work_struct *work)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(gc_list);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_splice_init(&tls_device_gc_list, &gc_list);
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
		struct net_device *netdev = ctx->netdev;

		if (netdev && ctx->tx_conf == TLS_HW) {
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
			dev_put(netdev);
			ctx->netdev = NULL;
		}

		list_del(&ctx->list);
		tls_device_free_ctx(ctx);
	}
}
static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&tls_device_lock, flags);
	list_move_tail(&ctx->list, &tls_device_gc_list);

	/* schedule_work inside the spinlock
	 * to make sure tls_device_down waits for that work.
	 */
	schedule_work(&tls_device_gc_work);

	spin_unlock_irqrestore(&tls_device_lock, flags);
}
/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = dst->dev;
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}
static void destroy_record(struct tls_record_info *record)
{
	int nr_frags = record->num_frags;
	skb_frag_t *frag;

	while (nr_frags-- > 0) {
		frag = &record->frags[nr_frags];
		__skb_frag_unref(frag);
	}
	kfree(record);
}
static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq)) {
		ctx->retransmit_hint = NULL;
		list_del(&info->list);
		destroy_record(info);
		deleted_records++;
	}

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;

		list_del(&info->list);
		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}
/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
static void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	if (refcount_dec_and_test(&tls_ctx->refcount))
		tls_device_queue_ctx_destruction(tls_ctx);
}
void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}
static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
				 u32 seq)
{
	struct net_device *netdev;
	struct sk_buff *skb;
	u8 *rcd_sn;

	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	rcd_sn = tls_ctx->tx.rec_seq;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_TX);
	up_read(&device_offload_lock);

	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (frag->page.p == pfrag->page &&
	    frag->page_offset + frag->size == pfrag->offset) {
		frag->size += size;
	} else {
		++frag;
		frag->page.p = pfrag->page;
		frag->page_offset = pfrag->offset;
		frag->size = size;
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}
static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   struct page_frag *pfrag,
			   int flags,
			   unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	struct page_frag dummy_tag_frag;
	skb_frag_t *frag;
	int i;

	/* fill prepend */
	frag = &record->frags[0];
	tls_fill_prepend(ctx,
			 skb_frag_address(frag),
			 record->len - prot->prepend_size,
			 record_type,
			 prot->version);

	/* HW doesn't care about the data in the tag, because it fills it. */
	dummy_tag_frag.page = skb_frag_page(frag);
	dummy_tag_frag.offset = 0;

	tls_append_frag(record, &dummy_tag_frag, prot->tag_size);
	record->end_seq = tp->write_seq + record->len;
	spin_lock_irq(&offload_ctx->lock);
	list_add_tail(&record->list, &offload_ctx->records_list);
	spin_unlock_irq(&offload_ctx->lock);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    frag->size, frag->page_offset);
		sk_mem_charge(sk, frag->size);
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}
static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	frag->page_offset = pfrag->offset;
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}
static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			sk->sk_prot->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}
static int tls_push_data(struct sock *sk,
			 struct iov_iter *msg_iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
	int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
	struct tls_record_info *record = ctx->open_record;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	int copy, rc = 0;
	bool done = false;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	if (sk->sk_err)
		return -sk->sk_err;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag,
				       prot->prepend_size);
		if (rc) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;
		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
		copy = min_t(size_t, copy, (max_open_record_len - record->len));

		if (copy_from_iter_nocache(page_address(pfrag->page) +
					       pfrag->offset,
					   copy, msg_iter) != copy) {
			rc = -EFAULT;
			goto handle_error;
		}
		tls_append_frag(record, pfrag, copy);

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (more) {
				tls_ctx->pending_open_record_frags =
						!!record->num_frags;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     pfrag,
					     tls_push_record_flags,
					     record_type);
			if (rc < 0)
				break;
		}
	} while (!done);

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}
int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int rc;

	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_proccess_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size,
			   msg->msg_flags, record_type);

out:
	release_sock(sk);
	return rc;
}
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct iov_iter msg_iter;
	char *kaddr = kmap(page);
	struct kvec iov;
	int rc;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);

	if (flags & MSG_OOB) {
		rc = -ENOTSUPP;
		goto out;
	}

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
	rc = tls_push_data(sk, &msg_iter, size,
			   flags, TLS_RECORD_TYPE_DATA);
	kunmap(page);

out:
	release_sock(sk);
	return rc;
}
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry(&context->records_list,
					struct tls_record_info, list);
		record_sn = context->unacked_record_sn;
	}

	list_for_each_entry_from(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			return info;
		}
		record_sn++;
	}

	return NULL;
}
EXPORT_SYMBOL(tls_get_record);
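
/* Non-authoritative usage sketch (not part of this file's logic): a driver's
 * xmit path that hits a retransmitted or out-of-order TCP segment can use
 * tls_get_record() to find the TLS record covering that sequence number and
 * recover its record sequence number before rebuilding HW crypto state.
 * tls_get_ctx(), tls_offload_ctx_tx() and ctx->lock are real; the
 * my_rebuild_hw_state() hook and its "priv" argument are made-up placeholders.
 *
 *	struct tls_offload_context_tx *tx_ctx;
 *	struct tls_record_info *record;
 *	unsigned long flags;
 *	u64 record_sn;
 *
 *	tx_ctx = tls_offload_ctx_tx(tls_get_ctx(skb->sk));
 *	spin_lock_irqsave(&tx_ctx->lock, flags);
 *	record = tls_get_record(tx_ctx, ntohl(tcp_hdr(skb)->seq), &record_sn);
 *	if (record)
 *		my_rebuild_hw_state(priv, record, record_sn);
 *	spin_unlock_irqrestore(&tx_ctx->lock, flags);
 */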
static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter msg_iter;

	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}
void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx, MSG_DONTWAIT | MSG_NOSIGNAL);
		sk->sk_allocation = sk_allocation;
	}
}
static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u8 *rcd_sn)
{
	struct net_device *netdev;

	if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
		return;
	netdev = READ_ONCE(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
	clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
}
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx;
	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
	struct tls_prot_info *prot;
	u32 is_req_pending;
	s64 resync_req;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;

	prot = &tls_ctx->prot_info;
	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);

	switch (rx_ctx->resync_type) {
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
		resync_req = atomic64_read(&rx_ctx->resync_req);
		req_seq = resync_req >> 32;
		seq += TLS_HEADER_SIZE - 1;
		is_req_pending = resync_req;

		if (likely(!is_req_pending) || req_seq != seq ||
		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
			return;
		break;
	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
		if (likely(!rx_ctx->resync_nh_do_now))
			return;

		/* head of next rec is already in, note that the sock_inq will
		 * include the currently parsed message when called from parser
		 */
		if (tcp_inq(sk) > rcd_len)
			return;

		rx_ctx->resync_nh_do_now = 0;
		seq += rcd_len;
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
		break;
	}

	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}
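
/* Non-authoritative sketch of the driver side of the DRIVER_REQ resync type
 * handled above: when the device reports that it lost RX crypto state and
 * has spotted what looks like a record header, the driver asks the stack to
 * confirm and resync at that TCP sequence number. The resync-event plumbing
 * below is made up; tls_offload_rx_resync_request() is the real helper
 * (include/net/tls.h) whose request tls_device_rx_resync_new_rec() consumes.
 *
 *	static void mydrv_handle_rx_resync(struct sock *sk, __be32 hdr_tcp_seq)
 *	{
 *		tls_offload_rx_resync_request(sk, hdr_tcp_seq);
 *	}
 */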
static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
					   struct tls_offload_context_rx *ctx,
					   struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm;

	/* device will request resyncs by itself based on stream scan */
	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
		return;
	/* already scheduled */
	if (ctx->resync_nh_do_now)
		return;
	/* seen decrypted fragments since last fully-failed record */
	if (ctx->resync_nh_reset) {
		ctx->resync_nh_reset = 0;
		ctx->resync_nh.decrypted_failed = 1;
		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
		return;
	}

	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
		return;

	/* doing resync, bump the next target in case it fails */
	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
		ctx->resync_nh.decrypted_tgt *= 2;
	else
		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;

	rxm = strp_msg(skb);

	/* head of next rec is already in, parser will sync for us */
	if (tcp_inq(sk) > rxm->full_len) {
		ctx->resync_nh_do_now = 1;
	} else {
		struct tls_prot_info *prot = &tls_ctx->prot_info;
		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];

		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);

		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
				     rcd_sn);
	}
}
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
	struct sk_buff *skb_iter, *unused;
	struct scatterlist sg[1];
	char *orig_buf, *buf;

	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		err = nsg;
		goto free_buf;
	}

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE +
		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
	err = skb_copy_bits(skb, offset, buf,
			    TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
	if (err)
		goto free_buf;

	/* We are interested only in the decrypted data not the auth */
	err = decrypt_skb(sk, skb, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted) {
			err = skb_store_bits(skb, offset, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

		/* Practically all frags must belong to msg if reencrypt
		 * is needed with current strparser and coalescing logic,
		 * but strparser may "get optimized", so let's be safe.
		 */
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted) {
			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}
int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	int is_decrypted = skb->decrypted;
	int is_encrypted = !is_decrypted;
	struct sk_buff *skb_iter;

	/* Check if all the data is decrypted already */
	skb_walk_frags(skb, skb_iter) {
		is_decrypted &= skb_iter->decrypted;
		is_encrypted &= !skb_iter->decrypted;
	}

	ctx->sw.decrypted |= is_decrypted;

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise handle re-encryption of the
	 * partially decrypted record.
	 */
	if (is_decrypted) {
		ctx->resync_nh_reset = 1;
		return 0;
	}
	if (is_encrypted) {
		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
		return 0;
	}

	ctx->resync_nh_reset = 1;
	return tls_device_reencrypt(sk, skb);
}
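
/* Non-authoritative note on where the skb->decrypted bits tested above come
 * from: a driver's RX completion path marks each skb whose TLS payload the
 * device already decrypted, and GRO only merges skbs with matching bits, so
 * a mixed record reaching tls_device_decrypted() is the partial case that
 * needs re-encryption. The completion-descriptor helper below is made up;
 * skb->decrypted is the real bit (CONFIG_TLS_DEVICE).
 *
 *	if (mydrv_cqe_tls_decrypted(cqe))
 *		skb->decrypted = 1;
 */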
static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		ctx->netdev = netdev;
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		sk->sk_destruct = tls_device_sk_destruct;
	}
}
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	__be64 rcd_sn;
	int rc;

	if (!ctx)
		return -EINVAL;

	if (ctx->priv_ctx_tx) {
		rc = -EEXIST;
		goto out;
	}

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record) {
		rc = -ENOMEM;
		goto out;
	}

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	crypto_info = &ctx->crypto_send.info;
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -EOPNOTSUPP;
		goto free_offload_ctx;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	/* Sanity-check the rec_seq_size for stack allocations */
	if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size + prot->tag_size;
	prot->iv_size = iv_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto free_offload_ctx;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	prot->rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_rec_seq;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	/* We support starting offload on multiple sockets
	 * concurrently, so we only need a read lock here.
	 * This lock must precede get_netdev_for_sock to prevent races between
	 * NETDEV_DOWN and setsockopt.
	 */
	down_read(&device_offload_lock);
	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto release_lock;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 */
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_netdev;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	if (rc)
		goto release_netdev;

	tls_device_attach(ctx, sk, netdev);

	/* following this assignment tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);
	up_read(&device_offload_lock);
	goto out;

release_netdev:
	dev_put(netdev);
release_lock:
	up_read(&device_offload_lock);
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
out:
	return rc;
}
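
/* Non-authoritative userspace sketch of the path that reaches
 * tls_set_device_offload(): the application enables the TLS ULP and then
 * installs TX key material; if the egress netdev advertises
 * NETIF_F_HW_TLS_TX and tls_dev_add() succeeds, the socket ends up in
 * TLS_HW mode, otherwise the stack falls back to software crypto.
 * Error handling is omitted; the key/iv values are placeholders that would
 * come from the TLS handshake.
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	(fill ci.key / ci.iv / ci.salt / ci.rec_seq from the handshake)
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 */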
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
		return -EOPNOTSUPP;

	/* We support starting offload on multiple sockets
	 * concurrently, so we only need a read lock here.
	 * This lock must precede get_netdev_for_sock to prevent races between
	 * NETDEV_DOWN and setsockopt.
	 */
	down_read(&device_offload_lock);
	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto release_lock;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 */
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_netdev;
	}

	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_netdev;
	}
	context->resync_nh_reset = 1;

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, ctx, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	goto release_netdev;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_netdev:
	dev_put(netdev);
release_lock:
	up_read(&device_offload_lock);
	return rc;
}
void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (!netdev)
		goto out;

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		tls_ctx->netdev = NULL;
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}
static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		if (ctx->netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);
		WRITE_ONCE(ctx->netdev, NULL);
		smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
		while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
			usleep_range(10, 200);
		dev_put(netdev);
		list_del_init(&ctx->list);

		if (refcount_dec_and_test(&ctx->refcount))
			tls_device_free_ctx(ctx);
	}

	up_write(&device_offload_lock);

	flush_work(&tls_device_gc_work);

	return NOTIFY_DONE;
}
static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dev->tlsdev_ops &&
	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync)
			return NOTIFY_BAD;

		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}
static struct notifier_block tls_dev_notifier = {
	.notifier_call = tls_dev_event,
};
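
/* Non-authoritative sketch of what tls_dev_event() expects from a driver:
 * a netdev that sets NETIF_F_HW_TLS_TX/RX must provide tlsdev_ops with
 * tls_dev_add()/tls_dev_del() (and tls_dev_resync() when RX is offloaded).
 * The mydrv_* names are made up; the callbacks mirror the calls made from
 * this file.
 *
 *	static const struct tlsdev_ops mydrv_tlsdev_ops = {
 *		.tls_dev_add	= mydrv_tls_add,
 *		.tls_dev_del	= mydrv_tls_del,
 *		.tls_dev_resync	= mydrv_tls_resync,
 *	};
 *
 *	netdev->tlsdev_ops = &mydrv_tlsdev_ops;
 *	netdev->features  |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX;
 */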
void __init tls_device_init(void)
{
	register_netdevice_notifier(&tls_dev_notifier);
}
void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	flush_work(&tls_device_gc_work);
	clean_acked_data_flush();
}