2 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
4 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
5 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
6 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
37 #include <linux/sched/signal.h>
38 #include <linux/module.h>
39 #include <crypto/aead.h>
41 #include <net/strparser.h>
44 #define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE
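/* Recursively count the scatterlist entries needed to map 'len' bytes of
 * an skb starting at 'offset': the linear head, the page frags and any
 * skbs hanging off the frag list.
 */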
46 static int __skb_nsg(struct sk_buff *skb, int offset, int len,
47 unsigned int recursion_level)
49 int start = skb_headlen(skb);
50 int i, chunk = start - offset;
51 struct sk_buff *frag_iter;
54 if (unlikely(recursion_level >= 24))
67 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
70 WARN_ON(start > offset + len);
72 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
86 if (unlikely(skb_has_frag_list(skb))) {
87 skb_walk_frags(skb, frag_iter) {
90 WARN_ON(start > offset + len);
92 end = start + frag_iter->len;
97 ret = __skb_nsg(frag_iter, offset - start, chunk,
99 if (unlikely(ret < 0))
114 /* Return the number of scatterlist elements required to completely map the
115 * skb, or -EMSGSIZE if the recursion depth is exceeded.
117 static int skb_nsg(struct sk_buff *skb, int offset, int len)
119 return __skb_nsg(skb, offset, len, 0);
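/* Completion callback for async RX AEAD requests: propagate any error to
 * the socket, drop the page references taken for the output scatterlist
 * and wake up a waiter once the last pending decryption has finished.
 */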
122 static void tls_decrypt_done(struct crypto_async_request *req, int err)
124 struct aead_request *aead_req = (struct aead_request *)req;
125 struct scatterlist *sgout = aead_req->dst;
126 struct tls_sw_context_rx *ctx;
127 struct tls_context *tls_ctx;
128 struct scatterlist *sg;
133 skb = (struct sk_buff *)req->data;
134 tls_ctx = tls_get_ctx(skb->sk);
135 ctx = tls_sw_ctx_rx(tls_ctx);
136 pending = atomic_dec_return(&ctx->decrypt_pending);
138 /* Propagate the error, if any, to the socket */
140 ctx->async_wait.err = err;
141 tls_err_abort(skb->sk, err);
144 /* After using skb->sk to propagate the socket through the crypto async
145 * callback, we need to NULL it again.
149 /* Release the skb, pages and memory allocated for the crypto req */
152 /* Skip the first S/G entry as it points to AAD */
153 for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
156 put_page(sg_page(sg));
161 if (!pending && READ_ONCE(ctx->async_notify))
162 complete(&ctx->async_wait.completion);
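/* Build and submit the AEAD decrypt request for one record. In the async
 * case completion is handled by tls_decrypt_done(); otherwise the result
 * is awaited here via ctx->async_wait.
 */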
165 static int tls_do_decryption(struct sock *sk,
167 struct scatterlist *sgin,
168 struct scatterlist *sgout,
171 struct aead_request *aead_req,
174 struct tls_context *tls_ctx = tls_get_ctx(sk);
175 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
178 aead_request_set_tfm(aead_req, ctx->aead_recv);
179 aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
180 aead_request_set_crypt(aead_req, sgin, sgout,
181 data_len + tls_ctx->rx.tag_size,
185 /* Use skb->sk to push sk through to the crypto async callback
186 * handler. This allows errors to be propagated up to the socket
187 * if needed. It _must_ be cleared in the async handler
188 * before kfree_skb is called. We _know_ skb->sk is NULL
189 * because it is a clone from strparser.
192 aead_request_set_callback(aead_req,
193 CRYPTO_TFM_REQ_MAY_BACKLOG,
194 tls_decrypt_done, skb);
195 atomic_inc(&ctx->decrypt_pending);
197 aead_request_set_callback(aead_req,
198 CRYPTO_TFM_REQ_MAY_BACKLOG,
199 crypto_req_done, &ctx->async_wait);
202 ret = crypto_aead_decrypt(aead_req);
203 if (ret == -EINPROGRESS) {
207 ret = crypto_wait_req(ret, &ctx->async_wait);
211 atomic_dec(&ctx->decrypt_pending);
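/* Shrink a scatterlist back to target_size bytes, dropping page
 * references and uncharging socket memory for everything trimmed off.
 */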
216 static void trim_sg(struct sock *sk, struct scatterlist *sg,
217 int *sg_num_elem, unsigned int *sg_size, int target_size)
219 int i = *sg_num_elem - 1;
220 int trim = *sg_size - target_size;
227 *sg_size = target_size;
228 while (trim >= sg[i].length) {
229 trim -= sg[i].length;
230 sk_mem_uncharge(sk, sg[i].length);
231 put_page(sg_page(&sg[i]));
238 sg[i].length -= trim;
239 sk_mem_uncharge(sk, trim);
242 *sg_num_elem = i + 1;
245 static void trim_both_sgl(struct sock *sk, int target_size)
247 struct tls_context *tls_ctx = tls_get_ctx(sk);
248 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
249 struct tls_rec *rec = ctx->open_rec;
251 trim_sg(sk, rec->sg_plaintext_data,
252 &rec->sg_plaintext_num_elem,
253 &rec->sg_plaintext_size,
257 target_size += tls_ctx->tx.overhead_size;
259 trim_sg(sk, rec->sg_encrypted_data,
260 &rec->sg_encrypted_num_elem,
261 &rec->sg_encrypted_size,
265 static int alloc_encrypted_sg(struct sock *sk, int len)
267 struct tls_context *tls_ctx = tls_get_ctx(sk);
268 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
269 struct tls_rec *rec = ctx->open_rec;
272 rc = sk_alloc_sg(sk, len,
273 rec->sg_encrypted_data, 0,
274 &rec->sg_encrypted_num_elem,
275 &rec->sg_encrypted_size, 0);
278 rec->sg_encrypted_num_elem = ARRAY_SIZE(rec->sg_encrypted_data);
283 static int alloc_plaintext_sg(struct sock *sk, int len)
285 struct tls_context *tls_ctx = tls_get_ctx(sk);
286 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
287 struct tls_rec *rec = ctx->open_rec;
290 rc = sk_alloc_sg(sk, len, rec->sg_plaintext_data, 0,
291 &rec->sg_plaintext_num_elem, &rec->sg_plaintext_size,
292 tls_ctx->pending_open_record_frags);
295 rec->sg_plaintext_num_elem = ARRAY_SIZE(rec->sg_plaintext_data);
300 static void free_sg(struct sock *sk, struct scatterlist *sg,
301 int *sg_num_elem, unsigned int *sg_size)
303 int i, n = *sg_num_elem;
305 for (i = 0; i < n; ++i) {
306 sk_mem_uncharge(sk, sg[i].length);
307 put_page(sg_page(&sg[i]));
313 static void tls_free_both_sg(struct sock *sk)
315 struct tls_context *tls_ctx = tls_get_ctx(sk);
316 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
317 struct tls_rec *rec = ctx->open_rec;
319 /* Return if there is no open record */
323 free_sg(sk, rec->sg_encrypted_data,
324 &rec->sg_encrypted_num_elem,
325 &rec->sg_encrypted_size);
327 free_sg(sk, rec->sg_plaintext_data,
328 &rec->sg_plaintext_num_elem,
329 &rec->sg_plaintext_size);
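/* Transmit records from tx_list: finish a partially sent record first,
 * then push every record already marked tx_ready, removing each one from
 * the list once it has been fully sent.
 */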
332 int tls_tx_records(struct sock *sk, int flags)
334 struct tls_context *tls_ctx = tls_get_ctx(sk);
335 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
336 struct tls_rec *rec, *tmp;
337 int tx_flags, rc = 0;
339 if (tls_is_partially_sent_record(tls_ctx)) {
340 rec = list_first_entry(&ctx->tx_list,
341 struct tls_rec, list);
344 tx_flags = rec->tx_flags;
348 rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
352 /* Full record has been transmitted.
353 * Remove the head of tx_list
355 list_del(&rec->list);
359 /* Tx all ready records */
360 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
361 if (READ_ONCE(rec->tx_ready)) {
363 tx_flags = rec->tx_flags;
367 rc = tls_push_sg(sk, tls_ctx,
368 &rec->sg_encrypted_data[0],
373 list_del(&rec->list);
381 if (rc < 0 && rc != -EAGAIN)
382 tls_err_abort(sk, EBADMSG);
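/* Completion callback for async TX AEAD requests: restore the record's
 * header space, free the plaintext pages, note any error, mark the record
 * tx_ready and kick the transmit worker if it sits at the head of tx_list.
 */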
387 static void tls_encrypt_done(struct crypto_async_request *req, int err)
389 struct aead_request *aead_req = (struct aead_request *)req;
390 struct sock *sk = req->data;
391 struct tls_context *tls_ctx = tls_get_ctx(sk);
392 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
397 rec = container_of(aead_req, struct tls_rec, aead_req);
399 rec->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
400 rec->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
402 free_sg(sk, rec->sg_plaintext_data,
403 &rec->sg_plaintext_num_elem, &rec->sg_plaintext_size);
405 /* Free the record if an error was previously set on the socket */
406 if (err || sk->sk_err) {
407 free_sg(sk, rec->sg_encrypted_data,
408 &rec->sg_encrypted_num_elem, &rec->sg_encrypted_size);
413 /* If an error is already set on the socket, return the same code */
415 ctx->async_wait.err = sk->sk_err;
417 ctx->async_wait.err = err;
418 tls_err_abort(sk, err);
423 struct tls_rec *first_rec;
425 /* Mark the record as ready for transmission */
426 smp_store_mb(rec->tx_ready, true);
428 /* If the just-encrypted record is at the head of tx_list, schedule tx */
429 first_rec = list_first_entry(&ctx->tx_list,
430 struct tls_rec, list);
431 if (rec == first_rec)
435 pending = atomic_dec_return(&ctx->encrypt_pending);
437 if (!pending && READ_ONCE(ctx->async_notify))
438 complete(&ctx->async_wait.completion);
443 /* Schedule the transmission */
444 if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
445 schedule_delayed_work(&ctx->tx_work.work, 1);
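/* Encrypt the open record: point the AEAD output past the TLS header,
 * queue the record on tx_list and submit the request. On synchronous
 * completion the record is marked tx_ready; on failure it is unlinked.
 */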
448 static int tls_do_encryption(struct sock *sk,
449 struct tls_context *tls_ctx,
450 struct tls_sw_context_tx *ctx,
451 struct aead_request *aead_req,
454 struct tls_rec *rec = ctx->open_rec;
457 rec->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
458 rec->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;
460 aead_request_set_tfm(aead_req, ctx->aead_send);
461 aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
462 aead_request_set_crypt(aead_req, rec->sg_aead_in,
464 data_len, tls_ctx->tx.iv);
466 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
467 tls_encrypt_done, sk);
469 /* Add the record to tx_list */
470 list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
471 atomic_inc(&ctx->encrypt_pending);
473 rc = crypto_aead_encrypt(aead_req);
474 if (!rc || rc != -EINPROGRESS) {
475 atomic_dec(&ctx->encrypt_pending);
476 rec->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
477 rec->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;
481 WRITE_ONCE(rec->tx_ready, true);
482 } else if (rc != -EINPROGRESS) {
483 list_del(&rec->list);
487 /* Unhook the record from the context if encryption did not fail */
488 ctx->open_rec = NULL;
489 tls_advance_record_sn(sk, &tls_ctx->tx);
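/* Close out the open record: terminate its scatterlists, build the AAD
 * and the TLS record header, encrypt it and then transmit whatever
 * records are ready.
 */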
493 static int tls_push_record(struct sock *sk, int flags,
494 unsigned char record_type)
496 struct tls_context *tls_ctx = tls_get_ctx(sk);
497 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
498 struct tls_rec *rec = ctx->open_rec;
499 struct aead_request *req;
505 rec->tx_flags = flags;
506 req = &rec->aead_req;
508 sg_mark_end(rec->sg_plaintext_data + rec->sg_plaintext_num_elem - 1);
509 sg_mark_end(rec->sg_encrypted_data + rec->sg_encrypted_num_elem - 1);
511 tls_make_aad(rec->aad_space, rec->sg_plaintext_size,
512 tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
515 tls_fill_prepend(tls_ctx,
516 page_address(sg_page(&rec->sg_encrypted_data[0])) +
517 rec->sg_encrypted_data[0].offset,
518 rec->sg_plaintext_size, record_type);
520 tls_ctx->pending_open_record_frags = 0;
522 rc = tls_do_encryption(sk, tls_ctx, ctx, req, rec->sg_plaintext_size);
523 if (rc == -EINPROGRESS)
526 free_sg(sk, rec->sg_plaintext_data, &rec->sg_plaintext_num_elem,
527 &rec->sg_plaintext_size);
530 tls_err_abort(sk, EBADMSG);
534 return tls_tx_records(sk, flags);
537 static int tls_sw_push_pending_record(struct sock *sk, int flags)
539 return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
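/* Pin the user pages backing the iov and map them directly into the
 * destination scatterlist so the AEAD can read from (TX) or write to (RX)
 * user memory without an intermediate copy.
 */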
542 static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
543 int length, int *pages_used,
544 unsigned int *size_used,
545 struct scatterlist *to, int to_max_pages,
548 struct page *pages[MAX_SKB_FRAGS];
553 unsigned int size = *size_used;
554 int num_elem = *pages_used;
560 maxpages = to_max_pages - num_elem;
565 copied = iov_iter_get_pages(from, pages,
573 iov_iter_advance(from, copied);
578 use = min_t(int, copied, PAGE_SIZE - offset);
580 sg_set_page(&to[num_elem],
581 pages[i], use, offset);
582 sg_unmark_end(&to[num_elem]);
584 sk_mem_charge(sk, use);
594 /* Mark the end in the last sg entry if newly added */
595 if (num_elem > *pages_used)
596 sg_mark_end(&to[num_elem - 1]);
599 iov_iter_revert(from, size - *size_used);
601 *pages_used = num_elem;
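/* Non-zero-copy TX path: copy data from the iov into the pages of the
 * open record's plaintext scatterlist.
 */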
606 static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
609 struct tls_context *tls_ctx = tls_get_ctx(sk);
610 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
611 struct tls_rec *rec = ctx->open_rec;
612 struct scatterlist *sg = rec->sg_plaintext_data;
615 for (i = tls_ctx->pending_open_record_frags;
616 i < rec->sg_plaintext_num_elem; ++i) {
619 page_address(sg_page(&sg[i])) + sg[i].offset,
620 copy, from) != copy) {
626 ++tls_ctx->pending_open_record_frags;
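/* Return the currently open record, or allocate and initialize a new one
 * with its AAD buffer chained in front of the plaintext (aead_in) and
 * ciphertext (aead_out) scatterlists.
 */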
636 struct tls_rec *get_rec(struct sock *sk)
638 struct tls_context *tls_ctx = tls_get_ctx(sk);
639 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
643 /* Return if we already have an open record */
645 return ctx->open_rec;
647 mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
649 rec = kzalloc(mem_size, sk->sk_allocation);
653 sg_init_table(&rec->sg_plaintext_data[0],
654 ARRAY_SIZE(rec->sg_plaintext_data));
655 sg_init_table(&rec->sg_encrypted_data[0],
656 ARRAY_SIZE(rec->sg_encrypted_data));
658 sg_init_table(rec->sg_aead_in, 2);
659 sg_set_buf(&rec->sg_aead_in[0], rec->aad_space,
660 sizeof(rec->aad_space));
661 sg_unmark_end(&rec->sg_aead_in[1]);
662 sg_chain(rec->sg_aead_in, 2, rec->sg_plaintext_data);
664 sg_init_table(rec->sg_aead_out, 2);
665 sg_set_buf(&rec->sg_aead_out[0], rec->aad_space,
666 sizeof(rec->aad_space));
667 sg_unmark_end(&rec->sg_aead_out[1]);
668 sg_chain(rec->sg_aead_out, 2, rec->sg_encrypted_data);
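/* sendmsg() handler for the software TX path: fill records from the user
 * iov (zero-copy where possible, otherwise by copying), encrypt each
 * record as it fills up and finally wait for async completions and
 * transmit what is ready.
 */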
675 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
677 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
678 struct tls_context *tls_ctx = tls_get_ctx(sk);
679 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
680 struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send);
681 bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
682 unsigned char record_type = TLS_RECORD_TYPE_DATA;
683 bool is_kvec = msg->msg_iter.type & ITER_KVEC;
684 bool eor = !(msg->msg_flags & MSG_MORE);
685 size_t try_to_copy, copied = 0;
695 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
700 /* If there is any pending write on the socket, wait for it to finish */
701 if (unlikely(sk->sk_write_pending)) {
702 ret = wait_on_pending_writer(sk, &timeo);
707 if (unlikely(msg->msg_controllen)) {
708 ret = tls_proccess_cmsg(sk, msg, &record_type);
710 if (ret == -EINPROGRESS)
712 else if (ret != -EAGAIN)
717 while (msg_data_left(msg)) {
729 orig_size = rec->sg_plaintext_size;
731 try_to_copy = msg_data_left(msg);
732 record_room = TLS_MAX_PAYLOAD_SIZE - rec->sg_plaintext_size;
733 if (try_to_copy >= record_room) {
734 try_to_copy = record_room;
738 required_size = rec->sg_plaintext_size + try_to_copy +
739 tls_ctx->tx.overhead_size;
741 if (!sk_stream_memory_free(sk))
742 goto wait_for_sndbuf;
745 ret = alloc_encrypted_sg(sk, required_size);
748 goto wait_for_memory;
750 /* Adjust try_to_copy according to the amount that was
751 * actually allocated. The difference is due to the
752 * max sg elements limit
754 try_to_copy -= required_size - rec->sg_encrypted_size;
758 if (!is_kvec && (full_record || eor) && !async_capable) {
759 ret = zerocopy_from_iter(sk, &msg->msg_iter,
760 try_to_copy, &rec->sg_plaintext_num_elem,
761 &rec->sg_plaintext_size,
762 rec->sg_plaintext_data,
763 ARRAY_SIZE(rec->sg_plaintext_data),
766 goto fallback_to_reg_send;
769 copied += try_to_copy;
770 ret = tls_push_record(sk, msg->msg_flags, record_type);
772 if (ret == -EINPROGRESS)
774 else if (ret != -EAGAIN)
779 fallback_to_reg_send:
780 trim_sg(sk, rec->sg_plaintext_data,
781 &rec->sg_plaintext_num_elem,
782 &rec->sg_plaintext_size,
786 required_size = rec->sg_plaintext_size + try_to_copy;
788 ret = alloc_plaintext_sg(sk, required_size);
791 goto wait_for_memory;
793 /* Adjust try_to_copy according to the amount that was
794 * actually allocated. The difference is due to the
795 * max sg elements limit
797 try_to_copy -= required_size - rec->sg_plaintext_size;
800 trim_sg(sk, rec->sg_encrypted_data,
801 &rec->sg_encrypted_num_elem,
802 &rec->sg_encrypted_size,
803 rec->sg_plaintext_size +
804 tls_ctx->tx.overhead_size);
807 ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
811 copied += try_to_copy;
812 if (full_record || eor) {
813 ret = tls_push_record(sk, msg->msg_flags, record_type);
815 if (ret == -EINPROGRESS)
817 else if (ret != -EAGAIN)
825 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
827 ret = sk_stream_wait_memory(sk, &timeo);
830 trim_both_sgl(sk, orig_size);
834 if (rec->sg_encrypted_size < required_size)
835 goto alloc_encrypted;
837 goto alloc_plaintext;
843 /* Wait for pending encryptions to complete */
844 smp_store_mb(ctx->async_notify, true);
846 if (atomic_read(&ctx->encrypt_pending))
847 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
849 reinit_completion(&ctx->async_wait.completion);
851 WRITE_ONCE(ctx->async_notify, false);
853 if (ctx->async_wait.err) {
854 ret = ctx->async_wait.err;
859 /* Transmit if any encryptions have completed */
860 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
861 cancel_delayed_work(&ctx->tx_work.work);
862 tls_tx_records(sk, msg->msg_flags);
866 ret = sk_stream_error(sk, msg->msg_flags, ret);
869 return copied ? copied : ret;
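/* sendpage() handler: reference the caller's page directly from the open
 * record's plaintext scatterlist and push the record once it is full or
 * the data ends.
 */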
872 int tls_sw_sendpage(struct sock *sk, struct page *page,
873 int offset, size_t size, int flags)
875 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
876 struct tls_context *tls_ctx = tls_get_ctx(sk);
877 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
878 unsigned char record_type = TLS_RECORD_TYPE_DATA;
879 size_t orig_size = size;
880 struct scatterlist *sg;
888 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
889 MSG_SENDPAGE_NOTLAST))
892 /* No MSG_EOR from splice, only look at MSG_MORE */
893 eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
897 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
899 /* If there is any pending write on the socket, wait for it to finish */
900 if (unlikely(sk->sk_write_pending)) {
901 ret = wait_on_pending_writer(sk, &timeo);
906 /* Call the sk_stream functions to manage the sndbuf mem. */
908 size_t copy, required_size;
922 record_room = TLS_MAX_PAYLOAD_SIZE - rec->sg_plaintext_size;
924 if (copy >= record_room) {
928 required_size = rec->sg_plaintext_size + copy +
929 tls_ctx->tx.overhead_size;
931 if (!sk_stream_memory_free(sk))
932 goto wait_for_sndbuf;
934 ret = alloc_encrypted_sg(sk, required_size);
937 goto wait_for_memory;
939 /* Adjust copy according to the amount that was
940 * actually allocated. The difference is due to the
941 * max sg elements limit
943 copy -= required_size - rec->sg_plaintext_size;
948 sg = rec->sg_plaintext_data + rec->sg_plaintext_num_elem;
949 sg_set_page(sg, page, copy, offset);
952 rec->sg_plaintext_num_elem++;
954 sk_mem_charge(sk, copy);
957 rec->sg_plaintext_size += copy;
958 tls_ctx->pending_open_record_frags = rec->sg_plaintext_num_elem;
960 if (full_record || eor ||
961 rec->sg_plaintext_num_elem ==
962 ARRAY_SIZE(rec->sg_plaintext_data)) {
963 ret = tls_push_record(sk, flags, record_type);
965 if (ret == -EINPROGRESS)
967 else if (ret != -EAGAIN)
973 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
975 ret = sk_stream_wait_memory(sk, &timeo);
977 trim_both_sgl(sk, rec->sg_plaintext_size);
985 /* Transmit if any encryptions have completed */
986 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
987 cancel_delayed_work(&ctx->tx_work.work);
988 tls_tx_records(sk, flags);
992 if (orig_size > size)
993 ret = orig_size - size;
995 ret = sk_stream_error(sk, flags, ret);
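/* Wait until strparser has queued a complete record in ctx->recv_pkt,
 * giving up on a socket error, shutdown, timeout or pending signal.
 */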
1001 static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
1002 long timeo, int *err)
1004 struct tls_context *tls_ctx = tls_get_ctx(sk);
1005 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1006 struct sk_buff *skb;
1007 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1009 while (!(skb = ctx->recv_pkt)) {
1011 *err = sock_error(sk);
1015 if (sk->sk_shutdown & RCV_SHUTDOWN)
1018 if (sock_flag(sk, SOCK_DONE))
1021 if ((flags & MSG_DONTWAIT) || !timeo) {
1026 add_wait_queue(sk_sleep(sk), &wait);
1027 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1028 sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
1029 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1030 remove_wait_queue(sk_sleep(sk), &wait);
1032 /* Handle signals */
1033 if (signal_pending(current)) {
1034 *err = sock_intr_errno(timeo);
1042 /* This function decrypts the input skb into either out_iov, out_sg, or
1043 * the skb's own buffers. The input parameter 'zc' indicates whether
1044 * zero-copy mode should be tried. In zero-copy mode, either out_iov or
1045 * out_sg must be non-NULL. If both out_iov and out_sg are NULL, the
1046 * decryption happens in the skb buffers themselves, i.e. zero-copy is
1047 * disabled and 'zc' is updated accordingly.
1050 static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
1051 struct iov_iter *out_iov,
1052 struct scatterlist *out_sg,
1053 int *chunk, bool *zc)
1055 struct tls_context *tls_ctx = tls_get_ctx(sk);
1056 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1057 struct strp_msg *rxm = strp_msg(skb);
1058 int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
1059 struct aead_request *aead_req;
1060 struct sk_buff *unused;
1061 u8 *aad, *iv, *mem = NULL;
1062 struct scatterlist *sgin = NULL;
1063 struct scatterlist *sgout = NULL;
1064 const int data_len = rxm->full_len - tls_ctx->rx.overhead_size;
1066 if (*zc && (out_iov || out_sg)) {
1068 n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
1070 n_sgout = sg_nents(out_sg);
1071 n_sgin = skb_nsg(skb, rxm->offset + tls_ctx->rx.prepend_size,
1072 rxm->full_len - tls_ctx->rx.prepend_size);
1076 n_sgin = skb_cow_data(skb, 0, &unused);
1082 /* Increment to accommodate AAD */
1083 n_sgin = n_sgin + 1;
1085 nsg = n_sgin + n_sgout;
1087 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1088 mem_size = aead_size + (nsg * sizeof(struct scatterlist));
1089 mem_size = mem_size + TLS_AAD_SPACE_SIZE;
1090 mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);
1092 /* Allocate a single block of memory which contains
1093 * aead_req || sgin[] || sgout[] || aad || iv.
1094 * This order achieves correct alignment for aead_req, sgin, sgout.
1096 mem = kmalloc(mem_size, sk->sk_allocation);
1100 /* Segment the allocated memory */
1101 aead_req = (struct aead_request *)mem;
1102 sgin = (struct scatterlist *)(mem + aead_size);
1103 sgout = sgin + n_sgin;
1104 aad = (u8 *)(sgout + n_sgout);
1105 iv = aad + TLS_AAD_SPACE_SIZE;
1108 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1109 iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
1110 tls_ctx->rx.iv_size);
1115 memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
1118 tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size,
1119 tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
1123 sg_init_table(sgin, n_sgin);
1124 sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE);
1125 err = skb_to_sgvec(skb, &sgin[1],
1126 rxm->offset + tls_ctx->rx.prepend_size,
1127 rxm->full_len - tls_ctx->rx.prepend_size);
1135 sg_init_table(sgout, n_sgout);
1136 sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);
1139 err = zerocopy_from_iter(sk, out_iov, data_len, &pages,
1141 (n_sgout - 1), false);
1143 goto fallback_to_reg_recv;
1144 } else if (out_sg) {
1145 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1147 goto fallback_to_reg_recv;
1150 fallback_to_reg_recv:
1157 /* Prepare and submit AEAD request */
1158 err = tls_do_decryption(sk, skb, sgin, sgout, iv,
1159 data_len, aead_req, *zc);
1160 if (err == -EINPROGRESS)
1163 /* Release the pages in case iov was mapped to pages */
1164 for (; pages > 0; pages--)
1165 put_page(sg_page(&sgout[pages]));
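/* Decrypt the queued record unless the device already did, then strip the
 * TLS header and overhead from the strparser message and advance the
 * expected record sequence number.
 */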
1171 static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
1172 struct iov_iter *dest, int *chunk, bool *zc)
1174 struct tls_context *tls_ctx = tls_get_ctx(sk);
1175 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1176 struct strp_msg *rxm = strp_msg(skb);
1179 #ifdef CONFIG_TLS_DEVICE
1180 err = tls_device_decrypted(sk, skb);
1184 if (!ctx->decrypted) {
1185 err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
1187 if (err == -EINPROGRESS)
1188 tls_advance_record_sn(sk, &tls_ctx->rx);
1196 rxm->offset += tls_ctx->rx.prepend_size;
1197 rxm->full_len -= tls_ctx->rx.overhead_size;
1198 tls_advance_record_sn(sk, &tls_ctx->rx);
1199 ctx->decrypted = true;
1200 ctx->saved_data_ready(sk);
1205 int decrypt_skb(struct sock *sk, struct sk_buff *skb,
1206 struct scatterlist *sgout)
1211 return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc);
1214 static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
1217 struct tls_context *tls_ctx = tls_get_ctx(sk);
1218 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1221 struct strp_msg *rxm = strp_msg(skb);
1223 if (len < rxm->full_len) {
1225 rxm->full_len -= len;
1231 /* Finished with message */
1232 ctx->recv_pkt = NULL;
1233 __strp_unpause(&ctx->strp);
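/* recvmsg() handler for the software RX path: report the record type via
 * a control message, decrypt records (into the user iov when zero-copy is
 * possible) and copy payload until the request or the data runs out.
 */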
1238 int tls_sw_recvmsg(struct sock *sk,
1245 struct tls_context *tls_ctx = tls_get_ctx(sk);
1246 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1247 unsigned char control;
1248 struct strp_msg *rxm;
1249 struct sk_buff *skb;
1252 int target, err = 0;
1254 bool is_kvec = msg->msg_iter.type & ITER_KVEC;
1259 if (unlikely(flags & MSG_ERRQUEUE))
1260 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
1264 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1265 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1271 skb = tls_wait_data(sk, flags, timeo, &err);
1275 rxm = strp_msg(skb);
1280 cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1281 sizeof(ctx->control), &ctx->control);
1283 control = ctx->control;
1284 if (ctx->control != TLS_RECORD_TYPE_DATA) {
1285 if (cerr || msg->msg_flags & MSG_CTRUNC) {
1290 } else if (control != ctx->control) {
1294 if (!ctx->decrypted) {
1295 int to_copy = rxm->full_len - tls_ctx->rx.overhead_size;
1297 if (!is_kvec && to_copy <= len &&
1298 likely(!(flags & MSG_PEEK)))
1301 err = decrypt_skb_update(sk, skb, &msg->msg_iter,
1303 if (err < 0 && err != -EINPROGRESS) {
1304 tls_err_abort(sk, EBADMSG);
1308 if (err == -EINPROGRESS) {
1311 goto pick_next_record;
1314 ctx->decrypted = true;
1318 chunk = min_t(unsigned int, rxm->full_len, len);
1320 err = skb_copy_datagram_msg(skb, rxm->offset, msg,
1329 if (likely(!(flags & MSG_PEEK))) {
1330 u8 control = ctx->control;
1332 /* For async, drop current skb reference */
1336 if (tls_sw_advance_skb(sk, skb, chunk)) {
1337 /* Return full control message to
1338 * userspace before trying to parse
1339 * another message type
1341 msg->msg_flags |= MSG_EOR;
1342 if (control != TLS_RECORD_TYPE_DATA)
1348 /* MSG_PEEK right now cannot look beyond the current skb
1349 * from strparser, meaning we cannot advance the skb here
1350 * and thus cannot unpause strparser, since we'd lose the original
1356 /* If we have a new message from strparser, continue now. */
1357 if (copied >= target && !ctx->recv_pkt)
1363 /* Wait for all previously submitted records to be decrypted */
1364 smp_store_mb(ctx->async_notify, true);
1365 if (atomic_read(&ctx->decrypt_pending)) {
1366 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1368 /* one of the async decrypts failed */
1369 tls_err_abort(sk, err);
1373 reinit_completion(&ctx->async_wait.completion);
1375 WRITE_ONCE(ctx->async_notify, false);
1379 return copied ? : err;
1382 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
1383 struct pipe_inode_info *pipe,
1384 size_t len, unsigned int flags)
1386 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
1387 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1388 struct strp_msg *rxm = NULL;
1389 struct sock *sk = sock->sk;
1390 struct sk_buff *skb;
1399 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1401 skb = tls_wait_data(sk, flags, timeo, &err);
1403 goto splice_read_end;
1405 /* splice does not support reading control messages */
1406 if (ctx->control != TLS_RECORD_TYPE_DATA) {
1408 goto splice_read_end;
1411 if (!ctx->decrypted) {
1412 err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc);
1415 tls_err_abort(sk, EBADMSG);
1416 goto splice_read_end;
1418 ctx->decrypted = true;
1420 rxm = strp_msg(skb);
1422 chunk = min_t(unsigned int, rxm->full_len, len);
1423 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
1425 goto splice_read_end;
1427 if (likely(!(flags & MSG_PEEK)))
1428 tls_sw_advance_skb(sk, skb, copied);
1432 return copied ? : err;
1435 unsigned int tls_sw_poll(struct file *file, struct socket *sock,
1436 struct poll_table_struct *wait)
1439 struct sock *sk = sock->sk;
1440 struct tls_context *tls_ctx = tls_get_ctx(sk);
1441 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1443 /* Grab POLLOUT and POLLHUP from the underlying socket */
1444 ret = ctx->sk_poll(file, sock, wait);
1446 /* Clear POLLIN bits, and set based on recv_pkt */
1447 ret &= ~(POLLIN | POLLRDNORM);
1449 ret |= POLLIN | POLLRDNORM;
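/* strparser parse callback: read the TLS record header from the skb,
 * validate the version and length fields and return the full record size
 * (0 means wait for more data, negative means error).
 */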
1454 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
1456 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
1457 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1458 char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
1459 struct strp_msg *rxm = strp_msg(skb);
1460 size_t cipher_overhead;
1461 size_t data_len = 0;
1464 /* Verify that we have a full TLS header, or wait for more data */
1465 if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
1468 /* Sanity-check size of on-stack buffer. */
1469 if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
1474 /* Linearize header to local buffer */
1475 ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);
1480 ctx->control = header[0];
1482 data_len = ((header[4] & 0xFF) | (header[3] << 8));
1484 cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;
1486 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
1490 if (data_len < cipher_overhead) {
1495 if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
1496 header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
1501 #ifdef CONFIG_TLS_DEVICE
1502 handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
1503 *(u64*)tls_ctx->rx.rec_seq);
1505 return data_len + TLS_HEADER_SIZE;
1508 tls_err_abort(strp->sk, ret);
1513 static void tls_queue(struct strparser *strp, struct sk_buff *skb)
1515 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
1516 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1518 ctx->decrypted = false;
1520 ctx->recv_pkt = skb;
1523 ctx->saved_data_ready(strp->sk);
1526 static void tls_data_ready(struct sock *sk)
1528 struct tls_context *tls_ctx = tls_get_ctx(sk);
1529 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1531 strp_data_ready(&ctx->strp);
1534 void tls_sw_free_resources_tx(struct sock *sk)
1536 struct tls_context *tls_ctx = tls_get_ctx(sk);
1537 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1538 struct tls_rec *rec, *tmp;
1540 /* Wait for any pending async encryptions to complete */
1541 smp_store_mb(ctx->async_notify, true);
1542 if (atomic_read(&ctx->encrypt_pending))
1543 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1545 cancel_delayed_work_sync(&ctx->tx_work.work);
1547 /* Tx whatever records we can transmit and abandon the rest */
1548 tls_tx_records(sk, -1);
1550 /* Free up unsent records in tx_list. First, free the
1551 * partially sent record, if any, at the head of tx_list.
1553 if (tls_ctx->partially_sent_record) {
1554 struct scatterlist *sg = tls_ctx->partially_sent_record;
1557 put_page(sg_page(sg));
1558 sk_mem_uncharge(sk, sg->length);
1565 tls_ctx->partially_sent_record = NULL;
1567 rec = list_first_entry(&ctx->tx_list,
1568 struct tls_rec, list);
1569 list_del(&rec->list);
1573 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
1574 free_sg(sk, rec->sg_encrypted_data,
1575 &rec->sg_encrypted_num_elem,
1576 &rec->sg_encrypted_size);
1578 list_del(&rec->list);
1582 crypto_free_aead(ctx->aead_send);
1583 tls_free_both_sg(sk);
1588 void tls_sw_release_resources_rx(struct sock *sk)
1590 struct tls_context *tls_ctx = tls_get_ctx(sk);
1591 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1593 if (ctx->aead_recv) {
1594 kfree_skb(ctx->recv_pkt);
1595 ctx->recv_pkt = NULL;
1596 crypto_free_aead(ctx->aead_recv);
1597 strp_stop(&ctx->strp);
1598 write_lock_bh(&sk->sk_callback_lock);
1599 sk->sk_data_ready = ctx->saved_data_ready;
1600 write_unlock_bh(&sk->sk_callback_lock);
1602 strp_done(&ctx->strp);
1607 void tls_sw_free_resources_rx(struct sock *sk)
1609 struct tls_context *tls_ctx = tls_get_ctx(sk);
1610 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1612 tls_sw_release_resources_rx(sk);
1617 /* The work handler to transmit the encrypted records in tx_list */
1618 static void tx_work_handler(struct work_struct *work)
1620 struct delayed_work *delayed_work = to_delayed_work(work);
1621 struct tx_work *tx_work = container_of(delayed_work,
1622 struct tx_work, work);
1623 struct sock *sk = tx_work->sk;
1624 struct tls_context *tls_ctx = tls_get_ctx(sk);
1625 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1627 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
1631 tls_tx_records(sk, -1);
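/* Set up software crypto for one direction: allocate the per-direction
 * context, copy IV/salt and record sequence from the userspace
 * crypto_info, allocate and key the gcm(aes) AEAD and, for RX, hook
 * strparser into sk_data_ready.
 */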
1635 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
1637 struct tls_crypto_info *crypto_info;
1638 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
1639 struct tls_sw_context_tx *sw_ctx_tx = NULL;
1640 struct tls_sw_context_rx *sw_ctx_rx = NULL;
1641 struct cipher_context *cctx;
1642 struct crypto_aead **aead;
1643 struct strp_callbacks cb;
1644 u16 nonce_size, tag_size, iv_size, rec_seq_size;
1654 if (!ctx->priv_ctx_tx) {
1655 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
1660 ctx->priv_ctx_tx = sw_ctx_tx;
1663 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
1666 if (!ctx->priv_ctx_rx) {
1667 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
1672 ctx->priv_ctx_rx = sw_ctx_rx;
1675 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
1680 crypto_init_wait(&sw_ctx_tx->async_wait);
1681 crypto_info = &ctx->crypto_send.info;
1683 aead = &sw_ctx_tx->aead_send;
1684 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
1685 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
1686 sw_ctx_tx->tx_work.sk = sk;
1688 crypto_init_wait(&sw_ctx_rx->async_wait);
1689 crypto_info = &ctx->crypto_recv.info;
1691 aead = &sw_ctx_rx->aead_recv;
1694 switch (crypto_info->cipher_type) {
1695 case TLS_CIPHER_AES_GCM_128: {
1696 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
1697 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
1698 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
1699 iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
1700 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
1702 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
1704 (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
1712 /* Sanity-check the IV size for stack allocations. */
1713 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
1718 cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
1719 cctx->tag_size = tag_size;
1720 cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
1721 cctx->iv_size = iv_size;
1722 cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
1728 memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
1729 memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
1730 cctx->rec_seq_size = rec_seq_size;
1731 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
1732 if (!cctx->rec_seq) {
1738 *aead = crypto_alloc_aead("gcm(aes)", 0, 0);
1739 if (IS_ERR(*aead)) {
1740 rc = PTR_ERR(*aead);
1746 ctx->push_pending_record = tls_sw_push_pending_record;
1748 rc = crypto_aead_setkey(*aead, gcm_128_info->key,
1749 TLS_CIPHER_AES_GCM_128_KEY_SIZE);
1753 rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
1758 /* Set up strparser */
1759 memset(&cb, 0, sizeof(cb));
1760 cb.rcv_msg = tls_queue;
1761 cb.parse_msg = tls_read_size;
1763 strp_init(&sw_ctx_rx->strp, sk, &cb);
1765 write_lock_bh(&sk->sk_callback_lock);
1766 sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
1767 sk->sk_data_ready = tls_data_ready;
1768 write_unlock_bh(&sk->sk_callback_lock);
1770 sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;
1772 strp_check_rcv(&sw_ctx_rx->strp);
1778 crypto_free_aead(*aead);
1781 kfree(cctx->rec_seq);
1782 cctx->rec_seq = NULL;
1788 kfree(ctx->priv_ctx_tx);
1789 ctx->priv_ctx_tx = NULL;
1791 kfree(ctx->priv_ctx_rx);
1792 ctx->priv_ctx_rx = NULL;