/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>
#include <linux/inet_diag.h>

#include <net/snmp.h>
#include <net/tls.h>
#include <net/tls_toe.h>

#include "tls.h"

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

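/* Per-cipher description table: nonce/IV/key/salt/tag/rec_seq sizes, the
 * crypto API algorithm name, and the offsets of those fields inside the
 * matching tls12_crypto_info_* uAPI structure. The CHECK_CIPHER_DESC()
 * invocations further down use static_assert() to keep the descriptors,
 * the TLS_MAX_* limits and the uAPI structures in sync at compile time.
 */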
#define CHECK_CIPHER_DESC(cipher,ci)				\
	static_assert(cipher ## _IV_SIZE <= TLS_MAX_IV_SIZE);		\
	static_assert(cipher ## _SALT_SIZE <= TLS_MAX_SALT_SIZE);	\
	static_assert(cipher ## _REC_SEQ_SIZE <= TLS_MAX_REC_SEQ_SIZE); \
	static_assert(cipher ## _TAG_SIZE == TLS_TAG_SIZE);		\
	static_assert(sizeof_field(struct ci, iv) == cipher ## _IV_SIZE); \
	static_assert(sizeof_field(struct ci, key) == cipher ## _KEY_SIZE); \
	static_assert(sizeof_field(struct ci, salt) == cipher ## _SALT_SIZE); \
	static_assert(sizeof_field(struct ci, rec_seq) == cipher ## _REC_SEQ_SIZE);

#define __CIPHER_DESC(ci) \
	.iv_offset = offsetof(struct ci, iv), \
	.key_offset = offsetof(struct ci, key), \
	.salt_offset = offsetof(struct ci, salt), \
	.rec_seq_offset = offsetof(struct ci, rec_seq), \
	.crypto_info = sizeof(struct ci)

#define CIPHER_DESC(cipher,ci,algname,_offloadable) [cipher - TLS_CIPHER_MIN] = { \
	.nonce = cipher ## _IV_SIZE, \
	.iv = cipher ## _IV_SIZE, \
	.key = cipher ## _KEY_SIZE, \
	.salt = cipher ## _SALT_SIZE, \
	.tag = cipher ## _TAG_SIZE, \
	.rec_seq = cipher ## _REC_SEQ_SIZE, \
	.cipher_name = algname, \
	.offloadable = _offloadable, \
	__CIPHER_DESC(ci), \
}

#define CIPHER_DESC_NONCE0(cipher,ci,algname,_offloadable) [cipher - TLS_CIPHER_MIN] = { \
	.nonce = 0, \
	.iv = cipher ## _IV_SIZE, \
	.key = cipher ## _KEY_SIZE, \
	.salt = cipher ## _SALT_SIZE, \
	.tag = cipher ## _TAG_SIZE, \
	.rec_seq = cipher ## _REC_SEQ_SIZE, \
	.cipher_name = algname, \
	.offloadable = _offloadable, \
	__CIPHER_DESC(ci), \
}

const struct tls_cipher_desc tls_cipher_desc[TLS_CIPHER_MAX + 1 - TLS_CIPHER_MIN] = {
	CIPHER_DESC(TLS_CIPHER_AES_GCM_128, tls12_crypto_info_aes_gcm_128, "gcm(aes)", true),
	CIPHER_DESC(TLS_CIPHER_AES_GCM_256, tls12_crypto_info_aes_gcm_256, "gcm(aes)", true),
	CIPHER_DESC(TLS_CIPHER_AES_CCM_128, tls12_crypto_info_aes_ccm_128, "ccm(aes)", false),
	CIPHER_DESC_NONCE0(TLS_CIPHER_CHACHA20_POLY1305, tls12_crypto_info_chacha20_poly1305, "rfc7539(chacha20,poly1305)", false),
	CIPHER_DESC(TLS_CIPHER_SM4_GCM, tls12_crypto_info_sm4_gcm, "gcm(sm4)", false),
	CIPHER_DESC(TLS_CIPHER_SM4_CCM, tls12_crypto_info_sm4_ccm, "ccm(sm4)", false),
	CIPHER_DESC(TLS_CIPHER_ARIA_GCM_128, tls12_crypto_info_aria_gcm_128, "gcm(aria)", false),
	CIPHER_DESC(TLS_CIPHER_ARIA_GCM_256, tls12_crypto_info_aria_gcm_256, "gcm(aria)", false),
};

CHECK_CIPHER_DESC(TLS_CIPHER_AES_GCM_128, tls12_crypto_info_aes_gcm_128);
CHECK_CIPHER_DESC(TLS_CIPHER_AES_GCM_256, tls12_crypto_info_aes_gcm_256);
CHECK_CIPHER_DESC(TLS_CIPHER_AES_CCM_128, tls12_crypto_info_aes_ccm_128);
CHECK_CIPHER_DESC(TLS_CIPHER_CHACHA20_POLY1305, tls12_crypto_info_chacha20_poly1305);
CHECK_CIPHER_DESC(TLS_CIPHER_SM4_GCM, tls12_crypto_info_sm4_gcm);
CHECK_CIPHER_DESC(TLS_CIPHER_SM4_CCM, tls12_crypto_info_sm4_ccm);
CHECK_CIPHER_DESC(TLS_CIPHER_ARIA_GCM_128, tls12_crypto_info_aria_gcm_128);
CHECK_CIPHER_DESC(TLS_CIPHER_ARIA_GCM_256, tls12_crypto_info_aria_gcm_256);

static const struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static const struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 const struct proto *base);

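/* Point the socket at the proto and proto_ops variants that match its
 * current (tx_conf, rx_conf) pair. The per-address-family tables are
 * filled lazily by tls_build_proto() when the ULP is first attached.
 */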
void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	WRITE_ONCE(sk->sk_prot,
		   &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
	WRITE_ONCE(sk->sk_socket->ops,
		   &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);
}

int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret, rc = 0;

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		ret = sk_wait_event(sk, timeo,
				    !READ_ONCE(sk->sk_write_pending), &wait);
		if (ret) {
			if (ret < 0)
				rc = ret;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

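/* Push a scatterlist of record data to TCP via MSG_SPLICE_PAGES. On a
 * partial send the remaining scatterlist and offset are stashed in
 * ctx->partially_sent_record / ctx->partially_sent_offset so that
 * tls_push_partial_record() can resume the transmission later.
 */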
int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	struct bio_vec bvec;
	struct msghdr msg = {
		.msg_flags = MSG_SPLICE_PAGES | flags,
	};
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->splicing_pages = true;
	while (1) {
		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		bvec_set_page(&bvec, p, size, offset);
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

		ret = tcp_sendmsg_locked(sk, &msg, size);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->splicing_pages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->splicing_pages = false;

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
		     unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}

int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}

void tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
{
	struct scatterlist *sg;

	for (sg = ctx->partially_sent_record; sg; sg = sg_next(sg)) {
		put_page(sg_page(sg));
		sk_mem_uncharge(sk, sg->length);
	}
	ctx->partially_sent_record = NULL;
}

static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* If we are splicing pages, call the lower protocol's write space
	 * handler to ensure we wake up any waiting operations there. For
	 * example, if splicing pages were to call sk_wait_event.
	 */
	if (ctx->splicing_pages) {
		ctx->sk_write_space(sk);
		return;
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->tx_conf == TLS_HW)
		tls_device_write_space(sk, ctx);
	else
#endif
		tls_sw_write_space(sk, ctx);

	ctx->sk_write_space(sk);
}

/**
 * tls_ctx_free() - free TLS ULP context
 * @sk:  socket to which @ctx is attached
 * @ctx: TLS context structure
 *
 * Free TLS context. If @sk is %NULL caller guarantees that the socket
 * to which @ctx was attached has no outstanding references.
 */
void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
{
	if (!ctx)
		return;

	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	mutex_destroy(&ctx->tx_lock);

	if (sk)
		kfree_rcu(ctx, rcu);
	else
		kfree(ctx);
}

static void tls_sk_proto_cleanup(struct sock *sk,
				 struct tls_context *ctx, long timeo)
{
	if (unlikely(sk->sk_write_pending) &&
	    !wait_on_pending_writer(sk, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		tls_sw_release_resources_tx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
	} else if (ctx->tx_conf == TLS_HW) {
		tls_device_free_resources_tx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
	}

	if (ctx->rx_conf == TLS_SW) {
		tls_sw_release_resources_rx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
	} else if (ctx->rx_conf == TLS_HW) {
		tls_device_offload_cleanup_rx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
	}
}

static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	bool free_ctx;

	if (ctx->tx_conf == TLS_SW)
		tls_sw_cancel_work_tx(ctx);

	lock_sock(sk);
	free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW;

	if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
		tls_sk_proto_cleanup(sk, ctx, timeo);

	write_lock_bh(&sk->sk_callback_lock);
	if (free_ctx)
		rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	WRITE_ONCE(sk->sk_prot, ctx->sk_proto);
	if (sk->sk_write_space == tls_write_space)
		sk->sk_write_space = ctx->sk_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);
	if (ctx->tx_conf == TLS_SW)
		tls_sw_free_ctx_tx(ctx);
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
		tls_sw_strparser_done(ctx);
	if (ctx->rx_conf == TLS_SW)
		tls_sw_free_ctx_rx(ctx);
	ctx->sk_proto->close(sk, timeout);

	if (free_ctx)
		tls_ctx_free(sk, ctx);
}

static __poll_t tls_sk_poll(struct file *file, struct socket *sock,
			    struct poll_table_struct *wait)
{
	struct tls_sw_context_rx *ctx;
	struct tls_context *tls_ctx;
	struct sock *sk = sock->sk;
	struct sk_psock *psock;
	__poll_t mask = 0;
	u8 shutdown;
	int state;

	mask = tcp_poll(file, sock, wait);

	state = inet_sk_state_load(sk);
	shutdown = READ_ONCE(sk->sk_shutdown);
	if (unlikely(state != TCP_ESTABLISHED || shutdown & RCV_SHUTDOWN))
		return mask;

	tls_ctx = tls_get_ctx(sk);
	ctx = tls_sw_ctx_rx(tls_ctx);
	psock = sk_psock_get(sk);

	if ((skb_queue_empty_lockless(&ctx->rx_list) &&
	     !tls_strp_msg_ready(ctx) &&
	     sk_psock_queue_empty(psock)) ||
	    READ_ONCE(ctx->key_update_pending))
		mask &= ~(EPOLLIN | EPOLLRDNORM);

	if (psock)
		sk_psock_put(sk, psock);

	return mask;
}

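/* Copy the crypto parameters of one direction back to userspace. A buffer
 * of exactly sizeof(struct tls_crypto_info) returns only the version and
 * cipher type; a buffer sized for the cipher-specific tls12_crypto_info_*
 * structure returns the full structure with the IV and record sequence
 * number refreshed from the current cipher state.
 */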
static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
				  int __user *optlen, int tx)
{
	int rc = 0;
	const struct tls_cipher_desc *cipher_desc;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	struct cipher_context *cctx;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
	}

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc || len != cipher_desc->crypto_info) {
		rc = -EINVAL;
		goto out;
	}

	memcpy(crypto_info_iv(crypto_info, cipher_desc),
	       cctx->iv + cipher_desc->salt, cipher_desc->iv);
	memcpy(crypto_info_rec_seq(crypto_info, cipher_desc),
	       cctx->rec_seq, cipher_desc->rec_seq);

	if (copy_to_user(optval, crypto_info, cipher_desc->crypto_info))
		rc = -EFAULT;

out:
	return rc;
}

static int do_tls_getsockopt_tx_zc(struct sock *sk, char __user *optval,
				   int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	unsigned int value;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len != sizeof(value))
		return -EINVAL;

	value = ctx->zerocopy_sendfile;
	if (copy_to_user(optval, &value, sizeof(value)))
		return -EFAULT;

	return 0;
}

static int do_tls_getsockopt_no_pad(struct sock *sk, char __user *optval,
				    int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	int value, len;

	if (ctx->prot_info.version != TLS_1_3_VERSION)
		return -EINVAL;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < sizeof(value))
		return -EINVAL;

	value = -EINVAL;
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
		value = ctx->rx_no_pad;
	if (value < 0)
		return value;

	if (put_user(sizeof(value), optlen))
		return -EFAULT;
	if (copy_to_user(optval, &value, sizeof(value)))
		return -EFAULT;

	return 0;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	lock_sock(sk);

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		rc = do_tls_getsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		break;
	case TLS_TX_ZEROCOPY_RO:
		rc = do_tls_getsockopt_tx_zc(sk, optval, optlen);
		break;
	case TLS_RX_EXPECT_NO_PAD:
		rc = do_tls_getsockopt_no_pad(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);

	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->getsockopt(sk, level,
						 optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

static int validate_crypto_info(const struct tls_crypto_info *crypto_info,
				const struct tls_crypto_info *alt_crypto_info)
{
	if (crypto_info->version != TLS_1_2_VERSION &&
	    crypto_info->version != TLS_1_3_VERSION)
		return -EINVAL;

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_ARIA_GCM_128:
	case TLS_CIPHER_ARIA_GCM_256:
		if (crypto_info->version != TLS_1_2_VERSION)
			return -EINVAL;
		break;
	}

	/* Ensure that the TLS version and cipher are the same in both directions */
	if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
		if (alt_crypto_info->version != crypto_info->version ||
		    alt_crypto_info->cipher_type != crypto_info->cipher_type)
			return -EINVAL;
	}

	return 0;
}

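/* TX and RX are configured independently, one setsockopt() call per
 * direction. A rough, untested userspace sketch of the sequence this
 * implements, assuming a connected TCP socket fd and key material taken
 * from the application's own handshake (normally different per direction):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	(fill ci.key, ci.iv, ci.salt, ci.rec_seq from the handshake)
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));	(tx == 1 here)
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));	(tx == 0 here)
 */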
static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info, *alt_crypto_info;
	struct tls_crypto_info *old_crypto_info = NULL;
	struct tls_context *ctx = tls_get_ctx(sk);
	const struct tls_cipher_desc *cipher_desc;
	union tls_crypto_context *crypto_ctx;
	union tls_crypto_context tmp = {};
	bool update = false;
	int rc = 0;
	int conf;

	if (sockptr_is_null(optval) || (optlen < sizeof(*crypto_info)))
		return -EINVAL;

	if (tx) {
		crypto_ctx = &ctx->crypto_send;
		alt_crypto_info = &ctx->crypto_recv.info;
	} else {
		crypto_ctx = &ctx->crypto_recv;
		alt_crypto_info = &ctx->crypto_send.info;
	}

	crypto_info = &crypto_ctx->info;

	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		/* Currently we only support setting crypto info more
		 * than one time for TLS 1.3
		 */
		if (crypto_info->version != TLS_1_3_VERSION) {
			TLS_INC_STATS(sock_net(sk), tx ? LINUX_MIB_TLSTXREKEYERROR
						       : LINUX_MIB_TLSRXREKEYERROR);
			return -EBUSY;
		}

		update = true;
		old_crypto_info = crypto_info;
		crypto_info = &tmp.info;
		crypto_ctx = &tmp;
	}

	rc = copy_from_sockptr(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	if (update) {
		/* Ensure that TLS version and ciphers are not modified */
		if (crypto_info->version != old_crypto_info->version ||
		    crypto_info->cipher_type != old_crypto_info->cipher_type)
			rc = -EINVAL;
	} else {
		rc = validate_crypto_info(crypto_info, alt_crypto_info);
	}
	if (rc)
		goto err_crypto_info;

	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (optlen != cipher_desc->crypto_info) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	rc = copy_from_sockptr_offset(crypto_info + 1, optval,
				      sizeof(*crypto_info),
				      optlen - sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	if (tx) {
		rc = tls_set_device_offload(sk);
		conf = TLS_HW;
		if (!rc) {
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
		} else {
			rc = tls_set_sw_offload(sk, 1,
						update ? crypto_info : NULL);
			if (rc)
				goto err_crypto_info;

			if (update) {
				TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXREKEYOK);
			} else {
				TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);
				TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
			}
			conf = TLS_SW;
		}
	} else {
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (!rc) {
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
		} else {
			rc = tls_set_sw_offload(sk, 0,
						update ? crypto_info : NULL);
			if (rc)
				goto err_crypto_info;

			if (update) {
				TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXREKEYOK);
			} else {
				TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW);
				TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
			}
			conf = TLS_SW;
		}
		if (!update)
			tls_sw_strparser_arm(sk, ctx);
	}

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);

	if (update)
		return 0;

	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(ctx);

		tls_strp_check_rcv(&rx_ctx->strp);
	}
	return 0;

err_crypto_info:
	if (update) {
		TLS_INC_STATS(sock_net(sk), tx ? LINUX_MIB_TLSTXREKEYERROR
					       : LINUX_MIB_TLSRXREKEYERROR);
	}
	memzero_explicit(crypto_ctx, sizeof(*crypto_ctx));
	return rc;
}

static int do_tls_setsockopt_tx_zc(struct sock *sk, sockptr_t optval,
				   unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	unsigned int value;

	if (sockptr_is_null(optval) || optlen != sizeof(value))
		return -EINVAL;

	if (copy_from_sockptr(&value, optval, sizeof(value)))
		return -EFAULT;

	if (value > 1)
		return -EINVAL;

	ctx->zerocopy_sendfile = value;

	return 0;
}

static int do_tls_setsockopt_no_pad(struct sock *sk, sockptr_t optval,
				    unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	u32 val;
	int rc;

	if (ctx->prot_info.version != TLS_1_3_VERSION ||
	    sockptr_is_null(optval) || optlen < sizeof(val))
		return -EINVAL;

	rc = copy_from_sockptr(&val, optval, sizeof(val));
	if (rc)
		return -EFAULT;
	if (val > 1)
		return -EINVAL;
	rc = check_zeroed_sockptr(optval, sizeof(val), optlen - sizeof(val));
	if (rc < 1)
		return rc == 0 ? -EINVAL : rc;

	lock_sock(sk);
	rc = -EINVAL;
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW) {
		ctx->rx_no_pad = val;
		tls_update_rx_zc_capable(ctx);
		rc = 0;
	}
	release_sock(sk);

	return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
			     unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	case TLS_TX_ZEROCOPY_RO:
		lock_sock(sk);
		rc = do_tls_setsockopt_tx_zc(sk, optval, optlen);
		release_sock(sk);
		break;
	case TLS_RX_EXPECT_NO_PAD:
		rc = do_tls_setsockopt_no_pad(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->setsockopt(sk, level, optname, optval,
						 optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

static int tls_disconnect(struct sock *sk, int flags)
{
	return -EOPNOTSUPP;
}

struct tls_context *tls_ctx_create(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	mutex_init(&ctx->tx_lock);
	ctx->sk_proto = READ_ONCE(sk->sk_prot);
	ctx->sk = sk;
	/* Release semantic of rcu_assign_pointer() ensures that
	 * ctx->sk_proto is visible before changing sk->sk_prot in
	 * update_sk_prot(), and prevents reading uninitialized value in
	 * tls_{getsockopt, setsockopt}. Note that we do not need a
	 * read barrier in tls_{getsockopt,setsockopt} as there is an
	 * address dependency between sk->sk_proto->{getsockopt,setsockopt}
	 * and ctx->sk_proto.
	 */
	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	return ctx;
}

static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			    const struct proto_ops *base)
{
	ops[TLS_BASE][TLS_BASE] = *base;

	ops[TLS_SW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
	ops[TLS_SW  ][TLS_BASE].splice_eof = tls_sw_splice_eof;

	ops[TLS_BASE][TLS_SW  ] = ops[TLS_BASE][TLS_BASE];
	ops[TLS_BASE][TLS_SW  ].splice_read = tls_sw_splice_read;
	ops[TLS_BASE][TLS_SW  ].poll = tls_sk_poll;
	ops[TLS_BASE][TLS_SW  ].read_sock = tls_sw_read_sock;

	ops[TLS_SW  ][TLS_SW  ] = ops[TLS_SW  ][TLS_BASE];
	ops[TLS_SW  ][TLS_SW  ].splice_read = tls_sw_splice_read;
	ops[TLS_SW  ][TLS_SW  ].poll = tls_sk_poll;
	ops[TLS_SW  ][TLS_SW  ].read_sock = tls_sw_read_sock;

#ifdef CONFIG_TLS_DEVICE
	ops[TLS_HW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];

	ops[TLS_HW  ][TLS_SW  ] = ops[TLS_BASE][TLS_SW  ];

	ops[TLS_BASE][TLS_HW  ] = ops[TLS_BASE][TLS_SW  ];

	ops[TLS_SW  ][TLS_HW  ] = ops[TLS_SW  ][TLS_SW  ];

	ops[TLS_HW  ][TLS_HW  ] = ops[TLS_HW  ][TLS_SW  ];
#endif
#ifdef CONFIG_TLS_TOE
	ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
#endif
}

static void tls_build_proto(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
	struct proto *prot = READ_ONCE(sk->sk_prot);

	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], prot);
			build_proto_ops(tls_proto_ops[TLSV6],
					sk->sk_socket->ops);
			smp_store_release(&saved_tcpv6_prot, prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], prot);
			build_proto_ops(tls_proto_ops[TLSV4],
					sk->sk_socket->ops);
			smp_store_release(&saved_tcpv4_prot, prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}
}

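/* Fill the [tx_conf][rx_conf] proto matrix: every entry starts from the
 * base TCP proto, TLS_BASE/TLS_BASE overrides the setsockopt/getsockopt/
 * disconnect/close hooks, and the TLS_SW/TLS_HW rows and columns layer
 * their sendmsg/recvmsg/splice_eof handlers on top of that.
 */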
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 const struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].disconnect = tls_disconnect;
	prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].splice_eof = tls_sw_splice_eof;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
	prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
	prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].splice_eof = tls_device_splice_eof;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].splice_eof = tls_device_splice_eof;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif
#ifdef CONFIG_TLS_TOE
	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_toe_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_toe_unhash;
#endif
}

static int tls_init(struct sock *sk)
{
	struct tls_context *ctx;
	int rc = 0;

	tls_build_proto(sk);

#ifdef CONFIG_TLS_TOE
	if (tls_toe_bypass(sk))
		return 0;
#endif

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	/* allocate tls context */
	write_lock_bh(&sk->sk_callback_lock);
	ctx = tls_ctx_create(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	write_unlock_bh(&sk->sk_callback_lock);
	return rc;
}

static void tls_update(struct sock *sk, struct proto *p,
		       void (*write_space)(struct sock *sk))
{
	struct tls_context *ctx;

	WARN_ON_ONCE(sk->sk_prot == p);

	ctx = tls_get_ctx(sk);
	if (likely(ctx)) {
		ctx->sk_write_space = write_space;
		ctx->sk_proto = p;
	} else {
		/* Pairs with lockless read in sk_clone_lock(). */
		WRITE_ONCE(sk->sk_prot, p);
		sk->sk_write_space = write_space;
	}
}

static u16 tls_user_config(struct tls_context *ctx, bool tx)
{
	u16 config = tx ? ctx->tx_conf : ctx->rx_conf;

	switch (config) {
	case TLS_BASE:
		return TLS_CONF_BASE;
	case TLS_SW:
		return TLS_CONF_SW;
	case TLS_HW:
		return TLS_CONF_HW;
	case TLS_HW_RECORD:
		return TLS_CONF_HW_RECORD;
	}
	return 0;
}

static int tls_get_info(struct sock *sk, struct sk_buff *skb, bool net_admin)
{
	u16 version, cipher_type;
	struct tls_context *ctx;
	struct nlattr *start;
	int err;

	start = nla_nest_start_noflag(skb, INET_ULP_INFO_TLS);
	if (!start)
		return -EMSGSIZE;

	rcu_read_lock();
	ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
	if (!ctx) {
		err = 0;
		goto nla_failure;
	}
	version = ctx->prot_info.version;
	if (version) {
		err = nla_put_u16(skb, TLS_INFO_VERSION, version);
		if (err)
			goto nla_failure;
	}
	cipher_type = ctx->prot_info.cipher_type;
	if (cipher_type) {
		err = nla_put_u16(skb, TLS_INFO_CIPHER, cipher_type);
		if (err)
			goto nla_failure;
	}
	err = nla_put_u16(skb, TLS_INFO_TXCONF, tls_user_config(ctx, true));
	if (err)
		goto nla_failure;

	err = nla_put_u16(skb, TLS_INFO_RXCONF, tls_user_config(ctx, false));
	if (err)
		goto nla_failure;

	if (ctx->tx_conf == TLS_HW && ctx->zerocopy_sendfile) {
		err = nla_put_flag(skb, TLS_INFO_ZC_RO_TX);
		if (err)
			goto nla_failure;
	}
	if (ctx->rx_no_pad) {
		err = nla_put_flag(skb, TLS_INFO_RX_NO_PAD);
		if (err)
			goto nla_failure;
	}

	rcu_read_unlock();
	nla_nest_end(skb, start);
	return 0;

nla_failure:
	rcu_read_unlock();
	nla_nest_cancel(skb, start);
	return err;
}

static size_t tls_get_info_size(const struct sock *sk, bool net_admin)
{
	size_t size = 0;

	size += nla_total_size(0) +		/* INET_ULP_INFO_TLS */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_VERSION */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_CIPHER */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_RXCONF */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_TXCONF */
		nla_total_size(0) +		/* TLS_INFO_ZC_RO_TX */
		nla_total_size(0) +		/* TLS_INFO_RX_NO_PAD */
		0;

	return size;
}

static int __net_init tls_init_net(struct net *net)
{
	int err;

	net->mib.tls_statistics = alloc_percpu(struct linux_tls_mib);
	if (!net->mib.tls_statistics)
		return -ENOMEM;

	err = tls_proc_init(net);
	if (err)
		goto err_free_stats;

	return 0;
err_free_stats:
	free_percpu(net->mib.tls_statistics);
	return err;
}

static void __net_exit tls_exit_net(struct net *net)
{
	tls_proc_fini(net);
	free_percpu(net->mib.tls_statistics);
}

static struct pernet_operations tls_proc_ops = {
	.init = tls_init_net,
	.exit = tls_exit_net,
};

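/* Registering this ULP is what makes setsockopt(fd, SOL_TCP, TCP_ULP, "tls")
 * work; tls_init() then attaches a tls_context to the socket and the
 * TLS_TX/TLS_RX socket options above take it from there.
 */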
static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name			= "tls",
	.owner			= THIS_MODULE,
	.init			= tls_init,
	.update			= tls_update,
	.get_info		= tls_get_info,
	.get_info_size		= tls_get_info_size,
};

static int __init tls_register(void)
{
	int err;

	err = register_pernet_subsys(&tls_proc_ops);
	if (err)
		return err;

	err = tls_strp_dev_init();
	if (err)
		goto err_pernet;

	err = tls_device_init();
	if (err)
		goto err_strp;

	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
err_strp:
	tls_strp_dev_exit();
err_pernet:
	unregister_pernet_subsys(&tls_proc_ops);
	return err;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
	tls_strp_dev_exit();
	tls_device_cleanup();
	unregister_pernet_subsys(&tls_proc_ops);
}

module_init(tls_register);
module_exit(tls_unregister);