/* net/tls/tls_main.c
 *
 * Extracted from a linux-block.git blame view of the commit
 * "tls: Fix tls_device handling of partial records".
 */
1/*
2 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/module.h>
35
36#include <net/tcp.h>
37#include <net/inet_common.h>
38#include <linux/highmem.h>
39#include <linux/netdevice.h>
40#include <linux/sched/signal.h>
dd0bed16 41#include <linux/inetdevice.h>
3c4d7559
DW
42
43#include <net/tls.h>
44
45MODULE_AUTHOR("Mellanox Technologies");
46MODULE_DESCRIPTION("Transport Layer Security Support");
47MODULE_LICENSE("Dual BSD/GPL");
037b0b86 48MODULE_ALIAS_TCP_ULP("tls");
3c4d7559 49
c113187d
BP
50enum {
51 TLSV4,
52 TLSV6,
53 TLS_NUM_PROTS,
54};
6d88207f 55
c113187d
BP
56static struct proto *saved_tcpv6_prot;
57static DEFINE_MUTEX(tcpv6_prot_mutex);
28cb6f1e
JF
58static struct proto *saved_tcpv4_prot;
59static DEFINE_MUTEX(tcpv4_prot_mutex);
dd0bed16 60static LIST_HEAD(device_list);
df9d4a17 61static DEFINE_SPINLOCK(device_spinlock);
f66de3ee 62static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
c46234eb 63static struct proto_ops tls_sw_proto_ops;
63a6b3fe
AG
64static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
65 struct proto *base);
6d88207f 66
f66de3ee 67static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
6d88207f 68{
c113187d
BP
69 int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
70
f66de3ee 71 sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
6d88207f 72}
3c4d7559
DW
73
74int wait_on_pending_writer(struct sock *sk, long *timeo)
75{
76 int rc = 0;
77 DEFINE_WAIT_FUNC(wait, woken_wake_function);
78
79 add_wait_queue(sk_sleep(sk), &wait);
80 while (1) {
81 if (!*timeo) {
82 rc = -EAGAIN;
83 break;
84 }
85
86 if (signal_pending(current)) {
87 rc = sock_intr_errno(*timeo);
88 break;
89 }
90
91 if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
92 break;
93 }
94 remove_wait_queue(sk_sleep(sk), &wait);
95 return rc;
96}
97
98int tls_push_sg(struct sock *sk,
99 struct tls_context *ctx,
100 struct scatterlist *sg,
101 u16 first_offset,
102 int flags)
103{
104 int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
105 int ret = 0;
106 struct page *p;
107 size_t size;
108 int offset = first_offset;
109
110 size = sg->length - offset;
111 offset += sg->offset;
112
c212d2c7 113 ctx->in_tcp_sendpages = true;
3c4d7559
DW
114 while (1) {
115 if (sg_is_last(sg))
116 sendpage_flags = flags;
117
118 /* is sending application-limited? */
119 tcp_rate_check_app_limited(sk);
120 p = sg_page(sg);
121retry:
122 ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);
123
124 if (ret != size) {
125 if (ret > 0) {
126 offset += ret;
127 size -= ret;
128 goto retry;
129 }
130
131 offset -= sg->offset;
132 ctx->partially_sent_offset = offset;
133 ctx->partially_sent_record = (void *)sg;
080324c3 134 ctx->in_tcp_sendpages = false;
3c4d7559
DW
135 return ret;
136 }
137
138 put_page(p);
139 sk_mem_uncharge(sk, sg->length);
140 sg = sg_next(sg);
141 if (!sg)
142 break;
143
144 offset = sg->offset;
145 size = sg->length;
146 }
147
c212d2c7
DW
148 ctx->in_tcp_sendpages = false;
149 ctx->sk_write_space(sk);
3c4d7559
DW
150
151 return 0;
152}
153
154static int tls_handle_open_record(struct sock *sk, int flags)
155{
156 struct tls_context *ctx = tls_get_ctx(sk);
157
158 if (tls_is_pending_open_record(ctx))
159 return ctx->push_pending_record(sk, flags);
160
161 return 0;
162}
163
164int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
165 unsigned char *record_type)
166{
167 struct cmsghdr *cmsg;
168 int rc = -EINVAL;
169
170 for_each_cmsghdr(cmsg, msg) {
171 if (!CMSG_OK(msg, cmsg))
172 return -EINVAL;
173 if (cmsg->cmsg_level != SOL_TLS)
174 continue;
175
176 switch (cmsg->cmsg_type) {
177 case TLS_SET_RECORD_TYPE:
178 if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
179 return -EINVAL;
180
181 if (msg->msg_flags & MSG_MORE)
182 return -EINVAL;
183
184 rc = tls_handle_open_record(sk, msg->msg_flags);
185 if (rc)
186 return rc;
187
188 *record_type = *(unsigned char *)CMSG_DATA(cmsg);
189 rc = 0;
190 break;
191 default:
192 return -EINVAL;
193 }
194 }
195
196 return rc;
197}
198
a42055e8
VG
199int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
200 int flags)
3c4d7559
DW
201{
202 struct scatterlist *sg;
203 u16 offset;
204
3c4d7559
DW
205 sg = ctx->partially_sent_record;
206 offset = ctx->partially_sent_offset;
207
208 ctx->partially_sent_record = NULL;
209 return tls_push_sg(sk, ctx, sg, offset, flags);
210}
211
212static void tls_write_space(struct sock *sk)
213{
214 struct tls_context *ctx = tls_get_ctx(sk);
a42055e8 215 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
3c4d7559 216
67db7cd2
JF
217 /* If in_tcp_sendpages call lower protocol write space handler
218 * to ensure we wake up any waiting operations there. For example
219 * if do_tcp_sendpages where to call sk_wait_event.
220 */
221 if (ctx->in_tcp_sendpages) {
222 ctx->sk_write_space(sk);
c212d2c7 223 return;
67db7cd2 224 }
c212d2c7 225
a42055e8 226 /* Schedule the transmission if tx list is ready */
9932a29a 227 if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
a42055e8
VG
228 /* Schedule the transmission */
229 if (!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
230 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
3c4d7559
DW
231 }
232
233 ctx->sk_write_space(sk);
234}
235
86029d10
SD
236static void tls_ctx_free(struct tls_context *ctx)
237{
238 if (!ctx)
239 return;
240
241 memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
242 memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
243 kfree(ctx);
244}
245
3c4d7559
DW
246static void tls_sk_proto_close(struct sock *sk, long timeout)
247{
248 struct tls_context *ctx = tls_get_ctx(sk);
249 long timeo = sock_sndtimeo(sk, 0);
250 void (*sk_proto_close)(struct sock *sk, long timeout);
98f0a395 251 bool free_ctx = false;
3c4d7559
DW
252
253 lock_sock(sk);
ff45d820
IL
254 sk_proto_close = ctx->sk_proto_close;
255
76f7164d
AG
256 if (ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD)
257 goto skip_tx_cleanup;
258
259 if (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE) {
98f0a395 260 free_ctx = true;
ff45d820
IL
261 goto skip_tx_cleanup;
262 }
3c4d7559
DW
263
264 if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
265 tls_handle_open_record(sk, 0);
266
f66de3ee
BP
267 /* We need these for tls_sw_fallback handling of other packets */
268 if (ctx->tx_conf == TLS_SW) {
269 kfree(ctx->tx.rec_seq);
270 kfree(ctx->tx.iv);
271 tls_sw_free_resources_tx(sk);
272 }
3c4d7559 273
f66de3ee
BP
274 if (ctx->rx_conf == TLS_SW) {
275 kfree(ctx->rx.rec_seq);
276 kfree(ctx->rx.iv);
277 tls_sw_free_resources_rx(sk);
c46234eb 278 }
3c4d7559 279
e8f69799 280#ifdef CONFIG_TLS_DEVICE
4799ac81
BP
281 if (ctx->rx_conf == TLS_HW)
282 tls_device_offload_cleanup_rx(sk);
283
284 if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
e8f69799
IL
285#else
286 {
287#endif
86029d10 288 tls_ctx_free(ctx);
e8f69799
IL
289 ctx = NULL;
290 }
291
ff45d820 292skip_tx_cleanup:
3c4d7559
DW
293 release_sock(sk);
294 sk_proto_close(sk, timeout);
dd0bed16
AG
295 /* free ctx for TLS_HW_RECORD, used by tcp_set_state
296 * for sk->sk_prot->unhash [tls_hw_unhash]
297 */
98f0a395 298 if (free_ctx)
86029d10 299 tls_ctx_free(ctx);
3c4d7559
DW
300}
301
302static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
303 int __user *optlen)
304{
305 int rc = 0;
306 struct tls_context *ctx = tls_get_ctx(sk);
307 struct tls_crypto_info *crypto_info;
308 int len;
309
310 if (get_user(len, optlen))
311 return -EFAULT;
312
313 if (!optval || (len < sizeof(*crypto_info))) {
314 rc = -EINVAL;
315 goto out;
316 }
317
318 if (!ctx) {
319 rc = -EBUSY;
320 goto out;
321 }
322
323 /* get user crypto info */
86029d10 324 crypto_info = &ctx->crypto_send.info;
3c4d7559
DW
325
326 if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
327 rc = -EBUSY;
328 goto out;
329 }
330
5a3b886c 331 if (len == sizeof(*crypto_info)) {
ac55cd61
DC
332 if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
333 rc = -EFAULT;
3c4d7559
DW
334 goto out;
335 }
336
337 switch (crypto_info->cipher_type) {
338 case TLS_CIPHER_AES_GCM_128: {
339 struct tls12_crypto_info_aes_gcm_128 *
340 crypto_info_aes_gcm_128 =
341 container_of(crypto_info,
342 struct tls12_crypto_info_aes_gcm_128,
343 info);
344
345 if (len != sizeof(*crypto_info_aes_gcm_128)) {
346 rc = -EINVAL;
347 goto out;
348 }
349 lock_sock(sk);
a1dfa681 350 memcpy(crypto_info_aes_gcm_128->iv,
dbe42559 351 ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
3c4d7559 352 TLS_CIPHER_AES_GCM_128_IV_SIZE);
dbe42559 353 memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
c410c196 354 TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
3c4d7559 355 release_sock(sk);
ac55cd61
DC
356 if (copy_to_user(optval,
357 crypto_info_aes_gcm_128,
358 sizeof(*crypto_info_aes_gcm_128)))
359 rc = -EFAULT;
3c4d7559
DW
360 break;
361 }
fb99bce7
DW
362 case TLS_CIPHER_AES_GCM_256: {
363 struct tls12_crypto_info_aes_gcm_256 *
364 crypto_info_aes_gcm_256 =
365 container_of(crypto_info,
366 struct tls12_crypto_info_aes_gcm_256,
367 info);
368
369 if (len != sizeof(*crypto_info_aes_gcm_256)) {
370 rc = -EINVAL;
371 goto out;
372 }
373 lock_sock(sk);
374 memcpy(crypto_info_aes_gcm_256->iv,
375 ctx->tx.iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
376 TLS_CIPHER_AES_GCM_256_IV_SIZE);
377 memcpy(crypto_info_aes_gcm_256->rec_seq, ctx->tx.rec_seq,
378 TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
379 release_sock(sk);
380 if (copy_to_user(optval,
381 crypto_info_aes_gcm_256,
382 sizeof(*crypto_info_aes_gcm_256)))
383 rc = -EFAULT;
384 break;
385 }
3c4d7559
DW
386 default:
387 rc = -EINVAL;
388 }
389
390out:
391 return rc;
392}
393
394static int do_tls_getsockopt(struct sock *sk, int optname,
395 char __user *optval, int __user *optlen)
396{
397 int rc = 0;
398
399 switch (optname) {
400 case TLS_TX:
401 rc = do_tls_getsockopt_tx(sk, optval, optlen);
402 break;
403 default:
404 rc = -ENOPROTOOPT;
405 break;
406 }
407 return rc;
408}
409
410static int tls_getsockopt(struct sock *sk, int level, int optname,
411 char __user *optval, int __user *optlen)
412{
413 struct tls_context *ctx = tls_get_ctx(sk);
414
415 if (level != SOL_TLS)
416 return ctx->getsockopt(sk, level, optname, optval, optlen);
417
418 return do_tls_getsockopt(sk, optname, optval, optlen);
419}
420
c46234eb
DW
421static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
422 unsigned int optlen, int tx)
3c4d7559 423{
196c31b4 424 struct tls_crypto_info *crypto_info;
4509de14 425 struct tls_crypto_info *alt_crypto_info;
3c4d7559 426 struct tls_context *ctx = tls_get_ctx(sk);
fb99bce7 427 size_t optsize;
3c4d7559 428 int rc = 0;
58371585 429 int conf;
3c4d7559
DW
430
431 if (!optval || (optlen < sizeof(*crypto_info))) {
432 rc = -EINVAL;
433 goto out;
434 }
435
4509de14 436 if (tx) {
86029d10 437 crypto_info = &ctx->crypto_send.info;
4509de14
VG
438 alt_crypto_info = &ctx->crypto_recv.info;
439 } else {
86029d10 440 crypto_info = &ctx->crypto_recv.info;
4509de14
VG
441 alt_crypto_info = &ctx->crypto_send.info;
442 }
c46234eb 443
196c31b4 444 /* Currently we don't support set crypto info more than one time */
877d17c7
SD
445 if (TLS_CRYPTO_INFO_READY(crypto_info)) {
446 rc = -EBUSY;
196c31b4 447 goto out;
877d17c7 448 }
196c31b4
IL
449
450 rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
3c4d7559
DW
451 if (rc) {
452 rc = -EFAULT;
257082e6 453 goto err_crypto_info;
3c4d7559
DW
454 }
455
456 /* check version */
130b392c
DW
457 if (crypto_info->version != TLS_1_2_VERSION &&
458 crypto_info->version != TLS_1_3_VERSION) {
3c4d7559 459 rc = -ENOTSUPP;
196c31b4 460 goto err_crypto_info;
3c4d7559
DW
461 }
462
4509de14
VG
463 /* Ensure that TLS version and ciphers are same in both directions */
464 if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
465 if (alt_crypto_info->version != crypto_info->version ||
466 alt_crypto_info->cipher_type != crypto_info->cipher_type) {
467 rc = -EINVAL;
468 goto err_crypto_info;
469 }
470 }
471
196c31b4 472 switch (crypto_info->cipher_type) {
fb99bce7
DW
473 case TLS_CIPHER_AES_GCM_128:
474 case TLS_CIPHER_AES_GCM_256: {
475 optsize = crypto_info->cipher_type == TLS_CIPHER_AES_GCM_128 ?
476 sizeof(struct tls12_crypto_info_aes_gcm_128) :
477 sizeof(struct tls12_crypto_info_aes_gcm_256);
478 if (optlen != optsize) {
3c4d7559 479 rc = -EINVAL;
6db959c8 480 goto err_crypto_info;
3c4d7559 481 }
196c31b4
IL
482 rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
483 optlen - sizeof(*crypto_info));
3c4d7559
DW
484 if (rc) {
485 rc = -EFAULT;
486 goto err_crypto_info;
487 }
488 break;
489 }
490 default:
491 rc = -EINVAL;
6db959c8 492 goto err_crypto_info;
3c4d7559
DW
493 }
494
c46234eb 495 if (tx) {
e8f69799
IL
496#ifdef CONFIG_TLS_DEVICE
497 rc = tls_set_device_offload(sk, ctx);
498 conf = TLS_HW;
499 if (rc) {
500#else
501 {
502#endif
503 rc = tls_set_sw_offload(sk, ctx, 1);
504 conf = TLS_SW;
505 }
c46234eb 506 } else {
4799ac81
BP
507#ifdef CONFIG_TLS_DEVICE
508 rc = tls_set_device_offload_rx(sk, ctx);
509 conf = TLS_HW;
510 if (rc) {
511#else
512 {
513#endif
514 rc = tls_set_sw_offload(sk, ctx, 0);
515 conf = TLS_SW;
516 }
c46234eb
DW
517 }
518
3c4d7559
DW
519 if (rc)
520 goto err_crypto_info;
521
f66de3ee
BP
522 if (tx)
523 ctx->tx_conf = conf;
524 else
525 ctx->rx_conf = conf;
6d88207f 526 update_sk_prot(sk, ctx);
c46234eb
DW
527 if (tx) {
528 ctx->sk_write_space = sk->sk_write_space;
529 sk->sk_write_space = tls_write_space;
530 } else {
531 sk->sk_socket->ops = &tls_sw_proto_ops;
532 }
3c4d7559
DW
533 goto out;
534
535err_crypto_info:
c844eb46 536 memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
3c4d7559
DW
537out:
538 return rc;
539}
540
541static int do_tls_setsockopt(struct sock *sk, int optname,
542 char __user *optval, unsigned int optlen)
543{
544 int rc = 0;
545
546 switch (optname) {
547 case TLS_TX:
c46234eb 548 case TLS_RX:
3c4d7559 549 lock_sock(sk);
c46234eb
DW
550 rc = do_tls_setsockopt_conf(sk, optval, optlen,
551 optname == TLS_TX);
3c4d7559
DW
552 release_sock(sk);
553 break;
554 default:
555 rc = -ENOPROTOOPT;
556 break;
557 }
558 return rc;
559}
560
561static int tls_setsockopt(struct sock *sk, int level, int optname,
562 char __user *optval, unsigned int optlen)
563{
564 struct tls_context *ctx = tls_get_ctx(sk);
565
566 if (level != SOL_TLS)
567 return ctx->setsockopt(sk, level, optname, optval, optlen);
568
569 return do_tls_setsockopt(sk, optname, optval, optlen);
570}
571
dd0bed16
AG
572static struct tls_context *create_ctx(struct sock *sk)
573{
574 struct inet_connection_sock *icsk = inet_csk(sk);
575 struct tls_context *ctx;
576
c6ec179a 577 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
dd0bed16
AG
578 if (!ctx)
579 return NULL;
580
581 icsk->icsk_ulp_data = ctx;
6c0563e4
AG
582 ctx->setsockopt = sk->sk_prot->setsockopt;
583 ctx->getsockopt = sk->sk_prot->getsockopt;
584 ctx->sk_proto_close = sk->sk_prot->close;
dd0bed16
AG
585 return ctx;
586}
587
63a6b3fe
AG
588static void tls_build_proto(struct sock *sk)
589{
590 int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
591
592 /* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
593 if (ip_ver == TLSV6 &&
594 unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
595 mutex_lock(&tcpv6_prot_mutex);
596 if (likely(sk->sk_prot != saved_tcpv6_prot)) {
597 build_protos(tls_prots[TLSV6], sk->sk_prot);
598 smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
599 }
600 mutex_unlock(&tcpv6_prot_mutex);
601 }
602
603 if (ip_ver == TLSV4 &&
604 unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
605 mutex_lock(&tcpv4_prot_mutex);
606 if (likely(sk->sk_prot != saved_tcpv4_prot)) {
607 build_protos(tls_prots[TLSV4], sk->sk_prot);
608 smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
609 }
610 mutex_unlock(&tcpv4_prot_mutex);
611 }
612}
613
76f7164d
AG
614static void tls_hw_sk_destruct(struct sock *sk)
615{
616 struct tls_context *ctx = tls_get_ctx(sk);
617 struct inet_connection_sock *icsk = inet_csk(sk);
618
619 ctx->sk_destruct(sk);
620 /* Free ctx */
621 kfree(ctx);
622 icsk->icsk_ulp_data = NULL;
623}
624
dd0bed16
AG
625static int tls_hw_prot(struct sock *sk)
626{
627 struct tls_context *ctx;
628 struct tls_device *dev;
629 int rc = 0;
630
df9d4a17 631 spin_lock_bh(&device_spinlock);
dd0bed16
AG
632 list_for_each_entry(dev, &device_list, dev_list) {
633 if (dev->feature && dev->feature(dev)) {
634 ctx = create_ctx(sk);
635 if (!ctx)
636 goto out;
637
63a6b3fe
AG
638 spin_unlock_bh(&device_spinlock);
639 tls_build_proto(sk);
dd0bed16
AG
640 ctx->hash = sk->sk_prot->hash;
641 ctx->unhash = sk->sk_prot->unhash;
642 ctx->sk_proto_close = sk->sk_prot->close;
76f7164d
AG
643 ctx->sk_destruct = sk->sk_destruct;
644 sk->sk_destruct = tls_hw_sk_destruct;
f66de3ee
BP
645 ctx->rx_conf = TLS_HW_RECORD;
646 ctx->tx_conf = TLS_HW_RECORD;
dd0bed16 647 update_sk_prot(sk, ctx);
63a6b3fe 648 spin_lock_bh(&device_spinlock);
dd0bed16
AG
649 rc = 1;
650 break;
651 }
652 }
653out:
df9d4a17 654 spin_unlock_bh(&device_spinlock);
dd0bed16
AG
655 return rc;
656}
657
658static void tls_hw_unhash(struct sock *sk)
659{
660 struct tls_context *ctx = tls_get_ctx(sk);
661 struct tls_device *dev;
662
df9d4a17 663 spin_lock_bh(&device_spinlock);
dd0bed16 664 list_for_each_entry(dev, &device_list, dev_list) {
df9d4a17
AG
665 if (dev->unhash) {
666 kref_get(&dev->kref);
667 spin_unlock_bh(&device_spinlock);
dd0bed16 668 dev->unhash(dev, sk);
df9d4a17
AG
669 kref_put(&dev->kref, dev->release);
670 spin_lock_bh(&device_spinlock);
671 }
dd0bed16 672 }
df9d4a17 673 spin_unlock_bh(&device_spinlock);
dd0bed16
AG
674 ctx->unhash(sk);
675}
676
677static int tls_hw_hash(struct sock *sk)
678{
679 struct tls_context *ctx = tls_get_ctx(sk);
680 struct tls_device *dev;
681 int err;
682
683 err = ctx->hash(sk);
df9d4a17 684 spin_lock_bh(&device_spinlock);
dd0bed16 685 list_for_each_entry(dev, &device_list, dev_list) {
df9d4a17
AG
686 if (dev->hash) {
687 kref_get(&dev->kref);
688 spin_unlock_bh(&device_spinlock);
dd0bed16 689 err |= dev->hash(dev, sk);
df9d4a17
AG
690 kref_put(&dev->kref, dev->release);
691 spin_lock_bh(&device_spinlock);
692 }
dd0bed16 693 }
df9d4a17 694 spin_unlock_bh(&device_spinlock);
dd0bed16
AG
695
696 if (err)
697 tls_hw_unhash(sk);
698 return err;
699}
700
f66de3ee
BP
701static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
702 struct proto *base)
c113187d 703{
f66de3ee
BP
704 prot[TLS_BASE][TLS_BASE] = *base;
705 prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
706 prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
707 prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;
708
709 prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
710 prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
711 prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;
712
713 prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
924ad65e
JF
714 prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
715 prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
716 prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;
f66de3ee
BP
717
718 prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
924ad65e
JF
719 prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
720 prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
721 prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;
f66de3ee 722
e8f69799
IL
723#ifdef CONFIG_TLS_DEVICE
724 prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
725 prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
726 prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;
727
728 prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
729 prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
730 prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;
4799ac81
BP
731
732 prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];
733
734 prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];
735
736 prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
e8f69799
IL
737#endif
738
f66de3ee
BP
739 prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
740 prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
741 prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
742 prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
c113187d
BP
743}
744
3c4d7559
DW
745static int tls_init(struct sock *sk)
746{
3c4d7559
DW
747 struct tls_context *ctx;
748 int rc = 0;
749
dd0bed16
AG
750 if (tls_hw_prot(sk))
751 goto out;
752
d91c3e17
IL
753 /* The TLS ulp is currently supported only for TCP sockets
754 * in ESTABLISHED state.
755 * Supporting sockets in LISTEN state will require us
756 * to modify the accept implementation to clone rather then
757 * share the ulp context.
758 */
759 if (sk->sk_state != TCP_ESTABLISHED)
760 return -ENOTSUPP;
761
3c4d7559 762 /* allocate tls context */
dd0bed16 763 ctx = create_ctx(sk);
3c4d7559
DW
764 if (!ctx) {
765 rc = -ENOMEM;
766 goto out;
767 }
6d88207f 768
63a6b3fe 769 tls_build_proto(sk);
f66de3ee
BP
770 ctx->tx_conf = TLS_BASE;
771 ctx->rx_conf = TLS_BASE;
6d88207f 772 update_sk_prot(sk, ctx);
3c4d7559
DW
773out:
774 return rc;
775}
776
dd0bed16
AG
777void tls_register_device(struct tls_device *device)
778{
df9d4a17 779 spin_lock_bh(&device_spinlock);
dd0bed16 780 list_add_tail(&device->dev_list, &device_list);
df9d4a17 781 spin_unlock_bh(&device_spinlock);
dd0bed16
AG
782}
783EXPORT_SYMBOL(tls_register_device);
784
785void tls_unregister_device(struct tls_device *device)
786{
df9d4a17 787 spin_lock_bh(&device_spinlock);
dd0bed16 788 list_del(&device->dev_list);
df9d4a17 789 spin_unlock_bh(&device_spinlock);
dd0bed16
AG
790}
791EXPORT_SYMBOL(tls_unregister_device);
792
3c4d7559
DW
793static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
794 .name = "tls",
795 .owner = THIS_MODULE,
796 .init = tls_init,
797};
798
799static int __init tls_register(void)
800{
c46234eb 801 tls_sw_proto_ops = inet_stream_ops;
c46234eb
DW
802 tls_sw_proto_ops.splice_read = tls_sw_splice_read;
803
e8f69799
IL
804#ifdef CONFIG_TLS_DEVICE
805 tls_device_init();
806#endif
3c4d7559
DW
807 tcp_register_ulp(&tcp_tls_ulp_ops);
808
809 return 0;
810}
811
812static void __exit tls_unregister(void)
813{
814 tcp_unregister_ulp(&tcp_tls_ulp_ops);
e8f69799
IL
815#ifdef CONFIG_TLS_DEVICE
816 tls_device_cleanup();
817#endif
3c4d7559
DW
818}
819
820module_init(tls_register);
821module_exit(tls_unregister);