/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
31 | ||
32 | #include <crypto/aead.h> | |
33 | #include <linux/highmem.h> | |
34 | #include <linux/module.h> | |
35 | #include <linux/netdevice.h> | |
36 | #include <net/dst.h> | |
37 | #include <net/inet_connection_sock.h> | |
38 | #include <net/tcp.h> | |
39 | #include <net/tls.h> | |
40 | ||
/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static DEFINE_SPINLOCK(tls_device_lock);

static void tls_device_free_ctx(struct tls_context *ctx)
{
        if (ctx->tx_conf == TLS_HW)
                kfree(tls_offload_ctx_tx(ctx));

        if (ctx->rx_conf == TLS_HW)
                kfree(tls_offload_ctx_rx(ctx));

        kfree(ctx);
}

static void tls_device_gc_task(struct work_struct *work)
{
        struct tls_context *ctx, *tmp;
        unsigned long flags;
        LIST_HEAD(gc_list);

        spin_lock_irqsave(&tls_device_lock, flags);
        list_splice_init(&tls_device_gc_list, &gc_list);
        spin_unlock_irqrestore(&tls_device_lock, flags);

        list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
                struct net_device *netdev = ctx->netdev;

                if (netdev && ctx->tx_conf == TLS_HW) {
                        netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
                                                        TLS_OFFLOAD_CTX_DIR_TX);
                        dev_put(netdev);
                        ctx->netdev = NULL;
                }

                list_del(&ctx->list);
                tls_device_free_ctx(ctx);
        }
}

static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
                              struct net_device *netdev)
{
        if (sk->sk_destruct != tls_device_sk_destruct) {
                refcount_set(&ctx->refcount, 1);
                dev_hold(netdev);
                ctx->netdev = netdev;
                spin_lock_irq(&tls_device_lock);
                list_add_tail(&ctx->list, &tls_device_list);
                spin_unlock_irq(&tls_device_lock);

                ctx->sk_destruct = sk->sk_destruct;
                sk->sk_destruct = tls_device_sk_destruct;
        }
}

static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&tls_device_lock, flags);
        list_move_tail(&ctx->list, &tls_device_gc_list);

        /* schedule_work inside the spinlock
         * to make sure tls_device_down waits for that work.
         */
        schedule_work(&tls_device_gc_work);

        spin_unlock_irqrestore(&tls_device_lock, flags);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
        struct dst_entry *dst = sk_dst_get(sk);
        struct net_device *netdev = NULL;

        if (likely(dst)) {
                netdev = dst->dev;
                dev_hold(netdev);
        }

        dst_release(dst);

        return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
        int nr_frags = record->num_frags;
        skb_frag_t *frag;

        while (nr_frags-- > 0) {
                frag = &record->frags[nr_frags];
                __skb_frag_unref(frag);
        }
        kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
        struct tls_record_info *info, *temp;

        list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
                list_del(&info->list);
                destroy_record(info);
        }

        offload_ctx->retransmit_hint = NULL;
}

static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_record_info *info, *temp;
        struct tls_offload_context_tx *ctx;
        u64 deleted_records = 0;
        unsigned long flags;

        if (!tls_ctx)
                return;

        ctx = tls_offload_ctx_tx(tls_ctx);

        spin_lock_irqsave(&ctx->lock, flags);
        info = ctx->retransmit_hint;
        if (info && !before(acked_seq, info->end_seq)) {
                ctx->retransmit_hint = NULL;
                list_del(&info->list);
                destroy_record(info);
                deleted_records++;
        }

        list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
                if (before(acked_seq, info->end_seq))
                        break;
                list_del(&info->list);

                destroy_record(info);
                deleted_records++;
        }

        ctx->unacked_record_sn += deleted_records;
        spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
void tls_device_sk_destruct(struct sock *sk)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

        tls_ctx->sk_destruct(sk);

        if (tls_ctx->tx_conf == TLS_HW) {
                if (ctx->open_record)
                        destroy_record(ctx->open_record);
                delete_all_records(ctx);
                crypto_free_aead(ctx->aead_send);
                clean_acked_data_disable(inet_csk(sk));
        }

        if (refcount_dec_and_test(&tls_ctx->refcount))
                tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL(tls_device_sk_destruct);

static void tls_append_frag(struct tls_record_info *record,
                            struct page_frag *pfrag,
                            int size)
{
        skb_frag_t *frag;

        frag = &record->frags[record->num_frags - 1];
        if (frag->page.p == pfrag->page &&
            frag->page_offset + frag->size == pfrag->offset) {
                frag->size += size;
        } else {
                ++frag;
                frag->page.p = pfrag->page;
                frag->page_offset = pfrag->offset;
                frag->size = size;
                ++record->num_frags;
                get_page(pfrag->page);
        }

        pfrag->offset += size;
        record->len += size;
}

static int tls_push_record(struct sock *sk,
                           struct tls_context *ctx,
                           struct tls_offload_context_tx *offload_ctx,
                           struct tls_record_info *record,
                           struct page_frag *pfrag,
                           int flags,
                           unsigned char record_type)
{
        struct tls_prot_info *prot = &ctx->prot_info;
        struct tcp_sock *tp = tcp_sk(sk);
        struct page_frag dummy_tag_frag;
        skb_frag_t *frag;
        int i;

        /* fill prepend */
        frag = &record->frags[0];
        tls_fill_prepend(ctx,
                         skb_frag_address(frag),
                         record->len - prot->prepend_size,
                         record_type,
                         ctx->crypto_send.info.version);

        /* HW doesn't care about the data in the tag, because it fills it. */
        dummy_tag_frag.page = skb_frag_page(frag);
        dummy_tag_frag.offset = 0;

        tls_append_frag(record, &dummy_tag_frag, prot->tag_size);
        record->end_seq = tp->write_seq + record->len;
        spin_lock_irq(&offload_ctx->lock);
        list_add_tail(&record->list, &offload_ctx->records_list);
        spin_unlock_irq(&offload_ctx->lock);
        offload_ctx->open_record = NULL;
        set_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
        tls_advance_record_sn(sk, &ctx->tx, ctx->crypto_send.info.version);

        for (i = 0; i < record->num_frags; i++) {
                frag = &record->frags[i];
                sg_unmark_end(&offload_ctx->sg_tx_data[i]);
                sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
                            frag->size, frag->page_offset);
                sk_mem_charge(sk, frag->size);
                get_page(skb_frag_page(frag));
        }
        sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

        /* all ready, send */
        return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
                                 struct page_frag *pfrag,
                                 size_t prepend_size)
{
        struct tls_record_info *record;
        skb_frag_t *frag;

        record = kmalloc(sizeof(*record), GFP_KERNEL);
        if (!record)
                return -ENOMEM;

        frag = &record->frags[0];
        __skb_frag_set_page(frag, pfrag->page);
        frag->page_offset = pfrag->offset;
        skb_frag_size_set(frag, prepend_size);

        get_page(pfrag->page);
        pfrag->offset += prepend_size;

        record->num_frags = 1;
        record->len = prepend_size;
        offload_ctx->open_record = record;
        return 0;
}

static int tls_do_allocation(struct sock *sk,
                             struct tls_offload_context_tx *offload_ctx,
                             struct page_frag *pfrag,
                             size_t prepend_size)
{
        int ret;

        if (!offload_ctx->open_record) {
                if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
                                                   sk->sk_allocation))) {
                        sk->sk_prot->enter_memory_pressure(sk);
                        sk_stream_moderate_sndbuf(sk);
                        return -ENOMEM;
                }

                ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
                if (ret)
                        return ret;

                if (pfrag->size > pfrag->offset)
                        return 0;
        }

        if (!sk_page_frag_refill(sk, pfrag))
                return -ENOMEM;

        return 0;
}

static int tls_push_data(struct sock *sk,
                         struct iov_iter *msg_iter,
                         size_t size, int flags,
                         unsigned char record_type)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
        int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
        int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
        struct tls_record_info *record = ctx->open_record;
        struct page_frag *pfrag;
        size_t orig_size = size;
        u32 max_open_record_len;
        int copy, rc = 0;
        bool done = false;
        long timeo;

        if (flags &
            ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
                return -ENOTSUPP;

        if (sk->sk_err)
                return -sk->sk_err;

        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
        rc = tls_complete_pending_work(sk, tls_ctx, flags, &timeo);
        if (rc < 0)
                return rc;

        pfrag = sk_page_frag(sk);

        /* TLS_HEADER_SIZE is not counted as part of the TLS record, and
         * we need to leave room for an authentication tag.
         */
        max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
                              prot->prepend_size;
        do {
                rc = tls_do_allocation(sk, ctx, pfrag,
                                       prot->prepend_size);
                if (rc) {
                        rc = sk_stream_wait_memory(sk, &timeo);
                        if (!rc)
                                continue;

                        record = ctx->open_record;
                        if (!record)
                                break;
handle_error:
                        if (record_type != TLS_RECORD_TYPE_DATA) {
                                /* avoid sending partial
                                 * record with type !=
                                 * application_data
                                 */
                                size = orig_size;
                                destroy_record(record);
                                ctx->open_record = NULL;
                        } else if (record->len > prot->prepend_size) {
                                goto last_record;
                        }

                        break;
                }

                record = ctx->open_record;
                copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
                copy = min_t(size_t, copy, (max_open_record_len - record->len));

                if (copy_from_iter_nocache(page_address(pfrag->page) +
                                           pfrag->offset,
                                           copy, msg_iter) != copy) {
                        rc = -EFAULT;
                        goto handle_error;
                }
                tls_append_frag(record, pfrag, copy);

                size -= copy;
                if (!size) {
last_record:
                        tls_push_record_flags = flags;
                        if (more) {
                                tls_ctx->pending_open_record_frags =
                                                !!record->num_frags;
                                break;
                        }

                        done = true;
                }

                if (done || record->len >= max_open_record_len ||
                    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
                        rc = tls_push_record(sk,
                                             tls_ctx,
                                             ctx,
                                             record,
                                             pfrag,
                                             tls_push_record_flags,
                                             record_type);
                        if (rc < 0)
                                break;
                }
        } while (!done);

        if (orig_size - size > 0)
                rc = orig_size - size;

        return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
        unsigned char record_type = TLS_RECORD_TYPE_DATA;
        int rc;

        lock_sock(sk);

        if (unlikely(msg->msg_controllen)) {
                rc = tls_proccess_cmsg(sk, msg, &record_type);
                if (rc)
                        goto out;
        }

        rc = tls_push_data(sk, &msg->msg_iter, size,
                           msg->msg_flags, record_type);

out:
        release_sock(sk);
        return rc;
}

int tls_device_sendpage(struct sock *sk, struct page *page,
                        int offset, size_t size, int flags)
{
        struct iov_iter msg_iter;
        char *kaddr = kmap(page);
        struct kvec iov;
        int rc;

        if (flags & MSG_SENDPAGE_NOTLAST)
                flags |= MSG_MORE;

        lock_sock(sk);

        if (flags & MSG_OOB) {
                rc = -ENOTSUPP;
                goto out;
        }

        iov.iov_base = kaddr + offset;
        iov.iov_len = size;
        iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
        rc = tls_push_data(sk, &msg_iter, size,
                           flags, TLS_RECORD_TYPE_DATA);
        kunmap(page);

out:
        release_sock(sk);
        return rc;
}

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
                                       u32 seq, u64 *p_record_sn)
{
        u64 record_sn = context->hint_record_sn;
        struct tls_record_info *info;

        info = context->retransmit_hint;
        if (!info ||
            before(seq, info->end_seq - info->len)) {
                /* if retransmit_hint is irrelevant start
                 * from the beginning of the list
                 */
                info = list_first_entry(&context->records_list,
                                        struct tls_record_info, list);
                record_sn = context->unacked_record_sn;
        }

        list_for_each_entry_from(info, &context->records_list, list) {
                if (before(seq, info->end_seq)) {
                        if (!context->retransmit_hint ||
                            after(info->end_seq,
                                  context->retransmit_hint->end_seq)) {
                                context->hint_record_sn = record_sn;
                                context->retransmit_hint = info;
                        }
                        *p_record_sn = record_sn;
                        return info;
                }
                record_sn++;
        }

        return NULL;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
        struct iov_iter msg_iter;

        iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
        return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct net_device *netdev = tls_ctx->netdev;
        struct tls_offload_context_rx *rx_ctx;
        u32 is_req_pending;
        s64 resync_req;
        u32 req_seq;

        if (tls_ctx->rx_conf != TLS_HW)
                return;

        rx_ctx = tls_offload_ctx_rx(tls_ctx);
        resync_req = atomic64_read(&rx_ctx->resync_req);
        req_seq = ntohl(resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
        is_req_pending = resync_req;

        if (unlikely(is_req_pending) && req_seq == seq &&
            atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
                netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk,
                                                      seq + TLS_HEADER_SIZE - 1,
                                                      rcd_sn);
}

static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
        struct strp_msg *rxm = strp_msg(skb);
        int err = 0, offset = rxm->offset, copy, nsg;
        struct sk_buff *skb_iter, *unused;
        struct scatterlist sg[1];
        char *orig_buf, *buf;

        orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
                           TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
        if (!orig_buf)
                return -ENOMEM;
        buf = orig_buf;

        nsg = skb_cow_data(skb, 0, &unused);
        if (unlikely(nsg < 0)) {
                err = nsg;
                goto free_buf;
        }

        sg_init_table(sg, 1);
        sg_set_buf(&sg[0], buf,
                   rxm->full_len + TLS_HEADER_SIZE +
                   TLS_CIPHER_AES_GCM_128_IV_SIZE);
        skb_copy_bits(skb, offset, buf,
                      TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);

        /* We are interested only in the decrypted data not the auth */
        err = decrypt_skb(sk, skb, sg);
        if (err != -EBADMSG)
                goto free_buf;
        else
                err = 0;

        copy = min_t(int, skb_pagelen(skb) - offset,
                     rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);

        if (skb->decrypted)
                skb_store_bits(skb, offset, buf, copy);

        offset += copy;
        buf += copy;

        skb_walk_frags(skb, skb_iter) {
                copy = min_t(int, skb_iter->len,
                             rxm->full_len - offset + rxm->offset -
                             TLS_CIPHER_AES_GCM_128_TAG_SIZE);

                if (skb_iter->decrypted)
                        skb_store_bits(skb_iter, offset, buf, copy);

                offset += copy;
                buf += copy;
        }

free_buf:
        kfree(orig_buf);
        return err;
}

int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
        int is_decrypted = skb->decrypted;
        int is_encrypted = !is_decrypted;
        struct sk_buff *skb_iter;

        /* Skip if it is already decrypted */
        if (ctx->sw.decrypted)
                return 0;

        /* Check if all the data is decrypted already */
        skb_walk_frags(skb, skb_iter) {
                is_decrypted &= skb_iter->decrypted;
                is_encrypted &= !skb_iter->decrypted;
        }

        ctx->sw.decrypted |= is_decrypted;

        /* Return immediately if the record is either entirely plaintext or
         * entirely ciphertext. Otherwise re-encrypt the partially decrypted
         * record.
         */
        return (is_encrypted || is_decrypted) ? 0 :
                tls_device_reencrypt(sk, skb);
}

int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
        u16 nonce_size, tag_size, iv_size, rec_seq_size;
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_prot_info *prot = &tls_ctx->prot_info;
        struct tls_record_info *start_marker_record;
        struct tls_offload_context_tx *offload_ctx;
        struct tls_crypto_info *crypto_info;
        struct net_device *netdev;
        char *iv, *rec_seq;
        struct sk_buff *skb;
        int rc = -EINVAL;
        __be64 rcd_sn;

        if (!ctx)
                goto out;

        if (ctx->priv_ctx_tx) {
                rc = -EEXIST;
                goto out;
        }

        start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
        if (!start_marker_record) {
                rc = -ENOMEM;
                goto out;
        }

        offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
        if (!offload_ctx) {
                rc = -ENOMEM;
                goto free_marker_record;
        }

        crypto_info = &ctx->crypto_send.info;
        switch (crypto_info->cipher_type) {
        case TLS_CIPHER_AES_GCM_128:
                nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
                tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
                iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
                iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
                rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
                rec_seq =
                 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
                break;
        default:
                rc = -EINVAL;
                goto free_offload_ctx;
        }

        prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
        prot->tag_size = tag_size;
        prot->overhead_size = prot->prepend_size + prot->tag_size;
        prot->iv_size = iv_size;
        ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
                             GFP_KERNEL);
        if (!ctx->tx.iv) {
                rc = -ENOMEM;
                goto free_offload_ctx;
        }

        memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

        prot->rec_seq_size = rec_seq_size;
        ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
        if (!ctx->tx.rec_seq) {
                rc = -ENOMEM;
                goto free_iv;
        }

        rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
        if (rc)
                goto free_rec_seq;

        /* start at rec_seq - 1 to account for the start marker record */
        memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
        offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

        start_marker_record->end_seq = tcp_sk(sk)->write_seq;
        start_marker_record->len = 0;
        start_marker_record->num_frags = 0;

        INIT_LIST_HEAD(&offload_ctx->records_list);
        list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
        spin_lock_init(&offload_ctx->lock);
        sg_init_table(offload_ctx->sg_tx_data,
                      ARRAY_SIZE(offload_ctx->sg_tx_data));

        clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
        ctx->push_pending_record = tls_device_push_pending_record;

        /* TLS offload is greatly simplified if we don't send
         * SKBs where only part of the payload needs to be encrypted.
         * So mark the last skb in the write queue as end of record.
         */
        skb = tcp_write_queue_tail(sk);
        if (skb)
                TCP_SKB_CB(skb)->eor = 1;

        /* We support starting offload on multiple sockets
         * concurrently, so we only need a read lock here.
         * This lock must precede get_netdev_for_sock to prevent races between
         * NETDEV_DOWN and setsockopt.
         */
        down_read(&device_offload_lock);
        netdev = get_netdev_for_sock(sk);
        if (!netdev) {
                pr_err_ratelimited("%s: netdev not found\n", __func__);
                rc = -EINVAL;
                goto release_lock;
        }

        if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
                rc = -ENOTSUPP;
                goto release_netdev;
        }

        /* Avoid offloading if the device is down
         * We don't want to offload new flows after
         * the NETDEV_DOWN event
         */
        if (!(netdev->flags & IFF_UP)) {
                rc = -EINVAL;
                goto release_netdev;
        }

        ctx->priv_ctx_tx = offload_ctx;
        rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
                                             &ctx->crypto_send.info,
                                             tcp_sk(sk)->write_seq);
        if (rc)
                goto release_netdev;

        tls_device_attach(ctx, sk, netdev);

        /* following this assignment tls_is_sk_tx_device_offloaded
         * will return true and the context might be accessed
         * by the netdev's xmit function.
         */
        smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
        dev_put(netdev);
        up_read(&device_offload_lock);
        goto out;

release_netdev:
        dev_put(netdev);
release_lock:
        up_read(&device_offload_lock);
        clean_acked_data_disable(inet_csk(sk));
        crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
        kfree(ctx->tx.rec_seq);
free_iv:
        kfree(ctx->tx.iv);
free_offload_ctx:
        kfree(offload_ctx);
        ctx->priv_ctx_tx = NULL;
free_marker_record:
        kfree(start_marker_record);
out:
        return rc;
}

int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
        struct tls_offload_context_rx *context;
        struct net_device *netdev;
        int rc = 0;

        /* We support starting offload on multiple sockets
         * concurrently, so we only need a read lock here.
         * This lock must precede get_netdev_for_sock to prevent races between
         * NETDEV_DOWN and setsockopt.
         */
        down_read(&device_offload_lock);
        netdev = get_netdev_for_sock(sk);
        if (!netdev) {
                pr_err_ratelimited("%s: netdev not found\n", __func__);
                rc = -EINVAL;
                goto release_lock;
        }

        if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
                pr_err_ratelimited("%s: netdev %s with no TLS offload\n",
                                   __func__, netdev->name);
                rc = -ENOTSUPP;
                goto release_netdev;
        }

        /* Avoid offloading if the device is down
         * We don't want to offload new flows after
         * the NETDEV_DOWN event
         */
        if (!(netdev->flags & IFF_UP)) {
                rc = -EINVAL;
                goto release_netdev;
        }

        context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
        if (!context) {
                rc = -ENOMEM;
                goto release_netdev;
        }

        ctx->priv_ctx_rx = context;
        rc = tls_set_sw_offload(sk, ctx, 0);
        if (rc)
                goto release_ctx;

        rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
                                             &ctx->crypto_recv.info,
                                             tcp_sk(sk)->copied_seq);
        if (rc) {
                pr_err_ratelimited("%s: The netdev has refused to offload this socket\n",
                                   __func__);
                goto free_sw_resources;
        }

        tls_device_attach(ctx, sk, netdev);
        goto release_netdev;

free_sw_resources:
        tls_sw_free_resources_rx(sk);
release_ctx:
        ctx->priv_ctx_rx = NULL;
release_netdev:
        dev_put(netdev);
release_lock:
        up_read(&device_offload_lock);
        return rc;
}

void tls_device_offload_cleanup_rx(struct sock *sk)
{
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct net_device *netdev;

        down_read(&device_offload_lock);
        netdev = tls_ctx->netdev;
        if (!netdev)
                goto out;

        if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
                pr_err_ratelimited("%s: device is missing NETIF_F_HW_TLS_RX cap\n",
                                   __func__);
                goto out;
        }

        netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
                                        TLS_OFFLOAD_CTX_DIR_RX);

        if (tls_ctx->tx_conf != TLS_HW) {
                dev_put(netdev);
                tls_ctx->netdev = NULL;
        }
out:
        up_read(&device_offload_lock);
        kfree(tls_ctx->rx.rec_seq);
        kfree(tls_ctx->rx.iv);
        tls_sw_release_resources_rx(sk);
}

static int tls_device_down(struct net_device *netdev)
{
        struct tls_context *ctx, *tmp;
        unsigned long flags;
        LIST_HEAD(list);

        /* Request a write lock to block new offload attempts */
        down_write(&device_offload_lock);

        spin_lock_irqsave(&tls_device_lock, flags);
        list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
                if (ctx->netdev != netdev ||
                    !refcount_inc_not_zero(&ctx->refcount))
                        continue;

                list_move(&ctx->list, &list);
        }
        spin_unlock_irqrestore(&tls_device_lock, flags);

        list_for_each_entry_safe(ctx, tmp, &list, list) {
                if (ctx->tx_conf == TLS_HW)
                        netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
                                                        TLS_OFFLOAD_CTX_DIR_TX);
                if (ctx->rx_conf == TLS_HW)
                        netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
                                                        TLS_OFFLOAD_CTX_DIR_RX);
                ctx->netdev = NULL;
                dev_put(netdev);
                list_del_init(&ctx->list);

                if (refcount_dec_and_test(&ctx->refcount))
                        tls_device_free_ctx(ctx);
        }

        up_write(&device_offload_lock);

        flush_work(&tls_device_gc_work);

        return NOTIFY_DONE;
}

static int tls_dev_event(struct notifier_block *this, unsigned long event,
                         void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        if (!(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_FEAT_CHANGE:
                if ((dev->features & NETIF_F_HW_TLS_RX) &&
                    !dev->tlsdev_ops->tls_dev_resync_rx)
                        return NOTIFY_BAD;

                if (dev->tlsdev_ops &&
                    dev->tlsdev_ops->tls_dev_add &&
                    dev->tlsdev_ops->tls_dev_del)
                        return NOTIFY_DONE;
                else
                        return NOTIFY_BAD;
        case NETDEV_DOWN:
                return tls_device_down(dev);
        }
        return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
        .notifier_call = tls_dev_event,
};

void __init tls_device_init(void)
{
        register_netdevice_notifier(&tls_dev_notifier);
}

void __exit tls_device_cleanup(void)
{
        unregister_netdevice_notifier(&tls_dev_notifier);
        flush_work(&tls_device_gc_work);
}