/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "tls.h"
#include "trace.h"

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static LIST_HEAD(tls_device_down_list);
static DEFINE_SPINLOCK(tls_device_lock);

static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	tls_ctx_free(NULL, ctx);
}

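/* Contexts are freed from a workqueue rather than directly from the
 * socket destructor: sk_destruct may run in atomic context, while the
 * driver's tls_dev_del callback is allowed to sleep.
 */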
static void tls_device_gc_task(struct work_struct *work)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(gc_list);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_splice_init(&tls_device_gc_list, &gc_list);
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
		struct net_device *netdev = ctx->netdev;

		if (netdev && ctx->tx_conf == TLS_HW) {
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
			dev_put(netdev);
			ctx->netdev = NULL;
		}

		list_del(&ctx->list);
		tls_device_free_ctx(ctx);
	}
}

static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&tls_device_lock, flags);
	list_move_tail(&ctx->list, &tls_device_gc_list);

	/* schedule_work inside the spinlock
	 * to make sure tls_device_down waits for that work.
	 */
	schedule_work(&tls_device_gc_work);

	spin_unlock_irqrestore(&tls_device_lock, flags);
}

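/* The socket's dst may point at an upper device such as a bonding or
 * team master; netdev_sk_get_lowest_dev() walks down to the lower
 * device that will actually carry this socket's traffic.
 */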
/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = netdev_sk_get_lowest_dev(dst->dev, sk);
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
	int i;

	for (i = 0; i < record->num_frags; i++)
		__skb_frag_unref(&record->frags[i], false);
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

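/* Registered via clean_acked_data_enable() and invoked from the TCP ACK
 * processing path: records fully acknowledged by the peer can never be
 * retransmitted, so their pages can be released here.
 */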
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq))
		ctx->retransmit_hint = NULL;

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	if (refcount_dec_and_test(&tls_ctx->refcount))
		tls_device_queue_ctx_destruction(tls_ctx);
}
EXPORT_SYMBOL_GPL(tls_device_sk_destruct);

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	trace_tls_device_tx_resync_req(sk, got_seq, exp_seq);
	WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
}
EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request);

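/* Hand a TX resync over to the driver: mark the tail skb as end of
 * record so the next record starts on a clean packet boundary, then
 * pass the TCP sequence and record sequence number of that boundary
 * to the driver.
 */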
static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
				 u32 seq)
{
	struct net_device *netdev;
	struct sk_buff *skb;
	int err = 0;
	u8 *rcd_sn;

	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	rcd_sn = tls_ctx->tx.rec_seq;

	trace_tls_device_tx_resync_send(sk, seq, rcd_sn);
	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (netdev)
		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
							 rcd_sn,
							 TLS_OFFLOAD_CTX_DIR_TX);
	up_read(&device_offload_lock);
	if (err)
		return;

	clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
}

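/* Append size bytes from pfrag to the open record, extending the
 * record's last fragment in place when the new bytes are contiguous
 * with it in the same page.
 */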
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (skb_frag_page(frag) == pfrag->page &&
	    skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
		skb_frag_size_add(frag, size);
	} else {
		++frag;
		__skb_frag_set_page(frag, pfrag->page);
		skb_frag_off_set(frag, pfrag->offset);
		skb_frag_size_set(frag, size);
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

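/* Close the open record: fix up end_seq, publish the record on
 * records_list (under RCU) for tls_get_record(), advance the record
 * sequence number, and push the fragments down to TCP via tls_push_sg().
 */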
static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   int flags)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	skb_frag_t *frag;
	int i;

	record->end_seq = tp->write_seq + record->len;
	list_add_tail_rcu(&record->list, &offload_ctx->records_list);
	offload_ctx->open_record = NULL;

	if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
		tls_device_resync_tx(sk, ctx, tp->write_seq);

	tls_advance_record_sn(sk, prot, &ctx->tx);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    skb_frag_size(frag), skb_frag_off(frag));
		sk_mem_charge(sk, skb_frag_size(frag));
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

static int tls_device_record_close(struct sock *sk,
				   struct tls_context *ctx,
				   struct tls_record_info *record,
				   struct page_frag *pfrag,
				   unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	int ret;

	/* append tag
	 * device will fill in the tag, we just need to append a placeholder
	 * use socket memory to improve coalescing (re-using a single buffer
	 * increases frag count)
	 * if we can't allocate memory now, steal some back from data
	 */
	if (likely(skb_page_frag_refill(prot->tag_size, pfrag,
					sk->sk_allocation))) {
		ret = 0;
		tls_append_frag(record, pfrag, prot->tag_size);
	} else {
		ret = prot->tag_size;
		if (record->len <= prot->overhead_size)
			return -ENOMEM;
	}

	/* fill prepend */
	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
			 record->len - prot->overhead_size,
			 record_type);
	return ret;
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	skb_frag_off_set(frag, pfrag->offset);
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

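/* Copy from the iter into the record's buffer, using cache-bypassing
 * copies for whole cache lines: the payload is normally consumed by the
 * device via DMA rather than read back by the CPU, so there is little
 * point pulling it into the cache. The unaligned head and tail are
 * copied with regular cached stores.
 */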
static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t pre_copy, nocache;

	pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1);
	if (pre_copy) {
		pre_copy = min(pre_copy, bytes);
		if (copy_from_iter(addr, pre_copy, i) != pre_copy)
			return -EFAULT;
		bytes -= pre_copy;
		addr += pre_copy;
	}

	nocache = round_down(bytes, SMP_CACHE_BYTES);
	if (copy_from_iter_nocache(addr, nocache, i) != nocache)
		return -EFAULT;
	bytes -= nocache;
	addr += nocache;

	if (bytes && copy_from_iter(addr, bytes, i) != bytes)
		return -EFAULT;

	return 0;
}

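/* tls_push_data() is fed either an iov_iter (sendmsg and the non-zerocopy
 * sendpage paths) or a bare page offset (zerocopy sendfile path); the
 * union keeps a single argument slot for both.
 */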
union tls_iter_offset {
	struct iov_iter *msg_iter;
	int offset;
};

static int tls_push_data(struct sock *sk,
			 union tls_iter_offset iter_offset,
			 size_t size, int flags,
			 unsigned char record_type,
			 struct page *zc_page)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	struct tls_record_info *record;
	int tls_push_record_flags;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	bool more = false;
	bool done = false;
	int copy, rc = 0;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -EOPNOTSUPP;

	if (unlikely(sk->sk_err))
		return -sk->sk_err;

	flags |= MSG_SENDPAGE_DECRYPTED;
	tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
		if (unlikely(rc)) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;

		copy = min_t(size_t, size, max_open_record_len - record->len);
		if (copy && zc_page) {
			struct page_frag zc_pfrag;

			zc_pfrag.page = zc_page;
			zc_pfrag.offset = iter_offset.offset;
			zc_pfrag.size = copy;
			tls_append_frag(record, &zc_pfrag, copy);
		} else if (copy) {
			copy = min_t(size_t, copy, pfrag->size - pfrag->offset);

			rc = tls_device_copy_data(page_address(pfrag->page) +
						  pfrag->offset, copy,
						  iter_offset.msg_iter);
			if (rc)
				goto handle_error;
			tls_append_frag(record, pfrag, copy);
		}

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
				more = true;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_device_record_close(sk, tls_ctx, record,
						     pfrag, record_type);
			if (rc) {
				if (rc > 0) {
					size += rc;
				} else {
					size = orig_size;
					destroy_record(record);
					ctx->open_record = NULL;
					break;
				}
			}

			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     tls_push_record_flags);
			if (rc < 0)
				break;
		}
	} while (!done);

	tls_ctx->pending_open_record_frags = more;

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	union tls_iter_offset iter;
	int rc;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_process_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	iter.msg_iter = &msg->msg_iter;
	rc = tls_push_data(sk, iter, size, msg->msg_flags, record_type, NULL);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	union tls_iter_offset iter_offset;
	struct iov_iter msg_iter;
	char *kaddr;
	struct kvec iov;
	int rc;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

	if (flags & MSG_OOB) {
		rc = -EOPNOTSUPP;
		goto out;
	}

	if (tls_ctx->zerocopy_sendfile) {
		iter_offset.offset = offset;
		rc = tls_push_data(sk, iter_offset, size,
				   flags, TLS_RECORD_TYPE_DATA, page);
		goto out;
	}

	kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
	iter_offset.msg_iter = &msg_iter;
	rc = tls_push_data(sk, iter_offset, size, flags, TLS_RECORD_TYPE_DATA,
			   NULL);
	kunmap(page);

out:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return rc;
}

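/* Look up the record that contains TCP sequence number seq, starting
 * from the retransmit hint when it is still relevant, and return the
 * record's sequence number through p_record_sn. Called from the
 * driver's datapath for (re)transmitted segments, hence the RCU walk.
 */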
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info, *last;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry_or_null(&context->records_list,
						struct tls_record_info, list);
		if (!info)
			return NULL;
		/* send the start_marker record if seq number is before the
		 * tls offload start marker sequence number. This record is
		 * required to handle TCP packets which are before TLS offload
		 * started.
		 *  And if it's not start marker, look if this seq number
		 * belongs to the list.
		 */
		if (likely(!tls_record_is_start_marker(info))) {
			/* we have the first record, get the last record to see
			 * if this seq number belongs to the list.
			 */
			last = list_last_entry(&context->records_list,
					       struct tls_record_info, list);

			if (!between(seq, tls_record_start_seq(info),
				     last->end_seq))
				return NULL;
		}
		record_sn = context->unacked_record_sn;
	}

	/* We just need the _rcu for the READ_ONCE() */
	rcu_read_lock();
	list_for_each_entry_from_rcu(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			goto exit_rcu_unlock;
		}
		record_sn++;
	}
	info = NULL;

exit_rcu_unlock:
	rcu_read_unlock();
	return info;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	union tls_iter_offset iter;
	struct iov_iter msg_iter;

	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
	iter.msg_iter = &msg_iter;
	return tls_push_data(sk, iter, 0, flags, TLS_RECORD_TYPE_DATA, NULL);
}

void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		WARN_ON_ONCE(sk->sk_write_pending);

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx,
					MSG_DONTWAIT | MSG_NOSIGNAL |
					MSG_SENDPAGE_DECRYPTED);
		sk->sk_allocation = sk_allocation;
	}
}

static void tls_device_resync_rx(struct tls_context *tls_ctx,
				 struct sock *sk, u32 seq, u8 *rcd_sn)
{
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
	struct net_device *netdev;

	trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
	rcu_read_lock();
	netdev = READ_ONCE(tls_ctx->netdev);
	if (netdev)
		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
						   TLS_OFFLOAD_CTX_DIR_RX);
	rcu_read_unlock();
	TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
}

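/* DRIVER_REQ_ASYNC resync: while the driver's request still has the
 * async bit set, only log record-header sequence numbers within the
 * requested window and count passing records in rcd_delta. Once the
 * driver posts the real (synchronous) request, match it against the
 * log; rcd_delta then says how far the record sequence number must be
 * rolled back for the resync point.
 */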
static bool
tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
			   s64 resync_req, u32 *seq, u16 *rcd_delta)
{
	u32 is_async = resync_req & RESYNC_REQ_ASYNC;
	u32 req_seq = resync_req >> 32;
	u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
	u16 i;

	*rcd_delta = 0;

	if (is_async) {
		/* shouldn't get to wraparound:
		 * too long in async stage, something bad happened
		 */
		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
			return false;

		/* asynchronous stage: log all headers seq such that
		 * req_seq <= seq <= end_seq, and wait for real resync request
		 */
		if (before(*seq, req_seq))
			return false;
		if (!after(*seq, req_end) &&
		    resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
			resync_async->log[resync_async->loglen++] = *seq;

		resync_async->rcd_delta++;

		return false;
	}

	/* synchronous stage: check against the logged entries and
	 * proceed to check the next entries if no match was found
	 */
	for (i = 0; i < resync_async->loglen; i++)
		if (req_seq == resync_async->log[i] &&
		    atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
			*rcd_delta = resync_async->rcd_delta - i;
			*seq = req_seq;
			resync_async->loglen = 0;
			resync_async->rcd_delta = 0;
			return true;
		}

	resync_async->loglen = 0;
	resync_async->rcd_delta = 0;

	if (req_seq == *seq &&
	    atomic64_try_cmpxchg(&resync_async->req,
				 &resync_req, 0))
		return true;

	return false;
}

void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx;
	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
	u32 sock_data, is_req_pending;
	struct tls_prot_info *prot;
	s64 resync_req;
	u16 rcd_delta;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;
	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
		return;

	prot = &tls_ctx->prot_info;
	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);

	switch (rx_ctx->resync_type) {
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
		resync_req = atomic64_read(&rx_ctx->resync_req);
		req_seq = resync_req >> 32;
		seq += TLS_HEADER_SIZE - 1;
		is_req_pending = resync_req;

		if (likely(!is_req_pending) || req_seq != seq ||
		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
			return;
		break;
	case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
		if (likely(!rx_ctx->resync_nh_do_now))
			return;

		/* head of next rec is already in, note that the sock_inq will
		 * include the currently parsed message when called from parser
		 */
		sock_data = tcp_inq(sk);
		if (sock_data > rcd_len) {
			trace_tls_device_rx_resync_nh_delay(sk, sock_data,
							    rcd_len);
			return;
		}

		rx_ctx->resync_nh_do_now = 0;
		seq += rcd_len;
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);
		break;
	case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
		resync_req = atomic64_read(&rx_ctx->resync_async->req);
		is_req_pending = resync_req;
		if (likely(!is_req_pending))
			return;

		if (!tls_device_rx_resync_async(rx_ctx->resync_async,
						resync_req, &seq, &rcd_delta))
			return;
		tls_bigint_subtract(rcd_sn, rcd_delta);
		break;
	}

	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}

static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
					   struct tls_offload_context_rx *ctx,
					   struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm;

	/* device will request resyncs by itself based on stream scan */
	if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
		return;
	/* already scheduled */
	if (ctx->resync_nh_do_now)
		return;
	/* seen decrypted fragments since last fully-failed record */
	if (ctx->resync_nh_reset) {
		ctx->resync_nh_reset = 0;
		ctx->resync_nh.decrypted_failed = 1;
		ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
		return;
	}

	if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
		return;

	/* doing resync, bump the next target in case it fails */
	if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
		ctx->resync_nh.decrypted_tgt *= 2;
	else
		ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;

	rxm = strp_msg(skb);

	/* head of next rec is already in, parser will sync for us */
	if (tcp_inq(sk) > rxm->full_len) {
		trace_tls_device_rx_resync_nh_schedule(sk);
		ctx->resync_nh_do_now = 1;
	} else {
		struct tls_prot_info *prot = &tls_ctx->prot_info;
		u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];

		memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
		tls_bigint_increment(rcd_sn, prot->rec_seq_size);

		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
				     rcd_sn);
	}
}

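/* The device decrypted only part of this record. AES-GCM payload
 * decryption is a counter-mode XOR, so running the software decrypt
 * over the mixed record turns the already-plaintext ranges back into
 * ciphertext in the scratch buffer; copying those ranges over the
 * device-decrypted fragments leaves the skb fully encrypted again,
 * ready for the regular software decrypt (the bogus auth tag is
 * expected and ignored).
 */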
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
	struct sk_buff *skb_iter, *unused;
	struct scatterlist sg[1];
	char *orig_buf, *buf;

	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		err = nsg;
		goto free_buf;
	}

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE +
		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
	err = skb_copy_bits(skb, offset, buf,
			    TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
	if (err)
		goto free_buf;

	/* We are interested only in the decrypted data not the auth */
	err = decrypt_skb(sk, skb, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted) {
			err = skb_store_bits(skb, offset, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

		/* Practically all frags must belong to msg if reencrypt
		 * is needed with current strparser and coalescing logic,
		 * but strparser may "get optimized", so let's be safe.
		 */
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted) {
			err = skb_store_bits(skb_iter, frag_pos, buf, copy);
			if (err)
				goto free_buf;
		}

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}

int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
			 struct sk_buff *skb, struct strp_msg *rxm)
{
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	int is_decrypted = skb->decrypted;
	int is_encrypted = !is_decrypted;
	struct sk_buff *skb_iter;

	/* Check if all the data is decrypted already */
	skb_walk_frags(skb, skb_iter) {
		is_decrypted &= skb_iter->decrypted;
		is_encrypted &= !skb_iter->decrypted;
	}

	trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
				   tls_ctx->rx.rec_seq, rxm->full_len,
				   is_encrypted, is_decrypted);

	if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
		if (likely(is_encrypted || is_decrypted))
			return is_decrypted;

		/* After tls_device_down disables the offload, the next SKB will
		 * likely have initial fragments decrypted, and final ones not
		 * decrypted. We need to reencrypt that single SKB.
		 */
		return tls_device_reencrypt(sk, skb);
	}

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise reencrypt the partially decrypted
	 * record.
	 */
	if (is_decrypted) {
		ctx->resync_nh_reset = 1;
		return is_decrypted;
	}
	if (is_encrypted) {
		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
		return 0;
	}

	ctx->resync_nh_reset = 1;
	return tls_device_reencrypt(sk, skb);
}

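/* Called from both the TX and RX setup paths; the sk_destruct check
 * makes the attach idempotent, so the context is put on tls_device_list
 * and the destructor is hooked only once per socket.
 */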
static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		ctx->netdev = netdev;
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		smp_store_release(&sk->sk_destruct, tls_device_sk_destruct);
	}
}

int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	__be64 rcd_sn;
	int rc;

	if (!ctx)
		return -EINVAL;

	if (ctx->priv_ctx_tx)
		return -EEXIST;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	crypto_info = &ctx->crypto_send.info;
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto release_netdev;
	}

	/* Sanity-check the rec_seq_size for stack allocations */
	if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto release_netdev;
	}

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size + prot->tag_size;
	prot->iv_size = iv_size;
	prot->salt_size = salt_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto release_netdev;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	prot->rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record) {
		rc = -ENOMEM;
		goto free_rec_seq;
	}

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_offload_ctx;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
				     tcp_sk(sk)->write_seq, rec_seq, rc);
	if (rc)
		goto release_lock;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	/* following this assignment tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);

	return 0;

release_lock:
	up_read(&device_offload_lock);
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
release_netdev:
	dev_put(netdev);
	return rc;
}

int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls12_crypto_info_aes_gcm_128 *info;
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
		return -EOPNOTSUPP;

	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		return -EINVAL;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -EOPNOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
	down_read(&device_offload_lock);
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_lock;
	}

	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_lock;
	}
	context->resync_nh_reset = 1;

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, ctx, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	info = (void *)&ctx->crypto_recv.info;
	trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX,
				     tcp_sk(sk)->copied_seq, info->rec_seq, rc);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	up_read(&device_offload_lock);

	dev_put(netdev);

	return 0;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_lock:
	up_read(&device_offload_lock);
release_netdev:
	dev_put(netdev);
	return rc;
}

void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (!netdev)
		goto out;

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		tls_ctx->netdev = NULL;
	} else {
		set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}

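/* NETDEV_DOWN handler: tear down every offloaded context bound to this
 * device while the driver can still service tls_dev_del, switching the
 * affected sockets over to the software fallback.
 */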
static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		if (ctx->netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		/* Stop offloaded TX and switch to the fallback.
		 * tls_is_sk_tx_device_offloaded will return false.
		 */
		WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);

		/* Stop the RX and TX resync.
		 * tls_dev_resync must not be called after tls_dev_del.
		 */
		WRITE_ONCE(ctx->netdev, NULL);

		/* Start skipping the RX resync logic completely. */
		set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);

		/* Sync with inflight packets. After this point:
		 * TX: no non-encrypted packets will be passed to the driver.
		 * RX: resync requests from the driver will be ignored.
		 */
		synchronize_net();

		/* Release the offload context on the driver side. */
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW &&
		    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);

		dev_put(netdev);

		/* Move the context to a separate list for two reasons:
		 * 1. When the context is deallocated, list_del is called.
		 * 2. It's no longer an offloaded context, so we don't want to
		 *    run offload-specific code on this context.
		 */
		spin_lock_irqsave(&tls_device_lock, flags);
		list_move_tail(&ctx->list, &tls_device_down_list);
		spin_unlock_irqrestore(&tls_device_lock, flags);

		/* Device contexts for RX and TX will be freed on sk_destruct
		 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
		 * Now release the ref taken above.
		 */
		if (refcount_dec_and_test(&ctx->refcount))
			tls_device_free_ctx(ctx);
	}

	up_write(&device_offload_lock);

	flush_work(&tls_device_gc_work);

	return NOTIFY_DONE;
}

static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dev->tlsdev_ops &&
	    !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if (netif_is_bond_master(dev))
			return NOTIFY_DONE;
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync)
			return NOTIFY_BAD;

		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
	.notifier_call = tls_dev_event,
};

int __init tls_device_init(void)
{
	return register_netdevice_notifier(&tls_dev_notifier);
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	flush_work(&tls_device_gc_work);
	clean_acked_data_flush();
}