| 1 | /* Copyright (c) 2018, Mellanox Technologies All rights reserved. |
| 2 | * |
| 3 | * This software is available to you under a choice of one of two |
| 4 | * licenses. You may choose to be licensed under the terms of the GNU |
| 5 | * General Public License (GPL) Version 2, available from the file |
| 6 | * COPYING in the main directory of this source tree, or the |
| 7 | * OpenIB.org BSD license below: |
| 8 | * |
| 9 | * Redistribution and use in source and binary forms, with or |
| 10 | * without modification, are permitted provided that the following |
| 11 | * conditions are met: |
| 12 | * |
| 13 | * - Redistributions of source code must retain the above |
| 14 | * copyright notice, this list of conditions and the following |
| 15 | * disclaimer. |
| 16 | * |
| 17 | * - Redistributions in binary form must reproduce the above |
| 18 | * copyright notice, this list of conditions and the following |
| 19 | * disclaimer in the documentation and/or other materials |
| 20 | * provided with the distribution. |
| 21 | * |
| 22 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
| 23 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| 24 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
| 25 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
| 26 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
| 27 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
| 28 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 29 | * SOFTWARE. |
| 30 | */ |
| 31 | |
| 32 | #include <crypto/aead.h> |
| 33 | #include <linux/highmem.h> |
| 34 | #include <linux/module.h> |
| 35 | #include <linux/netdevice.h> |
| 36 | #include <net/dst.h> |
| 37 | #include <net/inet_connection_sock.h> |
| 38 | #include <net/tcp.h> |
| 39 | #include <net/tls.h> |
| 40 | |
| 41 | #include "tls.h" |
| 42 | #include "trace.h" |
| 43 | |
| 44 | /* device_offload_lock is used to synchronize tls_dev_add |
| 45 | * against NETDEV_DOWN notifications. |
| 46 | */ |
| 47 | static DECLARE_RWSEM(device_offload_lock); |
| 48 | |
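/* Workqueue for the deferred tls_dev_del() call on the TX destroy path;
 * tls_device_down() flushes it, so a device going down waits for any
 * pending driver callbacks.
 */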
| 49 | static struct workqueue_struct *destruct_wq __read_mostly; |
| 50 | |
| 51 | static LIST_HEAD(tls_device_list); |
| 52 | static LIST_HEAD(tls_device_down_list); |
| 53 | static DEFINE_SPINLOCK(tls_device_lock); |
| 54 | |
| 55 | static void tls_device_free_ctx(struct tls_context *ctx) |
| 56 | { |
| 57 | if (ctx->tx_conf == TLS_HW) { |
| 58 | kfree(tls_offload_ctx_tx(ctx)); |
| 59 | kfree(ctx->tx.rec_seq); |
| 60 | kfree(ctx->tx.iv); |
| 61 | } |
| 62 | |
| 63 | if (ctx->rx_conf == TLS_HW) |
| 64 | kfree(tls_offload_ctx_rx(ctx)); |
| 65 | |
| 66 | tls_ctx_free(NULL, ctx); |
| 67 | } |
| 68 | |
| 69 | static void tls_device_tx_del_task(struct work_struct *work) |
| 70 | { |
| 71 | struct tls_offload_context_tx *offload_ctx = |
| 72 | container_of(work, struct tls_offload_context_tx, destruct_work); |
| 73 | struct tls_context *ctx = offload_ctx->ctx; |
| 74 | struct net_device *netdev; |
| 75 | |
| 76 | /* Safe, because this is the destroy flow, refcount is 0, so |
| 77 | * tls_device_down can't store this field in parallel. |
| 78 | */ |
| 79 | netdev = rcu_dereference_protected(ctx->netdev, |
| 80 | !refcount_read(&ctx->refcount)); |
| 81 | |
| 82 | netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX); |
| 83 | dev_put(netdev); |
	RCU_INIT_POINTER(ctx->netdev, NULL);
| 85 | tls_device_free_ctx(ctx); |
| 86 | } |
| 87 | |
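/* Drop the final reference and tear the context down.  A TX context with
 * a live netdev defers the driver callback to destruct_wq; the work is
 * queued under tls_device_lock so that tls_device_down() can observe and
 * flush it.  Everything else is freed synchronously.
 */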
| 88 | static void tls_device_queue_ctx_destruction(struct tls_context *ctx) |
| 89 | { |
| 90 | struct net_device *netdev; |
| 91 | unsigned long flags; |
| 92 | bool async_cleanup; |
| 93 | |
| 94 | spin_lock_irqsave(&tls_device_lock, flags); |
| 95 | if (unlikely(!refcount_dec_and_test(&ctx->refcount))) { |
| 96 | spin_unlock_irqrestore(&tls_device_lock, flags); |
| 97 | return; |
| 98 | } |
| 99 | |
| 100 | list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */ |
| 101 | |
| 102 | /* Safe, because this is the destroy flow, refcount is 0, so |
| 103 | * tls_device_down can't store this field in parallel. |
| 104 | */ |
| 105 | netdev = rcu_dereference_protected(ctx->netdev, |
| 106 | !refcount_read(&ctx->refcount)); |
| 107 | |
| 108 | async_cleanup = netdev && ctx->tx_conf == TLS_HW; |
| 109 | if (async_cleanup) { |
| 110 | struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx); |
| 111 | |
| 112 | /* queue_work inside the spinlock |
| 113 | * to make sure tls_device_down waits for that work. |
| 114 | */ |
| 115 | queue_work(destruct_wq, &offload_ctx->destruct_work); |
| 116 | } |
| 117 | spin_unlock_irqrestore(&tls_device_lock, flags); |
| 118 | |
| 119 | if (!async_cleanup) |
| 120 | tls_device_free_ctx(ctx); |
| 121 | } |
| 122 | |
/* We assume that the socket is already connected.  Returns the lowest
 * device under the socket's dst (e.g. below bonding), with a reference
 * held, or NULL if there is no usable device.
 */
| 124 | static struct net_device *get_netdev_for_sock(struct sock *sk) |
| 125 | { |
| 126 | struct dst_entry *dst = sk_dst_get(sk); |
| 127 | struct net_device *netdev = NULL; |
| 128 | |
| 129 | if (likely(dst)) { |
| 130 | netdev = netdev_sk_get_lowest_dev(dst->dev, sk); |
| 131 | dev_hold(netdev); |
| 132 | } |
| 133 | |
| 134 | dst_release(dst); |
| 135 | |
| 136 | return netdev; |
| 137 | } |
| 138 | |
| 139 | static void destroy_record(struct tls_record_info *record) |
| 140 | { |
| 141 | int i; |
| 142 | |
| 143 | for (i = 0; i < record->num_frags; i++) |
| 144 | __skb_frag_unref(&record->frags[i], false); |
| 145 | kfree(record); |
| 146 | } |
| 147 | |
| 148 | static void delete_all_records(struct tls_offload_context_tx *offload_ctx) |
| 149 | { |
| 150 | struct tls_record_info *info, *temp; |
| 151 | |
| 152 | list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) { |
| 153 | list_del(&info->list); |
| 154 | destroy_record(info); |
| 155 | } |
| 156 | |
| 157 | offload_ctx->retransmit_hint = NULL; |
| 158 | } |
| 159 | |
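/* clean_acked_data hook, called from TCP as the ACK sequence advances:
 * free every closed record whose end_seq is now fully acknowledged.
 */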
| 160 | static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq) |
| 161 | { |
| 162 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
| 163 | struct tls_record_info *info, *temp; |
| 164 | struct tls_offload_context_tx *ctx; |
| 165 | u64 deleted_records = 0; |
| 166 | unsigned long flags; |
| 167 | |
| 168 | if (!tls_ctx) |
| 169 | return; |
| 170 | |
| 171 | ctx = tls_offload_ctx_tx(tls_ctx); |
| 172 | |
| 173 | spin_lock_irqsave(&ctx->lock, flags); |
| 174 | info = ctx->retransmit_hint; |
| 175 | if (info && !before(acked_seq, info->end_seq)) |
| 176 | ctx->retransmit_hint = NULL; |
| 177 | |
| 178 | list_for_each_entry_safe(info, temp, &ctx->records_list, list) { |
| 179 | if (before(acked_seq, info->end_seq)) |
| 180 | break; |
| 181 | list_del(&info->list); |
| 182 | |
| 183 | destroy_record(info); |
| 184 | deleted_records++; |
| 185 | } |
| 186 | |
| 187 | ctx->unacked_record_sn += deleted_records; |
| 188 | spin_unlock_irqrestore(&ctx->lock, flags); |
| 189 | } |
| 190 | |
| 191 | /* At this point, there should be no references on this |
| 192 | * socket and no in-flight SKBs associated with this |
| 193 | * socket, so it is safe to free all the resources. |
| 194 | */ |
| 195 | void tls_device_sk_destruct(struct sock *sk) |
| 196 | { |
| 197 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
| 198 | struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); |
| 199 | |
| 200 | tls_ctx->sk_destruct(sk); |
| 201 | |
| 202 | if (tls_ctx->tx_conf == TLS_HW) { |
| 203 | if (ctx->open_record) |
| 204 | destroy_record(ctx->open_record); |
| 205 | delete_all_records(ctx); |
| 206 | crypto_free_aead(ctx->aead_send); |
| 207 | clean_acked_data_disable(inet_csk(sk)); |
| 208 | } |
| 209 | |
| 210 | tls_device_queue_ctx_destruction(tls_ctx); |
| 211 | } |
| 212 | EXPORT_SYMBOL_GPL(tls_device_sk_destruct); |
| 213 | |
| 214 | void tls_device_free_resources_tx(struct sock *sk) |
| 215 | { |
| 216 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
| 217 | |
| 218 | tls_free_partial_record(sk, tls_ctx); |
| 219 | } |
| 220 | |
| 221 | void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq) |
| 222 | { |
| 223 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
| 224 | |
| 225 | trace_tls_device_tx_resync_req(sk, got_seq, exp_seq); |
| 226 | WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags)); |
| 227 | } |
| 228 | EXPORT_SYMBOL_GPL(tls_offload_tx_resync_request); |
| 229 | |
| 230 | static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx, |
| 231 | u32 seq) |
| 232 | { |
| 233 | struct net_device *netdev; |
| 234 | struct sk_buff *skb; |
| 235 | int err = 0; |
| 236 | u8 *rcd_sn; |
| 237 | |
| 238 | skb = tcp_write_queue_tail(sk); |
| 239 | if (skb) |
| 240 | TCP_SKB_CB(skb)->eor = 1; |
| 241 | |
| 242 | rcd_sn = tls_ctx->tx.rec_seq; |
| 243 | |
| 244 | trace_tls_device_tx_resync_send(sk, seq, rcd_sn); |
| 245 | down_read(&device_offload_lock); |
| 246 | netdev = rcu_dereference_protected(tls_ctx->netdev, |
| 247 | lockdep_is_held(&device_offload_lock)); |
| 248 | if (netdev) |
| 249 | err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, |
| 250 | rcd_sn, |
| 251 | TLS_OFFLOAD_CTX_DIR_TX); |
| 252 | up_read(&device_offload_lock); |
| 253 | if (err) |
| 254 | return; |
| 255 | |
| 256 | clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags); |
| 257 | } |
| 258 | |
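/* Append @size bytes at pfrag->offset to the open record: extend the last
 * frag when the new bytes are contiguous with it, otherwise start a new
 * frag holding an extra page reference.
 */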
| 259 | static void tls_append_frag(struct tls_record_info *record, |
| 260 | struct page_frag *pfrag, |
| 261 | int size) |
| 262 | { |
| 263 | skb_frag_t *frag; |
| 264 | |
| 265 | frag = &record->frags[record->num_frags - 1]; |
| 266 | if (skb_frag_page(frag) == pfrag->page && |
| 267 | skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) { |
| 268 | skb_frag_size_add(frag, size); |
| 269 | } else { |
| 270 | ++frag; |
| 271 | __skb_frag_set_page(frag, pfrag->page); |
| 272 | skb_frag_off_set(frag, pfrag->offset); |
| 273 | skb_frag_size_set(frag, size); |
| 274 | ++record->num_frags; |
| 275 | get_page(pfrag->page); |
| 276 | } |
| 277 | |
| 278 | pfrag->offset += size; |
| 279 | record->len += size; |
| 280 | } |
| 281 | |
| 282 | static int tls_push_record(struct sock *sk, |
| 283 | struct tls_context *ctx, |
| 284 | struct tls_offload_context_tx *offload_ctx, |
| 285 | struct tls_record_info *record, |
| 286 | int flags) |
| 287 | { |
| 288 | struct tls_prot_info *prot = &ctx->prot_info; |
| 289 | struct tcp_sock *tp = tcp_sk(sk); |
| 290 | skb_frag_t *frag; |
| 291 | int i; |
| 292 | |
| 293 | record->end_seq = tp->write_seq + record->len; |
| 294 | list_add_tail_rcu(&record->list, &offload_ctx->records_list); |
| 295 | offload_ctx->open_record = NULL; |
| 296 | |
| 297 | if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags)) |
| 298 | tls_device_resync_tx(sk, ctx, tp->write_seq); |
| 299 | |
| 300 | tls_advance_record_sn(sk, prot, &ctx->tx); |
| 301 | |
| 302 | for (i = 0; i < record->num_frags; i++) { |
| 303 | frag = &record->frags[i]; |
| 304 | sg_unmark_end(&offload_ctx->sg_tx_data[i]); |
| 305 | sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag), |
| 306 | skb_frag_size(frag), skb_frag_off(frag)); |
| 307 | sk_mem_charge(sk, skb_frag_size(frag)); |
| 308 | get_page(skb_frag_page(frag)); |
| 309 | } |
| 310 | sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]); |
| 311 | |
| 312 | /* all ready, send */ |
| 313 | return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags); |
| 314 | } |
| 315 | |
| 316 | static int tls_device_record_close(struct sock *sk, |
| 317 | struct tls_context *ctx, |
| 318 | struct tls_record_info *record, |
| 319 | struct page_frag *pfrag, |
| 320 | unsigned char record_type) |
| 321 | { |
| 322 | struct tls_prot_info *prot = &ctx->prot_info; |
| 323 | int ret; |
| 324 | |
	/* Append a placeholder for the auth tag; the device fills in the
	 * actual tag.  Take it from the socket's page frag to improve
	 * coalescing (a dedicated tag buffer would add a frag).  If we
	 * can't allocate memory now, return tag_size so the caller steals
	 * that much back from the data.
	 */
| 331 | if (likely(skb_page_frag_refill(prot->tag_size, pfrag, |
| 332 | sk->sk_allocation))) { |
| 333 | ret = 0; |
| 334 | tls_append_frag(record, pfrag, prot->tag_size); |
| 335 | } else { |
| 336 | ret = prot->tag_size; |
| 337 | if (record->len <= prot->overhead_size) |
| 338 | return -ENOMEM; |
| 339 | } |
| 340 | |
| 341 | /* fill prepend */ |
| 342 | tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]), |
| 343 | record->len - prot->overhead_size, |
| 344 | record_type); |
| 345 | return ret; |
| 346 | } |
| 347 | |
| 348 | static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx, |
| 349 | struct page_frag *pfrag, |
| 350 | size_t prepend_size) |
| 351 | { |
| 352 | struct tls_record_info *record; |
| 353 | skb_frag_t *frag; |
| 354 | |
| 355 | record = kmalloc(sizeof(*record), GFP_KERNEL); |
| 356 | if (!record) |
| 357 | return -ENOMEM; |
| 358 | |
| 359 | frag = &record->frags[0]; |
| 360 | __skb_frag_set_page(frag, pfrag->page); |
| 361 | skb_frag_off_set(frag, pfrag->offset); |
| 362 | skb_frag_size_set(frag, prepend_size); |
| 363 | |
| 364 | get_page(pfrag->page); |
| 365 | pfrag->offset += prepend_size; |
| 366 | |
| 367 | record->num_frags = 1; |
| 368 | record->len = prepend_size; |
| 369 | offload_ctx->open_record = record; |
| 370 | return 0; |
| 371 | } |
| 372 | |
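/* Ensure an open record and page frag space exist: open a new record with
 * room for the prepend if there is none, then refill the socket's page
 * frag for payload data.  Returns -ENOMEM under memory pressure.
 */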
| 373 | static int tls_do_allocation(struct sock *sk, |
| 374 | struct tls_offload_context_tx *offload_ctx, |
| 375 | struct page_frag *pfrag, |
| 376 | size_t prepend_size) |
| 377 | { |
| 378 | int ret; |
| 379 | |
| 380 | if (!offload_ctx->open_record) { |
| 381 | if (unlikely(!skb_page_frag_refill(prepend_size, pfrag, |
| 382 | sk->sk_allocation))) { |
| 383 | READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk); |
| 384 | sk_stream_moderate_sndbuf(sk); |
| 385 | return -ENOMEM; |
| 386 | } |
| 387 | |
| 388 | ret = tls_create_new_record(offload_ctx, pfrag, prepend_size); |
| 389 | if (ret) |
| 390 | return ret; |
| 391 | |
| 392 | if (pfrag->size > pfrag->offset) |
| 393 | return 0; |
| 394 | } |
| 395 | |
| 396 | if (!sk_page_frag_refill(sk, pfrag)) |
| 397 | return -ENOMEM; |
| 398 | |
| 399 | return 0; |
| 400 | } |
| 401 | |
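/* Copy from the iterator in three steps: a short copy up to the next
 * cache line boundary, a non-temporal (cache-bypassing) copy of whole
 * cache lines, then a cached copy of the tail.  The bulk of the payload
 * is not read again by the CPU on this path, so avoid polluting the
 * cache with it.
 */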
| 402 | static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i) |
| 403 | { |
| 404 | size_t pre_copy, nocache; |
| 405 | |
| 406 | pre_copy = ~((unsigned long)addr - 1) & (SMP_CACHE_BYTES - 1); |
| 407 | if (pre_copy) { |
| 408 | pre_copy = min(pre_copy, bytes); |
| 409 | if (copy_from_iter(addr, pre_copy, i) != pre_copy) |
| 410 | return -EFAULT; |
| 411 | bytes -= pre_copy; |
| 412 | addr += pre_copy; |
| 413 | } |
| 414 | |
| 415 | nocache = round_down(bytes, SMP_CACHE_BYTES); |
| 416 | if (copy_from_iter_nocache(addr, nocache, i) != nocache) |
| 417 | return -EFAULT; |
| 418 | bytes -= nocache; |
| 419 | addr += nocache; |
| 420 | |
| 421 | if (bytes && copy_from_iter(addr, bytes, i) != bytes) |
| 422 | return -EFAULT; |
| 423 | |
| 424 | return 0; |
| 425 | } |
| 426 | |
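/* tls_push_data() takes its source either as an iov_iter (copy path) or
 * as an offset into the zc_page argument (zerocopy sendfile path).
 */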
| 427 | union tls_iter_offset { |
| 428 | struct iov_iter *msg_iter; |
| 429 | int offset; |
| 430 | }; |
| 431 | |
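/* Fill the open record from the source, closing and pushing records as
 * they hit the size or frag limits.  Returns the number of bytes queued,
 * or a negative errno if nothing was queued.  MSG_MORE and
 * MSG_SENDPAGE_NOTLAST leave the last record open for the next call.
 */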
| 432 | static int tls_push_data(struct sock *sk, |
| 433 | union tls_iter_offset iter_offset, |
| 434 | size_t size, int flags, |
| 435 | unsigned char record_type, |
| 436 | struct page *zc_page) |
| 437 | { |
| 438 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
| 439 | struct tls_prot_info *prot = &tls_ctx->prot_info; |
| 440 | struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); |
| 441 | struct tls_record_info *record; |
| 442 | int tls_push_record_flags; |
| 443 | struct page_frag *pfrag; |
| 444 | size_t orig_size = size; |
| 445 | u32 max_open_record_len; |
| 446 | bool more = false; |
| 447 | bool done = false; |
| 448 | int copy, rc = 0; |
| 449 | long timeo; |
| 450 | |
| 451 | if (flags & |
| 452 | ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST)) |
| 453 | return -EOPNOTSUPP; |
| 454 | |
| 455 | if (unlikely(sk->sk_err)) |
| 456 | return -sk->sk_err; |
| 457 | |
| 458 | flags |= MSG_SENDPAGE_DECRYPTED; |
| 459 | tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST; |
| 460 | |
| 461 | timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); |
| 462 | if (tls_is_partially_sent_record(tls_ctx)) { |
| 463 | rc = tls_push_partial_record(sk, tls_ctx, flags); |
| 464 | if (rc < 0) |
| 465 | return rc; |
| 466 | } |
| 467 | |
| 468 | pfrag = sk_page_frag(sk); |
| 469 | |
	/* record->len includes the prepend (header + IV), which does not
	 * count toward TLS_MAX_PAYLOAD_SIZE; the authentication tag is
	 * appended separately when the record is closed.
	 */
| 473 | max_open_record_len = TLS_MAX_PAYLOAD_SIZE + |
| 474 | prot->prepend_size; |
| 475 | do { |
| 476 | rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size); |
| 477 | if (unlikely(rc)) { |
| 478 | rc = sk_stream_wait_memory(sk, &timeo); |
| 479 | if (!rc) |
| 480 | continue; |
| 481 | |
| 482 | record = ctx->open_record; |
| 483 | if (!record) |
| 484 | break; |
| 485 | handle_error: |
| 486 | if (record_type != TLS_RECORD_TYPE_DATA) { |
			/* avoid sending a partial record with
			 * type != application_data
			 */
| 491 | size = orig_size; |
| 492 | destroy_record(record); |
| 493 | ctx->open_record = NULL; |
| 494 | } else if (record->len > prot->prepend_size) { |
| 495 | goto last_record; |
| 496 | } |
| 497 | |
| 498 | break; |
| 499 | } |
| 500 | |
| 501 | record = ctx->open_record; |
| 502 | |
| 503 | copy = min_t(size_t, size, max_open_record_len - record->len); |
| 504 | if (copy && zc_page) { |
| 505 | struct page_frag zc_pfrag; |
| 506 | |
| 507 | zc_pfrag.page = zc_page; |
| 508 | zc_pfrag.offset = iter_offset.offset; |
| 509 | zc_pfrag.size = copy; |
			tls_append_frag(record, &zc_pfrag, copy);

			/* advance into the page for the next iteration */
			iter_offset.offset += copy;
| 511 | } else if (copy) { |
| 512 | copy = min_t(size_t, copy, pfrag->size - pfrag->offset); |
| 513 | |
| 514 | rc = tls_device_copy_data(page_address(pfrag->page) + |
| 515 | pfrag->offset, copy, |
| 516 | iter_offset.msg_iter); |
| 517 | if (rc) |
| 518 | goto handle_error; |
| 519 | tls_append_frag(record, pfrag, copy); |
| 520 | } |
| 521 | |
| 522 | size -= copy; |
| 523 | if (!size) { |
| 524 | last_record: |
| 525 | tls_push_record_flags = flags; |
| 526 | if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) { |
| 527 | more = true; |
| 528 | break; |
| 529 | } |
| 530 | |
| 531 | done = true; |
| 532 | } |
| 533 | |
| 534 | if (done || record->len >= max_open_record_len || |
| 535 | (record->num_frags >= MAX_SKB_FRAGS - 1)) { |
| 536 | rc = tls_device_record_close(sk, tls_ctx, record, |
| 537 | pfrag, record_type); |
| 538 | if (rc) { |
| 539 | if (rc > 0) { |
| 540 | size += rc; |
| 541 | } else { |
| 542 | size = orig_size; |
| 543 | destroy_record(record); |
| 544 | ctx->open_record = NULL; |
| 545 | break; |
| 546 | } |
| 547 | } |
| 548 | |
| 549 | rc = tls_push_record(sk, |
| 550 | tls_ctx, |
| 551 | ctx, |
| 552 | record, |
| 553 | tls_push_record_flags); |
| 554 | if (rc < 0) |
| 555 | break; |
| 556 | } |
| 557 | } while (!done); |
| 558 | |
| 559 | tls_ctx->pending_open_record_frags = more; |
| 560 | |
| 561 | if (orig_size - size > 0) |
| 562 | rc = orig_size - size; |
| 563 | |
| 564 | return rc; |
| 565 | } |
| 566 | |
| 567 | int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) |
| 568 | { |
| 569 | unsigned char record_type = TLS_RECORD_TYPE_DATA; |
| 570 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
| 571 | union tls_iter_offset iter; |
| 572 | int rc; |
| 573 | |
| 574 | mutex_lock(&tls_ctx->tx_lock); |
| 575 | lock_sock(sk); |
| 576 | |
| 577 | if (unlikely(msg->msg_controllen)) { |
| 578 | rc = tls_process_cmsg(sk, msg, &record_type); |
| 579 | if (rc) |
| 580 | goto out; |
| 581 | } |
| 582 | |
| 583 | iter.msg_iter = &msg->msg_iter; |
| 584 | rc = tls_push_data(sk, iter, size, msg->msg_flags, record_type, NULL); |
| 585 | |
| 586 | out: |
| 587 | release_sock(sk); |
| 588 | mutex_unlock(&tls_ctx->tx_lock); |
| 589 | return rc; |
| 590 | } |
| 591 | |
| 592 | int tls_device_sendpage(struct sock *sk, struct page *page, |
| 593 | int offset, size_t size, int flags) |
| 594 | { |
| 595 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
| 596 | union tls_iter_offset iter_offset; |
| 597 | struct iov_iter msg_iter; |
| 598 | char *kaddr; |
| 599 | struct kvec iov; |
| 600 | int rc; |
| 601 | |
| 602 | if (flags & MSG_SENDPAGE_NOTLAST) |
| 603 | flags |= MSG_MORE; |
| 604 | |
| 605 | mutex_lock(&tls_ctx->tx_lock); |
| 606 | lock_sock(sk); |
| 607 | |
| 608 | if (flags & MSG_OOB) { |
| 609 | rc = -EOPNOTSUPP; |
| 610 | goto out; |
| 611 | } |
| 612 | |
| 613 | if (tls_ctx->zerocopy_sendfile) { |
| 614 | iter_offset.offset = offset; |
| 615 | rc = tls_push_data(sk, iter_offset, size, |
| 616 | flags, TLS_RECORD_TYPE_DATA, page); |
| 617 | goto out; |
| 618 | } |
| 619 | |
| 620 | kaddr = kmap(page); |
| 621 | iov.iov_base = kaddr + offset; |
| 622 | iov.iov_len = size; |
| 623 | iov_iter_kvec(&msg_iter, ITER_SOURCE, &iov, 1, size); |
| 624 | iter_offset.msg_iter = &msg_iter; |
| 625 | rc = tls_push_data(sk, iter_offset, size, flags, TLS_RECORD_TYPE_DATA, |
| 626 | NULL); |
| 627 | kunmap(page); |
| 628 | |
| 629 | out: |
| 630 | release_sock(sk); |
| 631 | mutex_unlock(&tls_ctx->tx_lock); |
| 632 | return rc; |
| 633 | } |
| 634 | |
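/* Find the record containing TCP sequence number @seq for retransmission
 * and return its record sequence number in @p_record_sn.  Called by
 * drivers on the xmit path, hence the RCU list walk and the
 * retransmit_hint cache.
 */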
| 635 | struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context, |
| 636 | u32 seq, u64 *p_record_sn) |
| 637 | { |
| 638 | u64 record_sn = context->hint_record_sn; |
| 639 | struct tls_record_info *info, *last; |
| 640 | |
| 641 | info = context->retransmit_hint; |
| 642 | if (!info || |
| 643 | before(seq, info->end_seq - info->len)) { |
| 644 | /* if retransmit_hint is irrelevant start |
| 645 | * from the beginning of the list |
| 646 | */ |
| 647 | info = list_first_entry_or_null(&context->records_list, |
| 648 | struct tls_record_info, list); |
| 649 | if (!info) |
| 650 | return NULL; |
		/* Send the start_marker record if the seq number is before
		 * the TLS offload start marker's sequence number.  This
		 * record is required to handle TCP packets sent before TLS
		 * offload started.  Otherwise, check whether this seq number
		 * belongs to the list.
		 */
| 658 | if (likely(!tls_record_is_start_marker(info))) { |
| 659 | /* we have the first record, get the last record to see |
| 660 | * if this seq number belongs to the list. |
| 661 | */ |
| 662 | last = list_last_entry(&context->records_list, |
| 663 | struct tls_record_info, list); |
| 664 | |
| 665 | if (!between(seq, tls_record_start_seq(info), |
| 666 | last->end_seq)) |
| 667 | return NULL; |
| 668 | } |
| 669 | record_sn = context->unacked_record_sn; |
| 670 | } |
| 671 | |
| 672 | /* We just need the _rcu for the READ_ONCE() */ |
| 673 | rcu_read_lock(); |
| 674 | list_for_each_entry_from_rcu(info, &context->records_list, list) { |
| 675 | if (before(seq, info->end_seq)) { |
| 676 | if (!context->retransmit_hint || |
| 677 | after(info->end_seq, |
| 678 | context->retransmit_hint->end_seq)) { |
| 679 | context->hint_record_sn = record_sn; |
| 680 | context->retransmit_hint = info; |
| 681 | } |
| 682 | *p_record_sn = record_sn; |
| 683 | goto exit_rcu_unlock; |
| 684 | } |
| 685 | record_sn++; |
| 686 | } |
| 687 | info = NULL; |
| 688 | |
| 689 | exit_rcu_unlock: |
| 690 | rcu_read_unlock(); |
| 691 | return info; |
| 692 | } |
| 693 | EXPORT_SYMBOL(tls_get_record); |
| 694 | |
| 695 | static int tls_device_push_pending_record(struct sock *sk, int flags) |
| 696 | { |
| 697 | union tls_iter_offset iter; |
| 698 | struct iov_iter msg_iter; |
| 699 | |
| 700 | iov_iter_kvec(&msg_iter, ITER_SOURCE, NULL, 0, 0); |
| 701 | iter.msg_iter = &msg_iter; |
| 702 | return tls_push_data(sk, iter, 0, flags, TLS_RECORD_TYPE_DATA, NULL); |
| 703 | } |
| 704 | |
| 705 | void tls_device_write_space(struct sock *sk, struct tls_context *ctx) |
| 706 | { |
| 707 | if (tls_is_partially_sent_record(ctx)) { |
| 708 | gfp_t sk_allocation = sk->sk_allocation; |
| 709 | |
| 710 | WARN_ON_ONCE(sk->sk_write_pending); |
| 711 | |
| 712 | sk->sk_allocation = GFP_ATOMIC; |
| 713 | tls_push_partial_record(sk, ctx, |
| 714 | MSG_DONTWAIT | MSG_NOSIGNAL | |
| 715 | MSG_SENDPAGE_DECRYPTED); |
| 716 | sk->sk_allocation = sk_allocation; |
| 717 | } |
| 718 | } |
| 719 | |
| 720 | static void tls_device_resync_rx(struct tls_context *tls_ctx, |
| 721 | struct sock *sk, u32 seq, u8 *rcd_sn) |
| 722 | { |
| 723 | struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); |
| 724 | struct net_device *netdev; |
| 725 | |
| 726 | trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type); |
| 727 | rcu_read_lock(); |
| 728 | netdev = rcu_dereference(tls_ctx->netdev); |
| 729 | if (netdev) |
| 730 | netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn, |
| 731 | TLS_OFFLOAD_CTX_DIR_RX); |
| 732 | rcu_read_unlock(); |
| 733 | TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC); |
| 734 | } |
| 735 | |
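/* Asynchronous resync (TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC): while the
 * driver is still scanning the stream, log the seq of every record header
 * we parse and count the records in rcd_delta.  Once the driver sends the
 * real resync request, match it against the log to recover how many
 * records passed in the meantime.
 */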
| 736 | static bool |
| 737 | tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async, |
| 738 | s64 resync_req, u32 *seq, u16 *rcd_delta) |
| 739 | { |
| 740 | u32 is_async = resync_req & RESYNC_REQ_ASYNC; |
| 741 | u32 req_seq = resync_req >> 32; |
| 742 | u32 req_end = req_seq + ((resync_req >> 16) & 0xffff); |
| 743 | u16 i; |
| 744 | |
| 745 | *rcd_delta = 0; |
| 746 | |
| 747 | if (is_async) { |
		/* We shouldn't get to wraparound: if we spent this long in
		 * the async stage, something bad happened.
		 */
| 751 | if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) |
| 752 | return false; |
| 753 | |
		/* asynchronous stage: log all header seqs such that
		 * req_seq <= seq <= req_end, and wait for the real resync
		 * request
		 */
| 757 | if (before(*seq, req_seq)) |
| 758 | return false; |
| 759 | if (!after(*seq, req_end) && |
| 760 | resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX) |
| 761 | resync_async->log[resync_async->loglen++] = *seq; |
| 762 | |
| 763 | resync_async->rcd_delta++; |
| 764 | |
| 765 | return false; |
| 766 | } |
| 767 | |
| 768 | /* synchronous stage: check against the logged entries and |
| 769 | * proceed to check the next entries if no match was found |
| 770 | */ |
| 771 | for (i = 0; i < resync_async->loglen; i++) |
| 772 | if (req_seq == resync_async->log[i] && |
| 773 | atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) { |
| 774 | *rcd_delta = resync_async->rcd_delta - i; |
| 775 | *seq = req_seq; |
| 776 | resync_async->loglen = 0; |
| 777 | resync_async->rcd_delta = 0; |
| 778 | return true; |
| 779 | } |
| 780 | |
| 781 | resync_async->loglen = 0; |
| 782 | resync_async->rcd_delta = 0; |
| 783 | |
| 784 | if (req_seq == *seq && |
| 785 | atomic64_try_cmpxchg(&resync_async->req, |
| 786 | &resync_req, 0)) |
| 787 | return true; |
| 788 | |
| 789 | return false; |
| 790 | } |
| 791 | |
| 792 | void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) |
| 793 | { |
| 794 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
| 795 | struct tls_offload_context_rx *rx_ctx; |
| 796 | u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE]; |
| 797 | u32 sock_data, is_req_pending; |
| 798 | struct tls_prot_info *prot; |
| 799 | s64 resync_req; |
| 800 | u16 rcd_delta; |
| 801 | u32 req_seq; |
| 802 | |
| 803 | if (tls_ctx->rx_conf != TLS_HW) |
| 804 | return; |
| 805 | if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) |
| 806 | return; |
| 807 | |
| 808 | prot = &tls_ctx->prot_info; |
| 809 | rx_ctx = tls_offload_ctx_rx(tls_ctx); |
| 810 | memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size); |
| 811 | |
| 812 | switch (rx_ctx->resync_type) { |
| 813 | case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ: |
| 814 | resync_req = atomic64_read(&rx_ctx->resync_req); |
| 815 | req_seq = resync_req >> 32; |
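		/* the driver's request is expected to carry the seq of the
		 * last byte of the TLS header; adjust our record-start seq
		 * to match before comparing
		 */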
| 816 | seq += TLS_HEADER_SIZE - 1; |
| 817 | is_req_pending = resync_req; |
| 818 | |
| 819 | if (likely(!is_req_pending) || req_seq != seq || |
| 820 | !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) |
| 821 | return; |
| 822 | break; |
| 823 | case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT: |
| 824 | if (likely(!rx_ctx->resync_nh_do_now)) |
| 825 | return; |
| 826 | |
		/* head of next rec is already in; note that tcp_inq() will
		 * include the currently parsed message when called from the
		 * parser
		 */
| 830 | sock_data = tcp_inq(sk); |
| 831 | if (sock_data > rcd_len) { |
| 832 | trace_tls_device_rx_resync_nh_delay(sk, sock_data, |
| 833 | rcd_len); |
| 834 | return; |
| 835 | } |
| 836 | |
| 837 | rx_ctx->resync_nh_do_now = 0; |
| 838 | seq += rcd_len; |
| 839 | tls_bigint_increment(rcd_sn, prot->rec_seq_size); |
| 840 | break; |
| 841 | case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC: |
| 842 | resync_req = atomic64_read(&rx_ctx->resync_async->req); |
| 843 | is_req_pending = resync_req; |
| 844 | if (likely(!is_req_pending)) |
| 845 | return; |
| 846 | |
| 847 | if (!tls_device_rx_resync_async(rx_ctx->resync_async, |
| 848 | resync_req, &seq, &rcd_delta)) |
| 849 | return; |
| 850 | tls_bigint_subtract(rcd_sn, rcd_delta); |
| 851 | break; |
| 852 | } |
| 853 | |
| 854 | tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn); |
| 855 | } |
| 856 | |
| 857 | static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx, |
| 858 | struct tls_offload_context_rx *ctx, |
| 859 | struct sock *sk, struct sk_buff *skb) |
| 860 | { |
| 861 | struct strp_msg *rxm; |
| 862 | |
| 863 | /* device will request resyncs by itself based on stream scan */ |
| 864 | if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT) |
| 865 | return; |
| 866 | /* already scheduled */ |
| 867 | if (ctx->resync_nh_do_now) |
| 868 | return; |
| 869 | /* seen decrypted fragments since last fully-failed record */ |
| 870 | if (ctx->resync_nh_reset) { |
| 871 | ctx->resync_nh_reset = 0; |
| 872 | ctx->resync_nh.decrypted_failed = 1; |
| 873 | ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL; |
| 874 | return; |
| 875 | } |
| 876 | |
| 877 | if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt) |
| 878 | return; |
| 879 | |
| 880 | /* doing resync, bump the next target in case it fails */ |
| 881 | if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL) |
| 882 | ctx->resync_nh.decrypted_tgt *= 2; |
| 883 | else |
| 884 | ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL; |
| 885 | |
| 886 | rxm = strp_msg(skb); |
| 887 | |
| 888 | /* head of next rec is already in, parser will sync for us */ |
| 889 | if (tcp_inq(sk) > rxm->full_len) { |
| 890 | trace_tls_device_rx_resync_nh_schedule(sk); |
| 891 | ctx->resync_nh_do_now = 1; |
| 892 | } else { |
| 893 | struct tls_prot_info *prot = &tls_ctx->prot_info; |
| 894 | u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE]; |
| 895 | |
| 896 | memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size); |
| 897 | tls_bigint_increment(rcd_sn, prot->rec_seq_size); |
| 898 | |
| 899 | tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq, |
| 900 | rcd_sn); |
| 901 | } |
| 902 | } |
| 903 | |
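/* The SW path can only handle records that are entirely ciphertext.
 * AES-GCM encrypts in counter mode, so running the decryption keystream
 * over already-decrypted bytes turns them back into ciphertext: decrypt a
 * copy of the whole record, then write the result back over the fragments
 * the device had decrypted.  The now fully-encrypted record can be
 * decrypted normally by the SW path.
 */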
| 904 | static int |
| 905 | tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx) |
| 906 | { |
| 907 | struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx); |
| 908 | const struct tls_cipher_size_desc *cipher_sz; |
| 909 | int err, offset, copy, data_len, pos; |
| 910 | struct sk_buff *skb, *skb_iter; |
| 911 | struct scatterlist sg[1]; |
| 912 | struct strp_msg *rxm; |
| 913 | char *orig_buf, *buf; |
| 914 | |
| 915 | switch (tls_ctx->crypto_recv.info.cipher_type) { |
| 916 | case TLS_CIPHER_AES_GCM_128: |
| 917 | case TLS_CIPHER_AES_GCM_256: |
| 918 | break; |
| 919 | default: |
| 920 | return -EINVAL; |
| 921 | } |
| 922 | cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_recv.info.cipher_type]; |
| 923 | |
| 924 | rxm = strp_msg(tls_strp_msg(sw_ctx)); |
| 925 | orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv, |
| 926 | sk->sk_allocation); |
| 927 | if (!orig_buf) |
| 928 | return -ENOMEM; |
| 929 | buf = orig_buf; |
| 930 | |
| 931 | err = tls_strp_msg_cow(sw_ctx); |
| 932 | if (unlikely(err)) |
| 933 | goto free_buf; |
| 934 | |
| 935 | skb = tls_strp_msg(sw_ctx); |
| 936 | rxm = strp_msg(skb); |
| 937 | offset = rxm->offset; |
| 938 | |
| 939 | sg_init_table(sg, 1); |
| 940 | sg_set_buf(&sg[0], buf, |
| 941 | rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv); |
| 942 | err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_sz->iv); |
| 943 | if (err) |
| 944 | goto free_buf; |
| 945 | |
	/* We are interested only in the decrypted data, not the auth tag */
| 947 | err = decrypt_skb(sk, sg); |
| 948 | if (err != -EBADMSG) |
| 949 | goto free_buf; |
| 950 | else |
| 951 | err = 0; |
| 952 | |
| 953 | data_len = rxm->full_len - cipher_sz->tag; |
| 954 | |
| 955 | if (skb_pagelen(skb) > offset) { |
| 956 | copy = min_t(int, skb_pagelen(skb) - offset, data_len); |
| 957 | |
| 958 | if (skb->decrypted) { |
| 959 | err = skb_store_bits(skb, offset, buf, copy); |
| 960 | if (err) |
| 961 | goto free_buf; |
| 962 | } |
| 963 | |
| 964 | offset += copy; |
| 965 | buf += copy; |
| 966 | } |
| 967 | |
| 968 | pos = skb_pagelen(skb); |
| 969 | skb_walk_frags(skb, skb_iter) { |
| 970 | int frag_pos; |
| 971 | |
| 972 | /* Practically all frags must belong to msg if reencrypt |
| 973 | * is needed with current strparser and coalescing logic, |
| 974 | * but strparser may "get optimized", so let's be safe. |
| 975 | */ |
| 976 | if (pos + skb_iter->len <= offset) |
| 977 | goto done_with_frag; |
| 978 | if (pos >= data_len + rxm->offset) |
| 979 | break; |
| 980 | |
| 981 | frag_pos = offset - pos; |
| 982 | copy = min_t(int, skb_iter->len - frag_pos, |
| 983 | data_len + rxm->offset - offset); |
| 984 | |
| 985 | if (skb_iter->decrypted) { |
| 986 | err = skb_store_bits(skb_iter, frag_pos, buf, copy); |
| 987 | if (err) |
| 988 | goto free_buf; |
| 989 | } |
| 990 | |
| 991 | offset += copy; |
| 992 | buf += copy; |
| 993 | done_with_frag: |
| 994 | pos += skb_iter->len; |
| 995 | } |
| 996 | |
| 997 | free_buf: |
| 998 | kfree(orig_buf); |
| 999 | return err; |
| 1000 | } |
| 1001 | |
| 1002 | int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx) |
| 1003 | { |
| 1004 | struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx); |
| 1005 | struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx); |
| 1006 | struct sk_buff *skb = tls_strp_msg(sw_ctx); |
| 1007 | struct strp_msg *rxm = strp_msg(skb); |
| 1008 | int is_decrypted = skb->decrypted; |
| 1009 | int is_encrypted = !is_decrypted; |
| 1010 | struct sk_buff *skb_iter; |
| 1011 | int left; |
| 1012 | |
| 1013 | left = rxm->full_len - skb->len; |
| 1014 | /* Check if all the data is decrypted already */ |
| 1015 | skb_iter = skb_shinfo(skb)->frag_list; |
| 1016 | while (skb_iter && left > 0) { |
| 1017 | is_decrypted &= skb_iter->decrypted; |
| 1018 | is_encrypted &= !skb_iter->decrypted; |
| 1019 | |
| 1020 | left -= skb_iter->len; |
| 1021 | skb_iter = skb_iter->next; |
| 1022 | } |
| 1023 | |
| 1024 | trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len, |
| 1025 | tls_ctx->rx.rec_seq, rxm->full_len, |
| 1026 | is_encrypted, is_decrypted); |
| 1027 | |
| 1028 | if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) { |
| 1029 | if (likely(is_encrypted || is_decrypted)) |
| 1030 | return is_decrypted; |
| 1031 | |
| 1032 | /* After tls_device_down disables the offload, the next SKB will |
| 1033 | * likely have initial fragments decrypted, and final ones not |
| 1034 | * decrypted. We need to reencrypt that single SKB. |
| 1035 | */ |
| 1036 | return tls_device_reencrypt(sk, tls_ctx); |
| 1037 | } |
| 1038 | |
	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext.  Otherwise reencrypt the partially decrypted
	 * record.
	 */
| 1043 | if (is_decrypted) { |
| 1044 | ctx->resync_nh_reset = 1; |
| 1045 | return is_decrypted; |
| 1046 | } |
| 1047 | if (is_encrypted) { |
| 1048 | tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb); |
| 1049 | return 0; |
| 1050 | } |
| 1051 | |
| 1052 | ctx->resync_nh_reset = 1; |
| 1053 | return tls_device_reencrypt(sk, tls_ctx); |
| 1054 | } |
| 1055 | |
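/* First attach for this socket: take the initial context reference and a
 * netdev reference, link the context into tls_device_list and interpose
 * our sk_destruct.  Later calls (e.g. adding RX offload after TX) are
 * no-ops.
 */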
| 1056 | static void tls_device_attach(struct tls_context *ctx, struct sock *sk, |
| 1057 | struct net_device *netdev) |
| 1058 | { |
| 1059 | if (sk->sk_destruct != tls_device_sk_destruct) { |
| 1060 | refcount_set(&ctx->refcount, 1); |
| 1061 | dev_hold(netdev); |
| 1062 | RCU_INIT_POINTER(ctx->netdev, netdev); |
| 1063 | spin_lock_irq(&tls_device_lock); |
| 1064 | list_add_tail(&ctx->list, &tls_device_list); |
| 1065 | spin_unlock_irq(&tls_device_lock); |
| 1066 | |
| 1067 | ctx->sk_destruct = sk->sk_destruct; |
| 1068 | smp_store_release(&sk->sk_destruct, tls_device_sk_destruct); |
| 1069 | } |
| 1070 | } |
| 1071 | |
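/* Set up TLS 1.2 TX crypto offload on a connected socket: validate the
 * cipher, build the offload context with a start-marker record covering
 * the pre-offload bytes, initialize the SW fallback, and hand the crypto
 * state to the driver via tls_dev_add().
 */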
| 1072 | int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) |
| 1073 | { |
| 1074 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
| 1075 | struct tls_prot_info *prot = &tls_ctx->prot_info; |
| 1076 | const struct tls_cipher_size_desc *cipher_sz; |
| 1077 | struct tls_record_info *start_marker_record; |
| 1078 | struct tls_offload_context_tx *offload_ctx; |
| 1079 | struct tls_crypto_info *crypto_info; |
| 1080 | struct net_device *netdev; |
| 1081 | char *iv, *rec_seq; |
| 1082 | struct sk_buff *skb; |
| 1083 | __be64 rcd_sn; |
| 1084 | int rc; |
| 1085 | |
| 1086 | if (!ctx) |
| 1087 | return -EINVAL; |
| 1088 | |
| 1089 | if (ctx->priv_ctx_tx) |
| 1090 | return -EEXIST; |
| 1091 | |
| 1092 | netdev = get_netdev_for_sock(sk); |
| 1093 | if (!netdev) { |
| 1094 | pr_err_ratelimited("%s: netdev not found\n", __func__); |
| 1095 | return -EINVAL; |
| 1096 | } |
| 1097 | |
| 1098 | if (!(netdev->features & NETIF_F_HW_TLS_TX)) { |
| 1099 | rc = -EOPNOTSUPP; |
| 1100 | goto release_netdev; |
| 1101 | } |
| 1102 | |
| 1103 | crypto_info = &ctx->crypto_send.info; |
| 1104 | if (crypto_info->version != TLS_1_2_VERSION) { |
| 1105 | rc = -EOPNOTSUPP; |
| 1106 | goto release_netdev; |
| 1107 | } |
| 1108 | |
| 1109 | switch (crypto_info->cipher_type) { |
| 1110 | case TLS_CIPHER_AES_GCM_128: |
| 1111 | iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv; |
| 1112 | rec_seq = |
| 1113 | ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq; |
| 1114 | break; |
| 1115 | case TLS_CIPHER_AES_GCM_256: |
| 1116 | iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv; |
| 1117 | rec_seq = |
| 1118 | ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq; |
| 1119 | break; |
| 1120 | default: |
| 1121 | rc = -EINVAL; |
| 1122 | goto release_netdev; |
| 1123 | } |
| 1124 | cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type]; |
| 1125 | |
| 1126 | /* Sanity-check the rec_seq_size for stack allocations */ |
| 1127 | if (cipher_sz->rec_seq > TLS_MAX_REC_SEQ_SIZE) { |
| 1128 | rc = -EINVAL; |
| 1129 | goto release_netdev; |
| 1130 | } |
| 1131 | |
| 1132 | prot->version = crypto_info->version; |
| 1133 | prot->cipher_type = crypto_info->cipher_type; |
| 1134 | prot->prepend_size = TLS_HEADER_SIZE + cipher_sz->iv; |
| 1135 | prot->tag_size = cipher_sz->tag; |
| 1136 | prot->overhead_size = prot->prepend_size + prot->tag_size; |
| 1137 | prot->iv_size = cipher_sz->iv; |
| 1138 | prot->salt_size = cipher_sz->salt; |
| 1139 | ctx->tx.iv = kmalloc(cipher_sz->iv + cipher_sz->salt, GFP_KERNEL); |
| 1140 | if (!ctx->tx.iv) { |
| 1141 | rc = -ENOMEM; |
| 1142 | goto release_netdev; |
| 1143 | } |
| 1144 | |
| 1145 | memcpy(ctx->tx.iv + cipher_sz->salt, iv, cipher_sz->iv); |
| 1146 | |
| 1147 | prot->rec_seq_size = cipher_sz->rec_seq; |
| 1148 | ctx->tx.rec_seq = kmemdup(rec_seq, cipher_sz->rec_seq, GFP_KERNEL); |
| 1149 | if (!ctx->tx.rec_seq) { |
| 1150 | rc = -ENOMEM; |
| 1151 | goto free_iv; |
| 1152 | } |
| 1153 | |
| 1154 | start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL); |
| 1155 | if (!start_marker_record) { |
| 1156 | rc = -ENOMEM; |
| 1157 | goto free_rec_seq; |
| 1158 | } |
| 1159 | |
| 1160 | offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL); |
| 1161 | if (!offload_ctx) { |
| 1162 | rc = -ENOMEM; |
| 1163 | goto free_marker_record; |
| 1164 | } |
| 1165 | |
| 1166 | rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info); |
| 1167 | if (rc) |
| 1168 | goto free_offload_ctx; |
| 1169 | |
| 1170 | /* start at rec_seq - 1 to account for the start marker record */ |
| 1171 | memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn)); |
| 1172 | offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1; |
| 1173 | |
| 1174 | start_marker_record->end_seq = tcp_sk(sk)->write_seq; |
| 1175 | start_marker_record->len = 0; |
| 1176 | start_marker_record->num_frags = 0; |
| 1177 | |
| 1178 | INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task); |
| 1179 | offload_ctx->ctx = ctx; |
| 1180 | |
| 1181 | INIT_LIST_HEAD(&offload_ctx->records_list); |
| 1182 | list_add_tail(&start_marker_record->list, &offload_ctx->records_list); |
| 1183 | spin_lock_init(&offload_ctx->lock); |
| 1184 | sg_init_table(offload_ctx->sg_tx_data, |
| 1185 | ARRAY_SIZE(offload_ctx->sg_tx_data)); |
| 1186 | |
| 1187 | clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked); |
| 1188 | ctx->push_pending_record = tls_device_push_pending_record; |
| 1189 | |
| 1190 | /* TLS offload is greatly simplified if we don't send |
| 1191 | * SKBs where only part of the payload needs to be encrypted. |
| 1192 | * So mark the last skb in the write queue as end of record. |
| 1193 | */ |
| 1194 | skb = tcp_write_queue_tail(sk); |
| 1195 | if (skb) |
| 1196 | TCP_SKB_CB(skb)->eor = 1; |
| 1197 | |
	/* Avoid offloading if the device is down.
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event.
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler, thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
| 1206 | down_read(&device_offload_lock); |
| 1207 | if (!(netdev->flags & IFF_UP)) { |
| 1208 | rc = -EINVAL; |
| 1209 | goto release_lock; |
| 1210 | } |
| 1211 | |
| 1212 | ctx->priv_ctx_tx = offload_ctx; |
| 1213 | rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX, |
| 1214 | &ctx->crypto_send.info, |
| 1215 | tcp_sk(sk)->write_seq); |
| 1216 | trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX, |
| 1217 | tcp_sk(sk)->write_seq, rec_seq, rc); |
| 1218 | if (rc) |
| 1219 | goto release_lock; |
| 1220 | |
| 1221 | tls_device_attach(ctx, sk, netdev); |
| 1222 | up_read(&device_offload_lock); |
| 1223 | |
	/* following this assignment, tls_is_sk_tx_device_offloaded
	 * will return true, and the context might be accessed
	 * by the netdev's xmit function.
	 */
| 1228 | smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb); |
| 1229 | dev_put(netdev); |
| 1230 | |
| 1231 | return 0; |
| 1232 | |
| 1233 | release_lock: |
| 1234 | up_read(&device_offload_lock); |
| 1235 | clean_acked_data_disable(inet_csk(sk)); |
| 1236 | crypto_free_aead(offload_ctx->aead_send); |
| 1237 | free_offload_ctx: |
| 1238 | kfree(offload_ctx); |
| 1239 | ctx->priv_ctx_tx = NULL; |
| 1240 | free_marker_record: |
| 1241 | kfree(start_marker_record); |
| 1242 | free_rec_seq: |
| 1243 | kfree(ctx->tx.rec_seq); |
| 1244 | free_iv: |
| 1245 | kfree(ctx->tx.iv); |
| 1246 | release_netdev: |
| 1247 | dev_put(netdev); |
| 1248 | return rc; |
| 1249 | } |
| 1250 | |
| 1251 | int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) |
| 1252 | { |
| 1253 | struct tls12_crypto_info_aes_gcm_128 *info; |
| 1254 | struct tls_offload_context_rx *context; |
| 1255 | struct net_device *netdev; |
| 1256 | int rc = 0; |
| 1257 | |
| 1258 | if (ctx->crypto_recv.info.version != TLS_1_2_VERSION) |
| 1259 | return -EOPNOTSUPP; |
| 1260 | |
| 1261 | netdev = get_netdev_for_sock(sk); |
| 1262 | if (!netdev) { |
| 1263 | pr_err_ratelimited("%s: netdev not found\n", __func__); |
| 1264 | return -EINVAL; |
| 1265 | } |
| 1266 | |
| 1267 | if (!(netdev->features & NETIF_F_HW_TLS_RX)) { |
| 1268 | rc = -EOPNOTSUPP; |
| 1269 | goto release_netdev; |
| 1270 | } |
| 1271 | |
	/* Avoid offloading if the device is down.
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event.
	 *
	 * device_offload_lock is taken in tls_device's NETDEV_DOWN
	 * handler, thus protecting from the device going down before
	 * ctx was added to tls_device_list.
	 */
| 1280 | down_read(&device_offload_lock); |
| 1281 | if (!(netdev->flags & IFF_UP)) { |
| 1282 | rc = -EINVAL; |
| 1283 | goto release_lock; |
| 1284 | } |
| 1285 | |
| 1286 | context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL); |
| 1287 | if (!context) { |
| 1288 | rc = -ENOMEM; |
| 1289 | goto release_lock; |
| 1290 | } |
| 1291 | context->resync_nh_reset = 1; |
| 1292 | |
| 1293 | ctx->priv_ctx_rx = context; |
| 1294 | rc = tls_set_sw_offload(sk, ctx, 0); |
| 1295 | if (rc) |
| 1296 | goto release_ctx; |
| 1297 | |
| 1298 | rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX, |
| 1299 | &ctx->crypto_recv.info, |
| 1300 | tcp_sk(sk)->copied_seq); |
| 1301 | info = (void *)&ctx->crypto_recv.info; |
| 1302 | trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_RX, |
| 1303 | tcp_sk(sk)->copied_seq, info->rec_seq, rc); |
| 1304 | if (rc) |
| 1305 | goto free_sw_resources; |
| 1306 | |
| 1307 | tls_device_attach(ctx, sk, netdev); |
| 1308 | up_read(&device_offload_lock); |
| 1309 | |
| 1310 | dev_put(netdev); |
| 1311 | |
| 1312 | return 0; |
| 1313 | |
| 1314 | free_sw_resources: |
| 1315 | up_read(&device_offload_lock); |
| 1316 | tls_sw_free_resources_rx(sk); |
| 1317 | down_read(&device_offload_lock); |
| 1318 | release_ctx: |
| 1319 | ctx->priv_ctx_rx = NULL; |
| 1320 | release_lock: |
| 1321 | up_read(&device_offload_lock); |
| 1322 | release_netdev: |
| 1323 | dev_put(netdev); |
| 1324 | return rc; |
| 1325 | } |
| 1326 | |
| 1327 | void tls_device_offload_cleanup_rx(struct sock *sk) |
| 1328 | { |
| 1329 | struct tls_context *tls_ctx = tls_get_ctx(sk); |
| 1330 | struct net_device *netdev; |
| 1331 | |
| 1332 | down_read(&device_offload_lock); |
| 1333 | netdev = rcu_dereference_protected(tls_ctx->netdev, |
| 1334 | lockdep_is_held(&device_offload_lock)); |
| 1335 | if (!netdev) |
| 1336 | goto out; |
| 1337 | |
| 1338 | netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx, |
| 1339 | TLS_OFFLOAD_CTX_DIR_RX); |
| 1340 | |
| 1341 | if (tls_ctx->tx_conf != TLS_HW) { |
| 1342 | dev_put(netdev); |
| 1343 | rcu_assign_pointer(tls_ctx->netdev, NULL); |
| 1344 | } else { |
| 1345 | set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags); |
| 1346 | } |
| 1347 | out: |
| 1348 | up_read(&device_offload_lock); |
| 1349 | tls_sw_release_resources_rx(sk); |
| 1350 | } |
| 1351 | |
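/* NETDEV_DOWN handler: detach every context bound to this netdev, switch
 * TX to the SW fallback, stop RX resync and release the driver state.
 * The contexts move to tls_device_down_list and keep rx_conf/tx_conf in
 * TLS_HW until the sockets are destroyed.
 */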
| 1352 | static int tls_device_down(struct net_device *netdev) |
| 1353 | { |
| 1354 | struct tls_context *ctx, *tmp; |
| 1355 | unsigned long flags; |
| 1356 | LIST_HEAD(list); |
| 1357 | |
| 1358 | /* Request a write lock to block new offload attempts */ |
| 1359 | down_write(&device_offload_lock); |
| 1360 | |
| 1361 | spin_lock_irqsave(&tls_device_lock, flags); |
| 1362 | list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) { |
| 1363 | struct net_device *ctx_netdev = |
| 1364 | rcu_dereference_protected(ctx->netdev, |
| 1365 | lockdep_is_held(&device_offload_lock)); |
| 1366 | |
| 1367 | if (ctx_netdev != netdev || |
| 1368 | !refcount_inc_not_zero(&ctx->refcount)) |
| 1369 | continue; |
| 1370 | |
| 1371 | list_move(&ctx->list, &list); |
| 1372 | } |
| 1373 | spin_unlock_irqrestore(&tls_device_lock, flags); |
| 1374 | |
| 1375 | list_for_each_entry_safe(ctx, tmp, &list, list) { |
| 1376 | /* Stop offloaded TX and switch to the fallback. |
| 1377 | * tls_is_sk_tx_device_offloaded will return false. |
| 1378 | */ |
| 1379 | WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw); |
| 1380 | |
| 1381 | /* Stop the RX and TX resync. |
| 1382 | * tls_dev_resync must not be called after tls_dev_del. |
| 1383 | */ |
| 1384 | rcu_assign_pointer(ctx->netdev, NULL); |
| 1385 | |
| 1386 | /* Start skipping the RX resync logic completely. */ |
| 1387 | set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags); |
| 1388 | |
| 1389 | /* Sync with inflight packets. After this point: |
| 1390 | * TX: no non-encrypted packets will be passed to the driver. |
| 1391 | * RX: resync requests from the driver will be ignored. |
| 1392 | */ |
| 1393 | synchronize_net(); |
| 1394 | |
| 1395 | /* Release the offload context on the driver side. */ |
| 1396 | if (ctx->tx_conf == TLS_HW) |
| 1397 | netdev->tlsdev_ops->tls_dev_del(netdev, ctx, |
| 1398 | TLS_OFFLOAD_CTX_DIR_TX); |
| 1399 | if (ctx->rx_conf == TLS_HW && |
| 1400 | !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags)) |
| 1401 | netdev->tlsdev_ops->tls_dev_del(netdev, ctx, |
| 1402 | TLS_OFFLOAD_CTX_DIR_RX); |
| 1403 | |
| 1404 | dev_put(netdev); |
| 1405 | |
| 1406 | /* Move the context to a separate list for two reasons: |
| 1407 | * 1. When the context is deallocated, list_del is called. |
| 1408 | * 2. It's no longer an offloaded context, so we don't want to |
| 1409 | * run offload-specific code on this context. |
| 1410 | */ |
| 1411 | spin_lock_irqsave(&tls_device_lock, flags); |
| 1412 | list_move_tail(&ctx->list, &tls_device_down_list); |
| 1413 | spin_unlock_irqrestore(&tls_device_lock, flags); |
| 1414 | |
		/* Device contexts for RX and TX will be freed on sk_destruct
		 * by tls_device_free_ctx.  rx_conf and tx_conf stay in TLS_HW.
		 * Now release the ref taken above.
		 */
| 1419 | if (refcount_dec_and_test(&ctx->refcount)) { |
| 1420 | /* sk_destruct ran after tls_device_down took a ref, and |
| 1421 | * it returned early. Complete the destruction here. |
| 1422 | */ |
| 1423 | list_del(&ctx->list); |
| 1424 | tls_device_free_ctx(ctx); |
| 1425 | } |
| 1426 | } |
| 1427 | |
| 1428 | up_write(&device_offload_lock); |
| 1429 | |
| 1430 | flush_workqueue(destruct_wq); |
| 1431 | |
| 1432 | return NOTIFY_DONE; |
| 1433 | } |
| 1434 | |
| 1435 | static int tls_dev_event(struct notifier_block *this, unsigned long event, |
| 1436 | void *ptr) |
| 1437 | { |
| 1438 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
| 1439 | |
| 1440 | if (!dev->tlsdev_ops && |
| 1441 | !(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX))) |
| 1442 | return NOTIFY_DONE; |
| 1443 | |
| 1444 | switch (event) { |
| 1445 | case NETDEV_REGISTER: |
| 1446 | case NETDEV_FEAT_CHANGE: |
| 1447 | if (netif_is_bond_master(dev)) |
| 1448 | return NOTIFY_DONE; |
		if (!dev->tlsdev_ops ||
		    !dev->tlsdev_ops->tls_dev_add ||
		    !dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_BAD;
		/* only dereference tlsdev_ops once it is known to be set:
		 * a device may advertise NETIF_F_HW_TLS_RX without any ops
		 */
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync)
			return NOTIFY_BAD;
		return NOTIFY_DONE;
| 1459 | case NETDEV_DOWN: |
| 1460 | return tls_device_down(dev); |
| 1461 | } |
| 1462 | return NOTIFY_DONE; |
| 1463 | } |
| 1464 | |
| 1465 | static struct notifier_block tls_dev_notifier = { |
| 1466 | .notifier_call = tls_dev_event, |
| 1467 | }; |
| 1468 | |
| 1469 | int __init tls_device_init(void) |
| 1470 | { |
| 1471 | int err; |
| 1472 | |
| 1473 | destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0); |
| 1474 | if (!destruct_wq) |
| 1475 | return -ENOMEM; |
| 1476 | |
| 1477 | err = register_netdevice_notifier(&tls_dev_notifier); |
| 1478 | if (err) |
| 1479 | destroy_workqueue(destruct_wq); |
| 1480 | |
| 1481 | return err; |
| 1482 | } |
| 1483 | |
| 1484 | void __exit tls_device_cleanup(void) |
| 1485 | { |
| 1486 | unregister_netdevice_notifier(&tls_dev_notifier); |
| 1487 | destroy_workqueue(destruct_wq); |
| 1488 | clean_acked_data_flush(); |
| 1489 | } |