// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

#include "mib.h"

/* path manager command handlers */

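/* Queue an ADD_ADDR (or its echo) for transmission: record the address in
 * msk->pm and set the matching bit in pm.addr_signal so that the option
 * writing code will pick it up. Fails with -EINVAL if the same kind of
 * signal is already pending. The caller must hold the PM lock.
 */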
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo)
{
	u8 add_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);

	lockdep_assert_held(&msk->pm.lock);

	if (add_addr &
	    (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) {
		pr_warn("addr_signal error, add_addr=%d, echo=%d", add_addr, echo);
		return -EINVAL;
	}

	if (echo) {
		msk->pm.remote = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		msk->pm.local = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	return 0;
}

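/* Queue an RM_ADDR for transmission: store the list of ids to remove in
 * pm.rm_list_tx, flag it in pm.addr_signal and ask for an ack carrying the
 * option. Fails with -EINVAL if any address signal is still pending.
 */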
int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	if (rm_addr) {
		pr_warn("addr_signal error, rm_addr=%d", rm_addr);
		return -EINVAL;
	}

	msk->pm.rm_list_tx = *rm_list;
	rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	mptcp_pm_nl_addr_send_ack(msk);
	return 0;
}

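/* Forward a list of subflow ids to remove to the in-kernel path manager,
 * under the PM lock.
 */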
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	spin_lock_bh(&msk->pm.lock);
	mptcp_pm_nl_rm_subflow_received(msk, rm_list);
	spin_unlock_bh(&msk->pm.lock);
	return 0;
}

/* path manager event handlers */

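/* Record whether this is the server side of the connection and notify
 * userspace that a new MPTCP connection has been created.
 */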
void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
	mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
}

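/* Decide whether a new incoming subflow can be accepted. The userspace PM
 * only requires the path manager to still be active; the in-kernel PM
 * enforces the configured subflow limit and clears accept_subflow once that
 * limit is reached.
 */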
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	unsigned int subflows_max;
	int ret = 0;

	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_active(msk);

	subflows_max = mptcp_pm_get_subflows_max(msk);

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->subflows < subflows_max;
		if (ret && ++pm->subflows == subflows_max)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, possibly by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}

void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool announce = false;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths - accept() and check_fully_established() -
	 * be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
		announce = true;

	msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
	spin_unlock_bh(&pm->lock);

	if (announce)
		mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, gfp);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}

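/* A subflow reached the established state: schedule the
 * MPTCP_PM_SUBFLOW_ESTABLISHED work, if the PM still has pending work to do.
 */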
void mptcp_pm_subflow_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

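/* A subflow is going away: for join subflows handled by the in-kernel PM,
 * update the subflow accounting; in any case, if PM work is still pending,
 * let the worker try to establish the next subflows.
 */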
void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
				 const struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool update_subflows;

	update_subflows = (subflow->request_join || subflow->mp_join) &&
			  mptcp_pm_is_kernel(msk);
	if (!READ_ONCE(pm->work_pending) && !update_subflows)
		return;

	spin_lock_bh(&pm->lock);
	if (update_subflows)
		__mptcp_pm_close_subflow(msk);

	/* Even if this subflow is not really established, tell the PM to try
	 * to pick the next ones, if possible.
	 */
	if (mptcp_pm_nl_check_work_pending(msk))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

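/* Handle a received ADD_ADDR: notify userspace of the announcement, then
 * either echo the address straight away, queue it for the PM worker via
 * MPTCP_PM_ADD_ADDR_RECEIVED or drop it, bumping the ADDADDRDROP MIB
 * counter.
 */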
void mptcp_pm_add_addr_received(const struct sock *ssk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	mptcp_event_addr_announced(ssk, addr);

	spin_lock_bh(&pm->lock);

	if (mptcp_pm_is_userspace(msk)) {
		if (mptcp_userspace_pm_active(msk)) {
			mptcp_pm_announce_addr(msk, addr, true);
			mptcp_pm_add_addr_send_ack(msk);
		} else {
			__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
		}
	} else if (!READ_ONCE(pm->accept_addr)) {
		mptcp_pm_announce_addr(msk, addr, true);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->remote = *addr;
	} else {
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
	}

	spin_unlock_bh(&pm->lock);
}

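/* The peer echoed one of our ADD_ADDR announcements: if the address is still
 * in the announce list and PM work is pending, schedule the
 * MPTCP_PM_SUBFLOW_ESTABLISHED work.
 */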
void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
			      const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}

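/* Handle a received RM_ADDR: notify userspace about each removed id, then
 * hand the list to the PM worker via MPTCP_PM_RM_ADDR_RECEIVED, or bump the
 * RMADDRDROP MIB counter if a previous removal is still being processed.
 */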
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
			       const struct mptcp_rm_list *rm_list)
{
	struct mptcp_pm_data *pm = &msk->pm;
	u8 i;

	pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);

	for (i = 0; i < rm_list->nr; i++)
		mptcp_event_addr_removed(msk, rm_list->ids[i]);

	spin_lock_bh(&pm->lock);
	if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
		pm->rm_list_rx = *rm_list;
	else
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
	spin_unlock_bh(&pm->lock);
}

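/* The peer changed the backup status of a subflow via MP_PRIO: update the
 * subflow priority, reset the packet scheduler selection and notify
 * userspace.
 */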
void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;
	struct mptcp_sock *msk;

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	msk = mptcp_sk(sk);
	if (subflow->backup != bkup) {
		subflow->backup = bkup;
		mptcp_data_lock(sk);
		if (!sock_owned_by_user(sk))
			msk->last_snd = NULL;
		else
			__set_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags);
		mptcp_data_unlock(sk);
	}

	mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC);
}

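/* Handle a received MP_FAIL while infinite fallback is still possible:
 * either answer with our own MP_FAIL plus an infinite mapping, or, when this
 * is the expected MP_FAIL response, stop the msk-level timer.
 */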
void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct sock *s = (struct sock *)msk;

	pr_debug("fail_seq=%llu", fail_seq);

	if (!READ_ONCE(msk->allow_infinite_fallback))
		return;

	if (!READ_ONCE(subflow->mp_fail_response_expect)) {
		pr_debug("send MP_FAIL response and infinite map");

		subflow->send_mp_fail = 1;
		subflow->send_infinite_map = 1;
	} else if (!sock_flag(sk, SOCK_DEAD)) {
		pr_debug("MP_FAIL response received");

		sk_stop_timer(s, &s->sk_timer);
	}
}

/* path manager helpers */

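/* Fill in a pending ADD_ADDR (or echo) for the option writing code: if there
 * is enough option space left, copy the address out, clear the pending bit
 * and tell the caller whether the other suboptions should be dropped
 * (pure ack case).
 */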
bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
			      unsigned int opt_size, unsigned int remaining,
			      struct mptcp_addr_info *addr, bool *echo,
			      bool *drop_other_suboptions)
{
	int ret = false;
	u8 add_addr;
	u8 family;
	bool port;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	/* always drop all the other options for a pure ack ADD_ADDR; this is a
	 * plain dup-ack from the TCP perspective. The other MPTCP-relevant
	 * info, if any, will be carried by the 'original' TCP ack
	 */
	if (skb && skb_is_tcp_pure_ack(skb)) {
		remaining += opt_size;
		*drop_other_suboptions = true;
	}

	*echo = mptcp_pm_should_add_signal_echo(msk);
	port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port);

	family = *echo ? msk->pm.remote.family : msk->pm.local.family;
	if (remaining < mptcp_add_addr_len(family, *echo, port))
		goto out_unlock;

	if (*echo) {
		*addr = msk->pm.remote;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		*addr = msk->pm.local;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

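/* Fill in a pending RM_ADDR for the option writing code: if the id list fits
 * in the remaining option space, copy it out and clear the pending bit.
 */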
bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     struct mptcp_rm_list *rm_list)
{
	int ret = false, len;
	u8 rm_addr;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL);
	len = mptcp_rm_addr_len(&msk->pm.rm_list_tx);
	if (len < 0) {
		WRITE_ONCE(msk->pm.addr_signal, rm_addr);
		goto out_unlock;
	}
	if (remaining < len)
		goto out_unlock;

	*rm_list = msk->pm.rm_list_tx;
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}

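/* Detect subflows making no progress: if the subflow rcv_tstamp did not move
 * since the previous check, bump the stale counter and let the in-kernel PM
 * mark the subflow as stale; any progress resets the counter and makes the
 * subflow active again.
 */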
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp);

	/* keep track of rtx periods with no progress */
	if (!subflow->stale_count) {
		subflow->stale_rcv_tstamp = rcv_tstamp;
		subflow->stale_count++;
	} else if (subflow->stale_rcv_tstamp == rcv_tstamp) {
		if (subflow->stale_count < U8_MAX)
			subflow->stale_count++;
		mptcp_pm_nl_subflow_chk_stale(msk, ssk);
	} else {
		subflow->stale_count = 0;
		mptcp_subflow_set_active(subflow);
	}
}

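/* Reinitialise the PM state: clear counters and pending signals, and derive
 * work_pending/accept_addr/accept_subflow from the configured limits when
 * the in-kernel PM is in use.
 */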
void mptcp_pm_data_reset(struct mptcp_sock *msk)
{
	u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
	struct mptcp_pm_data *pm = &msk->pm;

	pm->add_addr_signaled = 0;
	pm->add_addr_accepted = 0;
	pm->local_addr_used = 0;
	pm->subflows = 0;
	pm->rm_list_tx.nr = 0;
	pm->rm_list_rx.nr = 0;
	WRITE_ONCE(pm->pm_type, pm_type);

	if (pm_type == MPTCP_PM_TYPE_KERNEL) {
		bool subflows_allowed = !!mptcp_pm_get_subflows_max(msk);

		/* pm->work_pending must only be set to 'true' when
		 * pm->pm_type is set to MPTCP_PM_TYPE_KERNEL
		 */
		WRITE_ONCE(pm->work_pending,
			   (!!mptcp_pm_get_local_addr_max(msk) &&
			    subflows_allowed) ||
			   !!mptcp_pm_get_add_addr_signal_max(msk));
		WRITE_ONCE(pm->accept_addr,
			   !!mptcp_pm_get_add_addr_accept_max(msk) &&
			   subflows_allowed);
		WRITE_ONCE(pm->accept_subflow, subflows_allowed);
	} else {
		WRITE_ONCE(pm->work_pending, 0);
		WRITE_ONCE(pm->accept_addr, 0);
		WRITE_ONCE(pm->accept_subflow, 0);
	}

	WRITE_ONCE(pm->addr_signal, 0);
	WRITE_ONCE(pm->remote_deny_join_id0, false);
	pm->status = 0;
	bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
}

void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);
	INIT_LIST_HEAD(&msk->pm.userspace_pm_local_addr_list);
	mptcp_pm_data_reset(msk);
}

void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}