// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

/* path manager command handlers */

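/* Pending ADD_ADDR/RM_ADDR signals are staged in msk->pm.addr_signal as a
 * bitmask and later consumed by the option writer through
 * mptcp_pm_add_addr_signal() and mptcp_pm_rm_addr_signal() below:
 *   BIT(MPTCP_ADD_ADDR_SIGNAL) - an ADD_ADDR announce is pending
 *   BIT(MPTCP_ADD_ADDR_ECHO)   - the pending ADD_ADDR echoes a received one
 *   BIT(MPTCP_ADD_ADDR_IPV6)   - the pending address is an IPv6 one
 *   BIT(MPTCP_ADD_ADDR_PORT)   - the pending address carries a port number
 *   BIT(MPTCP_RM_ADDR_SIGNAL)  - an RM_ADDR is pending
 * Only a single signal can be pending at any given time.
 */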
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo, bool port)
{
	u8 add_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d", msk, addr->id);

	if (add_addr) {
		pr_warn("addr_signal error, add_addr=%d", add_addr);
		return -EINVAL;
	}

	msk->pm.local = *addr;
	add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	if (echo)
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	if (addr->family == AF_INET6)
		add_addr |= BIT(MPTCP_ADD_ADDR_IPV6);
	if (port)
		add_addr |= BIT(MPTCP_ADD_ADDR_PORT);
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	return 0;
}

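/* Signal that the address with the given local id should be removed via an
 * RM_ADDR option; like the announce above, this fails if another address
 * signal is already pending.
 */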
int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
{
	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d", msk, local_id);

	if (rm_addr) {
		pr_warn("addr_signal error, rm_addr=%d", rm_addr);
		return -EINVAL;
	}

	msk->pm.rm_id = local_id;
	rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	return 0;
}

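/* Close the subflow(s) bound to the given local address id; the actual work
 * is delegated to the in-kernel netlink path manager, under the pm lock.
 */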
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id)
{
	pr_debug("msk=%p, local_id=%d", msk, local_id);

	spin_lock_bh(&msk->pm.lock);
	mptcp_pm_nl_rm_subflow_received(msk, local_id);
	spin_unlock_bh(&msk->pm.lock);
	return 0;
}

/* path manager event handlers */

void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
}

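/* Check whether a new subflow can be accepted: a lockless read filters the
 * common case, then the decision is re-validated under the pm lock;
 * accept_subflow is cleared once the subflow limit is reached.
 */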
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool ret = false;

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 pm->subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->subflows < pm->subflows_max;
		if (ret && ++pm->subflows == pm->subflows_max)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this
 * event can be served, possibly by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}

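/* The connection is fully established: schedule the MPTCP_PM_ESTABLISHED
 * event at most once, so the worker can react to it, e.g. by announcing
 * local addresses or creating new subflows.
 */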
void mptcp_pm_fully_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths - accept() and check_fully_established();
	 * be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);
	msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk,
				  struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
	pr_debug("msk=%p", msk);
}

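/* An ADD_ADDR option has been received: when the local PM is not accepting
 * new addresses, just echo it back right away; otherwise store the remote
 * address and defer the handling to the worker.
 */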
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	spin_lock_bh(&pm->lock);

	if (!READ_ONCE(pm->accept_addr)) {
		mptcp_pm_announce_addr(msk, addr, true, addr->port);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->remote = *addr;
	}

	spin_unlock_bh(&pm->lock);
}

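/* Schedule a dedicated ACK carrying the pending ADD_ADDR; this is needed
 * only for the IPv6 and port-carrying variants, presumably because those
 * are too large to share the option space of a plain data packet.
 */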
void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal_ipv6(msk) &&
	    !mptcp_pm_should_add_signal_port(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}

void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d", msk, rm_id);

	spin_lock_bh(&pm->lock);
	mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
	pm->rm_id = rm_id;
	spin_unlock_bh(&pm->lock);
}

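/* An MP_PRIO option has been received on this subflow: update the subflow
 * backup flag accordingly.
 */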
void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	subflow->backup = bkup;
}

/* path manager helpers */

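/* The two signal helpers below are meant for the TCP option writing path:
 * they check for a pending signal, verify that it fits in the remaining
 * option space and, on success, consume it by clearing addr_signal.
 * A simplified caller sketch, for illustration only (the real call sites
 * live in options.c and may differ):
 *
 *	if (mptcp_pm_add_addr_signal(msk, remaining, &saddr, &echo, &port)) {
 *		... emit an ADD_ADDR suboption built from saddr/echo/port ...
 *		remaining -= mptcp_add_addr_len(saddr.family, echo, port);
 *	}
 */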
bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			      struct mptcp_addr_info *saddr, bool *echo, bool *port)
{
	bool ret = false;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	*echo = mptcp_pm_should_add_signal_echo(msk);
	*port = mptcp_pm_should_add_signal_port(msk);

	if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo, *port))
		goto out_unlock;

	*saddr = msk->pm.local;
	WRITE_ONCE(msk->pm.addr_signal, 0);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     u8 *rm_id)
{
	bool ret = false;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	if (remaining < TCPOLEN_MPTCP_RM_ADDR_BASE)
		goto out_unlock;

	*rm_id = msk->pm.rm_id;
	WRITE_ONCE(msk->pm.addr_signal, 0);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

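/* Map the given socket to a local address id, delegating to the in-kernel
 * netlink path manager.
 */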
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}

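/* Initialize the per-connection path manager state; called when the MPTCP
 * socket is created, before any of the handlers above can run.
 */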
void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	msk->pm.add_addr_signaled = 0;
	msk->pm.add_addr_accepted = 0;
	msk->pm.local_addr_used = 0;
	msk->pm.subflows = 0;
	msk->pm.rm_id = 0;
	WRITE_ONCE(msk->pm.work_pending, false);
	WRITE_ONCE(msk->pm.addr_signal, 0);
	WRITE_ONCE(msk->pm.accept_addr, false);
	WRITE_ONCE(msk->pm.accept_subflow, false);
	msk->pm.status = 0;

	spin_lock_init(&msk->pm.lock);
	INIT_LIST_HEAD(&msk->pm.anno_list);

	mptcp_pm_nl_data_init(msk);
}

void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}