Commit | Line | Data |
---|---|---|
1b1c7a0e PK |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Multipath TCP | |
3 | * | |
4 | * Copyright (c) 2019, Intel Corporation. | |
5 | */ | |
c85adced GT |
6 | #define pr_fmt(fmt) "MPTCP: " fmt |
7 | ||
1b1c7a0e PK |
8 | #include <linux/kernel.h> |
9 | #include <net/tcp.h> | |
10 | #include <net/mptcp.h> | |
11 | #include "protocol.h" | |
12 | ||
fc1b4e3b PA |
13 | #include "mib.h" |
14 | ||
1b1c7a0e PK |
15 | /* path manager command handlers */ |
16 | ||
/* Queue an ADD_ADDR (or its echo) for transmission to the peer.
 *
 * With @echo == false, @addr is a local address being advertised; with
 * @echo == true, @addr is the peer's address being acknowledged.
 * Returns 0 on success, or -EINVAL if the matching signal bit is still
 * pending.  Caller must hold msk->pm.lock.
 */
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo)
{
	u8 add_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);

	lockdep_assert_held(&msk->pm.lock);

	/* refuse to overwrite a not-yet-transmitted signal of the same kind */
	if (add_addr &
	    (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) {
		pr_warn("addr_signal error, add_addr=%d, echo=%d", add_addr, echo);
		return -EINVAL;
	}

	/* stash the address where the option-writing path will pick it up */
	if (echo) {
		msk->pm.remote = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		msk->pm.local = *addr;
		add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	/* WRITE_ONCE pairs with lockless readers of addr_signal */
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	return 0;
}
43 | ||
/* Queue a RM_ADDR signal carrying @rm_list for transmission to the peer.
 * Returns 0 on success, or -EINVAL if another signal is still pending.
 */
int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
	u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

	/* a previous signal has not been fully transmitted yet: bail out */
	if (rm_addr) {
		pr_warn("addr_signal error, rm_addr=%d", rm_addr);
		return -EINVAL;
	}

	msk->pm.rm_list_tx = *rm_list;
	rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
	/* WRITE_ONCE pairs with lockless readers of addr_signal */
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	/* push the option out on an ack right away */
	mptcp_pm_nl_addr_send_ack(msk);
	return 0;
}
61 | ||
ddd14bb8 | 62 | int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list) |
1b1c7a0e | 63 | { |
ddd14bb8 | 64 | pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr); |
0ee4261a GT |
65 | |
66 | spin_lock_bh(&msk->pm.lock); | |
ddd14bb8 | 67 | mptcp_pm_nl_rm_subflow_received(msk, rm_list); |
0ee4261a GT |
68 | spin_unlock_bh(&msk->pm.lock); |
69 | return 0; | |
1b1c7a0e PK |
70 | } |
71 | ||
72 | /* path manager event handlers */ | |
73 | ||
6c714f1b | 74 | void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side) |
1b1c7a0e PK |
75 | { |
76 | struct mptcp_pm_data *pm = &msk->pm; | |
77 | ||
78 | pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side); | |
79 | ||
80 | WRITE_ONCE(pm->server_side, server_side); | |
b911c97c | 81 | mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC); |
1b1c7a0e PK |
82 | } |
83 | ||
/* Decide whether an incoming join may create a new subflow, accounting
 * it against pm->subflows on acceptance.
 */
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	unsigned int subflows_max;
	int ret = 0;

	/* userspace PMs do their own subflow accounting */
	if (mptcp_pm_is_userspace(msk))
		return mptcp_userspace_pm_active(msk);

	subflows_max = mptcp_pm_get_subflows_max(msk);

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	/* re-check under the lock: the flag may have been cleared meanwhile */
	if (READ_ONCE(pm->accept_subflow)) {
		ret = pm->subflows < subflows_max;
		/* reaching the limit closes the door for later joins */
		if (ret && ++pm->subflows == subflows_max)
			WRITE_ONCE(pm->accept_subflow, false);
	}
	spin_unlock_bh(&pm->lock);

	return ret;
}
112 | ||
/* return true if the new status bit is currently cleared, that is, this event
 * can be served, eventually by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	/* already pending: the scheduled worker will handle it */
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	mptcp_schedule_work((struct sock *)msk);
	return true;
}
128 | ||
/* Move the connection to the fully-established state from the PM's
 * point of view and emit MPTCP_EVENT_ESTABLISHED exactly once.
 */
void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool announce = false;

	pr_debug("msk=%p", msk);

	spin_lock_bh(&pm->lock);

	/* mptcp_pm_fully_established() can be invoked by multiple
	 * racing paths - accept() and check_fully_established()
	 * be sure to serve this event only once.
	 */
	if (READ_ONCE(pm->work_pending) &&
	    !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
		announce = true;

	msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
	spin_unlock_bh(&pm->lock);

	/* emit the netlink event outside the pm lock */
	if (announce)
		mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, GFP_ATOMIC);
}
155 | ||
/* PM hook invoked on connection close; currently only traces the event. */
void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}
160 | ||
62535200 | 161 | void mptcp_pm_subflow_established(struct mptcp_sock *msk) |
1b1c7a0e | 162 | { |
926bdeab PK |
163 | struct mptcp_pm_data *pm = &msk->pm; |
164 | ||
1b1c7a0e | 165 | pr_debug("msk=%p", msk); |
926bdeab PK |
166 | |
167 | if (!READ_ONCE(pm->work_pending)) | |
168 | return; | |
169 | ||
170 | spin_lock_bh(&pm->lock); | |
171 | ||
172 | if (READ_ONCE(pm->work_pending)) | |
173 | mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED); | |
174 | ||
175 | spin_unlock_bh(&pm->lock); | |
1b1c7a0e PK |
176 | } |
177 | ||
/* Invoked when a subflow is being closed: update PM subflow accounting
 * and let the PM try to establish the next subflows, if possible.
 */
void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
				 const struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool update_subflows;

	/* only the in-kernel PM keeps the counter for join subflows */
	update_subflows = (subflow->request_join || subflow->mp_join) &&
			  mptcp_pm_is_kernel(msk);
	if (!READ_ONCE(pm->work_pending) && !update_subflows)
		return;

	spin_lock_bh(&pm->lock);
	if (update_subflows)
		__mptcp_pm_close_subflow(msk);

	/* Even if this subflow is not really established, tell the PM to try
	 * to pick the next ones, if possible.
	 */
	if (mptcp_pm_nl_check_work_pending(msk))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}
201 | ||
/* Handle an ADD_ADDR option received from the peer on @ssk: either echo
 * it right away, hand it to the worker for subflow creation, or account
 * the announcement as dropped.
 */
void mptcp_pm_add_addr_received(const struct sock *ssk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	/* notify userspace listeners before taking the pm lock */
	mptcp_event_addr_announced(ssk, addr);

	spin_lock_bh(&pm->lock);

	if (mptcp_pm_is_userspace(msk)) {
		/* userspace PM active: only echo; the daemon creates subflows */
		if (mptcp_userspace_pm_active(msk)) {
			mptcp_pm_announce_addr(msk, addr, true);
			mptcp_pm_add_addr_send_ack(msk);
		} else {
			__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
		}
	} else if (!READ_ONCE(pm->accept_addr)) {
		/* not accepting more addresses: just echo the announcement */
		mptcp_pm_announce_addr(msk, addr, true);
		mptcp_pm_add_addr_send_ack(msk);
	} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
		/* the worker will echo and possibly create a subflow */
		pm->remote = *addr;
	} else {
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
	}

	spin_unlock_bh(&pm->lock);
}
234 | ||
557963c3 | 235 | void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk, |
90d93088 | 236 | const struct mptcp_addr_info *addr) |
557963c3 GT |
237 | { |
238 | struct mptcp_pm_data *pm = &msk->pm; | |
239 | ||
240 | pr_debug("msk=%p", msk); | |
241 | ||
242 | spin_lock_bh(&pm->lock); | |
243 | ||
244 | if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending)) | |
245 | mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED); | |
246 | ||
247 | spin_unlock_bh(&pm->lock); | |
248 | } | |
249 | ||
84dfe367 GT |
250 | void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk) |
251 | { | |
b5a7acd3 | 252 | if (!mptcp_pm_should_add_signal(msk)) |
84dfe367 GT |
253 | return; |
254 | ||
255 | mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK); | |
1b1c7a0e PK |
256 | } |
257 | ||
/* Handle a RM_ADDR option from the peer: notify userspace for each
 * removed id, then hand the list to the worker or account the drop.
 */
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
			       const struct mptcp_rm_list *rm_list)
{
	struct mptcp_pm_data *pm = &msk->pm;
	u8 i;

	pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);

	/* netlink events are emitted outside the pm lock */
	for (i = 0; i < rm_list->nr; i++)
		mptcp_event_addr_removed(msk, rm_list->ids[i]);

	spin_lock_bh(&pm->lock);
	if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
		pm->rm_list_rx = *rm_list;
	else
		__MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
	spin_unlock_bh(&pm->lock);
}
276 | ||
/* Handle an MP_PRIO option on @ssk: update the subflow's backup flag,
 * force the packet scheduler to re-evaluate, and notify userspace.
 */
void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;
	struct mptcp_sock *msk;

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	msk = mptcp_sk(sk);
	if (subflow->backup != bkup) {
		subflow->backup = bkup;
		/* clear the scheduler's last-sent cache now, or defer it to
		 * the release callback when the msk is owned by user context
		 */
		mptcp_data_lock(sk);
		if (!sock_owned_by_user(sk))
			msk->last_snd = NULL;
		else
			__set_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags);
		mptcp_data_unlock(sk);
	}

	mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC);
}
297 | ||
/* Handle an MP_FAIL option on @sk: either answer with MP_FAIL plus an
 * infinite mapping, or complete an MP_FAIL exchange we initiated.
 */
void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);

	pr_debug("fail_seq=%llu", fail_seq);

	/* without infinite fallback available, nothing can be done here */
	if (!READ_ONCE(msk->allow_infinite_fallback))
		return;

	if (!subflow->fail_tout) {
		/* the peer started the MP_FAIL handshake: answer it */
		pr_debug("send MP_FAIL response and infinite map");

		subflow->send_mp_fail = 1;
		subflow->send_infinite_map = 1;
		tcp_send_ack(sk);
	} else {
		/* this is the response to our own MP_FAIL: stop the timeout */
		pr_debug("MP_FAIL response received");
		WRITE_ONCE(subflow->fail_tout, 0);
	}
}
319 | ||
1b1c7a0e PK |
320 | /* path manager helpers */ |
321 | ||
/* Fill @addr/@echo from the pending ADD_ADDR signal if the remaining
 * TCP option space allows it; clears the consumed addr_signal bit.
 * Returns true when @addr has been populated.
 */
bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
			      unsigned int opt_size, unsigned int remaining,
			      struct mptcp_addr_info *addr, bool *echo,
			      bool *drop_other_suboptions)
{
	int ret = false;
	u8 add_addr;
	u8 family;
	bool port;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_add_signal(msk))
		goto out_unlock;

	/* always drop every other options for pure ack ADD_ADDR; this is a
	 * plain dup-ack from TCP perspective. The other MPTCP-relevant info,
	 * if any, will be carried by the 'original' TCP ack
	 */
	if (skb && skb_is_tcp_pure_ack(skb)) {
		remaining += opt_size;
		*drop_other_suboptions = true;
	}

	*echo = mptcp_pm_should_add_signal_echo(msk);
	port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port);

	/* bail out if the option does not fit; the signal stays pending */
	family = *echo ? msk->pm.remote.family : msk->pm.local.family;
	if (remaining < mptcp_add_addr_len(family, *echo, port))
		goto out_unlock;

	/* copy the address out and clear the corresponding signal bit */
	if (*echo) {
		*addr = msk->pm.remote;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO);
	} else {
		*addr = msk->pm.local;
		add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL);
	}
	WRITE_ONCE(msk->pm.addr_signal, add_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}
368 | ||
/* Fill @rm_list from the pending RM_ADDR signal if @remaining option
 * space is sufficient; clears the signal bit once consumed or invalid.
 * Returns true when @rm_list has been populated.
 */
bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			     struct mptcp_rm_list *rm_list)
{
	int ret = false, len;
	u8 rm_addr;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_rm_signal(msk))
		goto out_unlock;

	rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL);
	len = mptcp_rm_addr_len(&msk->pm.rm_list_tx);
	if (len < 0) {
		/* invalid list: drop the signal entirely */
		WRITE_ONCE(msk->pm.addr_signal, rm_addr);
		goto out_unlock;
	}
	/* not enough option space: keep the signal pending for later */
	if (remaining < len)
		goto out_unlock;

	*rm_list = msk->pm.rm_list_tx;
	WRITE_ONCE(msk->pm.addr_signal, rm_addr);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}
398 | ||
/* Resolve the local address id for @skc; delegates to the in-kernel
 * netlink path manager.
 */
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}
403 | ||
/* Track retransmit periods with no incoming progress on @ssk and let
 * the PM flag the subflow as stale or active accordingly.
 */
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp);

	/* keep track of rtx periods with no progress */
	if (!subflow->stale_count) {
		subflow->stale_rcv_tstamp = rcv_tstamp;
		subflow->stale_count++;
	} else if (subflow->stale_rcv_tstamp == rcv_tstamp) {
		/* still no progress: saturate the counter, report to the PM */
		if (subflow->stale_count < U8_MAX)
			subflow->stale_count++;
		mptcp_pm_nl_subflow_chk_stale(msk, ssk);
	} else {
		/* progress was made: the subflow is healthy again */
		subflow->stale_count = 0;
		mptcp_subflow_set_active(subflow);
	}
}
422 | ||
fb00ee4f MB |
423 | /* if sk is ipv4 or ipv6_only allows only same-family local and remote addresses, |
424 | * otherwise allow any matching local/remote pair | |
425 | */ | |
426 | bool mptcp_pm_addr_families_match(const struct sock *sk, | |
427 | const struct mptcp_addr_info *loc, | |
428 | const struct mptcp_addr_info *rem) | |
429 | { | |
430 | bool mptcp_is_v4 = sk->sk_family == AF_INET; | |
431 | ||
432 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
433 | bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6); | |
434 | bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6); | |
435 | ||
436 | if (mptcp_is_v4) | |
437 | return loc_is_v4 && rem_is_v4; | |
438 | ||
439 | if (ipv6_only_sock(sk)) | |
440 | return !loc_is_v4 && !rem_is_v4; | |
441 | ||
442 | return loc_is_v4 == rem_is_v4; | |
443 | #else | |
444 | return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET; | |
445 | #endif | |
446 | } | |
447 | ||
/* Reset all PM state for @msk, re-reading the per-netns PM type and
 * limits; also invoked at msk creation time via mptcp_pm_data_init().
 */
void mptcp_pm_data_reset(struct mptcp_sock *msk)
{
	u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
	struct mptcp_pm_data *pm = &msk->pm;

	pm->add_addr_signaled = 0;
	pm->add_addr_accepted = 0;
	pm->local_addr_used = 0;
	pm->subflows = 0;
	pm->rm_list_tx.nr = 0;
	pm->rm_list_rx.nr = 0;
	WRITE_ONCE(pm->pm_type, pm_type);

	if (pm_type == MPTCP_PM_TYPE_KERNEL) {
		bool subflows_allowed = !!mptcp_pm_get_subflows_max(msk);

		/* pm->work_pending must be only be set to 'true' when
		 * pm->pm_type is set to MPTCP_PM_TYPE_KERNEL
		 */
		WRITE_ONCE(pm->work_pending,
			   (!!mptcp_pm_get_local_addr_max(msk) &&
			    subflows_allowed) ||
			   !!mptcp_pm_get_add_addr_signal_max(msk));
		WRITE_ONCE(pm->accept_addr,
			   !!mptcp_pm_get_add_addr_accept_max(msk) &&
			   subflows_allowed);
		WRITE_ONCE(pm->accept_subflow, subflows_allowed);
	} else {
		/* non-kernel PM: the worker has nothing to do on its own */
		WRITE_ONCE(pm->work_pending, 0);
		WRITE_ONCE(pm->accept_addr, 0);
		WRITE_ONCE(pm->accept_subflow, 0);
	}

	WRITE_ONCE(pm->addr_signal, 0);
	WRITE_ONCE(pm->remote_deny_join_id0, false);
	pm->status = 0;
	/* all address ids are initially available */
	bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
}
486 | ||
487 | void mptcp_pm_data_init(struct mptcp_sock *msk) | |
488 | { | |
1b1c7a0e | 489 | spin_lock_init(&msk->pm.lock); |
b6c08380 | 490 | INIT_LIST_HEAD(&msk->pm.anno_list); |
4638de5a | 491 | INIT_LIST_HEAD(&msk->pm.userspace_pm_local_addr_list); |
b29fcfb5 | 492 | mptcp_pm_data_reset(msk); |
1b1c7a0e PK |
493 | } |
494 | ||
d39dceca | 495 | void __init mptcp_pm_init(void) |
1b1c7a0e | 496 | { |
01cacb00 | 497 | mptcp_pm_nl_init(); |
1b1c7a0e | 498 | } |