Commit | Line | Data |
---|---|---|
1b1c7a0e PK |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Multipath TCP | |
3 | * | |
4 | * Copyright (c) 2019, Intel Corporation. | |
5 | */ | |
c85adced GT |
6 | #define pr_fmt(fmt) "MPTCP: " fmt |
7 | ||
1b1c7a0e | 8 | #include <linux/kernel.h> |
1b1c7a0e PK |
9 | #include <net/mptcp.h> |
10 | #include "protocol.h" | |
11 | ||
fc1b4e3b PA |
12 | #include "mib.h" |
13 | ||
1b1c7a0e PK |
14 | /* path manager command handlers */ |
15 | ||
16 | int mptcp_pm_announce_addr(struct mptcp_sock *msk, | |
6a6c05a8 | 17 | const struct mptcp_addr_info *addr, |
f7efc777 | 18 | bool echo) |
1b1c7a0e | 19 | { |
13ad9f01 | 20 | u8 add_addr = READ_ONCE(msk->pm.addr_signal); |
d91d322a | 21 | |
18fc1a92 | 22 | pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo); |
926bdeab | 23 | |
3abc05d9 FW |
24 | lockdep_assert_held(&msk->pm.lock); |
25 | ||
18fc1a92 YL |
26 | if (add_addr & |
27 | (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) { | |
45b1a122 PA |
28 | MPTCP_INC_STATS(sock_net((struct sock *)msk), |
29 | echo ? MPTCP_MIB_ECHOADDTXDROP : MPTCP_MIB_ADDADDRTXDROP); | |
42842a42 GT |
30 | return -EINVAL; |
31 | } | |
32 | ||
18fc1a92 YL |
33 | if (echo) { |
34 | msk->pm.remote = *addr; | |
d91d322a | 35 | add_addr |= BIT(MPTCP_ADD_ADDR_ECHO); |
18fc1a92 YL |
36 | } else { |
37 | msk->pm.local = *addr; | |
38 | add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL); | |
39 | } | |
13ad9f01 | 40 | WRITE_ONCE(msk->pm.addr_signal, add_addr); |
926bdeab | 41 | return 0; |
1b1c7a0e PK |
42 | } |
43 | ||
cbde2787 | 44 | int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list) |
1b1c7a0e | 45 | { |
13ad9f01 | 46 | u8 rm_addr = READ_ONCE(msk->pm.addr_signal); |
42842a42 | 47 | |
cbde2787 | 48 | pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr); |
b6c08380 | 49 | |
42842a42 | 50 | if (rm_addr) { |
45b1a122 PA |
51 | MPTCP_ADD_STATS(sock_net((struct sock *)msk), |
52 | MPTCP_MIB_RMADDRTXDROP, rm_list->nr); | |
42842a42 GT |
53 | return -EINVAL; |
54 | } | |
55 | ||
cbde2787 | 56 | msk->pm.rm_list_tx = *rm_list; |
42842a42 | 57 | rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL); |
13ad9f01 | 58 | WRITE_ONCE(msk->pm.addr_signal, rm_addr); |
b46a0238 | 59 | mptcp_pm_nl_addr_send_ack(msk); |
b6c08380 | 60 | return 0; |
1b1c7a0e PK |
61 | } |
62 | ||
ddd14bb8 | 63 | int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list) |
1b1c7a0e | 64 | { |
ddd14bb8 | 65 | pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr); |
0ee4261a GT |
66 | |
67 | spin_lock_bh(&msk->pm.lock); | |
ddd14bb8 | 68 | mptcp_pm_nl_rm_subflow_received(msk, rm_list); |
0ee4261a GT |
69 | spin_unlock_bh(&msk->pm.lock); |
70 | return 0; | |
1b1c7a0e PK |
71 | } |
72 | ||
73 | /* path manager event handlers */ | |
74 | ||
6c714f1b | 75 | void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side) |
1b1c7a0e PK |
76 | { |
77 | struct mptcp_pm_data *pm = &msk->pm; | |
78 | ||
b9f45543 | 79 | pr_debug("msk=%p, token=%u side=%d", msk, READ_ONCE(msk->token), server_side); |
1b1c7a0e PK |
80 | |
81 | WRITE_ONCE(pm->server_side, server_side); | |
b911c97c | 82 | mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC); |
1b1c7a0e PK |
83 | } |
84 | ||
85 | bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk) | |
86 | { | |
926bdeab | 87 | struct mptcp_pm_data *pm = &msk->pm; |
a914e586 | 88 | unsigned int subflows_max; |
f58f065a | 89 | int ret = 0; |
926bdeab | 90 | |
77e4b94a GT |
91 | if (mptcp_pm_is_userspace(msk)) { |
92 | if (mptcp_userspace_pm_active(msk)) { | |
93 | spin_lock_bh(&pm->lock); | |
94 | pm->subflows++; | |
95 | spin_unlock_bh(&pm->lock); | |
96 | return true; | |
97 | } | |
98 | return false; | |
99 | } | |
4d25247d | 100 | |
a914e586 GT |
101 | subflows_max = mptcp_pm_get_subflows_max(msk); |
102 | ||
926bdeab | 103 | pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows, |
a914e586 | 104 | subflows_max, READ_ONCE(pm->accept_subflow)); |
926bdeab PK |
105 | |
106 | /* try to avoid acquiring the lock below */ | |
107 | if (!READ_ONCE(pm->accept_subflow)) | |
108 | return false; | |
109 | ||
110 | spin_lock_bh(&pm->lock); | |
f58f065a | 111 | if (READ_ONCE(pm->accept_subflow)) { |
a914e586 GT |
112 | ret = pm->subflows < subflows_max; |
113 | if (ret && ++pm->subflows == subflows_max) | |
f58f065a GT |
114 | WRITE_ONCE(pm->accept_subflow, false); |
115 | } | |
926bdeab PK |
116 | spin_unlock_bh(&pm->lock); |
117 | ||
118 | return ret; | |
119 | } | |
120 | ||
121 | /* return true if the new status bit is currently cleared, that is, this event | |
122 | * can be server, eventually by an already scheduled work | |
123 | */ | |
124 | static bool mptcp_pm_schedule_work(struct mptcp_sock *msk, | |
125 | enum mptcp_pm_status new_status) | |
126 | { | |
127 | pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status, | |
128 | BIT(new_status)); | |
129 | if (msk->pm.status & BIT(new_status)) | |
130 | return false; | |
131 | ||
132 | msk->pm.status |= BIT(new_status); | |
ba8f48f7 | 133 | mptcp_schedule_work((struct sock *)msk); |
926bdeab | 134 | return true; |
1b1c7a0e PK |
135 | } |
136 | ||
7a486c44 | 137 | void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk) |
1b1c7a0e | 138 | { |
926bdeab | 139 | struct mptcp_pm_data *pm = &msk->pm; |
b911c97c | 140 | bool announce = false; |
926bdeab | 141 | |
1b1c7a0e | 142 | pr_debug("msk=%p", msk); |
926bdeab | 143 | |
926bdeab PK |
144 | spin_lock_bh(&pm->lock); |
145 | ||
5b950ff4 PA |
146 | /* mptcp_pm_fully_established() can be invoked by multiple |
147 | * racing paths - accept() and check_fully_established() | |
148 | * be sure to serve this event only once. | |
149 | */ | |
150 | if (READ_ONCE(pm->work_pending) && | |
151 | !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED))) | |
926bdeab PK |
152 | mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED); |
153 | ||
b911c97c FW |
154 | if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0) |
155 | announce = true; | |
156 | ||
157 | msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED); | |
926bdeab | 158 | spin_unlock_bh(&pm->lock); |
b911c97c FW |
159 | |
160 | if (announce) | |
7a486c44 | 161 | mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, GFP_ATOMIC); |
1b1c7a0e PK |
162 | } |
163 | ||
/* PM hook for connection close; currently only traces the event. */
void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p\n", msk);
}
168 | ||
62535200 | 169 | void mptcp_pm_subflow_established(struct mptcp_sock *msk) |
1b1c7a0e | 170 | { |
926bdeab PK |
171 | struct mptcp_pm_data *pm = &msk->pm; |
172 | ||
1b1c7a0e | 173 | pr_debug("msk=%p", msk); |
926bdeab PK |
174 | |
175 | if (!READ_ONCE(pm->work_pending)) | |
176 | return; | |
177 | ||
178 | spin_lock_bh(&pm->lock); | |
179 | ||
180 | if (READ_ONCE(pm->work_pending)) | |
181 | mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED); | |
182 | ||
183 | spin_unlock_bh(&pm->lock); | |
1b1c7a0e PK |
184 | } |
185 | ||
74cbb0c6 | 186 | void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, |
a88c9e49 | 187 | const struct mptcp_subflow_context *subflow) |
1b1c7a0e | 188 | { |
a88c9e49 PA |
189 | struct mptcp_pm_data *pm = &msk->pm; |
190 | bool update_subflows; | |
191 | ||
77e4b94a GT |
192 | update_subflows = subflow->request_join || subflow->mp_join; |
193 | if (mptcp_pm_is_userspace(msk)) { | |
194 | if (update_subflows) { | |
195 | spin_lock_bh(&pm->lock); | |
196 | pm->subflows--; | |
197 | spin_unlock_bh(&pm->lock); | |
198 | } | |
199 | return; | |
200 | } | |
201 | ||
a88c9e49 PA |
202 | if (!READ_ONCE(pm->work_pending) && !update_subflows) |
203 | return; | |
204 | ||
205 | spin_lock_bh(&pm->lock); | |
206 | if (update_subflows) | |
95d68651 | 207 | __mptcp_pm_close_subflow(msk); |
a88c9e49 PA |
208 | |
209 | /* Even if this subflow is not really established, tell the PM to try | |
210 | * to pick the next ones, if possible. | |
211 | */ | |
212 | if (mptcp_pm_nl_check_work_pending(msk)) | |
213 | mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED); | |
214 | ||
215 | spin_unlock_bh(&pm->lock); | |
1b1c7a0e PK |
216 | } |
217 | ||
d1ace2d9 | 218 | void mptcp_pm_add_addr_received(const struct sock *ssk, |
1b1c7a0e PK |
219 | const struct mptcp_addr_info *addr) |
220 | { | |
d1ace2d9 KM |
221 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); |
222 | struct mptcp_sock *msk = mptcp_sk(subflow->conn); | |
926bdeab PK |
223 | struct mptcp_pm_data *pm = &msk->pm; |
224 | ||
225 | pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id, | |
226 | READ_ONCE(pm->accept_addr)); | |
227 | ||
d1ace2d9 | 228 | mptcp_event_addr_announced(ssk, addr); |
b911c97c | 229 | |
926bdeab PK |
230 | spin_lock_bh(&pm->lock); |
231 | ||
4d25247d KM |
232 | if (mptcp_pm_is_userspace(msk)) { |
233 | if (mptcp_userspace_pm_active(msk)) { | |
234 | mptcp_pm_announce_addr(msk, addr, true); | |
235 | mptcp_pm_add_addr_send_ack(msk); | |
236 | } else { | |
237 | __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP); | |
238 | } | |
239 | } else if (!READ_ONCE(pm->accept_addr)) { | |
f7efc777 | 240 | mptcp_pm_announce_addr(msk, addr, true); |
84dfe367 GT |
241 | mptcp_pm_add_addr_send_ack(msk); |
242 | } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) { | |
926bdeab | 243 | pm->remote = *addr; |
f73c1194 PA |
244 | } else { |
245 | __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP); | |
84dfe367 | 246 | } |
926bdeab PK |
247 | |
248 | spin_unlock_bh(&pm->lock); | |
84dfe367 GT |
249 | } |
250 | ||
557963c3 | 251 | void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk, |
90d93088 | 252 | const struct mptcp_addr_info *addr) |
557963c3 GT |
253 | { |
254 | struct mptcp_pm_data *pm = &msk->pm; | |
255 | ||
256 | pr_debug("msk=%p", msk); | |
257 | ||
258 | spin_lock_bh(&pm->lock); | |
259 | ||
260 | if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending)) | |
261 | mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED); | |
262 | ||
263 | spin_unlock_bh(&pm->lock); | |
264 | } | |
265 | ||
84dfe367 GT |
266 | void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk) |
267 | { | |
b5a7acd3 | 268 | if (!mptcp_pm_should_add_signal(msk)) |
84dfe367 GT |
269 | return; |
270 | ||
271 | mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK); | |
1b1c7a0e PK |
272 | } |
273 | ||
5c4a824d GT |
274 | void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, |
275 | const struct mptcp_rm_list *rm_list) | |
d0876b22 GT |
276 | { |
277 | struct mptcp_pm_data *pm = &msk->pm; | |
5c4a824d | 278 | u8 i; |
d0876b22 | 279 | |
5c4a824d | 280 | pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr); |
d0876b22 | 281 | |
5c4a824d GT |
282 | for (i = 0; i < rm_list->nr; i++) |
283 | mptcp_event_addr_removed(msk, rm_list->ids[i]); | |
b911c97c | 284 | |
d0876b22 | 285 | spin_lock_bh(&pm->lock); |
f73c1194 PA |
286 | if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED)) |
287 | pm->rm_list_rx = *rm_list; | |
288 | else | |
289 | __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP); | |
d0876b22 GT |
290 | spin_unlock_bh(&pm->lock); |
291 | } | |
292 | ||
43f5b111 | 293 | void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup) |
40453a5c | 294 | { |
43f5b111 PA |
295 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); |
296 | struct sock *sk = subflow->conn; | |
297 | struct mptcp_sock *msk; | |
40453a5c GT |
298 | |
299 | pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup); | |
43f5b111 | 300 | msk = mptcp_sk(sk); |
ebc1e08f | 301 | if (subflow->backup != bkup) |
43f5b111 | 302 | subflow->backup = bkup; |
b911c97c | 303 | |
43f5b111 | 304 | mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC); |
40453a5c GT |
305 | } |
306 | ||
5580d41b GT |
307 | void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq) |
308 | { | |
1e39e5a3 GT |
309 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); |
310 | struct mptcp_sock *msk = mptcp_sk(subflow->conn); | |
311 | ||
5580d41b | 312 | pr_debug("fail_seq=%llu", fail_seq); |
1e39e5a3 | 313 | |
7b16871f | 314 | if (!READ_ONCE(msk->allow_infinite_fallback)) |
9c81be0d GT |
315 | return; |
316 | ||
76a13b31 | 317 | if (!subflow->fail_tout) { |
9c81be0d GT |
318 | pr_debug("send MP_FAIL response and infinite map"); |
319 | ||
320 | subflow->send_mp_fail = 1; | |
1e39e5a3 | 321 | subflow->send_infinite_map = 1; |
76a13b31 GT |
322 | tcp_send_ack(sk); |
323 | } else { | |
49fa1919 | 324 | pr_debug("MP_FAIL response received"); |
76a13b31 | 325 | WRITE_ONCE(subflow->fail_tout, 0); |
9c81be0d | 326 | } |
5580d41b GT |
327 | } |
328 | ||
1b1c7a0e PK |
329 | /* path manager helpers */ |
330 | ||
90d93088 | 331 | bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb, |
1f5e9e2f | 332 | unsigned int opt_size, unsigned int remaining, |
f462a446 | 333 | struct mptcp_addr_info *addr, bool *echo, |
af7939f3 | 334 | bool *drop_other_suboptions) |
1b1c7a0e | 335 | { |
926bdeab | 336 | int ret = false; |
119c0220 | 337 | u8 add_addr; |
f462a446 | 338 | u8 family; |
af7939f3 | 339 | bool port; |
926bdeab PK |
340 | |
341 | spin_lock_bh(&msk->pm.lock); | |
342 | ||
343 | /* double check after the lock is acquired */ | |
f643b803 | 344 | if (!mptcp_pm_should_add_signal(msk)) |
926bdeab PK |
345 | goto out_unlock; |
346 | ||
1f5e9e2f YL |
347 | /* always drop every other options for pure ack ADD_ADDR; this is a |
348 | * plain dup-ack from TCP perspective. The other MPTCP-relevant info, | |
349 | * if any, will be carried by the 'original' TCP ack | |
350 | */ | |
351 | if (skb && skb_is_tcp_pure_ack(skb)) { | |
352 | remaining += opt_size; | |
353 | *drop_other_suboptions = true; | |
354 | } | |
355 | ||
d91d322a | 356 | *echo = mptcp_pm_should_add_signal_echo(msk); |
af7939f3 | 357 | port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port); |
456afe01 | 358 | |
f462a446 | 359 | family = *echo ? msk->pm.remote.family : msk->pm.local.family; |
af7939f3 | 360 | if (remaining < mptcp_add_addr_len(family, *echo, port)) |
926bdeab PK |
361 | goto out_unlock; |
362 | ||
f462a446 YL |
363 | if (*echo) { |
364 | *addr = msk->pm.remote; | |
119c0220 | 365 | add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO); |
f462a446 YL |
366 | } else { |
367 | *addr = msk->pm.local; | |
119c0220 | 368 | add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL); |
f462a446 | 369 | } |
119c0220 | 370 | WRITE_ONCE(msk->pm.addr_signal, add_addr); |
926bdeab PK |
371 | ret = true; |
372 | ||
373 | out_unlock: | |
374 | spin_unlock_bh(&msk->pm.lock); | |
375 | return ret; | |
1b1c7a0e PK |
376 | } |
377 | ||
5cb104ae | 378 | bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining, |
6445e17a | 379 | struct mptcp_rm_list *rm_list) |
5cb104ae | 380 | { |
cbde2787 | 381 | int ret = false, len; |
119c0220 | 382 | u8 rm_addr; |
5cb104ae GT |
383 | |
384 | spin_lock_bh(&msk->pm.lock); | |
385 | ||
386 | /* double check after the lock is acquired */ | |
387 | if (!mptcp_pm_should_rm_signal(msk)) | |
388 | goto out_unlock; | |
389 | ||
119c0220 | 390 | rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL); |
cbde2787 GT |
391 | len = mptcp_rm_addr_len(&msk->pm.rm_list_tx); |
392 | if (len < 0) { | |
119c0220 | 393 | WRITE_ONCE(msk->pm.addr_signal, rm_addr); |
cbde2787 GT |
394 | goto out_unlock; |
395 | } | |
396 | if (remaining < len) | |
5cb104ae GT |
397 | goto out_unlock; |
398 | ||
cbde2787 | 399 | *rm_list = msk->pm.rm_list_tx; |
119c0220 | 400 | WRITE_ONCE(msk->pm.addr_signal, rm_addr); |
5cb104ae GT |
401 | ret = true; |
402 | ||
403 | out_unlock: | |
404 | spin_unlock_bh(&msk->pm.lock); | |
405 | return ret; | |
406 | } | |
407 | ||
1b1c7a0e PK |
408 | int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc) |
409 | { | |
9bbec87e GT |
410 | struct mptcp_addr_info skc_local; |
411 | struct mptcp_addr_info msk_local; | |
412 | ||
413 | if (WARN_ON_ONCE(!msk)) | |
414 | return -1; | |
415 | ||
416 | /* The 0 ID mapping is defined by the first subflow, copied into the msk | |
417 | * addr | |
418 | */ | |
419 | mptcp_local_address((struct sock_common *)msk, &msk_local); | |
420 | mptcp_local_address((struct sock_common *)skc, &skc_local); | |
421 | if (mptcp_addresses_equal(&msk_local, &skc_local, false)) | |
422 | return 0; | |
423 | ||
424 | if (mptcp_pm_is_userspace(msk)) | |
425 | return mptcp_userspace_pm_get_local_id(msk, &skc_local); | |
426 | return mptcp_pm_nl_get_local_id(msk, &skc_local); | |
1b1c7a0e PK |
427 | } |
428 | ||
f40be0db GT |
429 | int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id, |
430 | u8 *flags, int *ifindex) | |
431 | { | |
432 | *flags = 0; | |
433 | *ifindex = 0; | |
434 | ||
435 | if (!id) | |
436 | return 0; | |
437 | ||
438 | if (mptcp_pm_is_userspace(msk)) | |
439 | return mptcp_userspace_pm_get_flags_and_ifindex_by_id(msk, id, flags, ifindex); | |
440 | return mptcp_pm_nl_get_flags_and_ifindex_by_id(msk, id, flags, ifindex); | |
441 | } | |
442 | ||
564ae679 GT |
443 | int mptcp_pm_get_addr(struct sk_buff *skb, struct genl_info *info) |
444 | { | |
445 | if (info->attrs[MPTCP_PM_ATTR_TOKEN]) | |
446 | return mptcp_userspace_pm_get_addr(skb, info); | |
447 | return mptcp_pm_nl_get_addr(skb, info); | |
448 | } | |
449 | ||
9ae7846c GT |
450 | int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb) |
451 | { | |
452 | const struct genl_info *info = genl_info_dump(cb); | |
453 | ||
454 | if (info->attrs[MPTCP_PM_ATTR_TOKEN]) | |
455 | return mptcp_userspace_pm_dump_addr(msg, cb); | |
456 | return mptcp_pm_nl_dump_addr(msg, cb); | |
457 | } | |
458 | ||
6a42477f | 459 | int mptcp_pm_set_flags(struct sk_buff *skb, struct genl_info *info) |
6ba7ce89 | 460 | { |
6a42477f GT |
461 | if (info->attrs[MPTCP_PM_ATTR_TOKEN]) |
462 | return mptcp_userspace_pm_set_flags(skb, info); | |
463 | return mptcp_pm_nl_set_flags(skb, info); | |
6ba7ce89 GT |
464 | } |
465 | ||
71b7dec2 PA |
466 | void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) |
467 | { | |
468 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); | |
469 | u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp); | |
470 | ||
471 | /* keep track of rtx periods with no progress */ | |
472 | if (!subflow->stale_count) { | |
473 | subflow->stale_rcv_tstamp = rcv_tstamp; | |
474 | subflow->stale_count++; | |
475 | } else if (subflow->stale_rcv_tstamp == rcv_tstamp) { | |
476 | if (subflow->stale_count < U8_MAX) | |
477 | subflow->stale_count++; | |
ff5a0b42 | 478 | mptcp_pm_nl_subflow_chk_stale(msk, ssk); |
71b7dec2 PA |
479 | } else { |
480 | subflow->stale_count = 0; | |
ff5a0b42 | 481 | mptcp_subflow_set_active(subflow); |
71b7dec2 PA |
482 | } |
483 | } | |
484 | ||
fb00ee4f MB |
485 | /* if sk is ipv4 or ipv6_only allows only same-family local and remote addresses, |
486 | * otherwise allow any matching local/remote pair | |
487 | */ | |
488 | bool mptcp_pm_addr_families_match(const struct sock *sk, | |
489 | const struct mptcp_addr_info *loc, | |
490 | const struct mptcp_addr_info *rem) | |
491 | { | |
492 | bool mptcp_is_v4 = sk->sk_family == AF_INET; | |
493 | ||
494 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
495 | bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6); | |
496 | bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6); | |
497 | ||
498 | if (mptcp_is_v4) | |
499 | return loc_is_v4 && rem_is_v4; | |
500 | ||
501 | if (ipv6_only_sock(sk)) | |
502 | return !loc_is_v4 && !rem_is_v4; | |
503 | ||
504 | return loc_is_v4 == rem_is_v4; | |
505 | #else | |
506 | return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET; | |
507 | #endif | |
508 | } | |
509 | ||
b29fcfb5 | 510 | void mptcp_pm_data_reset(struct mptcp_sock *msk) |
1b1c7a0e | 511 | { |
6bb63ccc | 512 | u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk)); |
9273b9d5 | 513 | struct mptcp_pm_data *pm = &msk->pm; |
1b1c7a0e | 514 | |
9273b9d5 MM |
515 | pm->add_addr_signaled = 0; |
516 | pm->add_addr_accepted = 0; | |
517 | pm->local_addr_used = 0; | |
518 | pm->subflows = 0; | |
519 | pm->rm_list_tx.nr = 0; | |
520 | pm->rm_list_rx.nr = 0; | |
6bb63ccc MM |
521 | WRITE_ONCE(pm->pm_type, pm_type); |
522 | ||
523 | if (pm_type == MPTCP_PM_TYPE_KERNEL) { | |
524 | bool subflows_allowed = !!mptcp_pm_get_subflows_max(msk); | |
525 | ||
526 | /* pm->work_pending must be only be set to 'true' when | |
527 | * pm->pm_type is set to MPTCP_PM_TYPE_KERNEL | |
528 | */ | |
529 | WRITE_ONCE(pm->work_pending, | |
530 | (!!mptcp_pm_get_local_addr_max(msk) && | |
531 | subflows_allowed) || | |
532 | !!mptcp_pm_get_add_addr_signal_max(msk)); | |
533 | WRITE_ONCE(pm->accept_addr, | |
534 | !!mptcp_pm_get_add_addr_accept_max(msk) && | |
535 | subflows_allowed); | |
536 | WRITE_ONCE(pm->accept_subflow, subflows_allowed); | |
537 | } else { | |
538 | WRITE_ONCE(pm->work_pending, 0); | |
539 | WRITE_ONCE(pm->accept_addr, 0); | |
540 | WRITE_ONCE(pm->accept_subflow, 0); | |
541 | } | |
542 | ||
9273b9d5 | 543 | WRITE_ONCE(pm->addr_signal, 0); |
9273b9d5 MM |
544 | WRITE_ONCE(pm->remote_deny_join_id0, false); |
545 | pm->status = 0; | |
546 | bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); | |
b29fcfb5 PA |
547 | } |
548 | ||
549 | void mptcp_pm_data_init(struct mptcp_sock *msk) | |
550 | { | |
1b1c7a0e | 551 | spin_lock_init(&msk->pm.lock); |
b6c08380 | 552 | INIT_LIST_HEAD(&msk->pm.anno_list); |
4638de5a | 553 | INIT_LIST_HEAD(&msk->pm.userspace_pm_local_addr_list); |
b29fcfb5 | 554 | mptcp_pm_data_reset(msk); |
1b1c7a0e PK |
555 | } |
556 | ||
d39dceca | 557 | void __init mptcp_pm_init(void) |
1b1c7a0e | 558 | { |
01cacb00 | 559 | mptcp_pm_nl_init(); |
1b1c7a0e | 560 | } |