selftests/bpf: Fix some bugs in map_lookup_percpu_elem testcase
[linux-block.git] / net / mptcp / pm_netlink.c
CommitLineData
01cacb00
PA
1// SPDX-License-Identifier: GPL-2.0
2/* Multipath TCP
3 *
4 * Copyright (c) 2020, Red Hat, Inc.
5 */
6
c85adced
GT
7#define pr_fmt(fmt) "MPTCP: " fmt
8
01cacb00
PA
9#include <linux/inet.h>
10#include <linux/kernel.h>
11#include <net/tcp.h>
12#include <net/netns/generic.h>
13#include <net/mptcp.h>
14#include <net/genetlink.h>
15#include <uapi/linux/mptcp.h>
16
17#include "protocol.h"
7a7e52e3 18#include "mib.h"
01cacb00
PA
19
20/* forward declaration */
21static struct genl_family mptcp_genl_family;
22
23static int pm_nl_pernet_id;
24
25struct mptcp_pm_addr_entry {
26 struct list_head list;
01cacb00 27 struct mptcp_addr_info addr;
daa83ab0
GT
28 u8 flags;
29 int ifindex;
1729cf18 30 struct socket *lsk;
01cacb00
PA
31};
32
0abd40f8
GT
33struct mptcp_pm_add_entry {
34 struct list_head list;
35 struct mptcp_addr_info addr;
00cfd77b
GT
36 struct timer_list add_timer;
37 struct mptcp_sock *sock;
38 u8 retrans_times;
0abd40f8
GT
39};
40
01cacb00
PA
41struct pm_nl_pernet {
42 /* protects pernet updates */
43 spinlock_t lock;
44 struct list_head local_addr_list;
45 unsigned int addrs;
ff5a0b42 46 unsigned int stale_loss_cnt;
01cacb00
PA
47 unsigned int add_addr_signal_max;
48 unsigned int add_addr_accept_max;
49 unsigned int local_addr_max;
50 unsigned int subflows_max;
51 unsigned int next_id;
86e39e04 52 DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
01cacb00
PA
53};
54
55#define MPTCP_PM_ADDR_MAX 8
00cfd77b 56#define ADD_ADDR_RETRANS_MAX 3
01cacb00 57
c682bf53
GT
58static struct pm_nl_pernet *pm_nl_get_pernet(const struct net *net)
59{
60 return net_generic(net, pm_nl_pernet_id);
61}
62
63static struct pm_nl_pernet *
64pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk)
65{
66 return pm_nl_get_pernet(sock_net((struct sock *)msk));
67}
68
01cacb00 69static bool addresses_equal(const struct mptcp_addr_info *a,
86e39e04 70 const struct mptcp_addr_info *b, bool use_port)
01cacb00
PA
71{
72 bool addr_equals = false;
73
7b9b0f7e
MB
74 if (a->family == b->family) {
75 if (a->family == AF_INET)
76 addr_equals = a->addr.s_addr == b->addr.s_addr;
01cacb00 77#if IS_ENABLED(CONFIG_MPTCP_IPV6)
7b9b0f7e
MB
78 else
79 addr_equals = !ipv6_addr_cmp(&a->addr6, &b->addr6);
80 } else if (a->family == AF_INET) {
81 if (ipv6_addr_v4mapped(&b->addr6))
82 addr_equals = a->addr.s_addr == b->addr6.s6_addr32[3];
83 } else if (b->family == AF_INET) {
84 if (ipv6_addr_v4mapped(&a->addr6))
85 addr_equals = a->addr6.s6_addr32[3] == b->addr.s_addr;
01cacb00 86#endif
7b9b0f7e 87 }
01cacb00
PA
88
89 if (!addr_equals)
90 return false;
91 if (!use_port)
92 return true;
93
94 return a->port == b->port;
95}
96
97static void local_address(const struct sock_common *skc,
98 struct mptcp_addr_info *addr)
99{
01cacb00 100 addr->family = skc->skc_family;
5bc56388 101 addr->port = htons(skc->skc_num);
01cacb00
PA
102 if (addr->family == AF_INET)
103 addr->addr.s_addr = skc->skc_rcv_saddr;
104#if IS_ENABLED(CONFIG_MPTCP_IPV6)
105 else if (addr->family == AF_INET6)
106 addr->addr6 = skc->skc_v6_rcv_saddr;
107#endif
108}
109
110static void remote_address(const struct sock_common *skc,
111 struct mptcp_addr_info *addr)
112{
113 addr->family = skc->skc_family;
114 addr->port = skc->skc_dport;
115 if (addr->family == AF_INET)
116 addr->addr.s_addr = skc->skc_daddr;
117#if IS_ENABLED(CONFIG_MPTCP_IPV6)
118 else if (addr->family == AF_INET6)
119 addr->addr6 = skc->skc_v6_daddr;
120#endif
121}
122
123static bool lookup_subflow_by_saddr(const struct list_head *list,
90d93088 124 const struct mptcp_addr_info *saddr)
01cacb00
PA
125{
126 struct mptcp_subflow_context *subflow;
127 struct mptcp_addr_info cur;
128 struct sock_common *skc;
129
130 list_for_each_entry(subflow, list, node) {
131 skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
132
133 local_address(skc, &cur);
60b57bf7 134 if (addresses_equal(&cur, saddr, saddr->port))
01cacb00
PA
135 return true;
136 }
137
138 return false;
139}
140
d84ad049 141static bool lookup_subflow_by_daddr(const struct list_head *list,
90d93088 142 const struct mptcp_addr_info *daddr)
d84ad049
GT
143{
144 struct mptcp_subflow_context *subflow;
145 struct mptcp_addr_info cur;
146 struct sock_common *skc;
147
148 list_for_each_entry(subflow, list, node) {
149 skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
150
151 remote_address(skc, &cur);
152 if (addresses_equal(&cur, daddr, daddr->port))
153 return true;
154 }
155
156 return false;
157}
158
01cacb00
PA
159static struct mptcp_pm_addr_entry *
160select_local_address(const struct pm_nl_pernet *pernet,
90d93088 161 const struct mptcp_sock *msk)
01cacb00 162{
90d93088 163 const struct sock *sk = (const struct sock *)msk;
01cacb00
PA
164 struct mptcp_pm_addr_entry *entry, *ret = NULL;
165
3abc05d9
FW
166 msk_owned_by_me(msk);
167
01cacb00 168 rcu_read_lock();
01cacb00 169 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
daa83ab0 170 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW))
01cacb00
PA
171 continue;
172
86e39e04
PA
173 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
174 continue;
175
7b9b0f7e
MB
176 if (entry->addr.family != sk->sk_family) {
177#if IS_ENABLED(CONFIG_MPTCP_IPV6)
178 if ((entry->addr.family == AF_INET &&
179 !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) ||
180 (sk->sk_family == AF_INET &&
181 !ipv6_addr_v4mapped(&entry->addr.addr6)))
182#endif
183 continue;
184 }
185
86e39e04
PA
186 ret = entry;
187 break;
01cacb00 188 }
01cacb00
PA
189 rcu_read_unlock();
190 return ret;
191}
192
193static struct mptcp_pm_addr_entry *
90d93088 194select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk)
01cacb00
PA
195{
196 struct mptcp_pm_addr_entry *entry, *ret = NULL;
01cacb00
PA
197
198 rcu_read_lock();
199 /* do not keep any additional per socket state, just signal
200 * the address list in order.
201 * Note: removal from the local address list during the msk life-cycle
202 * can lead to additional addresses not being announced.
203 */
204 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
86e39e04
PA
205 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
206 continue;
207
daa83ab0 208 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
01cacb00 209 continue;
86e39e04
PA
210
211 ret = entry;
212 break;
01cacb00
PA
213 }
214 rcu_read_unlock();
215 return ret;
216}
217
90d93088 218unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
a914e586 219{
c682bf53 220 const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
a914e586 221
a914e586
GT
222 return READ_ONCE(pernet->add_addr_signal_max);
223}
224EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_signal_max);
225
90d93088 226unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk)
a914e586 227{
c682bf53 228 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
a914e586 229
a914e586
GT
230 return READ_ONCE(pernet->add_addr_accept_max);
231}
232EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_accept_max);
233
90d93088 234unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk)
a914e586 235{
c682bf53 236 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
a914e586 237
a914e586
GT
238 return READ_ONCE(pernet->subflows_max);
239}
240EXPORT_SYMBOL_GPL(mptcp_pm_get_subflows_max);
241
90d93088 242unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk)
a914e586 243{
c682bf53 244 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
a914e586 245
a914e586
GT
246 return READ_ONCE(pernet->local_addr_max);
247}
0caf3ada 248EXPORT_SYMBOL_GPL(mptcp_pm_get_local_addr_max);
a914e586 249
a88c9e49 250bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk)
01cacb00 251{
c682bf53 252 struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
86e39e04
PA
253
254 if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) ||
255 (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap,
a88c9e49 256 MPTCP_PM_MAX_ADDR_ID + 1, 0) == MPTCP_PM_MAX_ADDR_ID + 1)) {
01cacb00 257 WRITE_ONCE(msk->pm.work_pending, false);
a88c9e49
PA
258 return false;
259 }
260 return true;
01cacb00
PA
261}
262
d88c476f 263struct mptcp_pm_add_entry *
90d93088
PA
264mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
265 const struct mptcp_addr_info *addr)
b6c08380 266{
0abd40f8 267 struct mptcp_pm_add_entry *entry;
b6c08380 268
3abc05d9
FW
269 lockdep_assert_held(&msk->pm.lock);
270
b6c08380 271 list_for_each_entry(entry, &msk->pm.anno_list, list) {
60b57bf7 272 if (addresses_equal(&entry->addr, addr, true))
00cfd77b 273 return entry;
b6c08380
GT
274 }
275
00cfd77b
GT
276 return NULL;
277}
278
5bc56388
GT
279bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk)
280{
281 struct mptcp_pm_add_entry *entry;
282 struct mptcp_addr_info saddr;
283 bool ret = false;
284
285 local_address((struct sock_common *)sk, &saddr);
286
287 spin_lock_bh(&msk->pm.lock);
288 list_for_each_entry(entry, &msk->pm.anno_list, list) {
289 if (addresses_equal(&entry->addr, &saddr, true)) {
290 ret = true;
291 goto out;
292 }
293 }
294
295out:
296 spin_unlock_bh(&msk->pm.lock);
297 return ret;
298}
299
00cfd77b
GT
300static void mptcp_pm_add_timer(struct timer_list *timer)
301{
302 struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer);
303 struct mptcp_sock *msk = entry->sock;
304 struct sock *sk = (struct sock *)msk;
305
306 pr_debug("msk=%p", msk);
307
308 if (!msk)
309 return;
310
311 if (inet_sk_state_load(sk) == TCP_CLOSE)
312 return;
313
314 if (!entry->addr.id)
315 return;
316
18fc1a92 317 if (mptcp_pm_should_add_signal_addr(msk)) {
00cfd77b
GT
318 sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX / 8);
319 goto out;
320 }
321
322 spin_lock_bh(&msk->pm.lock);
323
18fc1a92 324 if (!mptcp_pm_should_add_signal_addr(msk)) {
00cfd77b 325 pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
f7efc777 326 mptcp_pm_announce_addr(msk, &entry->addr, false);
84dfe367 327 mptcp_pm_add_addr_send_ack(msk);
00cfd77b
GT
328 entry->retrans_times++;
329 }
330
331 if (entry->retrans_times < ADD_ADDR_RETRANS_MAX)
93f323b9 332 sk_reset_timer(sk, timer,
724d06b4 333 jiffies + mptcp_get_add_addr_timeout(sock_net(sk)));
00cfd77b
GT
334
335 spin_unlock_bh(&msk->pm.lock);
336
348d5c1d
GT
337 if (entry->retrans_times == ADD_ADDR_RETRANS_MAX)
338 mptcp_pm_subflow_established(msk);
339
00cfd77b
GT
340out:
341 __sock_put(sk);
342}
343
344struct mptcp_pm_add_entry *
345mptcp_pm_del_add_timer(struct mptcp_sock *msk,
90d93088 346 const struct mptcp_addr_info *addr, bool check_id)
00cfd77b
GT
347{
348 struct mptcp_pm_add_entry *entry;
349 struct sock *sk = (struct sock *)msk;
350
351 spin_lock_bh(&msk->pm.lock);
d88c476f 352 entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
d58300c3 353 if (entry && (!check_id || entry->addr.id == addr->id))
00cfd77b
GT
354 entry->retrans_times = ADD_ADDR_RETRANS_MAX;
355 spin_unlock_bh(&msk->pm.lock);
356
d58300c3 357 if (entry && (!check_id || entry->addr.id == addr->id))
00cfd77b
GT
358 sk_stop_timer_sync(sk, &entry->add_timer);
359
360 return entry;
b6c08380
GT
361}
362
363static bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
90d93088 364 const struct mptcp_pm_addr_entry *entry)
b6c08380 365{
0abd40f8 366 struct mptcp_pm_add_entry *add_entry = NULL;
00cfd77b 367 struct sock *sk = (struct sock *)msk;
93f323b9 368 struct net *net = sock_net(sk);
b6c08380 369
3abc05d9
FW
370 lockdep_assert_held(&msk->pm.lock);
371
d88c476f 372 if (mptcp_lookup_anno_list_by_saddr(msk, &entry->addr))
b6c08380
GT
373 return false;
374
0abd40f8
GT
375 add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC);
376 if (!add_entry)
b6c08380
GT
377 return false;
378
0abd40f8 379 list_add(&add_entry->list, &msk->pm.anno_list);
b6c08380 380
00cfd77b
GT
381 add_entry->addr = entry->addr;
382 add_entry->sock = msk;
383 add_entry->retrans_times = 0;
384
385 timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0);
93f323b9
GT
386 sk_reset_timer(sk, &add_entry->add_timer,
387 jiffies + mptcp_get_add_addr_timeout(net));
00cfd77b 388
b6c08380
GT
389 return true;
390}
391
392void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
393{
0abd40f8 394 struct mptcp_pm_add_entry *entry, *tmp;
00cfd77b
GT
395 struct sock *sk = (struct sock *)msk;
396 LIST_HEAD(free_list);
b6c08380
GT
397
398 pr_debug("msk=%p", msk);
399
400 spin_lock_bh(&msk->pm.lock);
00cfd77b
GT
401 list_splice_init(&msk->pm.anno_list, &free_list);
402 spin_unlock_bh(&msk->pm.lock);
403
404 list_for_each_entry_safe(entry, tmp, &free_list, list) {
405 sk_stop_timer_sync(sk, &entry->add_timer);
b6c08380
GT
406 kfree(entry);
407 }
b6c08380
GT
408}
409
90d93088
PA
410static bool lookup_address_in_vec(const struct mptcp_addr_info *addrs, unsigned int nr,
411 const struct mptcp_addr_info *addr)
2843ff6f
GT
412{
413 int i;
414
415 for (i = 0; i < nr; i++) {
416 if (addresses_equal(&addrs[i], addr, addr->port))
417 return true;
418 }
419
420 return false;
421}
422
423/* Fill all the remote addresses into the array addrs[],
424 * and return the array size.
425 */
426static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, bool fullmesh,
427 struct mptcp_addr_info *addrs)
428{
a88c9e49 429 bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0);
2843ff6f
GT
430 struct sock *sk = (struct sock *)msk, *ssk;
431 struct mptcp_subflow_context *subflow;
432 struct mptcp_addr_info remote = { 0 };
433 unsigned int subflows_max;
434 int i = 0;
435
436 subflows_max = mptcp_pm_get_subflows_max(msk);
a88c9e49 437 remote_address((struct sock_common *)sk, &remote);
2843ff6f
GT
438
439 /* Non-fullmesh endpoint, fill in the single entry
440 * corresponding to the primary MPC subflow remote address
441 */
442 if (!fullmesh) {
a88c9e49
PA
443 if (deny_id0)
444 return 0;
445
2843ff6f
GT
446 msk->pm.subflows++;
447 addrs[i++] = remote;
448 } else {
449 mptcp_for_each_subflow(msk, subflow) {
450 ssk = mptcp_subflow_tcp_sock(subflow);
a88c9e49
PA
451 remote_address((struct sock_common *)ssk, &addrs[i]);
452 if (deny_id0 && addresses_equal(&addrs[i], &remote, false))
453 continue;
454
455 if (!lookup_address_in_vec(addrs, i, &addrs[i]) &&
2843ff6f
GT
456 msk->pm.subflows < subflows_max) {
457 msk->pm.subflows++;
a88c9e49 458 i++;
2843ff6f
GT
459 }
460 }
461 }
462
463 return i;
464}
465
86e39e04
PA
466static struct mptcp_pm_addr_entry *
467__lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
468{
469 struct mptcp_pm_addr_entry *entry;
470
471 list_for_each_entry(entry, &pernet->local_addr_list, list) {
472 if (entry->addr.id == id)
473 return entry;
474 }
475 return NULL;
476}
477
8e9eacad
PA
478static struct mptcp_pm_addr_entry *
479__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info,
480 bool lookup_by_id)
481{
482 struct mptcp_pm_addr_entry *entry;
483
484 list_for_each_entry(entry, &pernet->local_addr_list, list) {
485 if ((!lookup_by_id && addresses_equal(&entry->addr, info, true)) ||
486 (lookup_by_id && entry->addr.id == info->id))
487 return entry;
488 }
489 return NULL;
490}
491
86e39e04 492static int
90d93088 493lookup_id_by_addr(const struct pm_nl_pernet *pernet, const struct mptcp_addr_info *addr)
86e39e04 494{
90d93088 495 const struct mptcp_pm_addr_entry *entry;
86e39e04
PA
496 int ret = -1;
497
498 rcu_read_lock();
499 list_for_each_entry(entry, &pernet->local_addr_list, list) {
500 if (addresses_equal(&entry->addr, addr, entry->addr.port)) {
501 ret = entry->addr.id;
502 break;
503 }
504 }
505 rcu_read_unlock();
506 return ret;
507}
508
01cacb00
PA
509static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
510{
511 struct sock *sk = (struct sock *)msk;
512 struct mptcp_pm_addr_entry *local;
a914e586
GT
513 unsigned int add_addr_signal_max;
514 unsigned int local_addr_max;
01cacb00 515 struct pm_nl_pernet *pernet;
a914e586 516 unsigned int subflows_max;
01cacb00 517
c682bf53 518 pernet = pm_nl_get_pernet(sock_net(sk));
01cacb00 519
a914e586
GT
520 add_addr_signal_max = mptcp_pm_get_add_addr_signal_max(msk);
521 local_addr_max = mptcp_pm_get_local_addr_max(msk);
522 subflows_max = mptcp_pm_get_subflows_max(msk);
523
86e39e04
PA
524 /* do lazy endpoint usage accounting for the MPC subflows */
525 if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) {
a88c9e49 526 struct mptcp_addr_info mpc_addr;
86e39e04
PA
527 int mpc_id;
528
a88c9e49
PA
529 local_address((struct sock_common *)msk->first, &mpc_addr);
530 mpc_id = lookup_id_by_addr(pernet, &mpc_addr);
531 if (mpc_id >= 0)
86e39e04
PA
532 __clear_bit(mpc_id, msk->pm.id_avail_bitmap);
533
534 msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED);
535 }
536
01cacb00 537 pr_debug("local %d:%d signal %d:%d subflows %d:%d\n",
a914e586
GT
538 msk->pm.local_addr_used, local_addr_max,
539 msk->pm.add_addr_signaled, add_addr_signal_max,
540 msk->pm.subflows, subflows_max);
01cacb00
PA
541
542 /* check first for announce */
a914e586 543 if (msk->pm.add_addr_signaled < add_addr_signal_max) {
86e39e04 544 local = select_signal_address(pernet, msk);
01cacb00 545
98247bc1
PA
546 /* due to racing events on both ends we can reach here while
547 * previous add address is still running: if we invoke now
548 * mptcp_pm_announce_addr(), that will fail and the
549 * corresponding id will be marked as used.
550 * Instead let the PM machinery reschedule us when the
551 * current address announce will be completed.
552 */
553 if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL))
554 return;
555
01cacb00 556 if (local) {
b6c08380 557 if (mptcp_pm_alloc_anno_list(msk, local)) {
86e39e04 558 __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
b6c08380 559 msk->pm.add_addr_signaled++;
f7efc777 560 mptcp_pm_announce_addr(msk, &local->addr, false);
b46a0238 561 mptcp_pm_nl_addr_send_ack(msk);
b6c08380 562 }
01cacb00 563 }
01cacb00
PA
564 }
565
566 /* check if should create a new subflow */
a88c9e49
PA
567 while (msk->pm.local_addr_used < local_addr_max &&
568 msk->pm.subflows < subflows_max) {
569 struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
570 bool fullmesh;
571 int i, nr;
572
01cacb00 573 local = select_local_address(pernet, msk);
a88c9e49
PA
574 if (!local)
575 break;
1f2f1931 576
a88c9e49
PA
577 fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
578
579 msk->pm.local_addr_used++;
580 nr = fill_remote_addresses_vec(msk, fullmesh, addrs);
581 if (nr)
582 __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
583 spin_unlock_bh(&msk->pm.lock);
584 for (i = 0; i < nr; i++)
585 __mptcp_subflow_connect(sk, &local->addr, &addrs[i]);
586 spin_lock_bh(&msk->pm.lock);
01cacb00 587 }
a88c9e49 588 mptcp_pm_nl_check_work_pending(msk);
01cacb00
PA
589}
590
e9801430 591static void mptcp_pm_nl_fully_established(struct mptcp_sock *msk)
01cacb00
PA
592{
593 mptcp_pm_create_subflow_or_signal_addr(msk);
594}
595
e9801430 596static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
01cacb00
PA
597{
598 mptcp_pm_create_subflow_or_signal_addr(msk);
599}
600
1a0d6136
GT
601/* Fill all the local addresses into the array addrs[],
602 * and return the array size.
603 */
604static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
605 struct mptcp_addr_info *addrs)
606{
607 struct sock *sk = (struct sock *)msk;
608 struct mptcp_pm_addr_entry *entry;
609 struct mptcp_addr_info local;
610 struct pm_nl_pernet *pernet;
611 unsigned int subflows_max;
612 int i = 0;
613
c682bf53 614 pernet = pm_nl_get_pernet_from_msk(msk);
1a0d6136
GT
615 subflows_max = mptcp_pm_get_subflows_max(msk);
616
617 rcu_read_lock();
1a0d6136
GT
618 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
619 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
620 continue;
621
622 if (entry->addr.family != sk->sk_family) {
623#if IS_ENABLED(CONFIG_MPTCP_IPV6)
624 if ((entry->addr.family == AF_INET &&
625 !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) ||
626 (sk->sk_family == AF_INET &&
627 !ipv6_addr_v4mapped(&entry->addr.addr6)))
628#endif
629 continue;
630 }
631
632 if (msk->pm.subflows < subflows_max) {
633 msk->pm.subflows++;
634 addrs[i++] = entry->addr;
635 }
636 }
637 rcu_read_unlock();
638
639 /* If the array is empty, fill in the single
640 * 'IPADDRANY' local address
641 */
642 if (!i) {
643 memset(&local, 0, sizeof(local));
644 local.family = msk->pm.remote.family;
645
646 msk->pm.subflows++;
647 addrs[i++] = local;
648 }
649
650 return i;
651}
652
e9801430 653static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
01cacb00 654{
1a0d6136 655 struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
01cacb00 656 struct sock *sk = (struct sock *)msk;
a914e586 657 unsigned int add_addr_accept_max;
01cacb00 658 struct mptcp_addr_info remote;
a914e586 659 unsigned int subflows_max;
1a0d6136 660 int i, nr;
01cacb00 661
a914e586
GT
662 add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
663 subflows_max = mptcp_pm_get_subflows_max(msk);
664
01cacb00 665 pr_debug("accepted %d:%d remote family %d",
a914e586 666 msk->pm.add_addr_accepted, add_addr_accept_max,
01cacb00 667 msk->pm.remote.family);
d84ad049 668
837cf45d 669 remote = msk->pm.remote;
12a18341
YL
670 mptcp_pm_announce_addr(msk, &remote, true);
671 mptcp_pm_nl_addr_send_ack(msk);
672
837cf45d 673 if (lookup_subflow_by_daddr(&msk->conn_list, &remote))
12a18341 674 return;
d84ad049 675
837cf45d 676 /* pick id 0 port, if none is provided the remote address */
12a18341 677 if (!remote.port)
837cf45d 678 remote.port = sk->sk_dport;
837cf45d 679
01cacb00
PA
680 /* connect to the specified remote address, using whatever
681 * local address the routing configuration will pick.
682 */
1a0d6136
GT
683 nr = fill_local_addresses_vec(msk, addrs);
684
685 msk->pm.add_addr_accepted++;
686 if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
687 msk->pm.subflows >= subflows_max)
688 WRITE_ONCE(msk->pm.accept_addr, false);
01cacb00
PA
689
690 spin_unlock_bh(&msk->pm.lock);
1a0d6136
GT
691 for (i = 0; i < nr; i++)
692 __mptcp_subflow_connect(sk, &addrs[i], &remote);
01cacb00 693 spin_lock_bh(&msk->pm.lock);
84dfe367
GT
694}
695
b46a0238 696void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
84dfe367
GT
697{
698 struct mptcp_subflow_context *subflow;
699
3abc05d9
FW
700 msk_owned_by_me(msk);
701 lockdep_assert_held(&msk->pm.lock);
702
8dd5efb1
GT
703 if (!mptcp_pm_should_add_signal(msk) &&
704 !mptcp_pm_should_rm_signal(msk))
84dfe367
GT
705 return;
706
84dfe367
GT
707 subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
708 if (subflow) {
709 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
84dfe367
GT
710
711 spin_unlock_bh(&msk->pm.lock);
c233ef13
YL
712 pr_debug("send ack for %s",
713 mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr");
fbe0f87a 714
340fa666 715 mptcp_subflow_send_ack(ssk);
84dfe367 716 spin_lock_bh(&msk->pm.lock);
84dfe367 717 }
01cacb00
PA
718}
719
3828c514
MM
720static int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
721 struct mptcp_addr_info *addr,
722 u8 bkup)
06706542
GT
723{
724 struct mptcp_subflow_context *subflow;
725
726 pr_debug("bkup=%d", bkup);
727
728 mptcp_for_each_subflow(msk, subflow) {
729 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
0be2ac28 730 struct sock *sk = (struct sock *)msk;
06706542
GT
731 struct mptcp_addr_info local;
732
733 local_address((struct sock_common *)ssk, &local);
734 if (!addresses_equal(&local, addr, addr->port))
735 continue;
736
0e203c32
PA
737 if (subflow->backup != bkup)
738 msk->last_snd = NULL;
06706542
GT
739 subflow->backup = bkup;
740 subflow->send_mp_prio = 1;
741 subflow->request_bkup = bkup;
0be2ac28 742 __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPPRIOTX);
06706542
GT
743
744 spin_unlock_bh(&msk->pm.lock);
745 pr_debug("send ack for mp_prio");
340fa666 746 mptcp_subflow_send_ack(ssk);
06706542
GT
747 spin_lock_bh(&msk->pm.lock);
748
749 return 0;
750 }
751
752 return -EINVAL;
753}
754
9f12e97b
GT
755static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
756 const struct mptcp_rm_list *rm_list,
757 enum linux_mptcp_mib_field rm_type)
d0876b22
GT
758{
759 struct mptcp_subflow_context *subflow, *tmp;
760 struct sock *sk = (struct sock *)msk;
d0b698ca 761 u8 i;
d0876b22 762
9f12e97b
GT
763 pr_debug("%s rm_list_nr %d",
764 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr);
d0876b22 765
3abc05d9
FW
766 msk_owned_by_me(msk);
767
b0cdc5db
PA
768 if (sk->sk_state == TCP_LISTEN)
769 return;
770
9f12e97b 771 if (!rm_list->nr)
d0876b22
GT
772 return;
773
774 if (list_empty(&msk->conn_list))
775 return;
776
9f12e97b 777 for (i = 0; i < rm_list->nr; i++) {
f7d6a237
PA
778 bool removed = false;
779
d0b698ca
GT
780 list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
781 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
782 int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
9f12e97b
GT
783 u8 id = subflow->local_id;
784
785 if (rm_type == MPTCP_MIB_RMADDR)
786 id = subflow->remote_id;
d0876b22 787
9f12e97b 788 if (rm_list->ids[i] != id)
d0b698ca 789 continue;
d0876b22 790
9f12e97b
GT
791 pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u",
792 rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
793 i, rm_list->ids[i], subflow->local_id, subflow->remote_id);
d0b698ca
GT
794 spin_unlock_bh(&msk->pm.lock);
795 mptcp_subflow_shutdown(sk, ssk, how);
a88c9e49
PA
796
797 /* the following takes care of updating the subflows counter */
d0b698ca
GT
798 mptcp_close_ssk(sk, ssk, subflow);
799 spin_lock_bh(&msk->pm.lock);
d0876b22 800
f7d6a237 801 removed = true;
9f12e97b 802 __MPTCP_INC_STATS(sock_net(sk), rm_type);
d0b698ca 803 }
a4c0214f 804 __set_bit(rm_list->ids[i], msk->pm.id_avail_bitmap);
f7d6a237
PA
805 if (!removed)
806 continue;
807
808 if (rm_type == MPTCP_MIB_RMADDR) {
809 msk->pm.add_addr_accepted--;
810 WRITE_ONCE(msk->pm.accept_addr, true);
811 } else if (rm_type == MPTCP_MIB_RMSUBFLOW) {
812 msk->pm.local_addr_used--;
813 }
d0876b22
GT
814 }
815}
816
9f12e97b
GT
817static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
818{
819 mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR);
820}
821
822void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
823 const struct mptcp_rm_list *rm_list)
824{
825 mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW);
826}
827
e9801430
FW
828void mptcp_pm_nl_work(struct mptcp_sock *msk)
829{
830 struct mptcp_pm_data *pm = &msk->pm;
831
832 msk_owned_by_me(msk);
833
86e39e04
PA
834 if (!(pm->status & MPTCP_PM_WORK_MASK))
835 return;
836
e9801430
FW
837 spin_lock_bh(&msk->pm.lock);
838
839 pr_debug("msk=%p status=%x", msk, pm->status);
840 if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
841 pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
842 mptcp_pm_nl_add_addr_received(msk);
843 }
844 if (pm->status & BIT(MPTCP_PM_ADD_ADDR_SEND_ACK)) {
845 pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_SEND_ACK);
b46a0238 846 mptcp_pm_nl_addr_send_ack(msk);
e9801430
FW
847 }
848 if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) {
849 pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED);
850 mptcp_pm_nl_rm_addr_received(msk);
851 }
852 if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
853 pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
854 mptcp_pm_nl_fully_established(msk);
855 }
856 if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
857 pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
858 mptcp_pm_nl_subflow_established(msk);
859 }
860
861 spin_unlock_bh(&msk->pm.lock);
862}
863
01cacb00
PA
864static bool address_use_port(struct mptcp_pm_addr_entry *entry)
865{
daa83ab0 866 return (entry->flags &
01cacb00
PA
867 (MPTCP_PM_ADDR_FLAG_SIGNAL | MPTCP_PM_ADDR_FLAG_SUBFLOW)) ==
868 MPTCP_PM_ADDR_FLAG_SIGNAL;
869}
870
d045b9eb
PA
871/* caller must ensure the RCU grace period is already elapsed */
872static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
873{
874 if (entry->lsk)
875 sock_release(entry->lsk);
876 kfree(entry);
877}
878
01cacb00
PA
879static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
880 struct mptcp_pm_addr_entry *entry)
881{
d045b9eb 882 struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
72603d20 883 unsigned int addr_max;
01cacb00
PA
884 int ret = -EINVAL;
885
886 spin_lock_bh(&pernet->lock);
887 /* to keep the code simple, don't do IDR-like allocation for address ID,
888 * just bail when we exceed limits
889 */
86e39e04 890 if (pernet->next_id == MPTCP_PM_MAX_ADDR_ID)
efd5a4c0 891 pernet->next_id = 1;
01cacb00
PA
892 if (pernet->addrs >= MPTCP_PM_ADDR_MAX)
893 goto out;
efd5a4c0
GT
894 if (test_bit(entry->addr.id, pernet->id_bitmap))
895 goto out;
01cacb00
PA
896
897 /* do not insert duplicate address, differentiate on port only
898 * singled addresses
899 */
900 list_for_each_entry(cur, &pernet->local_addr_list, list) {
901 if (addresses_equal(&cur->addr, &entry->addr,
902 address_use_port(entry) &&
d045b9eb
PA
903 address_use_port(cur))) {
904 /* allow replacing the exiting endpoint only if such
905 * endpoint is an implicit one and the user-space
906 * did not provide an endpoint id
907 */
908 if (!(cur->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT))
909 goto out;
910 if (entry->addr.id)
911 goto out;
912
913 pernet->addrs--;
914 entry->addr.id = cur->addr.id;
915 list_del_rcu(&cur->list);
916 del_entry = cur;
917 break;
918 }
01cacb00
PA
919 }
920
efd5a4c0
GT
921 if (!entry->addr.id) {
922find_next:
923 entry->addr.id = find_next_zero_bit(pernet->id_bitmap,
86e39e04 924 MPTCP_PM_MAX_ADDR_ID + 1,
efd5a4c0 925 pernet->next_id);
59060a47 926 if (!entry->addr.id && pernet->next_id != 1) {
efd5a4c0
GT
927 pernet->next_id = 1;
928 goto find_next;
929 }
930 }
931
59060a47 932 if (!entry->addr.id)
efd5a4c0
GT
933 goto out;
934
935 __set_bit(entry->addr.id, pernet->id_bitmap);
936 if (entry->addr.id > pernet->next_id)
937 pernet->next_id = entry->addr.id;
938
daa83ab0 939 if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
72603d20
GT
940 addr_max = pernet->add_addr_signal_max;
941 WRITE_ONCE(pernet->add_addr_signal_max, addr_max + 1);
942 }
daa83ab0 943 if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
72603d20
GT
944 addr_max = pernet->local_addr_max;
945 WRITE_ONCE(pernet->local_addr_max, addr_max + 1);
946 }
01cacb00 947
01cacb00
PA
948 pernet->addrs++;
949 list_add_tail_rcu(&entry->list, &pernet->local_addr_list);
950 ret = entry->addr.id;
951
952out:
953 spin_unlock_bh(&pernet->lock);
d045b9eb
PA
954
955 /* just replaced an existing entry, free it */
956 if (del_entry) {
957 synchronize_rcu();
958 __mptcp_pm_release_addr_entry(del_entry);
959 }
01cacb00
PA
960 return ret;
961}
962
1729cf18
GT
963static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
964 struct mptcp_pm_addr_entry *entry)
965{
029744cd 966 int addrlen = sizeof(struct sockaddr_in);
1729cf18
GT
967 struct sockaddr_storage addr;
968 struct mptcp_sock *msk;
969 struct socket *ssock;
970 int backlog = 1024;
971 int err;
972
973 err = sock_create_kern(sock_net(sk), entry->addr.family,
974 SOCK_STREAM, IPPROTO_MPTCP, &entry->lsk);
975 if (err)
976 return err;
977
978 msk = mptcp_sk(entry->lsk->sk);
979 if (!msk) {
980 err = -EINVAL;
981 goto out;
982 }
983
984 ssock = __mptcp_nmpc_socket(msk);
985 if (!ssock) {
986 err = -EINVAL;
987 goto out;
988 }
989
990 mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family);
029744cd
KM
991#if IS_ENABLED(CONFIG_MPTCP_IPV6)
992 if (entry->addr.family == AF_INET6)
993 addrlen = sizeof(struct sockaddr_in6);
994#endif
995 err = kernel_bind(ssock, (struct sockaddr *)&addr, addrlen);
1729cf18
GT
996 if (err) {
997 pr_warn("kernel_bind error, err=%d", err);
998 goto out;
999 }
1000
1001 err = kernel_listen(ssock, backlog);
1002 if (err) {
1003 pr_warn("kernel_listen error, err=%d", err);
1004 goto out;
1005 }
1006
1007 return 0;
1008
1009out:
1010 sock_release(entry->lsk);
1011 return err;
1012}
1013
01cacb00
PA
1014int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
1015{
1016 struct mptcp_pm_addr_entry *entry;
1017 struct mptcp_addr_info skc_local;
1018 struct mptcp_addr_info msk_local;
1019 struct pm_nl_pernet *pernet;
1020 int ret = -1;
1021
1022 if (WARN_ON_ONCE(!msk))
1023 return -1;
1024
1025 /* The 0 ID mapping is defined by the first subflow, copied into the msk
1026 * addr
1027 */
1028 local_address((struct sock_common *)msk, &msk_local);
57025817 1029 local_address((struct sock_common *)skc, &skc_local);
01cacb00
PA
1030 if (addresses_equal(&msk_local, &skc_local, false))
1031 return 0;
1032
c682bf53 1033 pernet = pm_nl_get_pernet_from_msk(msk);
01cacb00
PA
1034
1035 rcu_read_lock();
1036 list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
60b57bf7 1037 if (addresses_equal(&entry->addr, &skc_local, entry->addr.port)) {
01cacb00
PA
1038 ret = entry->addr.id;
1039 break;
1040 }
1041 }
1042 rcu_read_unlock();
1043 if (ret >= 0)
1044 return ret;
1045
1046 /* address not found, add to local list */
f612eb76 1047 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
01cacb00
PA
1048 if (!entry)
1049 return -ENOMEM;
1050
01cacb00 1051 entry->addr = skc_local;
efd5a4c0 1052 entry->addr.id = 0;
1729cf18 1053 entry->addr.port = 0;
daa83ab0 1054 entry->ifindex = 0;
d045b9eb 1055 entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
1729cf18 1056 entry->lsk = NULL;
01cacb00
PA
1057 ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
1058 if (ret < 0)
1059 kfree(entry);
1060
1061 return ret;
1062}
1063
1064void mptcp_pm_nl_data_init(struct mptcp_sock *msk)
1065{
1066 struct mptcp_pm_data *pm = &msk->pm;
01cacb00
PA
1067 bool subflows;
1068
a914e586
GT
1069 subflows = !!mptcp_pm_get_subflows_max(msk);
1070 WRITE_ONCE(pm->work_pending, (!!mptcp_pm_get_local_addr_max(msk) && subflows) ||
1071 !!mptcp_pm_get_add_addr_signal_max(msk));
1072 WRITE_ONCE(pm->accept_addr, !!mptcp_pm_get_add_addr_accept_max(msk) && subflows);
01cacb00
PA
1073 WRITE_ONCE(pm->accept_subflow, subflows);
1074}
1075
b911c97c
FW
1076#define MPTCP_PM_CMD_GRP_OFFSET 0
1077#define MPTCP_PM_EV_GRP_OFFSET 1
01cacb00
PA
1078
1079static const struct genl_multicast_group mptcp_pm_mcgrps[] = {
1080 [MPTCP_PM_CMD_GRP_OFFSET] = { .name = MPTCP_PM_CMD_GRP_NAME, },
b911c97c
FW
1081 [MPTCP_PM_EV_GRP_OFFSET] = { .name = MPTCP_PM_EV_GRP_NAME,
1082 .flags = GENL_UNS_ADMIN_PERM,
1083 },
01cacb00
PA
1084};
1085
1086static const struct nla_policy
1087mptcp_pm_addr_policy[MPTCP_PM_ADDR_ATTR_MAX + 1] = {
1088 [MPTCP_PM_ADDR_ATTR_FAMILY] = { .type = NLA_U16, },
1089 [MPTCP_PM_ADDR_ATTR_ID] = { .type = NLA_U8, },
1090 [MPTCP_PM_ADDR_ATTR_ADDR4] = { .type = NLA_U32, },
8140860c
JB
1091 [MPTCP_PM_ADDR_ATTR_ADDR6] =
1092 NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
01cacb00
PA
1093 [MPTCP_PM_ADDR_ATTR_PORT] = { .type = NLA_U16 },
1094 [MPTCP_PM_ADDR_ATTR_FLAGS] = { .type = NLA_U32 },
1095 [MPTCP_PM_ADDR_ATTR_IF_IDX] = { .type = NLA_S32 },
1096};
1097
1098static const struct nla_policy mptcp_pm_policy[MPTCP_PM_ATTR_MAX + 1] = {
1099 [MPTCP_PM_ATTR_ADDR] =
1100 NLA_POLICY_NESTED(mptcp_pm_addr_policy),
1101 [MPTCP_PM_ATTR_RCV_ADD_ADDRS] = { .type = NLA_U32, },
1102 [MPTCP_PM_ATTR_SUBFLOWS] = { .type = NLA_U32, },
1103};
1104
ff5a0b42
PA
1105void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
1106{
1107 struct mptcp_subflow_context *iter, *subflow = mptcp_subflow_ctx(ssk);
1108 struct sock *sk = (struct sock *)msk;
1109 unsigned int active_max_loss_cnt;
1110 struct net *net = sock_net(sk);
1111 unsigned int stale_loss_cnt;
1112 bool slow;
1113
1114 stale_loss_cnt = mptcp_stale_loss_cnt(net);
1115 if (subflow->stale || !stale_loss_cnt || subflow->stale_count <= stale_loss_cnt)
1116 return;
1117
1118 /* look for another available subflow not in loss state */
1119 active_max_loss_cnt = max_t(int, stale_loss_cnt - 1, 1);
1120 mptcp_for_each_subflow(msk, iter) {
1121 if (iter != subflow && mptcp_subflow_active(iter) &&
1122 iter->stale_count < active_max_loss_cnt) {
1123 /* we have some alternatives, try to mark this subflow as idle ...*/
1124 slow = lock_sock_fast(ssk);
1125 if (!tcp_rtx_and_write_queues_empty(ssk)) {
1126 subflow->stale = 1;
1127 __mptcp_retransmit_pending_data(sk);
fc1b4e3b 1128 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_SUBFLOWSTALE);
ff5a0b42
PA
1129 }
1130 unlock_sock_fast(ssk, slow);
1131
1132 /* always try to push the pending data regarless of re-injections:
1133 * we can possibly use backup subflows now, and subflow selection
1134 * is cheap under the msk socket lock
1135 */
1136 __mptcp_push_pending(sk, 0);
1137 return;
1138 }
1139 }
1140}
1141
01cacb00
PA
1142static int mptcp_pm_family_to_addr(int family)
1143{
1144#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1145 if (family == AF_INET6)
1146 return MPTCP_PM_ADDR_ATTR_ADDR6;
1147#endif
1148 return MPTCP_PM_ADDR_ATTR_ADDR4;
1149}
1150
1151static int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info,
1152 bool require_family,
1153 struct mptcp_pm_addr_entry *entry)
1154{
1155 struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
1156 int err, addr_addr;
1157
1158 if (!attr) {
1159 GENL_SET_ERR_MSG(info, "missing address info");
1160 return -EINVAL;
1161 }
1162
1163 /* no validation needed - was already done via nested policy */
1164 err = nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr,
1165 mptcp_pm_addr_policy, info->extack);
1166 if (err)
1167 return err;
1168
1169 memset(entry, 0, sizeof(*entry));
1170 if (!tb[MPTCP_PM_ADDR_ATTR_FAMILY]) {
1171 if (!require_family)
1172 goto skip_family;
1173
1174 NL_SET_ERR_MSG_ATTR(info->extack, attr,
1175 "missing family");
1176 return -EINVAL;
1177 }
1178
1179 entry->addr.family = nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_FAMILY]);
1180 if (entry->addr.family != AF_INET
1181#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1182 && entry->addr.family != AF_INET6
1183#endif
1184 ) {
1185 NL_SET_ERR_MSG_ATTR(info->extack, attr,
1186 "unknown address family");
1187 return -EINVAL;
1188 }
1189 addr_addr = mptcp_pm_family_to_addr(entry->addr.family);
1190 if (!tb[addr_addr]) {
1191 NL_SET_ERR_MSG_ATTR(info->extack, attr,
1192 "missing address data");
1193 return -EINVAL;
1194 }
1195
1196#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1197 if (entry->addr.family == AF_INET6)
1198 entry->addr.addr6 = nla_get_in6_addr(tb[addr_addr]);
1199 else
1200#endif
1201 entry->addr.addr.s_addr = nla_get_in_addr(tb[addr_addr]);
1202
1203skip_family:
ef0da3b8
PA
1204 if (tb[MPTCP_PM_ADDR_ATTR_IF_IDX]) {
1205 u32 val = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]);
1206
daa83ab0 1207 entry->ifindex = val;
ef0da3b8 1208 }
01cacb00
PA
1209
1210 if (tb[MPTCP_PM_ADDR_ATTR_ID])
1211 entry->addr.id = nla_get_u8(tb[MPTCP_PM_ADDR_ATTR_ID]);
1212
1213 if (tb[MPTCP_PM_ADDR_ATTR_FLAGS])
daa83ab0 1214 entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]);
01cacb00 1215
09f12c3a 1216 if (tb[MPTCP_PM_ADDR_ATTR_PORT])
a77e9179
GT
1217 entry->addr.port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT]));
1218
01cacb00
PA
1219 return 0;
1220}
1221
1222static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info)
1223{
c682bf53 1224 return pm_nl_get_pernet(genl_info_net(info));
01cacb00
PA
1225}
1226
875b7671
GT
1227static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
1228{
1229 struct mptcp_sock *msk;
1230 long s_slot = 0, s_num = 0;
1231
1232 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1233 struct sock *sk = (struct sock *)msk;
1234
1235 if (!READ_ONCE(msk->fully_established))
1236 goto next;
1237
1238 lock_sock(sk);
1239 spin_lock_bh(&msk->pm.lock);
1240 mptcp_pm_create_subflow_or_signal_addr(msk);
1241 spin_unlock_bh(&msk->pm.lock);
1242 release_sock(sk);
1243
1244next:
1245 sock_put(sk);
1246 cond_resched();
1247 }
1248
1249 return 0;
1250}
1251
01cacb00
PA
1252static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
1253{
1254 struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
1255 struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
1256 struct mptcp_pm_addr_entry addr, *entry;
1257 int ret;
1258
1259 ret = mptcp_pm_parse_addr(attr, info, true, &addr);
1260 if (ret < 0)
1261 return ret;
1262
09f12c3a
GT
1263 if (addr.addr.port && !(addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
1264 GENL_SET_ERR_MSG(info, "flags must have signal when using port");
1265 return -EINVAL;
1266 }
1267
0dc626e5
GT
1268 if (addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL &&
1269 addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
1270 GENL_SET_ERR_MSG(info, "flags mustn't have both signal and fullmesh");
1271 return -EINVAL;
1272 }
1273
d045b9eb
PA
1274 if (addr.flags & MPTCP_PM_ADDR_FLAG_IMPLICIT) {
1275 GENL_SET_ERR_MSG(info, "can't create IMPLICIT endpoint");
1276 return -EINVAL;
1277 }
1278
01cacb00
PA
1279 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1280 if (!entry) {
1281 GENL_SET_ERR_MSG(info, "can't allocate addr");
1282 return -ENOMEM;
1283 }
1284
1285 *entry = addr;
1729cf18
GT
1286 if (entry->addr.port) {
1287 ret = mptcp_pm_nl_create_listen_socket(skb->sk, entry);
1288 if (ret) {
1289 GENL_SET_ERR_MSG(info, "create listen socket error");
1290 kfree(entry);
1291 return ret;
1292 }
1293 }
01cacb00
PA
1294 ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
1295 if (ret < 0) {
1296 GENL_SET_ERR_MSG(info, "too many addresses or duplicate one");
1729cf18
GT
1297 if (entry->lsk)
1298 sock_release(entry->lsk);
01cacb00
PA
1299 kfree(entry);
1300 return ret;
1301 }
1302
875b7671
GT
1303 mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk));
1304
01cacb00
PA
1305 return 0;
1306}
1307
ee285257
GT
1308int mptcp_pm_get_flags_and_ifindex_by_id(struct net *net, unsigned int id,
1309 u8 *flags, int *ifindex)
1310{
1311 struct mptcp_pm_addr_entry *entry;
1312
1313 *flags = 0;
1314 *ifindex = 0;
1315
1316 if (id) {
1317 rcu_read_lock();
c682bf53 1318 entry = __lookup_addr_by_id(pm_nl_get_pernet(net), id);
ee285257
GT
1319 if (entry) {
1320 *flags = entry->flags;
1321 *ifindex = entry->ifindex;
1322 }
1323 rcu_read_unlock();
1324 }
1325
1326 return 0;
1327}
1328
b6c08380 1329static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
90d93088 1330 const struct mptcp_addr_info *addr)
b6c08380 1331{
00cfd77b 1332 struct mptcp_pm_add_entry *entry;
b6c08380 1333
d58300c3 1334 entry = mptcp_pm_del_add_timer(msk, addr, false);
00cfd77b
GT
1335 if (entry) {
1336 list_del(&entry->list);
1337 kfree(entry);
1338 return true;
b6c08380
GT
1339 }
1340
1341 return false;
1342}
1343
1344static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
90d93088 1345 const struct mptcp_addr_info *addr,
b6c08380
GT
1346 bool force)
1347{
cbde2787 1348 struct mptcp_rm_list list = { .nr = 0 };
b6c08380
GT
1349 bool ret;
1350
cbde2787
GT
1351 list.ids[list.nr++] = addr->id;
1352
b6c08380 1353 ret = remove_anno_list_by_saddr(msk, addr);
00cfd77b
GT
1354 if (ret || force) {
1355 spin_lock_bh(&msk->pm.lock);
cbde2787 1356 mptcp_pm_remove_addr(msk, &list);
00cfd77b
GT
1357 spin_unlock_bh(&msk->pm.lock);
1358 }
b6c08380
GT
1359 return ret;
1360}
1361
1362static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
d045b9eb 1363 const struct mptcp_pm_addr_entry *entry)
b6c08380 1364{
d045b9eb 1365 const struct mptcp_addr_info *addr = &entry->addr;
ddd14bb8 1366 struct mptcp_rm_list list = { .nr = 0 };
d045b9eb
PA
1367 long s_slot = 0, s_num = 0;
1368 struct mptcp_sock *msk;
b6c08380
GT
1369
1370 pr_debug("remove_id=%d", addr->id);
1371
ddd14bb8
GT
1372 list.ids[list.nr++] = addr->id;
1373
b6c08380
GT
1374 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1375 struct sock *sk = (struct sock *)msk;
1376 bool remove_subflow;
1377
1378 if (list_empty(&msk->conn_list)) {
1379 mptcp_pm_remove_anno_addr(msk, addr, false);
1380 goto next;
1381 }
1382
1383 lock_sock(sk);
1384 remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
d045b9eb
PA
1385 mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
1386 !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
b6c08380 1387 if (remove_subflow)
ddd14bb8 1388 mptcp_pm_remove_subflow(msk, &list);
b6c08380
GT
1389 release_sock(sk);
1390
1391next:
1392 sock_put(sk);
1393 cond_resched();
1394 }
1395
1396 return 0;
1397}
1398
740d798e
GT
1399static int mptcp_nl_remove_id_zero_address(struct net *net,
1400 struct mptcp_addr_info *addr)
1401{
1402 struct mptcp_rm_list list = { .nr = 0 };
1403 long s_slot = 0, s_num = 0;
1404 struct mptcp_sock *msk;
1405
1406 list.ids[list.nr++] = 0;
1407
1408 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1409 struct sock *sk = (struct sock *)msk;
1410 struct mptcp_addr_info msk_local;
1411
1412 if (list_empty(&msk->conn_list))
1413 goto next;
1414
1415 local_address((struct sock_common *)msk, &msk_local);
1416 if (!addresses_equal(&msk_local, addr, addr->port))
1417 goto next;
1418
1419 lock_sock(sk);
1420 spin_lock_bh(&msk->pm.lock);
1421 mptcp_pm_remove_addr(msk, &list);
1422 mptcp_pm_nl_rm_subflow_received(msk, &list);
1423 spin_unlock_bh(&msk->pm.lock);
1424 release_sock(sk);
1425
1426next:
1427 sock_put(sk);
1428 cond_resched();
1429 }
1430
1431 return 0;
1432}
1433
01cacb00
PA
1434static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
1435{
1436 struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
1437 struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
1438 struct mptcp_pm_addr_entry addr, *entry;
72603d20 1439 unsigned int addr_max;
01cacb00
PA
1440 int ret;
1441
1442 ret = mptcp_pm_parse_addr(attr, info, false, &addr);
1443 if (ret < 0)
1444 return ret;
1445
740d798e
GT
1446 /* the zero id address is special: the first address used by the msk
1447 * always gets such an id, so different subflows can have different zero
1448 * id addresses. Additionally zero id is not accounted for in id_bitmap.
1449 * Let's use an 'mptcp_rm_list' instead of the common remove code.
1450 */
1451 if (addr.addr.id == 0)
1452 return mptcp_nl_remove_id_zero_address(sock_net(skb->sk), &addr.addr);
1453
01cacb00
PA
1454 spin_lock_bh(&pernet->lock);
1455 entry = __lookup_addr_by_id(pernet, addr.addr.id);
1456 if (!entry) {
1457 GENL_SET_ERR_MSG(info, "address not found");
b6c08380
GT
1458 spin_unlock_bh(&pernet->lock);
1459 return -EINVAL;
01cacb00 1460 }
daa83ab0 1461 if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
72603d20
GT
1462 addr_max = pernet->add_addr_signal_max;
1463 WRITE_ONCE(pernet->add_addr_signal_max, addr_max - 1);
1464 }
daa83ab0 1465 if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
72603d20
GT
1466 addr_max = pernet->local_addr_max;
1467 WRITE_ONCE(pernet->local_addr_max, addr_max - 1);
1468 }
01cacb00
PA
1469
1470 pernet->addrs--;
1471 list_del_rcu(&entry->list);
efd5a4c0 1472 __clear_bit(entry->addr.id, pernet->id_bitmap);
01cacb00 1473 spin_unlock_bh(&pernet->lock);
b6c08380 1474
d045b9eb 1475 mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), entry);
a0eea5f1
PA
1476 synchronize_rcu();
1477 __mptcp_pm_release_addr_entry(entry);
b6c08380 1478
01cacb00
PA
1479 return ret;
1480}
1481
06faa227
GT
1482static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
1483 struct list_head *rm_list)
1484{
1485 struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 };
1486 struct mptcp_pm_addr_entry *entry;
1487
1488 list_for_each_entry(entry, rm_list, list) {
1489 if (lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
6fa0174a 1490 slist.nr < MPTCP_RM_IDS_MAX)
06faa227 1491 slist.ids[slist.nr++] = entry->addr.id;
6fa0174a
PA
1492
1493 if (remove_anno_list_by_saddr(msk, &entry->addr) &&
1494 alist.nr < MPTCP_RM_IDS_MAX)
06faa227 1495 alist.ids[alist.nr++] = entry->addr.id;
06faa227
GT
1496 }
1497
1498 if (alist.nr) {
1499 spin_lock_bh(&msk->pm.lock);
1500 mptcp_pm_remove_addr(msk, &alist);
1501 spin_unlock_bh(&msk->pm.lock);
1502 }
1503 if (slist.nr)
1504 mptcp_pm_remove_subflow(msk, &slist);
1505}
1506
1507static void mptcp_nl_remove_addrs_list(struct net *net,
1508 struct list_head *rm_list)
1509{
1510 long s_slot = 0, s_num = 0;
1511 struct mptcp_sock *msk;
1512
1513 if (list_empty(rm_list))
1514 return;
1515
1516 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1517 struct sock *sk = (struct sock *)msk;
1518
1519 lock_sock(sk);
1520 mptcp_pm_remove_addrs_and_subflows(msk, rm_list);
1521 release_sock(sk);
1522
1523 sock_put(sk);
1524 cond_resched();
1525 }
1526}
1527
a0eea5f1 1528/* caller must ensure the RCU grace period is already elapsed */
0e4a3e68 1529static void __flush_addrs(struct list_head *list)
01cacb00 1530{
141694df 1531 while (!list_empty(list)) {
01cacb00
PA
1532 struct mptcp_pm_addr_entry *cur;
1533
141694df 1534 cur = list_entry(list->next,
01cacb00
PA
1535 struct mptcp_pm_addr_entry, list);
1536 list_del_rcu(&cur->list);
a0eea5f1 1537 __mptcp_pm_release_addr_entry(cur);
01cacb00
PA
1538 }
1539}
1540
1541static void __reset_counters(struct pm_nl_pernet *pernet)
1542{
72603d20
GT
1543 WRITE_ONCE(pernet->add_addr_signal_max, 0);
1544 WRITE_ONCE(pernet->add_addr_accept_max, 0);
1545 WRITE_ONCE(pernet->local_addr_max, 0);
01cacb00
PA
1546 pernet->addrs = 0;
1547}
1548
1549static int mptcp_nl_cmd_flush_addrs(struct sk_buff *skb, struct genl_info *info)
1550{
1551 struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
141694df 1552 LIST_HEAD(free_list);
01cacb00
PA
1553
1554 spin_lock_bh(&pernet->lock);
141694df 1555 list_splice_init(&pernet->local_addr_list, &free_list);
01cacb00 1556 __reset_counters(pernet);
efd5a4c0 1557 pernet->next_id = 1;
86e39e04 1558 bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
01cacb00 1559 spin_unlock_bh(&pernet->lock);
0e4a3e68 1560 mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list);
a0eea5f1 1561 synchronize_rcu();
0e4a3e68 1562 __flush_addrs(&free_list);
01cacb00
PA
1563 return 0;
1564}
1565
1566static int mptcp_nl_fill_addr(struct sk_buff *skb,
1567 struct mptcp_pm_addr_entry *entry)
1568{
1569 struct mptcp_addr_info *addr = &entry->addr;
1570 struct nlattr *attr;
1571
1572 attr = nla_nest_start(skb, MPTCP_PM_ATTR_ADDR);
1573 if (!attr)
1574 return -EMSGSIZE;
1575
1576 if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_FAMILY, addr->family))
1577 goto nla_put_failure;
a77e9179
GT
1578 if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_PORT, ntohs(addr->port)))
1579 goto nla_put_failure;
01cacb00
PA
1580 if (nla_put_u8(skb, MPTCP_PM_ADDR_ATTR_ID, addr->id))
1581 goto nla_put_failure;
daa83ab0 1582 if (nla_put_u32(skb, MPTCP_PM_ADDR_ATTR_FLAGS, entry->flags))
01cacb00 1583 goto nla_put_failure;
daa83ab0
GT
1584 if (entry->ifindex &&
1585 nla_put_s32(skb, MPTCP_PM_ADDR_ATTR_IF_IDX, entry->ifindex))
01cacb00
PA
1586 goto nla_put_failure;
1587
b4e0f9a9
BY
1588 if (addr->family == AF_INET &&
1589 nla_put_in_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR4,
1590 addr->addr.s_addr))
1591 goto nla_put_failure;
01cacb00 1592#if IS_ENABLED(CONFIG_MPTCP_IPV6)
b4e0f9a9
BY
1593 else if (addr->family == AF_INET6 &&
1594 nla_put_in6_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR6, &addr->addr6))
1595 goto nla_put_failure;
01cacb00
PA
1596#endif
1597 nla_nest_end(skb, attr);
1598 return 0;
1599
1600nla_put_failure:
1601 nla_nest_cancel(skb, attr);
1602 return -EMSGSIZE;
1603}
1604
1605static int mptcp_nl_cmd_get_addr(struct sk_buff *skb, struct genl_info *info)
1606{
1607 struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
1608 struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
1609 struct mptcp_pm_addr_entry addr, *entry;
1610 struct sk_buff *msg;
1611 void *reply;
1612 int ret;
1613
1614 ret = mptcp_pm_parse_addr(attr, info, false, &addr);
1615 if (ret < 0)
1616 return ret;
1617
1618 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1619 if (!msg)
1620 return -ENOMEM;
1621
1622 reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
1623 info->genlhdr->cmd);
1624 if (!reply) {
1625 GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
1626 ret = -EMSGSIZE;
1627 goto fail;
1628 }
1629
1630 spin_lock_bh(&pernet->lock);
1631 entry = __lookup_addr_by_id(pernet, addr.addr.id);
1632 if (!entry) {
1633 GENL_SET_ERR_MSG(info, "address not found");
1634 ret = -EINVAL;
1635 goto unlock_fail;
1636 }
1637
1638 ret = mptcp_nl_fill_addr(msg, entry);
1639 if (ret)
1640 goto unlock_fail;
1641
1642 genlmsg_end(msg, reply);
1643 ret = genlmsg_reply(msg, info);
1644 spin_unlock_bh(&pernet->lock);
1645 return ret;
1646
1647unlock_fail:
1648 spin_unlock_bh(&pernet->lock);
1649
1650fail:
1651 nlmsg_free(msg);
1652 return ret;
1653}
1654
1655static int mptcp_nl_cmd_dump_addrs(struct sk_buff *msg,
1656 struct netlink_callback *cb)
1657{
1658 struct net *net = sock_net(msg->sk);
1659 struct mptcp_pm_addr_entry *entry;
1660 struct pm_nl_pernet *pernet;
1661 int id = cb->args[0];
1662 void *hdr;
efd5a4c0 1663 int i;
01cacb00 1664
c682bf53 1665 pernet = pm_nl_get_pernet(net);
01cacb00
PA
1666
1667 spin_lock_bh(&pernet->lock);
86e39e04 1668 for (i = id; i < MPTCP_PM_MAX_ADDR_ID + 1; i++) {
efd5a4c0
GT
1669 if (test_bit(i, pernet->id_bitmap)) {
1670 entry = __lookup_addr_by_id(pernet, i);
1671 if (!entry)
1672 break;
1673
1674 if (entry->addr.id <= id)
1675 continue;
1676
1677 hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid,
1678 cb->nlh->nlmsg_seq, &mptcp_genl_family,
1679 NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR);
1680 if (!hdr)
1681 break;
1682
1683 if (mptcp_nl_fill_addr(msg, entry) < 0) {
1684 genlmsg_cancel(msg, hdr);
1685 break;
1686 }
01cacb00 1687
efd5a4c0
GT
1688 id = entry->addr.id;
1689 genlmsg_end(msg, hdr);
01cacb00 1690 }
01cacb00
PA
1691 }
1692 spin_unlock_bh(&pernet->lock);
1693
1694 cb->args[0] = id;
1695 return msg->len;
1696}
1697
1698static int parse_limit(struct genl_info *info, int id, unsigned int *limit)
1699{
1700 struct nlattr *attr = info->attrs[id];
1701
1702 if (!attr)
1703 return 0;
1704
1705 *limit = nla_get_u32(attr);
1706 if (*limit > MPTCP_PM_ADDR_MAX) {
1707 GENL_SET_ERR_MSG(info, "limit greater than maximum");
1708 return -EINVAL;
1709 }
1710 return 0;
1711}
1712
1713static int
1714mptcp_nl_cmd_set_limits(struct sk_buff *skb, struct genl_info *info)
1715{
1716 struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
1717 unsigned int rcv_addrs, subflows;
1718 int ret;
1719
1720 spin_lock_bh(&pernet->lock);
1721 rcv_addrs = pernet->add_addr_accept_max;
1722 ret = parse_limit(info, MPTCP_PM_ATTR_RCV_ADD_ADDRS, &rcv_addrs);
1723 if (ret)
1724 goto unlock;
1725
1726 subflows = pernet->subflows_max;
1727 ret = parse_limit(info, MPTCP_PM_ATTR_SUBFLOWS, &subflows);
1728 if (ret)
1729 goto unlock;
1730
1731 WRITE_ONCE(pernet->add_addr_accept_max, rcv_addrs);
1732 WRITE_ONCE(pernet->subflows_max, subflows);
1733
1734unlock:
1735 spin_unlock_bh(&pernet->lock);
1736 return ret;
1737}
1738
1739static int
1740mptcp_nl_cmd_get_limits(struct sk_buff *skb, struct genl_info *info)
1741{
1742 struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
1743 struct sk_buff *msg;
1744 void *reply;
1745
1746 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1747 if (!msg)
1748 return -ENOMEM;
1749
1750 reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
1751 MPTCP_PM_CMD_GET_LIMITS);
1752 if (!reply)
1753 goto fail;
1754
1755 if (nla_put_u32(msg, MPTCP_PM_ATTR_RCV_ADD_ADDRS,
1756 READ_ONCE(pernet->add_addr_accept_max)))
1757 goto fail;
1758
1759 if (nla_put_u32(msg, MPTCP_PM_ATTR_SUBFLOWS,
1760 READ_ONCE(pernet->subflows_max)))
1761 goto fail;
1762
1763 genlmsg_end(msg, reply);
1764 return genlmsg_reply(msg, info);
1765
1766fail:
1767 GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
1768 nlmsg_free(msg);
1769 return -EMSGSIZE;
1770}
1771
73c762c1
GT
1772static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
1773 struct mptcp_addr_info *addr)
1774{
1775 struct mptcp_rm_list list = { .nr = 0 };
1776
1777 list.ids[list.nr++] = addr->id;
1778
1779 mptcp_pm_nl_rm_subflow_received(msk, &list);
1780 mptcp_pm_create_subflow_or_signal_addr(msk);
1781}
1782
1783static int mptcp_nl_set_flags(struct net *net,
1784 struct mptcp_addr_info *addr,
1785 u8 bkup, u8 changed)
0f9f696a
GT
1786{
1787 long s_slot = 0, s_num = 0;
1788 struct mptcp_sock *msk;
1789 int ret = -EINVAL;
1790
1791 while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1792 struct sock *sk = (struct sock *)msk;
1793
1794 if (list_empty(&msk->conn_list))
1795 goto next;
1796
1797 lock_sock(sk);
1798 spin_lock_bh(&msk->pm.lock);
73c762c1
GT
1799 if (changed & MPTCP_PM_ADDR_FLAG_BACKUP)
1800 ret = mptcp_pm_nl_mp_prio_send_ack(msk, addr, bkup);
1801 if (changed & MPTCP_PM_ADDR_FLAG_FULLMESH)
1802 mptcp_pm_nl_fullmesh(msk, addr);
0f9f696a
GT
1803 spin_unlock_bh(&msk->pm.lock);
1804 release_sock(sk);
1805
1806next:
1807 sock_put(sk);
1808 cond_resched();
1809 }
1810
1811 return ret;
1812}
1813
1814static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info)
1815{
602837e8 1816 struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, }, *entry;
0f9f696a
GT
1817 struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
1818 struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
73c762c1
GT
1819 u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP |
1820 MPTCP_PM_ADDR_FLAG_FULLMESH;
0f9f696a 1821 struct net *net = sock_net(skb->sk);
602837e8 1822 u8 bkup = 0, lookup_by_id = 0;
0f9f696a
GT
1823 int ret;
1824
1825	ret = mptcp_pm_parse_addr(attr, info, false, &addr);
1826 if (ret < 0)
1827 return ret;
1828
1829	if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
1830		bkup = 1;
1831 if (addr.addr.family == AF_UNSPEC) {
1832 lookup_by_id = 1;
1833 if (!addr.addr.id)
1834 return -EOPNOTSUPP;
1835 }
1836
1837 spin_lock_bh(&pernet->lock);
1838 entry = __lookup_addr(pernet, &addr.addr, lookup_by_id);
1839 if (!entry) {
1840 spin_unlock_bh(&pernet->lock);
1841 return -EINVAL;
1842	}
1843 if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
1844 (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
1845 spin_unlock_bh(&pernet->lock);
1846 return -EINVAL;
1847 }
1848
1849 changed = (addr.flags ^ entry->flags) & mask;
1850 entry->flags = (entry->flags & ~mask) | (addr.flags & mask);
1851 addr = *entry;
1852 spin_unlock_bh(&pernet->lock);
1853
1854	mptcp_nl_set_flags(net, &addr.addr, bkup, changed);
1855 return 0;
1856}
1857
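/* Event reporting: the helpers below multicast path-manager events to the
 * MPTCP_PM_EV_GRP_OFFSET group ("mptcp_pm_events"). Messages are only built
 * when genl_has_listeners() reports a subscriber, avoiding needless
 * allocations; "ip mptcp monitor" is one such consumer, assuming an
 * MPTCP-aware iproute2.
 */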
1858static void mptcp_nl_mcast_send(struct net *net, struct sk_buff *nlskb, gfp_t gfp)
1859{
1860 genlmsg_multicast_netns(&mptcp_genl_family, net,
1861 nlskb, 0, MPTCP_PM_EV_GRP_OFFSET, gfp);
1862}
1863
1864static int mptcp_event_add_subflow(struct sk_buff *skb, const struct sock *ssk)
1865{
1866 const struct inet_sock *issk = inet_sk(ssk);
1867 const struct mptcp_subflow_context *sf;
1868
1869 if (nla_put_u16(skb, MPTCP_ATTR_FAMILY, ssk->sk_family))
1870 return -EMSGSIZE;
1871
1872 switch (ssk->sk_family) {
1873 case AF_INET:
1874 if (nla_put_in_addr(skb, MPTCP_ATTR_SADDR4, issk->inet_saddr))
1875 return -EMSGSIZE;
1876 if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, issk->inet_daddr))
1877 return -EMSGSIZE;
1878 break;
1879#if IS_ENABLED(CONFIG_MPTCP_IPV6)
1880 case AF_INET6: {
1881 const struct ipv6_pinfo *np = inet6_sk(ssk);
1882
1883 if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &np->saddr))
1884 return -EMSGSIZE;
1885 if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &ssk->sk_v6_daddr))
1886 return -EMSGSIZE;
1887 break;
1888 }
1889#endif
1890 default:
1891 WARN_ON_ONCE(1);
1892 return -EMSGSIZE;
1893 }
1894
1895 if (nla_put_be16(skb, MPTCP_ATTR_SPORT, issk->inet_sport))
1896 return -EMSGSIZE;
1897 if (nla_put_be16(skb, MPTCP_ATTR_DPORT, issk->inet_dport))
1898 return -EMSGSIZE;
1899
1900 sf = mptcp_subflow_ctx(ssk);
1901 if (WARN_ON_ONCE(!sf))
1902 return -EINVAL;
1903
1904 if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, sf->local_id))
1905 return -EMSGSIZE;
1906
1907 if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, sf->remote_id))
1908 return -EMSGSIZE;
1909
1910 return 0;
1911}
1912
1913static int mptcp_event_put_token_and_ssk(struct sk_buff *skb,
1914 const struct mptcp_sock *msk,
1915 const struct sock *ssk)
1916{
1917 const struct sock *sk = (const struct sock *)msk;
1918 const struct mptcp_subflow_context *sf;
1919 u8 sk_err;
1920
1921 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
1922 return -EMSGSIZE;
1923
1924 if (mptcp_event_add_subflow(skb, ssk))
1925 return -EMSGSIZE;
1926
1927 sf = mptcp_subflow_ctx(ssk);
1928 if (WARN_ON_ONCE(!sf))
1929 return -EINVAL;
1930
1931 if (nla_put_u8(skb, MPTCP_ATTR_BACKUP, sf->backup))
1932 return -EMSGSIZE;
1933
1934 if (ssk->sk_bound_dev_if &&
1935 nla_put_s32(skb, MPTCP_ATTR_IF_IDX, ssk->sk_bound_dev_if))
1936 return -EMSGSIZE;
1937
1938 sk_err = ssk->sk_err;
1939 if (sk_err && sk->sk_state == TCP_ESTABLISHED &&
1940 nla_put_u8(skb, MPTCP_ATTR_ERROR, sk_err))
1941 return -EMSGSIZE;
1942
1943 return 0;
1944}
1945
1946static int mptcp_event_sub_established(struct sk_buff *skb,
1947 const struct mptcp_sock *msk,
1948 const struct sock *ssk)
1949{
1950 return mptcp_event_put_token_and_ssk(skb, msk, ssk);
1951}
1952
1953static int mptcp_event_sub_closed(struct sk_buff *skb,
1954 const struct mptcp_sock *msk,
1955 const struct sock *ssk)
1956{
1957 const struct mptcp_subflow_context *sf;
1958
1959 if (mptcp_event_put_token_and_ssk(skb, msk, ssk))
1960 return -EMSGSIZE;
1961
1962 sf = mptcp_subflow_ctx(ssk);
1963 if (!sf->reset_seen)
1964 return 0;
1965
1966 if (nla_put_u32(skb, MPTCP_ATTR_RESET_REASON, sf->reset_reason))
1967 return -EMSGSIZE;
1968
1969 if (nla_put_u32(skb, MPTCP_ATTR_RESET_FLAGS, sf->reset_transient))
1970 return -EMSGSIZE;
1971
1972 return 0;
1973}
1974
1975static int mptcp_event_created(struct sk_buff *skb,
1976 const struct mptcp_sock *msk,
1977 const struct sock *ssk)
1978{
1979 int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token);
1980
1981 if (err)
1982 return err;
1983
1984 return mptcp_event_add_subflow(skb, ssk);
1985}
1986
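/* ADD_ADDR/RM_ADDR events carry an address (id) rather than a subflow, so
 * they get dedicated builders instead of going through mptcp_event(). Both
 * run in atomic context and therefore allocate with GFP_ATOMIC.
 */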
1987void mptcp_event_addr_removed(const struct mptcp_sock *msk, uint8_t id)
1988{
1989 struct net *net = sock_net((const struct sock *)msk);
1990 struct nlmsghdr *nlh;
1991 struct sk_buff *skb;
1992
1993 if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
1994 return;
1995
1996 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1997 if (!skb)
1998 return;
1999
2000 nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, MPTCP_EVENT_REMOVED);
2001 if (!nlh)
2002 goto nla_put_failure;
2003
2004 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
2005 goto nla_put_failure;
2006
2007 if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, id))
2008 goto nla_put_failure;
2009
2010 genlmsg_end(skb, nlh);
2011 mptcp_nl_mcast_send(net, skb, GFP_ATOMIC);
2012 return;
2013
2014nla_put_failure:
2015 kfree_skb(skb);
2016}
2017
2018void mptcp_event_addr_announced(const struct mptcp_sock *msk,
2019 const struct mptcp_addr_info *info)
2020{
2021 struct net *net = sock_net((const struct sock *)msk);
2022 struct nlmsghdr *nlh;
2023 struct sk_buff *skb;
2024
2025 if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
2026 return;
2027
2028 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
2029 if (!skb)
2030 return;
2031
2032 nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0,
2033 MPTCP_EVENT_ANNOUNCED);
2034 if (!nlh)
2035 goto nla_put_failure;
2036
2037 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
2038 goto nla_put_failure;
2039
2040 if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, info->id))
2041 goto nla_put_failure;
2042
2043 if (nla_put_be16(skb, MPTCP_ATTR_DPORT, info->port))
2044 goto nla_put_failure;
2045
2046 switch (info->family) {
2047 case AF_INET:
2048 if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, info->addr.s_addr))
2049 goto nla_put_failure;
2050 break;
2051#if IS_ENABLED(CONFIG_MPTCP_IPV6)
2052 case AF_INET6:
2053 if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &info->addr6))
2054 goto nla_put_failure;
2055 break;
2056#endif
2057 default:
2058 WARN_ON_ONCE(1);
2059 goto nla_put_failure;
2060 }
2061
2062 genlmsg_end(skb, nlh);
2063 mptcp_nl_mcast_send(net, skb, GFP_ATOMIC);
2064 return;
2065
2066nla_put_failure:
2067 kfree_skb(skb);
2068}
2069
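/* Generic event dispatcher: CREATED/ESTABLISHED carry the token plus the
 * initial subflow, CLOSED only the token, SUB_* events the token plus the
 * full subflow description. ANNOUNCED/REMOVED must use the dedicated
 * helpers above. @gfp is provided by the caller, as some events fire from
 * atomic context.
 */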
2070void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
2071 const struct sock *ssk, gfp_t gfp)
2072{
2073 struct net *net = sock_net((const struct sock *)msk);
2074 struct nlmsghdr *nlh;
2075 struct sk_buff *skb;
2076
2077 if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
2078 return;
2079
2080 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
2081 if (!skb)
2082 return;
2083
2084 nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, type);
2085 if (!nlh)
2086 goto nla_put_failure;
2087
2088 switch (type) {
2089 case MPTCP_EVENT_UNSPEC:
2090 WARN_ON_ONCE(1);
2091 break;
2092 case MPTCP_EVENT_CREATED:
2093 case MPTCP_EVENT_ESTABLISHED:
2094 if (mptcp_event_created(skb, msk, ssk) < 0)
2095 goto nla_put_failure;
2096 break;
2097 case MPTCP_EVENT_CLOSED:
2098 if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token) < 0)
2099 goto nla_put_failure;
2100 break;
2101 case MPTCP_EVENT_ANNOUNCED:
2102 case MPTCP_EVENT_REMOVED:
2103 /* call mptcp_event_addr_announced()/removed instead */
2104 WARN_ON_ONCE(1);
2105 break;
2106 case MPTCP_EVENT_SUB_ESTABLISHED:
2107 case MPTCP_EVENT_SUB_PRIORITY:
2108 if (mptcp_event_sub_established(skb, msk, ssk) < 0)
2109 goto nla_put_failure;
2110 break;
2111 case MPTCP_EVENT_SUB_CLOSED:
2112 if (mptcp_event_sub_closed(skb, msk, ssk) < 0)
2113 goto nla_put_failure;
2114 break;
2115 }
2116
2117 genlmsg_end(skb, nlh);
2118 mptcp_nl_mcast_send(net, skb, gfp);
2119 return;
2120
2121nla_put_failure:
2122 kfree_skb(skb);
2123}
2124
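/* Netlink command table: all state-changing commands require
 * GENL_ADMIN_PERM (CAP_NET_ADMIN), while GET_ADDR and GET_LIMITS are
 * readable without privileges; GET_ADDR also provides a dumpit callback to
 * list every configured endpoint.
 */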
2125static const struct genl_small_ops mptcp_pm_ops[] = {
2126 {
2127 .cmd = MPTCP_PM_CMD_ADD_ADDR,
2128 .doit = mptcp_nl_cmd_add_addr,
2129 .flags = GENL_ADMIN_PERM,
2130 },
2131 {
2132 .cmd = MPTCP_PM_CMD_DEL_ADDR,
2133 .doit = mptcp_nl_cmd_del_addr,
2134 .flags = GENL_ADMIN_PERM,
2135 },
2136 {
2137 .cmd = MPTCP_PM_CMD_FLUSH_ADDRS,
2138 .doit = mptcp_nl_cmd_flush_addrs,
2139 .flags = GENL_ADMIN_PERM,
2140 },
2141 {
2142 .cmd = MPTCP_PM_CMD_GET_ADDR,
2143 .doit = mptcp_nl_cmd_get_addr,
2144 .dumpit = mptcp_nl_cmd_dump_addrs,
2145 },
2146 {
2147 .cmd = MPTCP_PM_CMD_SET_LIMITS,
2148 .doit = mptcp_nl_cmd_set_limits,
2149 .flags = GENL_ADMIN_PERM,
2150 },
2151 {
2152 .cmd = MPTCP_PM_CMD_GET_LIMITS,
2153 .doit = mptcp_nl_cmd_get_limits,
2154 },
2155 {
2156 .cmd = MPTCP_PM_CMD_SET_FLAGS,
2157 .doit = mptcp_nl_cmd_set_flags,
2158 .flags = GENL_ADMIN_PERM,
2159 },
2160};
2161
2162static struct genl_family mptcp_genl_family __ro_after_init = {
2163 .name = MPTCP_PM_NAME,
2164 .version = MPTCP_PM_VER,
2165 .maxattr = MPTCP_PM_ATTR_MAX,
2166 .policy = mptcp_pm_policy,
2167 .netnsok = true,
2168 .module = THIS_MODULE,
2169 .small_ops = mptcp_pm_ops,
2170 .n_small_ops = ARRAY_SIZE(mptcp_pm_ops),
2171 .mcgrps = mptcp_pm_mcgrps,
2172 .n_mcgrps = ARRAY_SIZE(mptcp_pm_mcgrps),
2173};
2174
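/* Per-netns setup/teardown: the pernet struct is allocated (zeroed) by the
 * core based on .size, so only the non-zero defaults are set here: at most
 * 2 subflows, a stale_loss_cnt of 4 and address ids starting at 1. On netns
 * exit the whole local address list is flushed.
 */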
2175static int __net_init pm_nl_init_net(struct net *net)
2176{
2177	struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);
2178
2179 INIT_LIST_HEAD_RCU(&pernet->local_addr_list);
2180
2181 /* Cit. 2 subflows ought to be enough for anybody. */
2182 pernet->subflows_max = 2;
2183	pernet->next_id = 1;
2184	pernet->stale_loss_cnt = 4;
2185	spin_lock_init(&pernet->lock);
2186
2187 /* No need to initialize other pernet fields, the struct is zeroed at
2188 * allocation time.
2189 */
2190
2191 return 0;
2192}
2193
2194static void __net_exit pm_nl_exit_net(struct list_head *net_list)
2195{
2196 struct net *net;
2197
2198 list_for_each_entry(net, net_list, exit_list) {
2199		struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);
2200
2201		/* net is removed from namespace list, can't race with
2202 * other modifiers, also netns core already waited for a
2203 * RCU grace period.
2204		 */
2205		__flush_addrs(&pernet->local_addr_list);
2206 }
2207}
2208
2209static struct pernet_operations mptcp_pm_pernet_ops = {
2210 .init = pm_nl_init_net,
2211 .exit_batch = pm_nl_exit_net,
2212 .id = &pm_nl_pernet_id,
2213 .size = sizeof(struct pm_nl_pernet),
2214};
2215
2216void __init mptcp_pm_nl_init(void)
2217{
2218 if (register_pernet_subsys(&mptcp_pm_pernet_ops) < 0)
2219 panic("Failed to register MPTCP PM pernet subsystem.\n");
2220
2221 if (genl_register_family(&mptcp_genl_family))
2222 panic("Failed to register MPTCP PM netlink family\n");
2223}